ipfailover.go
package ipfailover

import (
	"fmt"
	"io"
	"os"
	"strings"

	"github.com/spf13/cobra"

	"k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	kapi "k8s.io/kubernetes/pkg/api"
	kclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"
	kcmdutil "k8s.io/kubernetes/pkg/kubectl/cmd/util"

	"github.com/openshift/origin/pkg/cmd/server/bootstrappolicy"
	"github.com/openshift/origin/pkg/cmd/templates"
	cmdutil "github.com/openshift/origin/pkg/cmd/util"
	"github.com/openshift/origin/pkg/cmd/util/clientcmd"
	"github.com/openshift/origin/pkg/cmd/util/variable"
	configcmd "github.com/openshift/origin/pkg/config/cmd"
	"github.com/openshift/origin/pkg/ipfailover"
	"github.com/openshift/origin/pkg/ipfailover/keepalived"
	"github.com/openshift/origin/pkg/security/legacyclient"
)
var (
	ipFailover_long = templates.LongDesc(`
		Configure or view IP Failover configuration

		This command helps to set up an IP failover configuration for the
		cluster. An administrator can configure IP failover on an entire
		cluster or on a subset of nodes (as defined via a labeled selector).

		If an IP failover configuration does not exist with the given name,
		the --create flag can be passed to create a deployment configuration that
		will provide IP failover capability. If you are running in production, it is
		recommended that the labeled selector for the nodes matches at least 2 nodes
		to ensure you have failover protection, and that you provide a --replicas=<n>
		value that matches the number of nodes for the given labeled selector.`)

	ipFailover_example = templates.Examples(`
		# Check the default IP failover configuration ("ipfailover"):
		%[1]s %[2]s

		# See what the IP failover configuration would look like if it is created:
		%[1]s %[2]s -o json

		# Create an IP failover configuration if it does not already exist:
		%[1]s %[2]s ipf --virtual-ips="10.1.1.1-4" --create

		# Create an IP failover configuration on a selection of nodes labeled
		# "router=us-west-ha" (on 4 nodes with 7 virtual IPs monitoring a service
		# listening on port 80, such as the router process).
		%[1]s %[2]s ipfailover --selector="router=us-west-ha" --virtual-ips="1.2.3.4,10.1.1.100-104,5.6.7.8" --watch-port=80 --replicas=4 --create

		# Use a different IP failover config image and see the configuration:
		%[1]s %[2]s ipf-alt --selector="hagroup=us-west-ha" --virtual-ips="1.2.3.4" -o yaml --images=myrepo/myipfailover:mytag`)
)
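
// NewCmdIPFailoverConfig builds the cobra command that configures (or, with -o,
// previews) an IP failover deployment. parentName and name are used to render
// the usage and example text; command output is written to out and errout.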
func NewCmdIPFailoverConfig(f *clientcmd.Factory, parentName, name string, out, errout io.Writer) *cobra.Command {
	options := &ipfailover.IPFailoverConfigCmdOptions{
		Action: configcmd.BulkAction{
			Out:    out,
			ErrOut: errout,
		},
		ImageTemplate:    variable.NewDefaultImageTemplate(),
		ServiceAccount:   "ipfailover",
		Selector:         ipfailover.DefaultSelector,
		ServicePort:      ipfailover.DefaultServicePort,
		WatchPort:        ipfailover.DefaultWatchPort,
		NetworkInterface: ipfailover.DefaultInterface,
		VRRPIDOffset:     0,
		Replicas:         1,
	}

	cmd := &cobra.Command{
		Use:     fmt.Sprintf("%s [NAME]", name),
		Short:   "Install an IP failover group to a set of nodes",
		Long:    ipFailover_long,
		Example: fmt.Sprintf(ipFailover_example, parentName, name),
		Run: func(cmd *cobra.Command, args []string) {
			err := Run(f, options, cmd, args)
			if err == cmdutil.ErrExit {
				os.Exit(1)
			}
			kcmdutil.CheckErr(err)
		},
	}

	cmd.Flags().StringVar(&options.Type, "type", ipfailover.DefaultType, "The type of IP failover configurator to use.")
	cmd.Flags().StringVar(&options.ImageTemplate.Format, "images", options.ImageTemplate.Format, "The image to base this IP failover configurator on - ${component} will be replaced based on --type.")
	cmd.Flags().BoolVar(&options.ImageTemplate.Latest, "latest-images", options.ImageTemplate.Latest, "If true, attempt to use the latest images instead of the current release")
	cmd.Flags().StringVarP(&options.Selector, "selector", "l", options.Selector, "Selector (label query) to filter nodes on.")
	cmd.Flags().StringVar(&options.ServiceAccount, "service-account", options.ServiceAccount, "Name of the service account to use to run the ipfailover pod.")
	cmd.Flags().BoolVar(&options.Create, "create", options.Create, "If true, create the configuration if it does not exist.")
	cmd.Flags().StringVar(&options.VirtualIPs, "virtual-ips", "", "A set of virtual IP ranges and/or addresses that the routers bind and serve on and provide IP failover capability for.")
	cmd.Flags().StringVar(&options.NotifyScript, "notify-script", "", "Run this script when state changes.")
	cmd.Flags().StringVar(&options.CheckScript, "check-script", "", "Run this script at the check-interval to verify service is OK")
	cmd.Flags().IntVar(&options.CheckInterval, "check-interval", ipfailover.DefaultCheckInterval, "Run the check-script at this interval (seconds)")
	cmd.Flags().StringVar(&options.Preemption, "preemption-strategy", "preempt_delay 300", "Normally VRRP will preempt a lower priority machine when a higher priority one comes online. 'nopreempt' allows the lower priority machine to maintain its MASTER status. The default 'preempt_delay 300' causes MASTER to switch after 5 min.")
	cmd.Flags().StringVar(&options.IptablesChain, "iptables-chain", ipfailover.DefaultIptablesChain, "Add a rule to this iptables chain to accept 224.0.0.28 multicast packets if no rule exists. When iptables-chain is empty do not change iptables.")
	cmd.Flags().StringVarP(&options.NetworkInterface, "interface", "i", "", "Network interface bound by VRRP to use for the set of virtual IP ranges/addresses specified.")
	cmd.Flags().IntVarP(&options.WatchPort, "watch-port", "w", ipfailover.DefaultWatchPort, "Port to monitor or watch for resource availability.")
	cmd.Flags().IntVar(&options.VRRPIDOffset, "vrrp-id-offset", options.VRRPIDOffset, "Offset to use for setting ids of VRRP instances (default offset is 0). This allows multiple ipfailover instances to run within the same cluster.")
	cmd.Flags().Int32VarP(&options.Replicas, "replicas", "r", options.Replicas, "The replication factor of this IP failover configuration; commonly 2 when high availability is desired. Please ensure this matches the number of nodes that satisfy the selector (or default selector) specified.")

	options.Action.BindForOutput(cmd.Flags())
	cmd.Flags().String("output-version", "", "The preferred API versions of the output objects")

	return cmd
}
// Get configuration name - argv[1].
func getConfigurationName(args []string) (string, error) {
	name := ipfailover.DefaultName

	switch len(args) {
	case 0:
		// Do nothing - use default name.
	case 1:
		name = args[0]
	default:
		return "", fmt.Errorf("Please pass zero or one arguments to provide a name for this configuration.")
	}

	return name, nil
}
// Get the configurator based on the ipfailover type.
func getPlugin(name string, f *clientcmd.Factory, options *ipfailover.IPFailoverConfigCmdOptions) (ipfailover.IPFailoverConfiguratorPlugin, error) {
	if options.Type == ipfailover.DefaultType {
		plugin, err := keepalived.NewIPFailoverConfiguratorPlugin(name, f, options)
		if err != nil {
			return nil, fmt.Errorf("IPFailoverConfigurator %q plugin error: %v", options.Type, err)
		}
		return plugin, nil
	}

	return nil, fmt.Errorf("No plugins available to handle type %q", options.Type)
}
// Run runs the ipfailover command.
func Run(f *clientcmd.Factory, options *ipfailover.IPFailoverConfigCmdOptions, cmd *cobra.Command, args []string) error {
	name, err := getConfigurationName(args)
	if err != nil {
		return err
	}

	if options.VRRPIDOffset < 0 || options.VRRPIDOffset > 254 {
		return fmt.Errorf("The vrrp-id-offset must be in the range 0..254")
	}

	// The ipfailover pods for a given configuration must run on different nodes.
	// We are using the ServicePort as a mechanism to prevent multiple pods for
	// same configuration starting on the same node. Since pods for different
	// configurations can run on the same node a different ServicePort is used
	// for each configuration.
	// In the future, this may be changed to pod anti-affinity.
	options.ServicePort = options.ServicePort + options.VRRPIDOffset

	options.Action.Bulk.Mapper = clientcmd.ResourceMapper(f)
	options.Action.Bulk.Op = configcmd.Create

	if err := ipfailover.ValidateCmdOptions(options); err != nil {
		return err
	}

	p, err := getPlugin(name, f, options)
	if err != nil {
		return err
	}

	list, err := p.Generate()
	if err != nil {
		return err
	}

	namespace, _, err := f.DefaultNamespace()
	if err != nil {
		return err
	}

	_, kClient, err := f.Clients()
	if err != nil {
		return fmt.Errorf("error getting client: %v", err)
	}

	if err := validateServiceAccount(kClient, namespace, options.ServiceAccount); err != nil {
		return fmt.Errorf("ipfailover could not be created; %v", err)
	}
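
	// Prepend the service account object so it is created along with the
	// generated ipfailover resources that reference it.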
	configList := []runtime.Object{
		&kapi.ServiceAccount{ObjectMeta: metav1.ObjectMeta{Name: options.ServiceAccount}},
	}

	list.Items = append(configList, list.Items...)

	if options.Action.ShouldPrint() {
		mapper, _ := f.Object()
		return cmdutil.VersionedPrintObject(f.PrintObject, cmd, mapper, options.Action.Out)(list)
	}

	if errs := options.Action.WithMessage(fmt.Sprintf("Creating IP failover %s", name), "created").Run(list, namespace); len(errs) > 0 {
		return cmdutil.ErrExit
	}

	return nil
}
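
// validateServiceAccount checks that the given service account is listed as a
// user of at least one security context constraint that allows privileged
// containers, which the ipfailover (keepalived) pod requires to manage
// virtual IPs on the host.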
func validateServiceAccount(client kclientset.Interface, ns string, serviceAccount string) error {
	sccList, err := legacyclient.NewFromClient(client.Core().RESTClient()).List(metav1.ListOptions{})
	if err != nil {
		if !errors.IsUnauthorized(err) {
			return fmt.Errorf("could not retrieve list of security constraints to verify service account %q: %v", serviceAccount, err)
		}
	}

	for _, scc := range sccList.Items {
		if scc.AllowPrivilegedContainer {
			for _, user := range scc.Users {
				if strings.Contains(user, serviceAccount) {
					return nil
				}
			}
		}
	}

	errMsg := "service account %q does not have sufficient privileges, grant access with oadm policy add-scc-to-user %s -z %s"
	return fmt.Errorf(errMsg, serviceAccount, bootstrappolicy.SecurityContextConstraintPrivileged, serviceAccount)
}