master_node.go
package cluster

import (
	"errors"
	"fmt"
	"net"
	"net/url"
	"strings"

	kapi "k8s.io/kubernetes/pkg/api"
	kclient "k8s.io/kubernetes/pkg/client/unversioned"

	authorizationapi "github.com/openshift/origin/pkg/authorization/api"
	osclient "github.com/openshift/origin/pkg/client"
	configapilatest "github.com/openshift/origin/pkg/cmd/server/api/latest"
	"github.com/openshift/origin/pkg/diagnostics/types"
)

const masterNotRunningAsANode = `Unable to find a node matching the cluster server IP.
This may indicate the master is not also running a node, and is unable
to proxy to pods over the Open vSwitch SDN.
`

const ovsSubnetPluginName = "redhat/openshift-ovs-subnet"
const ovsMultiTenantPluginName = "redhat/openshift-ovs-multitenant"

// MasterNode is a Diagnostic for checking that the OpenShift master is also running as a node.
// This is currently required for the master to be on the Open vSwitch SDN and able to
// communicate with other nodes.
type MasterNode struct {
	KubeClient       *kclient.Client
	OsClient         *osclient.Client
	ServerUrl        string
	MasterConfigFile string // may often be empty if not being run on the host
}

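// A minimal sketch of how this diagnostic might be driven (illustrative only:
// kubeClient, osClient, and serverURL are hypothetical, pre-built values, and
// origin actually wires diagnostics up through its own framework):
//
//	d := &MasterNode{KubeClient: kubeClient, OsClient: osClient, ServerUrl: serverURL}
//	if ok, err := d.CanRun(); err == nil && ok {
//		result := d.Check()
//		_ = result // inspect for DClu3003 (info) vs. DClu3004 (warning)
//	}
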
const MasterNodeName = "MasterNode"

func (d *MasterNode) Name() string {
	return MasterNodeName
}

func (d *MasterNode) Description() string {
	return "Check if master is also running a node (for Open vSwitch)"
}

func (d *MasterNode) CanRun() (bool, error) {
	if d.KubeClient == nil || d.OsClient == nil {
		return false, errors.New("must have kube and os client")
	}
	if d.ServerUrl == "" {
		return false, errors.New("must have a server URL")
	}

	// If there is a master config file available, we'll perform an additional
	// check to see if an OVS network plugin is in use. If there is no master
	// config, we assume an OVS plugin for now and let the check run anyhow.
	if len(d.MasterConfigFile) > 0 {
		// Parse the master config and check the network plugin name:
		masterCfg, masterErr := configapilatest.ReadAndResolveMasterConfig(d.MasterConfigFile)
		if masterErr != nil {
			return false, types.DiagnosticError{ID: "DClu3008",
				LogMessage: fmt.Sprintf("Master config provided but unable to parse: %s", masterErr), Cause: masterErr}
		}
		networkPluginName := masterCfg.NetworkConfig.NetworkPluginName

		// Make sure this is an OVS network plugin:
		ovsNetworkPlugins := [2]string{ovsSubnetPluginName, ovsMultiTenantPluginName}
		usingOvsNetworkPlugin := false
		for _, plugin := range ovsNetworkPlugins {
			if plugin == networkPluginName {
				usingOvsNetworkPlugin = true
			}
		}
		if !usingOvsNetworkPlugin {
			return false, fmt.Errorf("Network plugin does not require master to also run node: %s", networkPluginName)
		}
	}

	can, err := userCan(d.OsClient, authorizationapi.AuthorizationAttributes{
		Verb:     "list",
		Group:    kapi.GroupName,
		Resource: "nodes",
	})
	if err != nil {
		return false, types.DiagnosticError{ID: "DClu3000", LogMessage: fmt.Sprintf(clientErrorGettingNodes, err), Cause: err}
	} else if !can {
		return false, types.DiagnosticError{ID: "DClu3001", LogMessage: "Client does not have access to see node status", Cause: err}
	}
	return true, nil
}

func (d *MasterNode) Check() types.DiagnosticResult {
	r := types.NewDiagnosticResult(MasterNodeName)

	nodes, err := d.KubeClient.Nodes().List(kapi.ListOptions{})
	if err != nil {
		r.Error("DClu3002", err, fmt.Sprintf(clientErrorGettingNodes, err))
		return r
	}

	// Provide the actual net.LookupHost as the DNS resolver:
	serverIps, err := resolveServerIP(d.ServerUrl, net.LookupHost)
	if err != nil {
		r.Error("DClu3007", err, "Error resolving server IP")
		return r
	}

	return searchNodesForIP(nodes.Items, serverIps)
}

// dnsResolver is a resolver callback function type, used to swap in a dummy
// implementation in tests and avoid actual DNS calls.
type dnsResolver func(string) ([]string, error)

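// A minimal sketch of how a test might stub the resolver to avoid real DNS
// (the test name and the stubbed address are hypothetical, not part of this
// package's actual tests):
//
//	func TestResolveServerIPStubbed(t *testing.T) {
//		stub := func(host string) ([]string, error) {
//			return []string{"10.0.0.1"}, nil
//		}
//		ips, err := resolveServerIP("https://master.example.com:8443", stub)
//		if err != nil || len(ips) != 1 || ips[0] != "10.0.0.1" {
//			t.Fatalf("unexpected result: %v, %v", ips, err)
//		}
//	}
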
// resolveServerIP extracts the hostname portion of the API server URL passed in,
// and attempts DNS resolution. It also attempts to catch server URLs that already
// contain both IPv4 and IPv6 addresses.
func resolveServerIP(serverUrl string, fn dnsResolver) ([]string, error) {
	// Extract the hostname from the API server URL:
	u, err := url.Parse(serverUrl)
	if err != nil || u.Host == "" {
		return nil, fmt.Errorf("Unable to parse hostname from URL: %s", serverUrl)
	}

	// Trim the port, if one exists, and watch out for IPv6 URLs.
	if strings.Count(u.Host, ":") > 1 {
		// Check if this is an IPv6 address as-is, to avoid problems with
		// splitting off the port:
		ipv6 := net.ParseIP(u.Host)
		if ipv6 != nil {
			return []string{ipv6.String()}, nil
		}
	}
	hostname, _, err := net.SplitHostPort(u.Host)
	if err != nil && hostname == "" {
		// Likely didn't have a port, carry on:
		hostname = u.Host
	}

	// Check if the hostname already looks like an IPv4 or IPv6 address:
	goIp := net.ParseIP(hostname)
	if goIp != nil {
		return []string{goIp.String()}, nil
	}

	// If not, attempt a DNS lookup. We may get multiple addresses for the
	// hostname; we'll return them all and search for any match in Kube nodes:
	ips, err := fn(hostname)
	if err != nil {
		return nil, fmt.Errorf("Unable to perform DNS lookup for: %s", hostname)
	}
	return ips, nil
}

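// Two illustrative calls (inputs hypothetical) showing the short-circuit for
// IP literals, where the resolver callback is never invoked:
//
//	ips4, _ := resolveServerIP("https://10.0.0.1:8443", net.LookupHost)
//	// ips4 == []string{"10.0.0.1"}
//	ips6, _ := resolveServerIP("https://[fd00::1]:8443", net.LookupHost)
//	// ips6 == []string{"fd00::1"}; the bracketed IPv6 host is split from its port
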
func searchNodesForIP(nodes []kapi.Node, ips []string) types.DiagnosticResult {
	r := types.NewDiagnosticResult(MasterNodeName)
	r.Debug("DClu3005", fmt.Sprintf("Searching for a node with master IP: %s", ips))

	// Loops = # of nodes * # of IPs per node (commonly 2) * # of IPs the
	// server hostname resolves to (usually 1).
	for _, node := range nodes {
		for _, address := range node.Status.Addresses {
			for _, ipAddress := range ips {
				r.Debug("DClu3006", fmt.Sprintf("Checking node %s address %s",
					node.ObjectMeta.Name, address.Address))
				if address.Address == ipAddress {
					r.Info("DClu3003", fmt.Sprintf("Found a node with same IP as master: %s",
						node.ObjectMeta.Name))
					return r
				}
			}
		}
	}
	r.Warn("DClu3004", nil, masterNotRunningAsANode)
	return r
}