client.go
package diagnostics

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
	clientcmdapi "k8s.io/client-go/tools/clientcmd/api"

	clientdiags "github.com/openshift/origin/pkg/diagnostics/client"
	networkdiags "github.com/openshift/origin/pkg/diagnostics/network"
	"github.com/openshift/origin/pkg/diagnostics/types"
)
var (
	// availableClientDiagnostics contains the names of client diagnostics that can be executed
	// during a single run of diagnostics. Add more diagnostics to the list as they are defined.
	availableClientDiagnostics = sets.NewString(
		clientdiags.ConfigContextsName,
		clientdiags.DiagnosticPodName,
		networkdiags.NetworkDiagnosticName,
	)
)
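// Illustrative sketch (assumption, not part of the original file): the request
// filtering below relies on sets.String from k8s.io/apimachinery/pkg/util/sets.
// Intersection keeps only the requested names that are actually available, and
// List returns them as a sorted slice, e.g.:
//
//	available := sets.NewString("A", "B", "C")
//	requested := sets.NewString("B", "Z")
//	available.Intersection(requested).List() // => []string{"B"}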
// buildClientDiagnostics builds client Diagnostic objects based on the rawConfig passed in.
// It returns the Diagnostics built, an "ok" bool for whether to proceed or abort, and an
// error if any was encountered while building the diagnostics.
func (o DiagnosticsOptions) buildClientDiagnostics(rawConfig *clientcmdapi.Config) ([]types.Diagnostic, bool, error) {
	available := availableClientDiagnostics

	// If no client can be configured, fall back to the diagnostics that only need the raw config.
	osClient, kubeClient, clientErr := o.Factory.Clients()
	if clientErr != nil {
		o.Logger.Notice("CED0001", "Could not configure a client, so client diagnostics are limited to testing configuration and connection")
		available = sets.NewString(clientdiags.ConfigContextsName)
	}

	diagnostics := []types.Diagnostic{}
	requestedDiagnostics := available.Intersection(sets.NewString(o.RequestedDiagnostics...)).List()
	for _, diagnosticName := range requestedDiagnostics {
		switch diagnosticName {
		case clientdiags.ConfigContextsName:
			seen := map[string]bool{}
			for contextName := range rawConfig.Contexts {
				diagnostic := clientdiags.ConfigContext{RawConfig: rawConfig, ContextName: contextName}
				if clusterUser, defined := diagnostic.ContextClusterUser(); !defined {
					// definitely want to diagnose the broken context
					diagnostics = append(diagnostics, diagnostic)
				} else if !seen[clusterUser] {
					seen[clusterUser] = true // avoid validating the same user for multiple projects
					diagnostics = append(diagnostics, diagnostic)
				}
			}
		case clientdiags.DiagnosticPodName:
			// Runs a diagnostic pod in the current context's namespace.
			diagnostics = append(diagnostics, &clientdiags.DiagnosticPod{
				KubeClient:          kubeClient,
				Namespace:           rawConfig.Contexts[rawConfig.CurrentContext].Namespace,
				Level:               o.LogOptions.Level,
				Factory:             o.Factory,
				PreventModification: o.PreventModification,
				ImageTemplate:       o.ImageTemplate,
			})
		case networkdiags.NetworkDiagnosticName:
			// Runs the cluster network diagnostic with the configured pod images and ports.
			diagnostics = append(diagnostics, &networkdiags.NetworkDiagnostic{
				KubeClient:          kubeClient,
				OSClient:            osClient,
				ClientFlags:         o.ClientFlags,
				Level:               o.LogOptions.Level,
				Factory:             o.Factory,
				PreventModification: o.PreventModification,
				LogDir:              o.NetworkOptions.LogDir,
				PodImage:            o.NetworkOptions.PodImage,
				TestPodImage:        o.NetworkOptions.TestPodImage,
				TestPodProtocol:     o.NetworkOptions.TestPodProtocol,
				TestPodPort:         o.NetworkOptions.TestPodPort,
			})
		default:
			return nil, false, fmt.Errorf("unknown diagnostic: %v", diagnosticName)
		}
	}

	// Proceed even if client configuration failed; the available diagnostics were filtered above.
	return diagnostics, true, clientErr
}
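// Usage sketch (hypothetical, not part of the original file): a caller holding a
// DiagnosticsOptions would typically load the raw kubeconfig via client-go's
// clientcmd, build the client diagnostics, and run each one. This assumes the
// types.Diagnostic interface exposes CanRun() (bool, error) and Check() methods,
// and that kubeConfig is a clientcmd.ClientConfig obtained elsewhere.
//
//	rawConfig, err := kubeConfig.RawConfig()
//	if err != nil {
//		return err
//	}
//	diagnostics, ok, err := o.buildClientDiagnostics(&rawConfig)
//	if err != nil || !ok {
//		return err
//	}
//	for _, d := range diagnostics {
//		if canRun, _ := d.CanRun(); canRun {
//			d.Check()
//		}
//	}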