package diagnostics

import (
	"fmt"

	"k8s.io/apimachinery/pkg/util/sets"
	clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
	kclientset "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset"

	clientdiags "github.com/openshift/origin/pkg/oc/cli/admin/diagnostics/diagnostics/client"
	"github.com/openshift/origin/pkg/oc/cli/admin/diagnostics/diagnostics/client/pod"
	"github.com/openshift/origin/pkg/oc/cli/admin/diagnostics/diagnostics/types"
)

// availableClientDiagnostics returns definitions of client diagnostics that can be executed
// during a single run of diagnostics. Add more diagnostics to the list as they are defined.
func availableClientDiagnostics() types.DiagnosticList {
	return types.DiagnosticList{clientdiags.ConfigContext{}, &pod.DiagnosticPod{}}
}

// buildClientDiagnostics builds client Diagnostic objects based on the rawConfig passed in.
// It returns the Diagnostics built and any fatal error encountered while building them.
func (o DiagnosticsOptions) buildClientDiagnostics(rawConfig *clientcmdapi.Config) ([]types.Diagnostic, error) {
	available := availableClientDiagnostics().Names()

	// If no usable client can be built, fall back to the diagnostics that only
	// inspect configuration. NewForConfig must not be called with a nil config,
	// so it is skipped when ToRESTConfig fails.
	clientConfig, clientErr := o.Factory.ToRESTConfig()
	var kubeClient *kclientset.Clientset
	if clientErr == nil {
		kubeClient, clientErr = kclientset.NewForConfig(clientConfig)
	}
	if clientErr != nil {
		o.Logger().Notice("CED0001", "Could not configure a client, so client diagnostics are limited to testing configuration and connection")
		available = sets.NewString(clientdiags.ConfigContextsName)
	}

	diagnostics := []types.Diagnostic{}
	// Run only the diagnostics that were requested and are still available.
	requestedDiagnostics := available.Intersection(sets.NewString(o.RequestedDiagnostics.List()...)).List()
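	// Each name below is matched against the diagnostics declared in
	// availableClientDiagnostics; an unrecognized name is a programming error
	// and is caught by the default case.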
	for _, diagnosticName := range requestedDiagnostics {
		switch diagnosticName {
		case clientdiags.ConfigContextsName:
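			// One ConfigContext diagnostic is built per kubeconfig context, except
			// that contexts sharing a cluster/user pair are only validated once.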
			seen := map[string]bool{}
			for contextName := range rawConfig.Contexts {
				diagnostic := clientdiags.ConfigContext{RawConfig: rawConfig, ContextName: contextName}
				if clusterUser, defined := diagnostic.ContextClusterUser(); !defined {
					// definitely want to diagnose the broken context
					diagnostics = append(diagnostics, diagnostic)
				} else if !seen[clusterUser] {
					seen[clusterUser] = true // avoid validating same user for multiple projects
					diagnostics = append(diagnostics, diagnostic)
				}
			}
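		// The pod diagnostic needs a working cluster client, so it remains in
		// `available` only when the client above was built successfully.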
		case pod.DiagnosticPodName:
			dp := o.ParameterizedDiagnostics[diagnosticName].(*pod.DiagnosticPod)
			dp.KubeClient = kubeClient
			// Guard against a missing current context, which would otherwise panic here.
			if currentContext, exists := rawConfig.Contexts[rawConfig.CurrentContext]; exists {
				dp.Namespace = currentContext.Namespace
			}
			dp.Level = o.LogOptions.Level
			dp.Factory = o.Factory
			dp.PreventModification = dp.PreventModification || o.PreventModification
			diagnostics = append(diagnostics, dp)
		default:
			return nil, fmt.Errorf("unknown diagnostic: %v", diagnosticName)
		}
	}

	return diagnostics, clientErr
}
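
// Illustrative sketch (not part of the original file): a caller holding a
// DiagnosticsOptions value and a raw clientcmd config might drive these
// diagnostics roughly as follows, assuming the types.Diagnostic interface
// exposes CanRun and Check as it does elsewhere in this package tree:
//
//	diags, err := o.buildClientDiagnostics(rawConfig)
//	if err != nil {
//		// the client could not be configured; config-context diagnostics may still be present
//	}
//	for _, d := range diags {
//		if ok, _ := d.CanRun(); ok {
//			result := d.Check()
//			_ = result // inspect the result for errors and warnings
//		}
//	}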