/
exec.go
123 lines (107 loc) · 3.3 KB
/
exec.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
// Copyright Contributors to the Open Cluster Management project
package contexts
import (
"context"
"fmt"
"github.com/spf13/cobra"
"github.com/stolostron/cm-cli/pkg/clusterpoolhost"
"github.com/stolostron/cm-cli/pkg/managedcluster"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/cli-runtime/pkg/genericclioptions"
"k8s.io/client-go/tools/clientcmd"
clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
clusterv1 "open-cluster-management.io/api/cluster/v1"
clusterclientset "open-cluster-management.io/api/client/cluster/clientset/versioned"
)
// complete gathers values from the command line into the options.
// This command currently needs nothing beyond its flags, so it is a
// no-op kept for symmetry with the complete/validate/run pattern.
func (o *Options) complete(cmd *cobra.Command, args []string) (err error) {
	return
}
// validate checks the completed options before run.
// No option currently requires validation, so it always succeeds.
func (o *Options) validate() error {
	return nil
}
// run builds an aggregated kubeconfig containing the current context
// plus, unless o.Current is set, one context per managed cluster of the
// hub (skipping the hub's own "local-cluster"), and prints it on stdout.
// Warnings for clusters without a retrievable kubeconfig go to
// streams.ErrOut.
func (o *Options) run(streams genericclioptions.IOStreams) (err error) {
	// Start from an empty kubeconfig skeleton.
	cmdAPIConfig := &clientcmdapi.Config{
		Kind:       "Config",
		APIVersion: "v1",
		AuthInfos:  make(map[string]*clientcmdapi.AuthInfo),
		Contexts:   make(map[string]*clientcmdapi.Context),
		Clusters:   make(map[string]*clientcmdapi.Cluster),
	}
	currentCmdAPIConfig, _, err := clusterpoolhost.GetConfigAPI()
	if err != nil {
		return err
	}
	// Copy the current context's auth-info/context/cluster triple verbatim.
	currentContext := currentCmdAPIConfig.CurrentContext
	cmdAPIConfig.AuthInfos[currentContext] = currentCmdAPIConfig.AuthInfos[currentContext]
	cmdAPIConfig.Contexts[currentContext] = currentCmdAPIConfig.Contexts[currentContext]
	cmdAPIConfig.Clusters[currentContext] = currentCmdAPIConfig.Clusters[currentContext]
	cmdAPIConfig.CurrentContext = currentContext
	if !o.Current {
		cphs, err := clusterpoolhost.GetClusterPoolHosts()
		if err != nil {
			return err
		}
		var cph *clusterpoolhost.ClusterPoolHost
		if len(cphs.ClusterPoolHosts) != 0 {
			cph, err = clusterpoolhost.GetClusterPoolHostOrCurrent(o.ClusterPoolHost)
			if err != nil {
				// Best effort: continue with a nil cph and fall back to
				// hive-generated cluster contexts only.
				fmt.Println("no clusterpoolhost found, will only get the contexts of hive generated clusters")
			}
		}
		dynamicClient, err := o.CMFlags.KubectlFactory.DynamicClient()
		if err != nil {
			return err
		}
		kubeClient, err := o.CMFlags.KubectlFactory.KubernetesClientSet()
		if err != nil {
			return err
		}
		restConfig, err := o.CMFlags.KubectlFactory.ToRESTConfig()
		if err != nil {
			return err
		}
		clusterClient, err := clusterclientset.NewForConfig(restConfig)
		if err != nil {
			return err
		}
		mcs, err := clusterClient.ClusterV1().ManagedClusters().List(context.TODO(), metav1.ListOptions{})
		if err != nil {
			return err
		}
		for _, mc := range mcs.Items {
			// The hub itself is already covered by the current context.
			if mc.Name == "local-cluster" {
				continue
			}
			clusterCmdAPIConfig, err := managedcluster.GetCmdAPIConfig(dynamicClient, kubeClient, mc, cph)
			if err != nil {
				return err
			}
			if clusterCmdAPIConfig == nil {
				fmt.Fprintf(streams.ErrOut, "no kubeconfig found for managedcluster %s\n", mc.Name)
				// Nothing to merge for this cluster.
				continue
			}
			addCluster(mc, cmdAPIConfig, clusterCmdAPIConfig)
		}
	}
	data, err := clientcmd.Write(*cmdAPIConfig)
	if err != nil {
		return err
	}
	fmt.Println(string(data))
	return nil
}
// addCluster copies the auth-info, cluster, and context entries of config
// into configs, re-keying each one under the managed cluster's name and
// pointing the copied contexts at that key. If config holds more than one
// entry in a map, the last one iterated wins. A nil config is ignored.
func addCluster(mc clusterv1.ManagedCluster, configs, config *clientcmdapi.Config) {
	if config == nil {
		return
	}
	name := mc.Name
	for _, authInfo := range config.AuthInfos {
		configs.AuthInfos[name] = authInfo
	}
	for _, cluster := range config.Clusters {
		configs.Clusters[name] = cluster
	}
	for _, kubeContext := range config.Contexts {
		kubeContext.AuthInfo = name
		kubeContext.Cluster = name
		configs.Contexts[name] = kubeContext
	}
}