forked from argoproj/argo-cd
-
Notifications
You must be signed in to change notification settings - Fork 0
/
cluster.go
118 lines (105 loc) · 3.5 KB
/
cluster.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
package cluster
import (
"reflect"
"golang.org/x/net/context"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
appv1 "github.com/argoproj/argo-cd/pkg/apis/application/v1alpha1"
"github.com/argoproj/argo-cd/util/db"
"github.com/argoproj/argo-cd/util/grpc"
"github.com/argoproj/argo-cd/util/kube"
"github.com/argoproj/argo-cd/util/rbac"
)
// Server provides a Cluster service
type Server struct {
	// db is the persistence layer used to read and write cluster records.
	db db.ArgoDB
	// enf is the RBAC enforcer consulted before every cluster operation.
	enf *rbac.Enforcer
}
// NewServer returns a new instance of the Cluster service, wired to the
// given database and RBAC enforcer.
func NewServer(db db.ArgoDB, enf *rbac.Enforcer) *Server {
	s := &Server{db: db, enf: enf}
	return s
}
// List returns list of clusters, filtered down to the entries the caller's
// RBAC claims permit and with credentials redacted from each item.
func (s *Server) List(ctx context.Context, q *ClusterQuery) (*appv1.ClusterList, error) {
	list, err := s.db.ListClusters(ctx)
	if list == nil {
		return list, err
	}
	claims := ctx.Value("claims")
	visible := make([]appv1.Cluster, 0)
	for i := range list.Items {
		// Work on a copy so redact never mutates the stored list entry.
		item := list.Items[i]
		if !s.enf.EnforceClaims(claims, "clusters", "get", item.Server) {
			continue
		}
		visible = append(visible, *redact(&item))
	}
	list.Items = visible
	return list, err
}
// Create creates a cluster. The connection is validated first, and a create
// that collides with an identical existing spec is treated as idempotent;
// a differing spec is either upserted (when requested) or rejected.
func (s *Server) Create(ctx context.Context, q *ClusterCreateRequest) (*appv1.Cluster, error) {
	if !s.enf.EnforceClaims(ctx.Value("claims"), "clusters", "create", q.Cluster.Server) {
		return nil, grpc.ErrPermissionDenied
	}
	c := q.Cluster
	// Verify we can actually reach the cluster before persisting it.
	if err := kube.TestConfig(q.Cluster.RESTConfig()); err != nil {
		return nil, err
	}
	c.ConnectionState = appv1.ConnectionState{Status: appv1.ConnectionStatusSuccessful}
	clust, err := s.db.CreateCluster(ctx, c)
	if status.Convert(err).Code() != codes.AlreadyExists {
		return redact(clust), err
	}
	// act idempotent if existing spec matches new spec
	existing, getErr := s.db.GetCluster(ctx, c.Server)
	if getErr != nil {
		return nil, status.Errorf(codes.Internal, "unable to check existing cluster details: %v", getErr)
	}
	// cluster ConnectionState may differ, so make consistent before testing
	existing.ConnectionState = c.ConnectionState
	switch {
	case reflect.DeepEqual(existing, c):
		return redact(existing), nil
	case q.Upsert:
		return s.Update(ctx, &ClusterUpdateRequest{Cluster: c})
	default:
		return nil, status.Errorf(codes.InvalidArgument, "existing cluster spec is different; use upsert flag to force update")
	}
}
// Get returns a cluster from a query, with credentials redacted.
// Callers lacking "get" permission on the target server are rejected.
func (s *Server) Get(ctx context.Context, q *ClusterQuery) (*appv1.Cluster, error) {
	if !s.enf.EnforceClaims(ctx.Value("claims"), "clusters", "get", q.Server) {
		return nil, grpc.ErrPermissionDenied
	}
	found, err := s.db.GetCluster(ctx, q.Server)
	return redact(found), err
}
// Update updates a cluster after verifying RBAC "update" permission and
// that the supplied credentials can reach the cluster.
func (s *Server) Update(ctx context.Context, q *ClusterUpdateRequest) (*appv1.Cluster, error) {
	if !s.enf.EnforceClaims(ctx.Value("claims"), "clusters", "update", q.Cluster.Server) {
		return nil, grpc.ErrPermissionDenied
	}
	// Reject the update outright if the new config cannot connect.
	if err := kube.TestConfig(q.Cluster.RESTConfig()); err != nil {
		return nil, err
	}
	updated, err := s.db.UpdateCluster(ctx, q.Cluster)
	return redact(updated), err
}
// Delete deletes a cluster by name, subject to RBAC "delete" permission
// on the target server.
func (s *Server) Delete(ctx context.Context, q *ClusterQuery) (*ClusterResponse, error) {
	if !s.enf.EnforceClaims(ctx.Value("claims"), "clusters", "delete", q.Server) {
		return nil, grpc.ErrPermissionDenied
	}
	return &ClusterResponse{}, s.db.DeleteCluster(ctx, q.Server)
}
// redact strips credentials (password, bearer token, and TLS client key)
// from the cluster config in place and returns the same pointer.
// A nil input yields nil.
func redact(clust *appv1.Cluster) *appv1.Cluster {
	if clust == nil {
		return nil
	}
	cfg := &clust.Config
	cfg.Password = ""
	cfg.BearerToken = ""
	cfg.TLSClientConfig.KeyData = nil
	return clust
}