-
Notifications
You must be signed in to change notification settings - Fork 354
/
clusters.go
226 lines (201 loc) · 7.27 KB
/
clusters.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
package service
import (
"encoding/json"
"errors"
"github.com/databrickslabs/databricks-terraform/client/model"
"log"
"net/http"
"time"
)
// ClustersAPI is a struct that contains the Databricks api client to perform queries
// against the Databricks Clusters REST API (version 2.0). All methods issue their
// requests through Client.performQuery.
type ClustersAPI struct {
	// Client is the configured Databricks API client used for every request.
	Client DBApiClient
}
// Create creates a new Spark cluster from the given specification and returns
// the cluster information reported back by the Databricks API.
func (a ClustersAPI) Create(cluster model.Cluster) (model.ClusterInfo, error) {
	var info model.ClusterInfo
	raw, err := a.Client.performQuery(http.MethodPost, "/clusters/create", "2.0", nil, cluster, nil)
	if err != nil {
		return info, err
	}
	// Decode the API response into the typed cluster-info struct.
	return info, json.Unmarshal(raw, &info)
}
// Edit edits the configuration of a cluster to match the provided attributes and size.
func (a ClustersAPI) Edit(clusterInfo model.Cluster) error {
	// The edit endpoint returns no payload we care about; only the error matters.
	if _, err := a.Client.performQuery(http.MethodPost, "/clusters/edit", "2.0", nil, clusterInfo, nil); err != nil {
		return err
	}
	return nil
}
// ListZones returns the availability-zone information reported by the cloud
// service provider for this workspace.
func (a ClustersAPI) ListZones() (model.ZonesInfo, error) {
	var zones model.ZonesInfo
	raw, err := a.Client.performQuery(http.MethodGet, "/clusters/list-zones", "2.0", nil, nil, nil)
	if err == nil {
		err = json.Unmarshal(raw, &zones)
	}
	return zones, err
}
// Start starts a terminated Spark cluster given its ID.
func (a ClustersAPI) Start(clusterID string) error {
	// Minimal request body: the endpoint only needs the cluster identifier.
	req := struct {
		ClusterID string `json:"cluster_id,omitempty" url:"cluster_id,omitempty"`
	}{ClusterID: clusterID}
	_, err := a.Client.performQuery(http.MethodPost, "/clusters/start", "2.0", nil, req, nil)
	return err
}
// Restart restarts a Spark cluster given its ID. If the cluster is not in a
// RUNNING state, nothing will happen.
func (a ClustersAPI) Restart(clusterID string) error {
	// Minimal request body: the endpoint only needs the cluster identifier.
	req := struct {
		ClusterID string `json:"cluster_id,omitempty" url:"cluster_id,omitempty"`
	}{ClusterID: clusterID}
	_, err := a.Client.performQuery(http.MethodPost, "/clusters/restart", "2.0", nil, req, nil)
	return err
}
// WaitForClusterRunning blocks the calling goroutine until the cluster reaches
// the RUNNING state, polling the Get endpoint every sleepDurationSeconds
// seconds and giving up after timeoutDurationMinutes minutes.
//
// It returns nil once the cluster is RUNNING, the API error if a poll fails,
// an error if the cluster enters a non-runnable state, or a timeout error.
//
// Fix over the previous version: the polling goroutine used to outlive a
// timeout and keep hitting the API until the cluster happened to reach a
// terminal state. A quit channel, closed via defer when this function returns,
// now stops the poller promptly; the sleep is a select so it is interruptible.
func (a ClustersAPI) WaitForClusterRunning(clusterID string, sleepDurationSeconds time.Duration, timeoutDurationMinutes time.Duration) error {
	errChan := make(chan error, 1) // buffered so the poller never blocks on send
	quit := make(chan struct{})
	defer close(quit) // stop the poller whether we return a result or time out
	go func() {
		for {
			clusterInfo, err := a.Get(clusterID)
			if err != nil {
				errChan <- err
				return
			}
			if clusterInfo.State == model.ClusterStateRunning {
				errChan <- nil
				return
			}
			if model.ContainsClusterState(model.ClusterStateNonRunnable, clusterInfo.State) {
				errChan <- errors.New("Cluster is in a non runnable state will not be able to transition to running, needs " +
					"to be started again. Current state: " + string(clusterInfo.State))
				return
			}
			log.Println("Waiting for cluster to go to running, current state is: " + string(clusterInfo.State))
			select {
			case <-quit:
				// Caller has returned (timeout); stop polling.
				return
			case <-time.After(sleepDurationSeconds * time.Second):
			}
		}
	}()
	select {
	case err := <-errChan:
		return err
	case <-time.After(timeoutDurationMinutes * time.Minute):
		return errors.New("Timed out cluster has not reached running state")
	}
}
// WaitForClusterTerminated blocks the calling goroutine until the cluster
// reaches the TERMINATED state, polling the Get endpoint every
// sleepDurationSeconds seconds and giving up after timeoutDurationMinutes
// minutes.
//
// It returns nil once the cluster is TERMINATED, the API error if a poll
// fails, an error if the cluster enters a non-terminating state, or a timeout
// error.
//
// Fix over the previous version: the polling goroutine used to outlive a
// timeout and keep hitting the API until the cluster happened to reach a
// terminal state. A quit channel, closed via defer when this function returns,
// now stops the poller promptly; the sleep is a select so it is interruptible.
func (a ClustersAPI) WaitForClusterTerminated(clusterID string, sleepDurationSeconds time.Duration, timeoutDurationMinutes time.Duration) error {
	errChan := make(chan error, 1) // buffered so the poller never blocks on send
	quit := make(chan struct{})
	defer close(quit) // stop the poller whether we return a result or time out
	go func() {
		for {
			clusterInfo, err := a.Get(clusterID)
			if err != nil {
				errChan <- err
				return
			}
			if clusterInfo.State == model.ClusterStateTerminated {
				errChan <- nil
				return
			}
			if model.ContainsClusterState(model.ClusterStateNonTerminating, clusterInfo.State) {
				errChan <- errors.New("Cluster is in a non runnable state will not be able to transition to terminated, needs " +
					"to be terminated again. Current state: " + string(clusterInfo.State))
				return
			}
			log.Println("Waiting for cluster to go to terminate, current state is: " + string(clusterInfo.State))
			select {
			case <-quit:
				// Caller has returned (timeout); stop polling.
				return
			case <-time.After(sleepDurationSeconds * time.Second):
			}
		}
	}()
	select {
	case err := <-errChan:
		return err
	case <-time.After(timeoutDurationMinutes * time.Minute):
		return errors.New("Timed out cluster has not reached terminated state")
	}
}
// Terminate terminates a Spark cluster given its ID.
func (a ClustersAPI) Terminate(clusterID string) error {
	// Minimal request body: the endpoint only needs the cluster identifier.
	req := struct {
		ClusterID string `json:"cluster_id,omitempty" url:"cluster_id,omitempty"`
	}{ClusterID: clusterID}
	_, err := a.Client.performQuery(http.MethodPost, "/clusters/delete", "2.0", nil, req, nil)
	return err
}
// Delete is an alias of Terminate: the Databricks "delete" endpoint only
// terminates the cluster; use PermanentDelete to remove it entirely.
func (a ClustersAPI) Delete(clusterID string) error {
	return a.Terminate(clusterID)
}
// PermanentDelete permanently deletes a cluster given its ID.
func (a ClustersAPI) PermanentDelete(clusterID string) error {
	// Minimal request body: the endpoint only needs the cluster identifier.
	req := struct {
		ClusterID string `json:"cluster_id,omitempty" url:"cluster_id,omitempty"`
	}{ClusterID: clusterID}
	_, err := a.Client.performQuery(http.MethodPost, "/clusters/permanent-delete", "2.0", nil, req, nil)
	return err
}
// Get retrieves the information for a cluster given its identifier.
func (a ClustersAPI) Get(clusterID string) (model.ClusterInfo, error) {
	var info model.ClusterInfo
	// Minimal request body: the endpoint only needs the cluster identifier.
	req := struct {
		ClusterID string `json:"cluster_id,omitempty" url:"cluster_id,omitempty"`
	}{ClusterID: clusterID}
	raw, err := a.Client.performQuery(http.MethodGet, "/clusters/get", "2.0", nil, req, nil)
	if err != nil {
		return info, err
	}
	// Decode the API response into the typed cluster-info struct.
	return info, json.Unmarshal(raw, &info)
}
// Pin ensures that an interactive cluster configuration is retained even after
// a cluster has been terminated for more than 30 days.
func (a ClustersAPI) Pin(clusterID string) error {
	// Minimal request body: the endpoint only needs the cluster identifier.
	req := struct {
		ClusterID string `json:"cluster_id,omitempty" url:"cluster_id,omitempty"`
	}{ClusterID: clusterID}
	_, err := a.Client.performQuery(http.MethodPost, "/clusters/pin", "2.0", nil, req, nil)
	return err
}
// Unpin allows the cluster to eventually be removed from the list returned by
// the List API.
func (a ClustersAPI) Unpin(clusterID string) error {
	// Minimal request body: the endpoint only needs the cluster identifier.
	req := struct {
		ClusterID string `json:"cluster_id,omitempty" url:"cluster_id,omitempty"`
	}{ClusterID: clusterID}
	_, err := a.Client.performQuery(http.MethodPost, "/clusters/unpin", "2.0", nil, req, nil)
	return err
}
// List returns information about all pinned clusters, currently active
// clusters, up to 70 of the most recently terminated interactive clusters in
// the past 30 days, and up to 30 of the most recently terminated job clusters
// in the past 30 days.
func (a ClustersAPI) List() ([]model.ClusterInfo, error) {
	// The API wraps the result list in a top-level "clusters" field.
	var payload struct {
		Clusters []model.ClusterInfo `json:"clusters,omitempty" url:"clusters,omitempty"`
	}
	raw, err := a.Client.performQuery(http.MethodGet, "/clusters/list", "2.0", nil, nil, nil)
	if err == nil {
		err = json.Unmarshal(raw, &payload)
	}
	return payload.Clusters, err
}
// ListNodeTypes returns a list of supported Spark node types.
func (a ClustersAPI) ListNodeTypes() ([]model.NodeType, error) {
	// The API wraps the result list in a top-level "node_types" field.
	var payload struct {
		NodeTypes []model.NodeType `json:"node_types,omitempty" url:"node_types,omitempty"`
	}
	raw, err := a.Client.performQuery(http.MethodGet, "/clusters/list-node-types", "2.0", nil, nil, nil)
	if err == nil {
		err = json.Unmarshal(raw, &payload)
	}
	return payload.NodeTypes, err
}