manager.go (forked from cloudfoundry/bosh-cli)
package instance

import (
	"fmt"
	"time"

	biblobstore "github.com/cloudfoundry/bosh-cli/blobstore"
	bicloud "github.com/cloudfoundry/bosh-cli/cloud"
	bidisk "github.com/cloudfoundry/bosh-cli/deployment/disk"
	bideplmanifest "github.com/cloudfoundry/bosh-cli/deployment/manifest"
	bisshtunnel "github.com/cloudfoundry/bosh-cli/deployment/sshtunnel"
	bivm "github.com/cloudfoundry/bosh-cli/deployment/vm"
	biinstallmanifest "github.com/cloudfoundry/bosh-cli/installation/manifest"
	bistemcell "github.com/cloudfoundry/bosh-cli/stemcell"
	biui "github.com/cloudfoundry/bosh-cli/ui"
	bosherr "github.com/cloudfoundry/bosh-utils/errors"
	boshlog "github.com/cloudfoundry/bosh-utils/logger"
)
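
// Manager finds, creates, and deletes deployment instances, coordinating
// the underlying VMs and persistent disks.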
type Manager interface {
	FindCurrent() ([]Instance, error)
	Create(
		jobName string,
		id int,
		deploymentManifest bideplmanifest.Manifest,
		cloudStemcell bistemcell.CloudStemcell,
		registryConfig biinstallmanifest.Registry,
		eventLoggerStage biui.Stage,
	) (Instance, []bidisk.Disk, error)
	DeleteAll(
		pingTimeout time.Duration,
		pingDelay time.Duration,
		eventLoggerStage biui.Stage,
	) error
}
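
// manager is the default Manager implementation.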
type manager struct {
	cloud            bicloud.Cloud
	vmManager        bivm.Manager
	blobstore        biblobstore.Blobstore
	sshTunnelFactory bisshtunnel.Factory
	instanceFactory  Factory
	logger           boshlog.Logger
	logTag           string
}
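
// NewManager constructs a Manager from the given cloud, VM manager,
// blobstore, SSH tunnel factory, and instance factory.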
func NewManager(
	cloud bicloud.Cloud,
	vmManager bivm.Manager,
	blobstore biblobstore.Blobstore,
	sshTunnelFactory bisshtunnel.Factory,
	instanceFactory Factory,
	logger boshlog.Logger,
) Manager {
	return &manager{
		cloud:            cloud,
		vmManager:        vmManager,
		blobstore:        blobstore,
		sshTunnelFactory: sshTunnelFactory,
		instanceFactory:  instanceFactory,
		logger:           logger,
		logTag:           "vmDeployer",
	}
}
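
// FindCurrent returns the currently deployed instances. At most one
// current instance is tracked, so the slice holds zero or one element.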
func (m *manager) FindCurrent() ([]Instance, error) {
	instances := []Instance{}

	// Only one current instance will exist (for now)
	vm, found, err := m.vmManager.FindCurrent()
	if err != nil {
		return instances, bosherr.WrapError(err, "Finding currently deployed instances")
	}

	if found {
		// TODO: store the name of the job for each instance in the repo, so that we can print it when deleting
		jobName := "unknown"
		instanceID := 0

		instance := m.instanceFactory.NewInstance(
			jobName,
			instanceID,
			vm,
			m.vmManager,
			m.sshTunnelFactory,
			m.blobstore,
			m.logger,
		)
		instances = append(instances, instance)
	}

	return instances, nil
}
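
// Create boots a VM from the given stemcell for the job instance, promotes
// the stemcell as current, waits for the instance to become ready, and
// updates its disks according to the deployment manifest.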
func (m *manager) Create(
	jobName string,
	id int,
	deploymentManifest bideplmanifest.Manifest,
	cloudStemcell bistemcell.CloudStemcell,
	registryConfig biinstallmanifest.Registry,
	eventLoggerStage biui.Stage,
) (Instance, []bidisk.Disk, error) {
	var vm bivm.VM
	stepName := fmt.Sprintf("Creating VM for instance '%s/%d' from stemcell '%s'", jobName, id, cloudStemcell.CID())
	err := eventLoggerStage.Perform(stepName, func() error {
		var err error
		vm, err = m.vmManager.Create(cloudStemcell, deploymentManifest)
		if err != nil {
			return bosherr.WrapError(err, "Creating VM")
		}

		if err = cloudStemcell.PromoteAsCurrent(); err != nil {
			return bosherr.WrapErrorf(err, "Promoting stemcell as current '%s'", cloudStemcell.CID())
		}

		return nil
	})
	if err != nil {
		return nil, []bidisk.Disk{}, err
	}

	instance := m.instanceFactory.NewInstance(jobName, id, vm, m.vmManager, m.sshTunnelFactory, m.blobstore, m.logger)

	if err := instance.WaitUntilReady(registryConfig, eventLoggerStage); err != nil {
		return instance, []bidisk.Disk{}, bosherr.WrapError(err, "Waiting until instance is ready")
	}

	disks, err := instance.UpdateDisks(deploymentManifest, eventLoggerStage)
	if err != nil {
		return instance, disks, bosherr.WrapError(err, "Updating instance disks")
	}

	return instance, disks, err
}
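
// DeleteAll deletes every currently deployed instance, passing pingTimeout
// and pingDelay through to each instance's Delete call.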
func (m *manager) DeleteAll(
	pingTimeout time.Duration,
	pingDelay time.Duration,
	eventLoggerStage biui.Stage,
) error {
	instances, err := m.FindCurrent()
	if err != nil {
		return err
	}

	for _, instance := range instances {
		if err = instance.Delete(pingTimeout, pingDelay, eventLoggerStage); err != nil {
			return bosherr.WrapErrorf(err, "Deleting existing instance '%s/%d'", instance.JobName(), instance.ID())
		}
	}

	return nil
}