#!/usr/bin/env python
"""
Written by Nathan Prziborowski
Github: https://github.com/prziborowski
This code is released under the terms of the Apache 2
http://www.apache.org/licenses/LICENSE-2.0.html
Deploy an ova file either from a local path or a URL.
Most of the functionality is similar to the OVF sample, except that
an OVA file is a "tarball", so the tarfile module is leveraged.
"""
import os
import os.path
import ssl
import sys
import tarfile
import time

from threading import Timer
from six.moves.urllib.request import Request, urlopen

from tools import cli, service_instance
from pyVmomi import vim, vmodl

__author__ = 'prziborowski'


def main():
    parser = cli.Parser()
    parser.add_optional_arguments(cli.Argument.OVA_PATH, cli.Argument.DATACENTER_NAME,
                                  cli.Argument.RESOURCE_POOL, cli.Argument.DATASTORE_NAME)
    args = parser.get_args()
    si = service_instance.connect(args)

    if args.datacenter_name:
        datacenter = get_dc(si, args.datacenter_name)
    else:
        datacenter = si.content.rootFolder.childEntity[0]

    if args.resource_pool:
        resource_pool = get_rp(si, datacenter, args.resource_pool)
    else:
        resource_pool = get_largest_free_rp(si, datacenter)

    if args.datastore_name:
        datastore = get_ds(datacenter, args.datastore_name)
    else:
        datastore = get_largest_free_ds(datacenter)

    ovf_handle = OvfHandler(args.ova_path)

    ovf_manager = si.content.ovfManager
    # CreateImportSpecParams can specify many useful things such as
    # diskProvisioning (thin/thick/sparse/etc)
    # networkMapping (to map to networks)
    # propertyMapping (descriptor specific properties)
    cisp = vim.OvfManager.CreateImportSpecParams()
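    # For example (illustrative values only; the attribute names come from the
    # vSphere OvfCreateImportSpecParams data object):
    #   cisp.diskProvisioning = 'thin'   # thin-provision the imported disks
    #   cisp.entityName = 'imported-vm'  # override the VM name from the descriptor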
    cisr = ovf_manager.CreateImportSpec(
        ovf_handle.get_descriptor(), resource_pool, datastore, cisp)

    # These errors might be handleable by supporting the parameters in
    # CreateImportSpecParams
    if cisr.error:
        print("The following errors will prevent import of this OVA:")
        for error in cisr.error:
            print("%s" % error)
        return 1

    ovf_handle.set_spec(cisr)

    lease = resource_pool.ImportVApp(cisr.importSpec, datacenter.vmFolder)
    while lease.state == vim.HttpNfcLease.State.initializing:
        print("Waiting for lease to be ready...")
        time.sleep(1)

    if lease.state == vim.HttpNfcLease.State.error:
        print("Lease error: %s" % lease.error)
        return 1
    if lease.state == vim.HttpNfcLease.State.done:
        return 0

    print("Starting deploy...")
    return ovf_handle.upload_disks(lease, args.host)


def get_dc(si, name):
    """
    Get a datacenter by its name.
    """
    for datacenter in si.content.rootFolder.childEntity:
        if datacenter.name == name:
            return datacenter
    raise Exception('Failed to find datacenter named %s' % name)


def get_rp(si, datacenter, name):
    """
    Get a resource pool in the datacenter by its name.
    """
    view_manager = si.content.viewManager
    container_view = view_manager.CreateContainerView(datacenter, [vim.ResourcePool], True)
    try:
        for resource_pool in container_view.view:
            if resource_pool.name == name:
                return resource_pool
    finally:
        container_view.Destroy()
    raise Exception("Failed to find resource pool %s in datacenter %s" %
                    (name, datacenter.name))


def get_largest_free_rp(si, datacenter):
    """
    Get the resource pool with the largest unreserved memory for VMs.
    """
    view_manager = si.content.viewManager
    container_view = view_manager.CreateContainerView(datacenter, [vim.ResourcePool], True)
    largest_rp = None
    unreserved_for_vm = 0
    try:
        for resource_pool in container_view.view:
            if resource_pool.runtime.memory.unreservedForVm > unreserved_for_vm:
                largest_rp = resource_pool
                unreserved_for_vm = resource_pool.runtime.memory.unreservedForVm
    finally:
        container_view.Destroy()
    if largest_rp is None:
        raise Exception("Failed to find a resource pool in datacenter %s" % datacenter.name)
    return largest_rp


def get_ds(datacenter, name):
    """
    Pick a datastore by its name.
    """
    for datastore in datacenter.datastore:
        try:
            if datastore.name == name:
                return datastore
        except Exception:  # Ignore datastores that have issues
            pass
    raise Exception("Failed to find %s on datacenter %s" % (name, datacenter.name))


def get_largest_free_ds(datacenter):
    """
    Pick the accessible datastore with the largest free space.
    """
    largest = None
    largest_free = 0
    for datastore in datacenter.datastore:
        try:
            free_space = datastore.summary.freeSpace
            if free_space > largest_free and datastore.summary.accessible:
                largest_free = free_space
                largest = datastore
        except Exception:  # Ignore datastores that have issues
            pass
    if largest is None:
        raise Exception('Failed to find any free datastores on %s' % datacenter.name)
    return largest


def get_tarfile_size(fileobj):
    """
    Determine the size of a file inside the tarball.
    If the object has a size attribute, use that. Otherwise seek to the end
    and report that.
    """
    if hasattr(fileobj, 'size'):
        return fileobj.size
    size = fileobj.seek(0, 2)
    fileobj.seek(0, 0)
    return size


class OvfHandler(object):
    """
    OvfHandler handles most of the OVA operations.
    It processes the tarfile, matches disk keys to files and
    uploads the disks, while keeping the progress up to date for the lease.
    """
    def __init__(self, ovafile):
        """
        Performs necessary initialization, opening the OVA file,
        processing the files and reading the embedded ovf file.
        """
        self.handle = self._create_file_handle(ovafile)
        self.tarfile = tarfile.open(fileobj=self.handle)
        ovffilename = list(filter(lambda x: x.endswith(".ovf"),
                                  self.tarfile.getnames()))[0]
        ovffile = self.tarfile.extractfile(ovffilename)
        self.descriptor = ovffile.read().decode()

    def _create_file_handle(self, entry):
        """
        A simple mechanism to pick whether the file is local or not.
        This is not very robust.
        """
        if os.path.exists(entry):
            return FileHandle(entry)
        return WebHandle(entry)

    def get_descriptor(self):
        return self.descriptor

    def set_spec(self, spec):
        """
        The import spec is needed for later matching disk keys with
        file names.
        """
        self.spec = spec

    def get_disk(self, file_item):
        """
        Translates a disk key to a file name, returning a file handle.
        """
        ovffilename = list(filter(lambda x: x == file_item.path,
                                  self.tarfile.getnames()))[0]
        return self.tarfile.extractfile(ovffilename)

    def get_device_url(self, file_item, lease):
        for device_url in lease.info.deviceUrl:
            if device_url.importKey == file_item.deviceId:
                return device_url
        raise Exception("Failed to find deviceUrl for file %s" % file_item.path)

    def upload_disks(self, lease, host):
        """
        Uploads all the disks, with a progress keep-alive.
        """
        self.lease = lease
        try:
            self.start_timer()
            for fileItem in self.spec.fileItem:
                self.upload_disk(fileItem, lease, host)
            lease.Complete()
            print("Finished deploy successfully.")
            return 0
        except vmodl.MethodFault as ex:
            print("Hit an error in upload: %s" % ex)
            lease.Abort(ex)
        except Exception as ex:
            print("Lease: %s" % lease.info)
            print("Hit an error in upload: %s" % ex)
            lease.Abort(vmodl.fault.SystemError(reason=str(ex)))
        return 1

    def upload_disk(self, file_item, lease, host):
        """
        Upload an individual disk. Passes the file handle of the
        disk directly to the urlopen request.
        """
        ovffile = self.get_disk(file_item)
        if ovffile is None:
            return
        device_url = self.get_device_url(file_item, lease)
        url = device_url.url.replace('*', host)
        headers = {'Content-length': get_tarfile_size(ovffile)}
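        # Note: an unverified context disables TLS certificate checking for the
        # upload, which keeps the sample working against hosts with self-signed
        # certificates but should be reconsidered for production use.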
        if hasattr(ssl, '_create_unverified_context'):
            ssl_context = ssl._create_unverified_context()
        else:
            ssl_context = None
        req = Request(url, ovffile, headers)
        urlopen(req, context=ssl_context)

    def start_timer(self):
        """
        A simple way to keep updating progress while the disks are transferred.
        """
        Timer(5, self.timer).start()
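
    # An HttpNfcLease times out on the server side if progress is not reported
    # periodically, so this timer also acts as a keep-alive during long uploads.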
    def timer(self):
        """
        Update the progress and reschedule the timer if not complete.
        """
        try:
            prog = self.handle.progress()
            self.lease.Progress(prog)
            if self.lease.state not in [vim.HttpNfcLease.State.done,
                                        vim.HttpNfcLease.State.error]:
                self.start_timer()
            sys.stderr.write("Progress: %d%%\r" % prog)
        except Exception:  # Any exception means we should stop updating progress.
            pass


class FileHandle(object):
    """
    A seekable wrapper around a local file that tracks its read offset so
    upload progress can be reported.
    """
    def __init__(self, filename):
        self.filename = filename
        self.fh = open(filename, 'rb')
        self.st_size = os.stat(filename).st_size
        self.offset = 0

    def __del__(self):
        self.fh.close()

    def tell(self):
        return self.fh.tell()

    def seek(self, offset, whence=0):
        if whence == 0:
            self.offset = offset
        elif whence == 1:
            self.offset += offset
        elif whence == 2:
            # SEEK_END: the offset is relative to the end of the file.
            self.offset = self.st_size + offset
        return self.fh.seek(offset, whence)

    def seekable(self):
        return True

    def read(self, amount):
        self.offset += amount
        result = self.fh.read(amount)
        return result

    # A slightly more accurate percentage
    def progress(self):
        return int(100.0 * self.offset / self.st_size)


class WebHandle(object):
    """
    A seekable, file-like wrapper around a URL. It relies on HTTP range
    requests so the tarfile module can read the remote OVA piecemeal
    without downloading the whole archive first.
    """
    def __init__(self, url):
        self.url = url
        r = urlopen(url)
        if r.code != 200:
            raise FileNotFoundError(url)
        self.headers = self._headers_to_dict(r)
        if 'accept-ranges' not in self.headers:
            raise Exception("Site does not accept ranges")
        self.st_size = int(self.headers['content-length'])
        self.offset = 0

    def _headers_to_dict(self, r):
        result = {}
        if hasattr(r, 'getheaders'):
            for n, v in r.getheaders():
                result[n.lower()] = v.strip()
        else:
            for line in r.info().headers:
                if line.find(':') != -1:
                    n, v = line.split(': ', 1)
                    result[n.lower()] = v.strip()
        return result

    def tell(self):
        return self.offset

    def seek(self, offset, whence=0):
        if whence == 0:
            self.offset = offset
        elif whence == 1:
            self.offset += offset
        elif whence == 2:
            # SEEK_END: the offset is relative to the end of the resource.
            self.offset = self.st_size + offset
        return self.offset

    def seekable(self):
        return True

    def read(self, amount):
        start = self.offset
        end = self.offset + amount - 1
        req = Request(self.url,
                      headers={'Range': 'bytes=%d-%d' % (start, end)})
        r = urlopen(req)
        self.offset += amount
        result = r.read(amount)
        r.close()
        return result

    # A slightly more accurate percentage
    def progress(self):
        return int(100.0 * self.offset / self.st_size)


if __name__ == "__main__":
    sys.exit(main())