#!/usr/bin/env python
"""
Written by Nathan Prziborowski
Github: https://github.com/prziborowski
This code is released under the terms of the Apache 2
http://www.apache.org/licenses/LICENSE-2.0.html
Deploy an ova file either from a local path or a URL.
Most of the functionality is similar to the OVF deployment sample, except that
an OVA file is a "tarball", so the tarfile module is leveraged.
"""
import os
import os.path
import ssl
import sys
import tarfile
import time
from threading import Timer
from six.moves.urllib.request import Request, urlopen
from tools import cli, service_instance
from pyVmomi import vim, vmodl
__author__ = 'prziborowski'
def main():
    parser = cli.Parser()
    parser.add_optional_arguments(cli.Argument.OVA_PATH, cli.Argument.DATACENTER_NAME,
                                  cli.Argument.RESOURCE_POOL, cli.Argument.DATASTORE_NAME)
    args = parser.get_args()
    si = service_instance.connect(args)

    if args.datacenter_name:
        datacenter = get_dc(si, args.datacenter_name)
    else:
        datacenter = si.content.rootFolder.childEntity[0]

    if args.resource_pool:
        resource_pool = get_rp(si, datacenter, args.resource_pool)
    else:
        resource_pool = get_largest_free_rp(si, datacenter)

    if args.datastore_name:
        datastore = get_ds(datacenter, args.datastore_name)
    else:
        datastore = get_largest_free_ds(datacenter)

    ovf_handle = OvfHandler(args.ova_path)

    ovf_manager = si.content.ovfManager
    # CreateImportSpecParams can specify many useful things such as
    # diskProvisioning (thin/thick/sparse/etc)
    # networkMapping (to map to networks)
    # propertyMapping (descriptor specific properties)
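    # For example, thin-provisioned disks could be requested like this
    # (a sketch; the defaults are kept below):
    #     cisp = vim.OvfManager.CreateImportSpecParams(diskProvisioning='thin')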
    cisp = vim.OvfManager.CreateImportSpecParams()
    cisr = ovf_manager.CreateImportSpec(
        ovf_handle.get_descriptor(), resource_pool, datastore, cisp)

    # These errors might be handleable by supporting the parameters in
    # CreateImportSpecParams
    if cisr.error:
        print("The following errors will prevent import of this OVA:")
        for error in cisr.error:
            print("%s" % error)
        return 1

    ovf_handle.set_spec(cisr)

    lease = resource_pool.ImportVApp(cisr.importSpec, datacenter.vmFolder)
    while lease.state == vim.HttpNfcLease.State.initializing:
        print("Waiting for lease to be ready...")
        time.sleep(1)

    if lease.state == vim.HttpNfcLease.State.error:
        print("Lease error: %s" % lease.error)
        return 1
    if lease.state == vim.HttpNfcLease.State.done:
        return 0

    print("Starting deploy...")
    return ovf_handle.upload_disks(lease, args.host)


def get_dc(si, name):
"""
Get a datacenter by its name.
"""
for datacenter in si.content.rootFolder.childEntity:
if datacenter.name == name:
return datacenter
raise Exception('Failed to find datacenter named %s' % name)
def get_rp(si, datacenter, name):
"""
Get a resource pool in the datacenter by its names.
"""
view_manager = si.content.viewManager
container_view = view_manager.CreateContainerView(datacenter, [vim.ResourcePool], True)
try:
for resource_pool in container_view.view:
if resource_pool.name == name:
return resource_pool
finally:
container_view.Destroy()
raise Exception("Failed to find resource pool %s in datacenter %s" %
(name, datacenter.name))
def get_largest_free_rp(si, datacenter):
"""
Get the resource pool with the largest unreserved memory for VMs.
"""
view_manager = si.content.viewManager
container_view = view_manager.CreateContainerView(datacenter, [vim.ResourcePool], True)
largest_rp = None
unreserved_for_vm = 0
try:
for resource_pool in container_view.view:
if resource_pool.runtime.memory.unreservedForVm > unreserved_for_vm:
largest_rp = resource_pool
unreserved_for_vm = resource_pool.runtime.memory.unreservedForVm
finally:
container_view.Destroy()
if largest_rp is None:
raise Exception("Failed to find a resource pool in datacenter %s" % datacenter.name)
return largest_rp
def get_ds(datacenter, name):
"""
Pick a datastore by its name.
"""
for datastore in datacenter.datastore:
try:
if datastore.name == name:
return datastore
except Exception: # Ignore datastores that have issues
pass
raise Exception("Failed to find %s on datacenter %s" % (name, datacenter.name))
def get_largest_free_ds(datacenter):
"""
Pick the datastore that is accessible with the largest free space.
"""
largest = None
largest_free = 0
for datastore in datacenter.datastore:
try:
free_space = datastore.summary.freeSpace
if free_space > largest_free and datastore.summary.accessible:
largest_free = free_space
largest = datastore
except Exception: # Ignore datastores that have issues
pass
if largest is None:
raise Exception('Failed to find any free datastores on %s' % datacenter.name)
return largest
def get_tarfile_size(fileobj):
    """
    Determine the size of a file inside the tarball.
    If the object has a size attribute, use that. Otherwise seek to the end
    and report that.
    """
    if hasattr(fileobj, 'size'):
        return fileobj.size
    size = fileobj.seek(0, 2)
    fileobj.seek(0, 0)
    return size


class OvfHandler(object):
"""
OvfHandler handles most of the OVA operations.
It processes the tarfile, matches disk keys to files and
uploads the disks, while keeping the progress up to date for the lease.
"""
    def __init__(self, ovafile):
        """
        Performs necessary initialization, opening the OVA file,
        processing the files and reading the embedded ovf file.
        """
        self.handle = self._create_file_handle(ovafile)
        self.tarfile = tarfile.open(fileobj=self.handle)
        ovffilename = list(filter(lambda x: x.endswith(".ovf"),
                                  self.tarfile.getnames()))[0]
        ovffile = self.tarfile.extractfile(ovffilename)
        self.descriptor = ovffile.read().decode()

    def _create_file_handle(self, entry):
        """
        A simple mechanism to pick whether the file is local or not.
        This is not very robust.
        """
        if os.path.exists(entry):
            return FileHandle(entry)
        return WebHandle(entry)

    def get_descriptor(self):
        return self.descriptor

    def set_spec(self, spec):
        """
        The import spec is needed for later matching disk keys with
        file names.
        """
        self.spec = spec

    def get_disk(self, file_item):
        """
        Does the translation from disk key to file name, returning a file handle.
        """
        ovffilename = list(filter(lambda x: x == file_item.path,
                                  self.tarfile.getnames()))[0]
        return self.tarfile.extractfile(ovffilename)

    def get_device_url(self, file_item, lease):
        for device_url in lease.info.deviceUrl:
            if device_url.importKey == file_item.deviceId:
                return device_url
        raise Exception("Failed to find deviceUrl for file %s" % file_item.path)
    def upload_disks(self, lease, host):
        """
        Uploads all the disks, with a progress keep-alive.
        """
        self.lease = lease
        try:
            self.start_timer()
            for fileItem in self.spec.fileItem:
                self.upload_disk(fileItem, lease, host)
            lease.Complete()
            print("Finished deploy successfully.")
            return 0
        except vmodl.MethodFault as ex:
            print("Hit an error in upload: %s" % ex)
            lease.Abort(ex)
        except Exception as ex:
            print("Lease: %s" % lease.info)
            print("Hit an error in upload: %s" % ex)
            lease.Abort(vmodl.fault.SystemError(reason=str(ex)))
        return 1

    def upload_disk(self, file_item, lease, host):
        """
        Upload an individual disk. Passes the file handle of the
        disk directly to the urlopen request.
        """
        ovffile = self.get_disk(file_item)
        if ovffile is None:
            return
        device_url = self.get_device_url(file_item, lease)
        url = device_url.url.replace('*', host)
        headers = {'Content-length': get_tarfile_size(ovffile)}
        if hasattr(ssl, '_create_unverified_context'):
            ssl_context = ssl._create_unverified_context()
        else:
            ssl_context = None
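        # NOTE: _create_unverified_context() disables certificate verification,
        # which matches how this sample talks to hosts with self-signed certs.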
        req = Request(url, ovffile, headers)
        urlopen(req, context=ssl_context)

    def start_timer(self):
        """
        A simple way to keep updating progress while the disks are transferred.
        """
        Timer(5, self.timer).start()
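
    # Re-arming a 5-second Timer keeps lease.Progress() calls flowing; the
    # HTTP NFC lease can otherwise time out during long disk transfers.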
    def timer(self):
        """
        Update the progress and reschedule the timer if not complete.
        """
        try:
            prog = self.handle.progress()
            self.lease.Progress(prog)
            if self.lease.state not in [vim.HttpNfcLease.State.done,
                                        vim.HttpNfcLease.State.error]:
                self.start_timer()
            sys.stderr.write("Progress: %d%%\r" % prog)
        except Exception:  # Any exception means we should stop updating progress.
            pass


class FileHandle(object):
    def __init__(self, filename):
        self.filename = filename
        self.fh = open(filename, 'rb')
        self.st_size = os.stat(filename).st_size
        self.offset = 0

    def __del__(self):
        self.fh.close()

    def tell(self):
        return self.fh.tell()

    def seek(self, offset, whence=0):
        if whence == 0:
            self.offset = offset
        elif whence == 1:
            self.offset += offset
        elif whence == 2:
            # File-protocol semantics: a seek relative to EOF is size + offset.
            self.offset = self.st_size + offset
        return self.fh.seek(offset, whence)

    def seekable(self):
        return True

    def read(self, amount):
        self.offset += amount
        result = self.fh.read(amount)
        return result

    # A slightly more accurate percentage
    def progress(self):
        return int(100.0 * self.offset / self.st_size)

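
# WebHandle serves a remote OVA over HTTP: it requires range support so that
# the tarfile module can seek within the archive without downloading it first.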
class WebHandle(object):
    def __init__(self, url):
        self.url = url
        r = urlopen(url)
        if r.code != 200:
            raise FileNotFoundError(url)
        self.headers = self._headers_to_dict(r)
        if 'accept-ranges' not in self.headers:
            raise Exception("Site does not accept ranges")
        self.st_size = int(self.headers['content-length'])
        self.offset = 0

    def _headers_to_dict(self, r):
        result = {}
        if hasattr(r, 'getheaders'):
            for n, v in r.getheaders():
                result[n.lower()] = v.strip()
        else:
            for line in r.info().headers:
                if line.find(':') != -1:
                    n, v = line.split(': ', 1)
                    result[n.lower()] = v.strip()
        return result

    def tell(self):
        return self.offset

    def seek(self, offset, whence=0):
        if whence == 0:
            self.offset = offset
        elif whence == 1:
            self.offset += offset
        elif whence == 2:
            # Mirror file seek semantics: a seek relative to EOF is size + offset.
            self.offset = self.st_size + offset
        return self.offset

    def seekable(self):
        return True

    def read(self, amount):
        start = self.offset
        end = self.offset + amount - 1
        req = Request(self.url,
                      headers={'Range': 'bytes=%d-%d' % (start, end)})
        r = urlopen(req)
        self.offset += amount
        result = r.read(amount)
        r.close()
        return result

    # A slightly more accurate percentage
    def progress(self):
        return int(100.0 * self.offset / self.st_size)


if __name__ == "__main__":
    sys.exit(main())