vmware_inventory script improvements #17142

Merged: 4 commits, Aug 19, 2016
213 changes: 94 additions & 119 deletions contrib/inventory/vmware_inventory.py
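For orientation, a rough sketch (not part of the diff) of the --list payload this inventory script assembles. The ansible_uuid / ansible_host / ansible_ssh_host keys appear in the hunks below; the UUID, hostname, and variable values here are made-up placeholders.

    example_inventory = {
        'all': {
            'hosts': ['421a2b3c-made-up-uuid'],
        },
        '_meta': {
            'hostvars': {
                '421a2b3c-made-up-uuid': {
                    'ansible_uuid': '421a2b3c-made-up-uuid',
                    'ansible_host': 'vm01.example.com',      # derived from host_pattern
                    'ansible_ssh_host': 'vm01.example.com',  # 1.9.x backwards compliance
                },
            },
        },
    }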
@@ -108,6 +108,7 @@ def __init__(self, load=True):
if self.args.refresh_cache or not cache_valid:
self.do_api_calls_update_cache()
else:
self.debugl('# loading inventory from cache')
self.inventory = self.get_inventory_from_cache()

def debugl(self, text):
@@ -284,7 +285,6 @@ def get_instances(self):
kwargs['sslContext'] = context

instances = self._get_instances(kwargs)
self.debugl("### INSTANCES RETRIEVED")
return instances


@@ -294,57 +294,36 @@ def _get_instances(self, inkwargs):

instances = []
si = SmartConnect(**inkwargs)


self.debugl('# retrieving instances')
if not si:
print("Could not connect to the specified host using specified "
"username and password")
return -1
atexit.register(Disconnect, si)
content = si.RetrieveContent()
for child in content.rootFolder.childEntity:
instances += self._get_instances_from_children(child)
if self.args.max_instances:
if len(instances) >= (self.args.max_instances+1):
instances = instances[0:(self.args.max_instances+1)]

# Create a search container for virtualmachines
container = content.rootFolder
viewType = [vim.VirtualMachine]
recursive = True
containerView = content.viewManager.CreateContainerView(container, viewType, recursive)
children = containerView.view
for child in children:
# If requested, limit the total number of instances
if self.args.max_instances:
if len(instances) >= (self.args.max_instances):
break
instances.append(child)
self.debugl("# total instances retrieved %s" % len(instances))

instance_tuples = []
for instance in sorted(instances):
ifacts = self.facts_from_vobj(instance)
instance_tuples.append((instance, ifacts))
return instance_tuples


def _get_instances_from_children(self, child):
instances = []

if hasattr(child, 'childEntity'):
self.debugl("CHILDREN: %s" % child.childEntity)
instances += self._get_instances_from_children(child.childEntity)
elif hasattr(child, 'vmFolder'):
self.debugl("FOLDER: %s" % child)
instances += self._get_instances_from_children(child.vmFolder)
elif hasattr(child, 'index'):
self.debugl("LIST: %s" % child)
for x in sorted(child):
self.debugl("LIST_ITEM: %s" % x)
instances += self._get_instances_from_children(x)
elif hasattr(child, 'guest'):
self.debugl("GUEST: %s" % child)
instances.append(child)
elif hasattr(child, 'vm'):
# resource pools
self.debugl("RESOURCEPOOL: %s" % child.vm)
if child.vm:
instances += self._get_instances_from_children(child.vm)
else:
self.debugl("ELSE ...")
try:
self.debugl(child.__dict__)
except Exception as e:
pass
self.debugl(child)
return instances

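For comparison, a minimal standalone sketch of the container-view retrieval that replaces the folder/resource-pool recursion removed above. It uses the same pyVmomi calls the new _get_instances() makes; list_vms and its connection parameters are illustrative placeholders, not part of the PR, and certificate verification is disabled only to mirror the script's optional sslContext kwarg.

    import atexit
    import ssl

    from pyVim.connect import SmartConnect, Disconnect
    from pyVmomi import vim


    def list_vms(host, user, pwd, max_instances=None):
        # Skip certificate validation, as the script does when its ssl options allow it
        context = ssl._create_unverified_context()
        si = SmartConnect(host=host, user=user, pwd=pwd, sslContext=context)
        atexit.register(Disconnect, si)

        content = si.RetrieveContent()
        # A single recursive container view rooted at rootFolder finds every VM,
        # instead of walking childEntity/vmFolder/resource pools by hand.
        container_view = content.viewManager.CreateContainerView(
            content.rootFolder, [vim.VirtualMachine], True)
        vms = list(container_view.view)
        if max_instances:
            vms = vms[:max_instances]
        return vms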

def instances_to_inventory(self, instances):

''' Convert a list of vm objects into a json compliant inventory '''
@@ -366,21 +345,28 @@ def instances_to_inventory(self, instances):
inventory['_meta']['hostvars'][thisid] = idata.copy()
inventory['_meta']['hostvars'][thisid]['ansible_uuid'] = thisid

# Make a map of the uuid to the name the user wants
# Make a map of the uuid to the alias the user wants
name_mapping = self.create_template_mapping(inventory,
self.config.get('vmware', 'alias_pattern'))

# Make a map of the uuid to the ssh hostname the user wants
host_mapping = self.create_template_mapping(inventory,
self.config.get('vmware', 'host_pattern'))


# Reset the inventory keys
for k,v in name_mapping.iteritems():

if not host_mapping or not k in host_mapping:
continue

# set ansible_host (2.x)
inventory['_meta']['hostvars'][k]['ansible_host'] = host_mapping[k]
# 1.9.x backwards compliance
inventory['_meta']['hostvars'][k]['ansible_ssh_host'] = host_mapping[k]
try:
inventory['_meta']['hostvars'][k]['ansible_host'] = host_mapping[k]
# 1.9.x backwards compliance
inventory['_meta']['hostvars'][k]['ansible_ssh_host'] = host_mapping[k]
except Exception as e:
continue

if k == v:
continue
@@ -393,24 +379,24 @@ def instances_to_inventory(self, instances):
inventory['all']['hosts'].remove(k)
inventory['_meta']['hostvars'].pop(k, None)

self.debugl('PREFILTER_HOSTS:')
self.debugl('# pre-filtered hosts:')
for i in inventory['all']['hosts']:
self.debugl(i)
self.debugl('# * %s' % i)
# Apply host filters
for hf in self.host_filters:
if not hf:
continue
self.debugl('FILTER: %s' % hf)
self.debugl('# filter: %s' % hf)
filter_map = self.create_template_mapping(inventory, hf, dtype='boolean')
for k,v in filter_map.iteritems():
if not v:
# delete this host
inventory['all']['hosts'].remove(k)
inventory['_meta']['hostvars'].pop(k, None)

self.debugl('POSTFILTER_HOSTS:')
self.debugl('# post-filter hosts:')
for i in inventory['all']['hosts']:
self.debugl(i)
self.debugl('# * %s' % i)

# Create groups
for gbp in self.groupby_patterns:
@@ -438,7 +424,6 @@ def create_template_mapping(self, inventory, pattern, dtype='string'):
newkey = newkey.strip()
except Exception as e:
self.debugl(e)
#import epdb; epdb.st()
if not newkey:
continue
elif dtype == 'integer':
@@ -461,100 +446,90 @@ def facts_from_vobj(self, vobj, level=0):
# pyvmomi objects are not yet serializable, but may be one day ...
# https://github.com/vmware/pyvmomi/issues/21

rdata = {}

# Do not serialize self
if hasattr(vobj, '__name__'):
if vobj.__name__ == 'VMWareInventory':
return rdata
# WARNING:
# Accessing an object attribute will trigger a SOAP call to the remote.
# Increasing the attributes collected or the depth of recursion greatly
# increases runtime duration and potentially memory+network utilization.

# Exit early if maxlevel is reached
if level > self.maxlevel:
return rdata
if level == 0:
try:
self.debugl("# get facts: %s" % vobj.name)
except Exception as e:
self.debugl(e)

# Objects usually have a dict property
if hasattr(vobj, '__dict__') and not level == 0:
rdata = {}

keys = sorted(vobj.__dict__.keys())
for k in keys:
v = vobj.__dict__[k]
# Skip private methods
if k.startswith('_'):
continue
methods = dir(vobj)
methods = [str(x) for x in methods if not x.startswith('_')]
methods = [x for x in methods if not x in self.bad_types]
methods = sorted(methods)

if k.lower() in self.skip_keys:
continue
for method in methods:
# Attempt to get the method, skip on fail
try:
methodToCall = getattr(vobj, method)
except Exception as e:
continue
# Skip callable methods
if callable(methodToCall):
continue
if self.lowerkeys:
method = method.lower()
rdata[method] = self._process_object_types(methodToCall)

if self.lowerkeys:
k = k.lower()
return rdata

rdata[k] = self._process_object_types(v, level=level)

else:
def _process_object_types(self, vobj, level=0):
''' Serialize an object '''
rdata = {}

if vobj is None:
rdata = None
elif issubclass(type(vobj), str) or isinstance(vobj, str):
rdata = vobj
elif issubclass(type(vobj), bool) or isinstance(vobj, bool):
rdata = vobj
elif issubclass(type(vobj), int) or isinstance(vobj, int):
rdata = vobj
elif issubclass(type(vobj), float) or isinstance(vobj, float):
rdata = vobj
elif issubclass(type(vobj), long) or isinstance(vobj, long):
rdata = vobj
elif issubclass(type(vobj), list) or issubclass(type(vobj), tuple):
rdata = []
try:
vobj = sorted(vobj)
except Exception as e:
pass
for vi in vobj:
if (level+1 <= self.maxlevel):
#vid = self.facts_from_vobj(vi, level=(level+1))
vid = self._process_object_types(vi, level=(level+1))
if vid:
rdata.append(vid)
elif issubclass(type(vobj), dict):
pass
elif issubclass(type(vobj), object):
methods = dir(vobj)
methods = [str(x) for x in methods if not x.startswith('_')]
methods = [x for x in methods if not x in self.bad_types]
methods = sorted(methods)

for method in methods:

if method in rdata:
continue

# Attempt to get the method, skip on fail
try:
methodToCall = getattr(vobj, method)
except Exception as e:
continue

# Skip callable methods
if callable(methodToCall):
continue

if self.lowerkeys:
method = method.lower()

rdata[method] = self._process_object_types(methodToCall, level=level)

return rdata


def _process_object_types(self, vobj, level=0):

rdata = {}

self.debugl("PROCESSING: %s" % vobj)

if type(vobj) in self.safe_types:
try:
rdata = vobj
except Exception as e:
self.debugl(e)

elif hasattr(vobj, 'append'):
rdata = []
for vi in sorted(vobj):
if type(vi) in self.safe_types:
rdata.append(vi)
else:
if (level+1 <= self.maxlevel):
vid = self.facts_from_vobj(vi, level=(level+1))
if vid:
rdata.append(vid)

elif hasattr(vobj, '__dict__'):
if (level+1 <= self.maxlevel):
md = None
md = self.facts_from_vobj(vobj, level=(level+1))
if md:
rdata = md
elif not vobj or type(vobj) in self.safe_types:
rdata = vobj
elif type(vobj) == datetime.datetime:
rdata = str(vobj)
if (level+1 <= self.maxlevel):
rdata[method] = self._process_object_types(methodToCall, level=(level+1))
else:
self.debugl("unknown datatype: %s" % type(vobj))
pass

if not rdata:
rdata = None