diff --git a/INSTALL b/INSTALL
index 6a14d1ec..ac4b13c5 100644
--- a/INSTALL
+++ b/INSTALL
@@ -1 +1,3 @@
-Currently building this package outside of the XenServer build environment is somewhat broken. The goal is to replace it with a simple "make"/"make install" process.
+Currently building this package outside of the XenServer build environment is
+somewhat broken. The goal is to replace it with a simple "make"/"make install"
+process.
diff --git a/Makefile b/Makefile
index d00d4a61..14225785 100755
--- a/Makefile
+++ b/Makefile
@@ -56,6 +56,7 @@ SM_LIBS += lcache
SM_LIBS += resetvdis
SM_LIBS += B_util
SM_LIBS += wwid_conf
+SM_LIBS += trim_util
UDEV_RULES = 40-multipath
MPATH_DAEMON = sm-multipath
@@ -158,6 +159,8 @@ install: precheck
install -m 755 drivers/tapdisk-pause $(SM_STAGING)$(PLUGIN_SCRIPT_DEST)
install -m 755 drivers/vss_control $(SM_STAGING)$(PLUGIN_SCRIPT_DEST)
install -m 755 drivers/intellicache-clean $(SM_STAGING)$(PLUGIN_SCRIPT_DEST)
+ install -m 755 drivers/enable-borehamwood $(SM_STAGING)$(SM_DEST)
+ install -m 755 drivers/trim $(SM_STAGING)$(PLUGIN_SCRIPT_DEST)
ln -sf $(PLUGIN_SCRIPT_DEST)vss_control $(SM_STAGING)$(SM_DEST)
install -m 755 drivers/iscsilib.py $(SM_STAGING)$(SM_DEST)
mkdir -p $(SM_STAGING)$(LIBEXEC)
diff --git a/README.md b/README.md
index e9dfede5..dcdc602f 100644
--- a/README.md
+++ b/README.md
@@ -3,6 +3,6 @@ Storage Manager for XenServer
[![Build Status](https://travis-ci.org/xapi-project/sm.svg?branch=master)](https://travis-ci.org/xapi-project/sm)
[![Coverage Status](https://coveralls.io/repos/xapi-project/sm/badge.png?branch=master)](https://coveralls.io/r/xapi-project/sm?branch=master)
-This repository contains the code which forms the Storage Management layer for XenSever. It consists of a series of "plug-ins" to xapi (the Xen management layer) which are written primarily in python.
-
-
+This repository contains the code which forms the Storage Management layer for
+XenServer. It consists of a series of "plug-ins" to xapi (the Xen management
+layer) which are written primarily in python.
diff --git a/drivers/EXTSR.py b/drivers/EXTSR.py
index 1df87829..466a7b34 100755
--- a/drivers/EXTSR.py
+++ b/drivers/EXTSR.py
@@ -70,6 +70,8 @@ def load(self, sr_uuid):
self.remotepath = os.path.join("/dev",self.vgname,sr_uuid)
self.attached = self._checkmount()
+ self._check_o_direct()
+
def delete(self, sr_uuid):
super(EXTSR, self).delete(sr_uuid)
diff --git a/drivers/FileSR.py b/drivers/FileSR.py
index fd8a6db3..a3b7458f 100755
--- a/drivers/FileSR.py
+++ b/drivers/FileSR.py
@@ -75,6 +75,14 @@ def handles(srtype):
return srtype == 'file'
handles = staticmethod(handles)
+ def _check_o_direct(self):
+ if self.sr_ref and self.session is not None:
+ other_config = self.session.xenapi.SR.get_other_config(self.sr_ref)
+ o_direct = other_config.get("o_direct")
+ self.o_direct = o_direct is not None and o_direct == "true"
+ else:
+ self.o_direct = True
+
def load(self, sr_uuid):
self.ops_exclusive = OPS_EXCLUSIVE
self.lock = Lock(vhdutil.LOCK_TYPE_SR, self.uuid)
@@ -85,6 +93,8 @@ def load(self, sr_uuid):
self.path = os.path.join(SR.MOUNT_BASE, sr_uuid)
self.attached = False
+ self._check_o_direct()
+
def create(self, sr_uuid, size):
""" Create the SR. The path must not already exist, or if it does,
it must be empty. (This accounts for the case where the user has
@@ -381,6 +391,8 @@ class FileVDI(VDI.VDI):
def load(self, vdi_uuid):
self.lock = self.sr.lock
+ self.sr.srcmd.params['o_direct'] = self.sr.o_direct
+
if self.sr.srcmd.cmd == "vdi_create":
self.vdi_type = vhdutil.VDI_TYPE_VHD
if self.sr.srcmd.params.has_key("vdi_sm_config") and \
diff --git a/drivers/HBASR.py b/drivers/HBASR.py
index 017ecd30..cb6bbcad 100755
--- a/drivers/HBASR.py
+++ b/drivers/HBASR.py
@@ -41,8 +41,6 @@
'configuration': CONFIGURATION
}
-HBA_CLI = "/opt/Citrix/StorageLink/bin/csl_hbascan"
-
class HBASR(SR.SR):
"""HBA storage repository"""
def handles(type):
@@ -82,7 +80,7 @@ def _init_hba_hostname(self):
nodewwnval = str(nodewwn[0].firstChild.nodeValue)
break
except:
- raise xs_errors.XenError('CSLGXMLParse', opterr='HBA Host WWN scanning failed')
+ raise xs_errors.XenError('XMLParse', opterr='HBA Host WWN scanning failed')
return nodewwnval
def _init_hbas(self):
@@ -104,7 +102,7 @@ def _init_hbas(self):
devpath = str(devnames[0].firstChild.nodeValue).split('/')[-1]
adt[devpath] = portval.split()[0]
except:
- raise xs_errors.XenError('CSLGXMLParse', \
+ raise xs_errors.XenError('XMLParse', \
opterr='HBA scanning failed')
return adt
@@ -166,7 +164,7 @@ def _probe_hba(self):
return dom.toxml()
except:
- raise xs_errors.XenError('CSLGXMLParse', \
+ raise xs_errors.XenError('XMLParse', \
opterr='HBA probe failed')
def attach(self, sr_uuid):
diff --git a/drivers/ISCSISR.py b/drivers/ISCSISR.py
index 9daf7b47..9198b544 100755
--- a/drivers/ISCSISR.py
+++ b/drivers/ISCSISR.py
@@ -626,13 +626,17 @@ def _getLUNbySMconfig(self, sm_config):
def print_LUNs(self):
self.LUNs = {}
if os.path.exists(self.path):
+ dom0_disks = util.dom0_disks()
for file in util.listdir(self.path):
if file.find("LUN") != -1 and file.find("_") == -1:
vdi_path = os.path.join(self.path,file)
- LUNid = file.replace("LUN","")
- obj = self.vdi(self.uuid)
- obj._query(vdi_path, LUNid)
- self.LUNs[obj.uuid] = obj
+ if os.path.realpath(vdi_path) in dom0_disks:
+ util.SMlog("Hide dom0 boot disk LUN")
+ else:
+ LUNid = file.replace("LUN","")
+ obj = self.vdi(self.uuid)
+ obj._query(vdi_path, LUNid)
+ self.LUNs[obj.uuid] = obj
def print_entries(self, map):
dom = xml.dom.minidom.Document()
diff --git a/drivers/ISOSR.py b/drivers/ISOSR.py
index 6081e267..450801c3 100755
--- a/drivers/ISOSR.py
+++ b/drivers/ISOSR.py
@@ -31,8 +31,9 @@
[ [ 'location', 'path to mount (required) (e.g. server:/path)' ],
[ 'options',
'extra options to pass to mount (deprecated) (e.g. \'-o ro\')' ],
- [ 'type','cifs or nfs'] ]
-
+ [ 'type','cifs or nfs'],
+ nfs.NFS_VERSION]
+
DRIVER_INFO = {
'name': 'ISO',
'description': 'Handles CD images stored as files in iso format',
@@ -209,6 +210,9 @@ def load(self, sr_uuid):
else:
self.path = self.mountpoint
+ # Handle optional dconf attributes
+ self.nfsversion = nfs.validate_nfsversion(self.dconf.get('nfsversion'))
+
# Some info we need:
self.sr_vditype = 'phy'
self.credentials = None
@@ -261,7 +265,8 @@ def attach(self, sr_uuid):
# to the process waiting.
if self.dconf.has_key('type') and self.dconf['type']!='cifs':
serv_path = location.split(':')
- nfs.soft_mount(self.mountpoint, serv_path[0], serv_path[1], 'tcp')
+ nfs.soft_mount(self.mountpoint, serv_path[0], serv_path[1],
+ 'tcp', nfsversion=self.nfsversion)
else:
util.pread(mountcmd, True)
except util.CommandException, inst:
diff --git a/drivers/LVHDSR.py b/drivers/LVHDSR.py
index b80c44d7..83de1c4c 100755
--- a/drivers/LVHDSR.py
+++ b/drivers/LVHDSR.py
@@ -52,7 +52,7 @@
DEV_MAPPER_ROOT = os.path.join('/dev/mapper', lvhdutil.VG_PREFIX)
geneology = {}
-CAPABILITIES = ["SR_PROBE","SR_UPDATE",
+CAPABILITIES = ["SR_PROBE","SR_UPDATE", "SR_TRIM",
"VDI_CREATE","VDI_DELETE","VDI_ATTACH", "VDI_DETACH",
"VDI_CLONE", "VDI_SNAPSHOT", "VDI_RESIZE", "ATOMIC_PAUSE",
"VDI_RESET_ON_BOOT/2", "VDI_UPDATE"]
@@ -542,11 +542,48 @@ def delete(self, uuid):
raise xs_errors.XenError('LVMMaster')
cleanup.gc_force(self.session, self.uuid)
+ success = True
+ for fileName in \
+ filter(lambda x: util.extractSRFromDevMapper(x) == self.uuid, \
+ glob.glob(DEV_MAPPER_ROOT + '*')):
+ if util.doesFileHaveOpenHandles(fileName):
+ util.SMlog("LVHDSR.delete: The dev mapper entry %s has open " \
+ "handles" % fileName)
+ success = False
+ continue
+
+ # Now attempt to remove the dev mapper entry
+ if not lvutil.removeDevMapperEntry(fileName):
+ success = False
+ continue
+
+ try:
+ lvname = os.path.basename(fileName.replace('-','/').\
+ replace('//', '-'))
+ os.unlink(os.path.join(self.path, lvname))
+ except Exception, e:
+ util.SMlog("LVHDSR.delete: failed to remove the symlink for " \
+ "file %s. Error: %s" % (fileName, str(e)))
+ success = False
+
+ if success:
+ try:
+ if util.pathexists(self.path):
+ os.rmdir(self.path)
+ except Exception, e:
+ util.SMlog("LVHDSR.delete: failed to remove the symlink " \
+ "directory %s. Error: %s" % (self.path, str(e)))
+ success = False
+
self._removeMetadataVolume()
self.lvmCache.refresh()
if len(lvhdutil.getLVInfo(self.lvmCache)) > 0:
raise xs_errors.XenError('SRNotEmpty')
+ if not success:
+ raise Exception("LVHDSR delete failed, please refer to the log " \
+ "for details.")
+
lvutil.removeVG(self.root, self.vgname)
self._cleanup()
@@ -1122,8 +1159,9 @@ def _undoAllInflateJournals(self):
lvhdutil.deflate(self.lvmCache, vdi.lvname, int(val))
if vdi.readonly:
self.lvmCache.setReadonly(vdi.lvname, True)
- lvhdutil.lvRefreshOnAllSlaves(self.session, self.uuid,
- self.vgname, vdi.lvname, uuid)
+ if "true" == self.session.xenapi.SR.get_shared(self.sr_ref):
+ lvhdutil.lvRefreshOnAllSlaves(self.session, self.uuid,
+ self.vgname, vdi.lvname, uuid)
self.journaler.remove(lvhdutil.JRN_INFLATE, uuid)
delattr(self,"vdiInfo")
delattr(self,"allVDIs")
@@ -1313,13 +1351,18 @@ def create(self, sr_uuid, vdi_uuid, size):
self.sm_config = self.sr.srcmd.params["vdi_sm_config"]
self.sr._ensureSpaceAvailable(lvSize)
- self.sr.lvmCache.create(self.lvname, lvSize)
- if self.vdi_type == vhdutil.VDI_TYPE_RAW:
- self.size = self.sr.lvmCache.getSize(self.lvname)
- else:
- vhdutil.create(self.path, long(size), False, lvhdutil.MSIZE_MB)
- self.size = vhdutil.getSizeVirt(self.path)
- self.sr.lvmCache.deactivateNoRefcount(self.lvname)
+ try:
+ self.sr.lvmCache.create(self.lvname, lvSize)
+ if self.vdi_type == vhdutil.VDI_TYPE_RAW:
+ self.size = self.sr.lvmCache.getSize(self.lvname)
+ else:
+ vhdutil.create(self.path, long(size), False, lvhdutil.MSIZE_MB)
+ self.size = vhdutil.getSizeVirt(self.path)
+ self.sr.lvmCache.deactivateNoRefcount(self.lvname)
+ except util.CommandException, e:
+ util.SMlog("Unable to create VDI");
+ self.sr.lvmCache.remove(self.lvname)
+ raise xs_errors.XenError('VDICreate', opterr='error %d' % e.code)
self.utilisation = lvSize
self.sm_config["vdi_type"] = self.vdi_type
@@ -2083,12 +2126,7 @@ def update(self, sr_uuid, vdi_uuid):
self.session.xenapi.VDI.get_metadata_of_pool(vdi_ref)
LVMMetadataHandler(self.sr.mdpath).updateMetadata(update_map)
-try:
- if __name__ == '__main__':
- SRCommand.run(LVHDSR, DRIVER_INFO)
- else:
- SR.registerSR(LVHDSR)
-except Exception:
- util.logException("LVHD")
- raise
-
+if __name__ == '__main__':
+ SRCommand.run(LVHDSR, DRIVER_INFO)
+else:
+ SR.registerSR(LVHDSR)
diff --git a/drivers/LVHDoHBASR.py b/drivers/LVHDoHBASR.py
index c4a1eaa7..1914b2cd 100755
--- a/drivers/LVHDoHBASR.py
+++ b/drivers/LVHDoHBASR.py
@@ -31,10 +31,10 @@
import glob
import mpp_luncheck
-CAPABILITIES = ["SR_PROBE", "SR_UPDATE", "SR_METADATA", "VDI_CREATE",
- "VDI_DELETE", "VDI_ATTACH", "VDI_DETACH",
- "VDI_GENERATE_CONFIG", "VDI_SNAPSHOT", "VDI_CLONE",
- "VDI_RESIZE", "ATOMIC_PAUSE", "VDI_RESET_ON_BOOT/2",
+CAPABILITIES = ["SR_PROBE", "SR_UPDATE", "SR_METADATA", "SR_TRIM",
+ "VDI_CREATE", "VDI_DELETE", "VDI_ATTACH", "VDI_DETACH",
+ "VDI_GENERATE_CONFIG", "VDI_SNAPSHOT", "VDI_CLONE",
+ "VDI_RESIZE", "ATOMIC_PAUSE", "VDI_RESET_ON_BOOT/2",
"VDI_UPDATE"]
CONFIGURATION = [ [ 'SCSIid', 'The scsi_id of the destination LUN' ], \
diff --git a/drivers/LVHDoISCSISR.py b/drivers/LVHDoISCSISR.py
index c09244c9..9ac1405d 100755
--- a/drivers/LVHDoISCSISR.py
+++ b/drivers/LVHDoISCSISR.py
@@ -29,8 +29,8 @@
import scsiutil
import xml.dom.minidom
-CAPABILITIES = ["SR_PROBE", "SR_UPDATE", "SR_METADATA", "VDI_CREATE",
- "VDI_DELETE", "VDI_ATTACH", "VDI_DETACH",
+CAPABILITIES = ["SR_PROBE", "SR_UPDATE", "SR_METADATA", "SR_TRIM",
+ "VDI_CREATE", "VDI_DELETE", "VDI_ATTACH", "VDI_DETACH",
"VDI_GENERATE_CONFIG", "VDI_CLONE", "VDI_SNAPSHOT",
"VDI_RESIZE", "ATOMIC_PAUSE", "VDI_RESET_ON_BOOT/2",
"VDI_UPDATE"]
@@ -320,12 +320,8 @@ def print_LUNs_XML(self):
dom = xml.dom.minidom.Document()
element = dom.createElement("iscsi-target")
dom.appendChild(element)
- # Omit the scsi-id used by iSL
- isl_scsiids = util.get_isl_scsiids(self.session)
for uuid in self.LUNs:
val = self.LUNs[uuid]
- if getattr(val,'SCSIid') in isl_scsiids:
- continue
entry = dom.createElement('LUN')
element.appendChild(entry)
diff --git a/drivers/NFSSR.py b/drivers/NFSSR.py
index 0feb3981..cbed9875 100755
--- a/drivers/NFSSR.py
+++ b/drivers/NFSSR.py
@@ -35,8 +35,9 @@
"VDI_GENERATE_CONFIG",
"VDI_RESET_ON_BOOT/2", "ATOMIC_PAUSE"]
-CONFIGURATION = [ [ 'server', 'hostname or IP address of NFS server (required)' ], \
- [ 'serverpath', 'path on remote server (required)' ] ]
+CONFIGURATION = [['server', 'hostname or IP address of NFS server (required)'],
+ ['serverpath', 'path on remote server (required)'],
+ nfs.NFS_VERSION]
DRIVER_INFO = {
@@ -83,13 +84,16 @@ def load(self, sr_uuid):
self.nosubdir = self.sm_config.get('nosubdir') == "true"
if self.dconf.has_key('serverpath'):
self.remotepath = os.path.join(self.dconf['serverpath'],
- not self.nosubdir and sr_uuid or "")
+ not self.nosubdir and sr_uuid or "").encode('utf-8')
self.path = os.path.join(SR.MOUNT_BASE, sr_uuid)
- # Test for the optional 'nfsoptions' dconf attribute
+ # Handle optional dconf attributes
self.transport = DEFAULT_TRANSPORT
if self.dconf.has_key('useUDP') and self.dconf['useUDP'] == 'true':
self.transport = "udp"
+ self.nfsversion = nfs.validate_nfsversion(self.dconf.get('nfsversion'))
+
+ self._check_o_direct()
def validate_remotepath(self, scan):
@@ -106,7 +110,7 @@ def validate_remotepath(self, scan):
def check_server(self):
try:
- nfs.check_server_tcp(self.remoteserver)
+ nfs.check_server_tcp(self.remoteserver, self.nfsversion)
except nfs.NfsException, exc:
raise xs_errors.XenError('NFSVersion',
opterr=exc.errstr)
@@ -114,8 +118,9 @@ def check_server(self):
def mount(self, mountpoint, remotepath, timeout = 0):
try:
- nfs.soft_mount(mountpoint, self.remoteserver, remotepath,
- self.transport, timeout)
+ nfs.soft_mount(
+ mountpoint, self.remoteserver, remotepath, self.transport,
+ timeout=timeout, nfsversion=self.nfsversion)
except nfs.NfsException, exc:
raise xs_errors.XenError('NFSMount', opterr=exc.errstr)
@@ -180,7 +185,7 @@ def create(self, sr_uuid, size):
# Set the target path temporarily to the base dir
# so that we can create the target SR directory
- self.remotepath = self.dconf['serverpath']
+ self.remotepath = self.dconf['serverpath'].encode('utf-8')
try:
self.mount_remotepath(sr_uuid)
except Exception, exn:
@@ -216,7 +221,7 @@ def delete(self, sr_uuid):
# Set the target path temporarily to the base dir
# so that we can remove the target SR directory
- self.remotepath = self.dconf['serverpath']
+ self.remotepath = self.dconf['serverpath'].encode('utf-8')
self.mount_remotepath(sr_uuid)
if not self.nosubdir:
newpath = os.path.join(self.path, sr_uuid)
diff --git a/drivers/RawHBASR.py b/drivers/RawHBASR.py
index 9d675b79..a596685c 100755
--- a/drivers/RawHBASR.py
+++ b/drivers/RawHBASR.py
@@ -334,6 +334,8 @@ def _query(self, path, id, uuid=None, scsi_id=None):
sm_config = util.default(self, "sm_config", lambda: {})
sm_config['LUNid'] = str(self.LUNid)
sm_config['SCSIid'] = self.SCSIid
+ # Make sure to use kernel blkback (not blktap3) for raw LUNs
+ sm_config['backend-kind'] = 'vbd'
self.sm_config = sm_config
def attach(self, sr_uuid, vdi_uuid):
diff --git a/drivers/SR.py b/drivers/SR.py
index 41c6ce77..699524e2 100755
--- a/drivers/SR.py
+++ b/drivers/SR.py
@@ -24,6 +24,7 @@
import xs_errors
import XenAPI, xmlrpclib, util
import copy, os
+import traceback
MOUNT_BASE = '/var/run/sr-mount'
DEFAULT_TAP = 'vhd'
@@ -111,6 +112,8 @@ def __init__(self, srcmd, sr_uuid):
if self.dconf.get("SRmaster") == "true":
os.environ['LVM_SYSTEM_DIR'] = MASTER_LVM_CONF
+ except TypeError:
+ raise Exception(traceback.format_exc())
except Exception, e:
raise e
raise xs_errors.XenError('SRBadXML')
diff --git a/drivers/SRCommand.py b/drivers/SRCommand.py
index ce5ba1a3..16d1e3b8 100755
--- a/drivers/SRCommand.py
+++ b/drivers/SRCommand.py
@@ -185,7 +185,8 @@ def _run(self, sr, target):
caching_params = dict((k, self.params.get(k)) for k in \
[blktap2.VDI.CONF_KEY_ALLOW_CACHING,
blktap2.VDI.CONF_KEY_MODE_ON_BOOT,
- blktap2.VDI.CONF_KEY_CACHE_SR])
+ blktap2.VDI.CONF_KEY_CACHE_SR,
+ blktap2.VDI.CONF_KEY_O_DIRECT])
if self.cmd == 'vdi_create':
# These are the fields owned by the backend, passed on the
@@ -347,8 +348,20 @@ def run(driver, driver_info):
print util.return_nil ()
else:
print ret
- sys.exit(0)
-
- except SR.SRException, inst:
- print inst.toxml()
- sys.exit(0)
+
+ except Exception, e:
+ try:
+ util.logException(driver_info['name'])
+ except KeyError:
+ util.SMlog('driver_info does not contain a \'name\' key.')
+ except:
+ pass
+
+ # If exception is of type SR.SRException,
+ # pass to xapi, else re-raise.
+ if isinstance(e, SR.SRException):
+ print e.toxml()
+ else:
+ raise
+
+ sys.exit(0)
diff --git a/drivers/XE_SR_ERRORCODES.xml b/drivers/XE_SR_ERRORCODES.xml
index 181d347b..ac637062 100755
--- a/drivers/XE_SR_ERRORCODES.xml
+++ b/drivers/XE_SR_ERRORCODES.xml
@@ -708,194 +708,16 @@
203
-
- CSLGConfigServerMissing
- The CSLG server name or IP address is missing
- 400
-
-
- CSLGConfigSSIDMissing
- The Storage System ID is missing
- 401
-
-
- CSLGConfigPoolIDMissing
- The Storage Pool ID is missing
- 402
-
-
-
- CSLGProtocolCheck
- The gssi operation to the CSLG server failed
- 410
-
-
- CSLGLoadSR
- The SR loading operation failed
- 411
-
-
- CSLGInvalidProtocol
- An invalid storage protocol was specified
- 412
-
-
- CSLGXMLParse
+ XMLParse
Unable to parse XML
413
-
- CSLGProbe
- Failed to probe SR
- 414
-
-
- CSLGSnapClone
- Snapshot/Clone failed
- 416
-
-
- CSLGAssign
- Storage assignment failed
- 417
-
-
- CSLGUnassign
- Storage un-assignment failed
- 418
-
-
- CSLGAllocate
- Storage allocation failed
- 419
-
-
- CSLGDeallocate
- Storage deallocation failed
- 420
-
-
- CSLGHBAQuery
- HBA Query failed
- 421
-
-
- CSLGISCSIInit
- IQN/ISCSI initialisation failed
- 422
-
-
- CSLGDeviceScan
- SCSI device scan failed
- 423
-
-
- CSLGServer
- Failed to connect to Target, Please verify hostname or IP address
- 424
-
-
- CSLGConfigSVIDMissing
- The Storage Node ID is missing
- 425
-
-
- CSLGIntroduce
- The VDI failed to be introduced to the database
- 426
-
-
- CSLGNotInstalled
- The CSLG software doesn't seem to be installed
- 427
-
-
- CSLGPoolCreate
- Failed to create sub-pools from parent pool
- 428
-
-
- CSLGOldXML
- Current XML definition is newer version
- 429
-
MultipathdCommsFailure
Failed to communicate with the multipath daemon
430
-
- CSL_Integrated
- Error in storage adapter communication
- 431
-
-
- CSLGConfigAdapterMissing
- The adapter id is missing or unknown
- 432
-
-
- CSLGConfigUsernameMissing
- The username is missing
- 433
-
-
- CSLGConfigPasswordMissing
- The password is missing
- 434
-
-
- CSLGInvalidSSID
- An invalid storage system ID was specified
- 435
-
-
- CSLGSysInfo
- Failed to collect storage system information
- 436
-
-
- CSLGPoolInfo
- Failed to collect storage pool information
- 437
-
-
- CSLGPoolDelete
- Failed to delete storage pool
- 438
-
-
- CSLGLunInfo
- Failed to collect storage volume information
- 439
-
-
- CSLGLunList
- Failed to list storage volume
- 440
-
-
- CSLGResizeLun
- Failed to resize storage volume
- 441
-
-
- CSLGTargetPorts
- Failed to list storage target ports
- 442
-
-
- CSLGPoolList
- Failed to list storage pool
- 443
-
-
- TapdiskFailed
- The tapdisk failed
- 444
-
-
TapdiskAlreadyRunning
The tapdisk is already running
@@ -939,4 +761,11 @@
OCFS filesystem creation error
452
+
+
+ TapdiskFailed
+ tapdisk experienced an error
+ 453
+
+
diff --git a/drivers/blktap2.py b/drivers/blktap2.py
index a0a25b34..ad895896 100755
--- a/drivers/blktap2.py
+++ b/drivers/blktap2.py
@@ -958,6 +958,7 @@ class VDI(object):
CONF_KEY_ALLOW_CACHING = "vdi_allow_caching"
CONF_KEY_MODE_ON_BOOT = "vdi_on_boot"
CONF_KEY_CACHE_SR = "local_cache_sr"
+ CONF_KEY_O_DIRECT = "o_direct"
LOCK_CACHE_SETUP = "cachesetup"
ATTACH_DETACH_RETRY_SECS = 120
@@ -998,6 +999,7 @@ def _tap_type(vdi_type):
'iso' : 'aio', # for ISO SR
'aio' : 'aio', # for LVHD
'file' : 'aio',
+ 'phy' : 'aio'
} [vdi_type]
def get_tap_type(self):
@@ -1037,7 +1039,10 @@ def tap_wanted(self):
raise self.UnexpectedVDIType(vdi_type,
self.target.vdi)
- if plug_type == 'tap': return True
+ if plug_type == 'tap':
+ return True
+ elif self.target.vdi.sr.handles('udev'):
+ return True
# 2. Otherwise, there may be more reasons
#
@@ -1555,6 +1560,12 @@ def _activate(self, sr_uuid, vdi_uuid, options):
# Maybe launch a tapdisk on the physical link
if self.tap_wanted():
vdi_type = self.target.get_vdi_type()
+ if util.read_caching_is_restricted(self._session):
+ options["o_direct"] = True
+ else:
+ options["o_direct"] = options.get(self.CONF_KEY_O_DIRECT)
+ if options["o_direct"] is None:
+ options["o_direct"] = True
dev_path = self._tap_activate(phy_path, vdi_type, sr_uuid,
options,
self._get_pool_config(sr_uuid).get("mem-pool-size"))
diff --git a/drivers/cleanup.py b/drivers/cleanup.py
index 3ab279f0..31ccb507 100755
--- a/drivers/cleanup.py
+++ b/drivers/cleanup.py
@@ -497,9 +497,22 @@ def pause(self, failfast=False):
self.uuid, failfast):
raise util.SMException("Failed to pause VDI %s" % self)
+ def _report_tapdisk_unpause_error(self):
+ try:
+ xapi = self.sr.xapi.session.xenapi
+ sr_ref = xapi.SR.get_by_uuid(self.sr.uuid)
+ msg_name = "failed to unpause tapdisk"
+ msg_body = "Failed to unpause tapdisk for VDI %s, " \
+ "VMs using this tapdisk have lost access " \
+ "to the corresponding disk(s)" % self.uuid
+ xapi.message.create(msg_name, "4", "SR", self.sr.uuid, msg_body)
+ except Exception, e:
+ util.SMlog("failed to generate message: %s" % e)
+
def unpause(self):
if not blktap2.VDI.tap_unpause(self.sr.xapi.session, self.sr.uuid,
self.uuid):
+ self._report_tapdisk_unpause_error()
raise util.SMException("Failed to unpause VDI %s" % self)
def refresh(self, ignoreNonexistent = True):
@@ -509,6 +522,7 @@ def refresh(self, ignoreNonexistent = True):
try:
if not blktap2.VDI.tap_refresh(self.sr.xapi.session,
self.sr.uuid, self.uuid):
+ self._report_tapdisk_unpause_error()
raise util.SMException("Failed to refresh %s" % self)
except XenAPI.Failure, e:
if util.isInvalidVDI(e) and ignoreNonexistent:
@@ -1639,18 +1653,18 @@ def lock(self):
if not self._srLock:
return
- self._locked += 1
- if self._locked > 1:
- return
+ if self._locked == 0 :
+ abortFlag = IPCFlag(self.uuid)
+ for i in range(SR.LOCK_RETRY_ATTEMPTS_LOCK):
+ if self._srLock.acquireNoblock():
+ self._locked += 1
+ return
+ if abortFlag.test(FLAG_TYPE_ABORT):
+ raise AbortException("Abort requested")
+ time.sleep(SR.LOCK_RETRY_INTERVAL)
+ raise util.SMException("Unable to acquire the SR lock")
- abortFlag = IPCFlag(self.uuid)
- for i in range(SR.LOCK_RETRY_ATTEMPTS_LOCK):
- if self._srLock.acquireNoblock():
- return
- if abortFlag.test(FLAG_TYPE_ABORT):
- raise AbortException("Abort requested")
- time.sleep(SR.LOCK_RETRY_INTERVAL)
- raise util.SMException("Unable to acquire the SR lock")
+ self._locked += 1
def unlock(self):
if not self._srLock:
diff --git a/drivers/devscan.py b/drivers/devscan.py
index 86d0fc0d..eb4aac8e 100644
--- a/drivers/devscan.py
+++ b/drivers/devscan.py
@@ -29,6 +29,8 @@
DRIVER_BLACKLIST = ['^(s|p|)ata_.*', '^ahci$', '^pdc_adma$', '^iscsi_tcp$']
+INVALID_DEVICE_NAME = ''
+
def getManufacturer(s):
(rc,stdout,stderr) = util.doexec(['/sbin/modinfo', '-d', s])
if stdout:
@@ -36,6 +38,11 @@ def getManufacturer(s):
else:
return "Unknown"
+def update_devs_dict(devs, dev, entry):
+ if dev != INVALID_DEVICE_NAME:
+ devs[dev] = entry
+
+
def adapters(filterstr="any"):
dict = {}
devs = {}
@@ -73,7 +80,7 @@ def adapters(filterstr="any"):
else:
dir = os.path.join(sysfs,lun,"device")
(dev, entry) = _extract_dev(dir, proc, id, lun)
- devs[dev] = entry
+ update_devs_dict(devs, dev, entry)
# for new qlogic sysfs layout (rport under device, then target)
for i in filter(match_rport,os.listdir(path)):
newpath = os.path.join(path, i)
@@ -85,7 +92,7 @@ def adapters(filterstr="any"):
continue
dir = os.path.join(sysfs,lun,"device")
(dev, entry) = _extract_dev(dir, proc, id, lun)
- devs[dev] = entry
+ update_devs_dict(devs, dev, entry)
# for new mptsas sysfs entries, check for phy* node
for i in filter(match_phy,os.listdir(path)):
@@ -97,7 +104,7 @@ def adapters(filterstr="any"):
continue
dir = os.path.join(sysfs,lun,"device")
(dev, entry) = _extract_dev(dir, proc, id, lun)
- devs[dev] = entry
+ update_devs_dict(devs, dev, entry)
if path.startswith(SYSFS_PATH2):
os.path.join(path,"device","block:*")
dev = _extract_dev_name(os.path.join(path, 'device'))
@@ -106,7 +113,7 @@ def adapters(filterstr="any"):
hbtl = os.path.basename(path)
(h,b,t,l) = hbtl.split(':')
entry = {'procname':proc, 'host':id, 'target':l}
- devs[dev] = entry
+ update_devs_dict(devs, dev, entry)
dict['devs'] = devs
dict['adt'] = adt
@@ -223,13 +230,20 @@ def _extract_dev_name(device_dir):
return dev.lstrip('block:')
elif kernel_version.startswith('3.'):
# directory for device name lives inside block directory e.g. block/sdx
- dev = glob.glob(os.path.join(device_dir, 'block/*'))[0]
- # prune path to extract the device name
- return os.path.basename(dev)
+
+ return _get_block_device_name_with_kernel_3x(device_dir)
else:
msg = 'Kernel version detected: %s' % kernel_version
raise xs_errors.XenError('UnsupportedKernel', msg)
+def _get_block_device_name_with_kernel_3x(device_dir):
+ devs = glob.glob(os.path.join(device_dir, 'block/*'))
+ if len(devs):
+ # prune path to extract the device name
+ return os.path.basename(devs[0])
+ else:
+ return INVALID_DEVICE_NAME
+
def _extract_dev(device_dir, procname, host, target):
"""Returns device name and creates dictionary entry for it"""
dev = _extract_dev_name(device_dir)
diff --git a/drivers/enable-borehamwood b/drivers/enable-borehamwood
new file mode 100755
index 00000000..c0014acd
--- /dev/null
+++ b/drivers/enable-borehamwood
@@ -0,0 +1,54 @@
+#!/usr/bin/python
+# Copyright (C) 2014 Citrix Ltd.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as published
+# by the Free Software Foundation; version 2.1 only.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+
+import os, sys
+import subprocess
+
+DIR="/opt/xensource/sm/"
+FILENAME="RawHBASR"
+
+def usage():
+ print "Usage: %s enable" % (sys.argv[0])
+
+if __name__ == "__main__":
+ if len(sys.argv) != 2:
+ usage()
+ sys.exit(1)
+
+ try:
+ os.chdir(DIR)
+ os.symlink(FILENAME + ".py", FILENAME)
+ except OSError, e:
+ print "Error: %s [errno=%s]" % (e.args)
+ sys.exit(1)
+
+ print "%s%s symlink created" % (DIR,FILENAME)
+
+ try:
+ ret = subprocess.call(["xe-toolstack-restart"])
+ except OSError, e:
+ print "Error: %s [errno=%s]" % (e.args)
+ ret = 1 # make the following block to cleanup
+
+ if ret != 0:
+ print "Failed toolstack restart, rolling back symlink creation"
+ try:
+ os.unlink(FILENAME)
+ except OSError, e:
+ print "Error: %s [errno=%s], fix manually" % (e.args)
+ sys.exit(1)
+ else:
+ print "toolstack restarted"
+
+ sys.exit(0)
+
diff --git a/drivers/lock.py b/drivers/lock.py
index 6d367b35..52c05b94 100755
--- a/drivers/lock.py
+++ b/drivers/lock.py
@@ -39,17 +39,36 @@ def _open(self):
# one directory per namespace
self.nspath = os.path.join(Lock.BASE_DIR, self.ns)
- self._mkdirs(self.nspath)
# the lockfile inside that namespace directory per namespace
self.lockpath = os.path.join(self.nspath, self.name)
- if not os.path.exists(self.lockpath):
- util.SMlog("lock: creating lock file %s" % self.lockpath)
- self.lockfile = file(self.lockpath, "w+")
+
+ number_of_enoent_retries = 10
+
+ while True:
+ self._mkdirs(self.nspath)
+
+ try:
+ self._open_lockfile()
+ except IOError, e:
+ # If another lock within the namespace has already
+ # cleaned up the namespace by removing the directory,
+ # _open_lockfile raises an ENOENT, in this case we retry.
+ if e.errno == errno.ENOENT:
+ if number_of_enoent_retries > 0:
+ number_of_enoent_retries -= 1
+ continue
+ raise
+ break
fd = self.lockfile.fileno()
self.lock = flock.WriteLock(fd)
+ def _open_lockfile(self):
+ """Provide a seam, so extreme situations could be tested"""
+ util.SMlog("lock: opening lock file %s" % self.lockpath)
+ self.lockfile = file(self.lockpath, "w+")
+
def _close(self):
"""Close the lock, which implies releasing the lock."""
if self.lockfile is not None:
@@ -70,6 +89,7 @@ def _mknamespace(ns):
_mknamespace = staticmethod(_mknamespace)
def __init__(self, name, ns=None):
+ self.lockfile = None
self.ns = Lock._mknamespace(ns)
diff --git a/drivers/lvutil.py b/drivers/lvutil.py
index 43ee6ec2..32fa02b2 100755
--- a/drivers/lvutil.py
+++ b/drivers/lvutil.py
@@ -400,18 +400,21 @@ def setActiveVG(path, active):
cmd = [CMD_VGCHANGE, "-a" + val, path]
text = util.pread2(cmd)
-def create(name, size, vgname, tag = None):
- size_mb = size / 1024 / 1024
- cmd = [CMD_LVCREATE, "-n", name, "-L", str(size_mb), vgname]
+def create(name, size, vgname, tag=None, size_in_percentage=None):
+ if size_in_percentage:
+ cmd = [CMD_LVCREATE, "-n", name, "-l", size_in_percentage, vgname]
+ else:
+ size_mb = size / 1024 / 1024
+ cmd = [CMD_LVCREATE, "-n", name, "-L", str(size_mb), vgname]
if tag:
cmd.extend(["--addtag", tag])
util.pread2(cmd)
-def remove(path):
+def remove(path, config_param=None):
# see deactivateNoRefcount()
for i in range(LVM_FAIL_RETRIES):
try:
- _remove(path)
+ _remove(path, config_param)
break
except util.CommandException, e:
if i >= LVM_FAIL_RETRIES - 1:
@@ -419,8 +422,11 @@ def remove(path):
util.SMlog("*** lvremove failed on attempt #%d" % i)
_lvmBugCleanup(path)
-def _remove(path):
+def _remove(path, config_param=None):
+ CONFIG_TAG = "--config"
cmd = [CMD_LVREMOVE, "-f", path]
+ if config_param:
+ cmd.extend([CONFIG_TAG, "devices{" + config_param + "}"])
ret = util.pread2(cmd)
def rename(path, newName):
@@ -434,6 +440,15 @@ def setReadonly(path, readonly):
cmd = [CMD_LVCHANGE, path, "-p", val]
ret = util.pread(cmd)
+def exists(path):
+ cmd = [CMD_LVS, "--noheadings", path]
+ try:
+ ret = util.pread2(cmd)
+ return True
+ except util.CommandException, e:
+ util.SMlog("Ignoring exception for LV check: %s !" % path)
+ return False
+
#def getSize(path):
# return _getLVsize(path)
# #cmd = [CMD_LVS, "--noheadings", "--units", "B", path]
diff --git a/drivers/mpathcount.py b/drivers/mpathcount.py
index 966c4e78..92e8dbd2 100755
--- a/drivers/mpathcount.py
+++ b/drivers/mpathcount.py
@@ -24,7 +24,7 @@
import mpp_mpathutil
import glob
-supported = ['iscsi','lvmoiscsi','rawhba','lvmohba', 'ocfsohba', 'ocfsoiscsi', 'netapp','cslg']
+supported = ['iscsi','lvmoiscsi','rawhba','lvmohba', 'ocfsohba', 'ocfsoiscsi', 'netapp']
LOCK_TYPE_HOST = "host"
LOCK_NS1 = "mpathcount1"
diff --git a/drivers/mpp_luncheck.py b/drivers/mpp_luncheck.py
index 9771ee44..5f8cd541 100755
--- a/drivers/mpp_luncheck.py
+++ b/drivers/mpp_luncheck.py
@@ -17,13 +17,18 @@
import sys, os
import glob
+import util
DEVBYMPPPATH = "/dev/disk/by-mpp"
def is_RdacLun(scsi_id):
path = os.path.join(DEVBYMPPPATH,"%s" % scsi_id)
mpppath = glob.glob(path)
if len(mpppath):
- return True
+ # Support for RDAC LUNs discontinued
+ # Always return False
+ util.SMlog("Found unsupported RDAC LUN at %s" % mpppath,
+ priority=util.LOG_WARNING)
+ return False
else:
return False
diff --git a/drivers/nfs.py b/drivers/nfs.py
index ef4a5e8c..301506a9 100644
--- a/drivers/nfs.py
+++ b/drivers/nfs.py
@@ -43,25 +43,40 @@
RPCINFO_BIN = "/usr/sbin/rpcinfo"
SHOWMOUNT_BIN = "/usr/sbin/showmount"
+DEFAULT_NFSVERSION = '3'
+
+NFS_VERSION = [
+ 'nfsversion', 'for type=nfs, NFS protocol version - 3, 4 (optional)']
+
class NfsException(Exception):
def __init__(self, errstr):
self.errstr = errstr
-def check_server_tcp(server):
+def check_server_tcp(server, nfsversion=DEFAULT_NFSVERSION):
"""Make sure that NFS over TCP/IP V3 is supported on the server.
Returns True if everything is OK, False otherwise."""
try:
util.ioretry(lambda: util.pread([RPCINFO_BIN,"-t",
- "%s" % server, "nfs","3"]),
+ "%s" % server, "nfs", nfsversion]),
errlist=[errno.EPERM], maxretry=2, nofail=True)
except util.CommandException, inst:
raise NfsException("rpcinfo failed or timed out: return code %d" %
inst.code)
-def soft_mount(mountpoint, remoteserver, remotepath, transport, timeout = 0):
+def validate_nfsversion(nfsversion):
+ if not nfsversion:
+ nfsversion = DEFAULT_NFSVERSION
+ else:
+ if nfsversion not in ['3', '4']:
+ raise NfsException("Invalid nfsversion.")
+ return nfsversion
+
+
+def soft_mount(mountpoint, remoteserver, remotepath, transport, timeout=0,
+ nfsversion=DEFAULT_NFSVERSION):
"""Mount the remote NFS export at 'mountpoint'.
The 'timeout' param here is in seconds"""
try:
@@ -71,17 +86,22 @@ def soft_mount(mountpoint, remoteserver, remotepath, transport, timeout = 0):
raise NfsException("Failed to make directory: code is %d" %
inst.code)
+ mountcommand = 'mount.nfs'
+ if nfsversion == '4':
+ mountcommand = 'mount.nfs4'
+
if timeout < 1:
timeout = SOFTMOUNT_TIMEOUT
- options = "soft,timeo=%d,retrans=%d,%s" % (timeout * 10,
+ options = "soft,timeo=%d,retrans=%d,proto=%s" % (
+ timeout * 10,
SOFTMOUNT_RETRANS,
transport)
- options += ',actimeo=0'
+ options += ',acdirmin=0,acdirmax=0'
try:
util.ioretry(lambda:
- util.pread(["mount.nfs", "%s:%s"
+ util.pread([mountcommand, "%s:%s"
% (remoteserver, remotepath),
mountpoint, "-o", options]),
errlist=[errno.EPIPE, errno.EIO],
diff --git a/drivers/refcounter.py b/drivers/refcounter.py
index 1c732176..0e4c8952 100644
--- a/drivers/refcounter.py
+++ b/drivers/refcounter.py
@@ -150,9 +150,11 @@ def _set(ns, obj, count, binaryCount):
if count == 0 and binaryCount == 0:
RefCounter._removeObject(ns, obj)
else:
- RefCounter._createNamespace(ns)
objFile = os.path.join(RefCounter.BASE_DIR, ns, obj)
- RefCounter._writeCount(objFile, count, binaryCount)
+
+ while not RefCounter._writeCount(objFile, count, binaryCount):
+ RefCounter._createNamespace(ns)
+
_set = staticmethod(_set)
def _getSafeNames(obj, ns):
@@ -185,13 +187,16 @@ def _removeObject(ns, obj):
os.unlink(objFile)
except OSError:
raise RefCounterException("failed to remove '%s'" % objFile)
-
+
try:
os.rmdir(nsDir)
except OSError, e:
- # Having a listdir wont help since there could be other vdi related
- # operations that could create a file in between the python calls.
- if e.errno != errno.ENOTEMPTY:
+ namespaceAlreadyCleanedUp = e.errno == errno.ENOENT
+ newObjectAddedToNamespace = e.errno == errno.ENOTEMPTY
+
+ if namespaceAlreadyCleanedUp or newObjectAddedToNamespace:
+ pass
+ else:
raise RefCounterException("failed to remove '%s'" % nsDir)
_removeObject = staticmethod(_removeObject)
@@ -230,7 +235,11 @@ def _writeCount(fn, count, binaryCount):
f = open(fn, 'w')
f.write("%d %d\n" % (count, binaryCount))
f.close()
+ return True
except IOError, e:
+ fileNotFound = e.errno == errno.ENOENT
+ if fileNotFound:
+ return False
raise RefCounterException("failed to write '(%d %d)' to '%s': %s" \
% (count, binaryCount, fn, e))
_writeCount = staticmethod(_writeCount)
diff --git a/drivers/trim b/drivers/trim
new file mode 100755
index 00000000..1f4726f7
--- /dev/null
+++ b/drivers/trim
@@ -0,0 +1,27 @@
+#!/usr/bin/python
+#
+# Copyright (C) Citrix Systems Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as published
+# by the Free Software Foundation; version 2.1 only.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+#
+# A plugin for enabling trim on LVM based SRs to free up storage space
+# in Storage arrays.
+
+import sys
+import XenAPIPlugin
+
+if __name__ == "__main__":
+ sys.path.append("/opt/xensource/sm/")
+ import trim_util
+ XenAPIPlugin.dispatch({"do_trim": trim_util.do_trim})
diff --git a/drivers/trim_util.py b/drivers/trim_util.py
new file mode 100755
index 00000000..1726b5c7
--- /dev/null
+++ b/drivers/trim_util.py
@@ -0,0 +1,136 @@
+#!/usr/bin/python
+#
+# Copyright (C) Citrix Systems Inc.
+#
+# This program is free software; you can redistribute it and/or modify
+# it under the terms of the GNU Lesser General Public License as published
+# by the Free Software Foundation; version 2.1 only.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public License
+# along with this program; if not, write to the Free Software Foundation, Inc.,
+# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+#
+# A plugin for enabling trim on LVM based SRs to free up storage space
+# in Storage arrays.
+
+import xml
+import sys
+import os
+import time
+import util
+import lock
+import lvhdutil
+import vhdutil
+import lvutil
+import xs_errors
+import xmlrpclib
+
+TRIM_LV_TAG = "_trim_lv"
+TRIM_CAP = "SR_TRIM"
+LOCK_RETRY_ATTEMPTS = 3
+LOCK_RETRY_INTERVAL = 1
+ERROR_CODE_KEY = "errcode"
+ERROR_MSG_KEY = "errmsg"
+
+TRIM_LAST_TRIGGERED_KEY = "trim_last_triggered"
+
+def _vg_by_sr_uuid(sr_uuid):
+ return lvhdutil.VG_PREFIX + sr_uuid
+
+def _lvpath_by_vg_lv_name(vg_name, lv_name):
+ return os.path.join(lvhdutil.VG_LOCATION, vg_name, lv_name)
+
+def to_xml(d):
+
+ dom = xml.dom.minidom.Document()
+ trim_response = dom.createElement("trim_response")
+ dom.appendChild(trim_response)
+
+ for key, value in sorted(d.items()):
+ key_value_element = dom.createElement("key_value_pair")
+ trim_response.appendChild(key_value_element)
+
+ key_element = dom.createElement("key")
+ key_text_node = dom.createTextNode(key)
+ key_element.appendChild(key_text_node)
+ key_value_element.appendChild(key_element)
+
+ value_element = dom.createElement("value")
+ value_text_mode = dom.createTextNode(value)
+ value_element.appendChild(value_text_mode)
+ key_value_element.appendChild(value_element)
+
+
+ return dom.toxml()
+
+# Note: This function is expected to be called from a context where
+# the SR is locked by the thread calling the function; therefore removing
+# any risk of a race condition updating the LAST_TRIGGERED value.
+def _log_last_triggered(session, sr_uuid):
+ try:
+ sr_ref = session.xenapi.SR.get_by_uuid(sr_uuid)
+ other_config = session.xenapi.SR.get_other_config(sr_ref)
+ if other_config.has_key(TRIM_LAST_TRIGGERED_KEY):
+ session.xenapi.SR.remove_from_other_config(sr_ref, TRIM_LAST_TRIGGERED_KEY)
+ session.xenapi.SR.add_to_other_config(sr_ref, TRIM_LAST_TRIGGERED_KEY, str(time.time()))
+ except:
+ util.logException("Unable to set other-config:%s" % TRIM_LAST_TRIGGERED_KEY)
+
+def do_trim(session, args):
+ """Attempt to trim the given LVHDSR"""
+ util.SMlog("do_trim: %s" % args)
+ sr_uuid = args["sr_uuid"]
+
+ if TRIM_CAP not in util.sr_get_capability(sr_uuid):
+ util.SMlog("Trim command ignored on unsupported SR %s" % sr_uuid)
+ err_msg = {ERROR_CODE_KEY: 'UnsupportedSRForTrim',
+ ERROR_MSG_KEY: 'Trim on [%s] not supported' % sr_uuid}
+ return to_xml(err_msg)
+
+ # Lock SR, get vg empty space details
+ sr_lock = lock.Lock(vhdutil.LOCK_TYPE_SR, sr_uuid)
+ got_lock = False
+ for i in range(LOCK_RETRY_ATTEMPTS):
+ got_lock = sr_lock.acquireNoblock()
+ if got_lock:
+ break
+ time.sleep(LOCK_RETRY_INTERVAL)
+
+ if got_lock:
+ try:
+ vg_name = _vg_by_sr_uuid(sr_uuid)
+ lv_name = sr_uuid + TRIM_LV_TAG
+ lv_path = _lvpath_by_vg_lv_name(vg_name, lv_name)
+
+        # Clean trim LV in case the previous trim attempt failed
+ if lvutil.exists(lv_path):
+ lvutil.remove(lv_path)
+
+ # Perform a lvcreate and lvremove to trigger trim on the array
+ lvutil.create(lv_name, 0, vg_name, size_in_percentage="100%F")
+ lvutil.remove(lv_path, config_param="issue_discards=1")
+ util.SMlog("Trim on SR: %s complete. " % sr_uuid)
+ result = str(True)
+ except:
+ err_msg = {
+ ERROR_CODE_KEY: 'UnknownTrimException',
+ ERROR_MSG_KEY: 'Unknown Exception: trim failed on SR [%s]'
+ % sr_uuid
+ }
+ result = to_xml(err_msg)
+
+ _log_last_triggered(session, sr_uuid)
+
+ sr_lock.release()
+ return result
+ else:
+ util.SMlog("Could not complete Trim on %s, Lock unavailable !" \
+ % sr_uuid)
+ err_msg = {ERROR_CODE_KEY: 'SRUnavailable',
+ ERROR_MSG_KEY: 'Unable to get SR lock [%s]' % sr_uuid}
+ return to_xml(err_msg)
diff --git a/drivers/udevSR.py b/drivers/udevSR.py
index ae22db53..ff9a0427 100755
--- a/drivers/udevSR.py
+++ b/drivers/udevSR.py
@@ -57,8 +57,20 @@ def type(self, sr_uuid):
def vdi(self, uuid):
util.SMlog("params = %s" % (self.srcmd.params.keys()))
- return udevVDI(self, self.srcmd.params['vdi_location'])
-
+
+ if 'vdi_location' in self.srcmd.params:
+ vdi_location = self.srcmd.params['vdi_location']
+ else:
+ vdi_location = self.get_vdi_location(uuid)
+
+ return udevVDI(self, vdi_location)
+
+ def get_vdi_location(self, uuid):
+ import XenAPI
+ vdi = self.session.xenapi.VDI
+ vdi_ref = vdi.get_by_uuid(uuid)
+ return vdi.get_location(vdi_ref)
+
def load(self, sr_uuid):
# First of all, check we've got the correct keys in dconf
if not self.dconf.has_key('location'):
diff --git a/drivers/updatempppathd.py b/drivers/updatempppathd.py
index 3eaed867..77f670ac 100755
--- a/drivers/updatempppathd.py
+++ b/drivers/updatempppathd.py
@@ -26,6 +26,7 @@
import mpath_dmp
import mpp_mpathutil
import gc
+import mpp_luncheck
DEBUG_OUT = False
DAEMONISE = True
@@ -67,8 +68,7 @@ def UpdatePaths():
for filename in fileList:
# extract the SCSI ID from the file name.
scsiid = filename.rsplit("/")[len(filename.rsplit("/")) - 1].split('-')[0]
- links=glob.glob('/dev/disk/by-mpp/%s' % scsiid)
- if not (len(links)):
+ if not (mpp_luncheck.is_RdacLun(scsiid)):
continue
# Get the cached value for the total and active paths for this SCSI ID
diff --git a/drivers/util.py b/drivers/util.py
index 485b7379..caeca96c 100755
--- a/drivers/util.py
+++ b/drivers/util.py
@@ -287,9 +287,9 @@ def ioretry(f, errlist=[errno.EIO], maxretry=IORETRY_MAX, period=IORETRY_PERIOD,
try:
return f()
except OSError, inst:
- errno = int(inst.errno)
- if not errno in errlist:
- raise CommandException(errno, str(f), "OSError")
+ err = int(inst.errno)
+ if not err in errlist:
+ raise CommandException(err, str(f), "OSError")
except CommandException, inst:
if not int(inst.code) in errlist:
raise
@@ -314,6 +314,21 @@ def ioretry_stat(f, maxretry=IORETRY_MAX):
retries += 1
raise CommandException(errno.EIO, str(f))
+def sr_get_capability(sr_uuid):
+ result = []
+ session = get_localAPI_session()
+ sr_ref = session.xenapi.SR.get_by_uuid(sr_uuid)
+ sm_type = session.xenapi.SR.get_record(sr_ref)['type']
+ sm_rec = session.xenapi.SM.get_all_records_where( \
+ "field \"type\" = \"%s\"" % sm_type)
+
+    # SM expects at least one entry of any SR type
+ if len(sm_rec) > 0:
+ result = sm_rec.values()[0]['capabilities']
+
+ session.xenapi.logout()
+ return result
+
def sr_get_driver_info(driver_info):
results = {}
# first add in the vanilla stuff
@@ -1673,3 +1688,18 @@ def open_atomic(path, mode=None):
def isInvalidVDI(exception):
return exception.details[0] == "HANDLE_INVALID" or \
exception.details[0] == "UUID_INVALID"
+
+def get_pool_restrictions(session):
+ """Returns pool restrictions as a map, @session must be already
+ established."""
+ return session.xenapi.pool.get_all_records().values()[0]['restrictions']
+
+def read_caching_is_restricted(session):
+ """Tells whether read caching is restricted."""
+ if session is None or (isinstance(session, str) and session == ""):
+ return True
+ restrictions = get_pool_restrictions(session)
+ if 'restrict_read_caching' in restrictions and \
+ restrictions['restrict_read_caching'] == "true":
+ return True
+ return False
diff --git a/mk/Makefile b/mk/Makefile
index 5ca91f61..f90e8529 100644
--- a/mk/Makefile
+++ b/mk/Makefile
@@ -22,12 +22,11 @@ ERRORCODES_XML := XE_SR_ERRORCODES.xml
SM_TESTS := $(MY_OUTPUT_DIR)/storage-manager-tests.tar
SM_UNIT_TESTS := $(MY_OUTPUT_DIR)/smunittests.tar
REPO := $(call git_loc,sm)
-SUPPORT_PACK := $(MY_OUTPUT_DIR)/borehamwood-supp-pack.iso
-BUILD := $(BUILD_NUMBER)
+
.PHONY: build
build: sources $(SM_STAMP) $(SM_TESTS) $(SM_UNIT_TESTS) \
- $(MY_OUTPUT_DIR)/$(ERRORCODES_XML) $(SUPPORT_PACK)
+ $(MY_OUTPUT_DIR)/$(ERRORCODES_XML)
$(MY_SOURCES)/MANIFEST: $(RPM_SRPMSDIR)/$(SM_SRPM) $(MY_SOURCES_DIRSTAMP) $(RPM_BUILD_COOKIE)
rm -f $@
@@ -62,8 +61,6 @@ $(SM_STAMP): $(RPM_SRPMSDIR)/$(SM_SRPM)
cp $(RPM_RPMSDIR)/$(DOMAIN0_ARCH_OPTIMIZED)/sm-*.rpm $(MY_MAIN_PACKAGES)
# Deliberately omit the debuginfo RPM (sm-debuginfo-...)
rm -f $(MY_MAIN_PACKAGES)/sm-debuginfo-*.rpm
- # sm-rawhba is packaged as supp-pack. Skip it.
- rm -f $(MY_MAIN_PACKAGES)/sm-rawhba-*.rpm
touch $@
$(SM_TESTS):
@@ -77,6 +74,3 @@ $(SM_UNIT_TESTS):
$(MY_OUTPUT_DIR)/$(ERRORCODES_XML):
rm -f $@
cp $(REPO)/drivers/$(ERRORCODES_XML) $@
-
-$(SUPPORT_PACK): $(SM_STAMP)
- python setup.py --out $(dir $@) --pdn $(PRODUCT_BRAND) --pdv $(PRODUCT_VERSION) --bld $(BUILD) --spn "borehamwood-supp-pack" --spd "XenServer RawHBA implementation" --rxv "6.1.0" $(RPM_RPMSDIR)/$(DOMAIN0_ARCH_OPTIMIZED)/sm-rawhba-*.$(DOMAIN0_ARCH_OPTIMIZED).rpm
diff --git a/mk/setup.py b/mk/setup.py
deleted file mode 100644
index 80b2a0bd..00000000
--- a/mk/setup.py
+++ /dev/null
@@ -1,20 +0,0 @@
-from xcp.supplementalpack import *
-from optparse import OptionParser
-
-parser = OptionParser()
-parser.add_option('--pdn', dest="product_name")
-parser.add_option('--pdv', dest="product_version")
-parser.add_option('--bld', dest="build")
-parser.add_option('--out', dest="outdir")
-parser.add_option('--spn', dest="sp_name")
-parser.add_option('--spd', dest="sp_description")
-parser.add_option('--rxv', dest="req_xs_version")
-(options, args) = parser.parse_args()
-
-xs = Requires(originator='xs', name='main', test='ge',
- product=options.product_name, version=options.req_xs_version)
-
-setup(originator='xs', name=options.sp_name, product=options.product_name,
- version=options.product_version, build=options.build, vendor='Citrix Systems, Inc.',
- description=options.sp_description, packages=args, requires=[xs],
- outdir=options.outdir, output=['iso'])
diff --git a/mk/sm.spec.in b/mk/sm.spec.in
index 811acfa8..e0bfee8a 100755
--- a/mk/sm.spec.in
+++ b/mk/sm.spec.in
@@ -71,6 +71,7 @@ cp -f /etc/lvm/lvm.conf.orig /etc/lvm/lvm.conf || exit $?
/etc/xapi.d/plugins/testing-hooks
/etc/xapi.d/plugins/vss_control
/etc/xapi.d/plugins/intellicache-clean
+/etc/xapi.d/plugins/trim
/etc/xensource/master.d/02-vhdcleanup
/opt/xensource/bin/blktap2
/opt/xensource/bin/tapdisk-cache-stats
@@ -255,6 +256,9 @@ cp -f /etc/lvm/lvm.conf.orig /etc/lvm/lvm.conf || exit $?
/opt/xensource/sm/vhdutil.py
/opt/xensource/sm/vhdutil.pyc
/opt/xensource/sm/vhdutil.pyo
+/opt/xensource/sm/trim_util.py
+/opt/xensource/sm/trim_util.pyc
+/opt/xensource/sm/trim_util.pyo
/opt/xensource/sm/vss_control
/opt/xensource/sm/xs_errors.py
/opt/xensource/sm/xs_errors.pyc
@@ -280,10 +284,11 @@ This package adds a new rawhba SR type. This SR type allows utilization of
Fiber Channel raw LUNs as separate VDIs (LUN per VDI)
%files rawhba
-/opt/xensource/sm/RawHBASR
/opt/xensource/sm/RawHBASR.py
+%exclude /opt/xensource/sm/RawHBASR
/opt/xensource/sm/RawHBASR.pyc
/opt/xensource/sm/RawHBASR.pyo
/opt/xensource/sm/B_util.py
/opt/xensource/sm/B_util.pyc
/opt/xensource/sm/B_util.pyo
+/opt/xensource/sm/enable-borehamwood
diff --git a/requirements.txt b/requirements.txt
index e8b88b19..b951f555 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,4 +1,4 @@
mock
xenapi
coveralls
-pylint
+pylint==1.2.0
diff --git a/tests/lvmlib.py b/tests/lvmlib.py
new file mode 100644
index 00000000..f09a0397
--- /dev/null
+++ b/tests/lvmlib.py
@@ -0,0 +1,107 @@
+import optparse
+
+
+class LogicalVolume(object):
+ def __init__(self, vg, name, size_mb, tag, active, zeroed):
+ self.name = name
+ self.size_mb = size_mb
+ self.volume_group = vg
+ self.tag = tag
+ self.active = active
+ self.zeroed = zeroed
+
+
+class VolumeGroup(object):
+ def __init__(self, name):
+ self.name = name
+ self.volumes = []
+
+ def add_volume(self, name, size_mb, tag=None, active=True, zeroed=True):
+ self.volumes.append(
+ LogicalVolume(self, name, size_mb, tag, active, zeroed))
+
+ def delete_volume(self, volume):
+ self.volumes = [vol for vol in self.volumes if vol != volume]
+
+
+class LVSubsystem(object):
+ def __init__(self, logger, executable_injector):
+ self.logger = logger
+ self.lv_calls = []
+ self._volume_groups = []
+ executable_injector('/usr/sbin/lvcreate', self.fake_lvcreate)
+ executable_injector('/usr/sbin/lvremove', self.fake_lvremove)
+ executable_injector('/sbin/dmsetup', self.fake_dmsetup)
+
+ def add_volume_group(self, name):
+ self._volume_groups.append(VolumeGroup(name))
+
+ def get_logical_volumes_with_name(self, name):
+ result = []
+ for vg in self._volume_groups:
+ for lv in vg.volumes:
+ if name == lv.name:
+ result.append(lv)
+ return result
+
+ def get_volume_group(self, vgname):
+ for vg in self._volume_groups:
+ if vg.name == vgname:
+ return vg
+
+ def fake_lvcreate(self, args, stdin):
+ self.logger('lvcreate', repr(args), stdin)
+ parser = optparse.OptionParser()
+ parser.add_option("-n", dest='name')
+ parser.add_option("-L", dest='size_mb')
+ parser.add_option("--addtag", dest='tag')
+ parser.add_option("--inactive", dest='inactive', action='store_true')
+ parser.add_option("--zero", dest='zero', default='y')
+ try:
+ options, args = parser.parse_args(args[1:])
+ except SystemExit, e:
+ self.logger("LVCREATE OPTION PARSING FAILED")
+ return (1, '', str(e))
+
+ vgname, = args
+
+ if self.get_volume_group(vgname) is None:
+ self.logger("volume group does not exist:", vgname)
+ return (1, '', ' Volume group "%s" not found\n' % vgname)
+
+ active = not options.inactive
+ assert options.zero in ['y', 'n']
+ zeroed = options.zero == 'y'
+
+ self.get_volume_group(vgname).add_volume(
+ options.name,
+ int(options.size_mb),
+ options.tag,
+ active,
+ zeroed)
+
+ return 0, '', ''
+
+ def fake_lvremove(self, args, stdin):
+ self.logger('lvremove', repr(args), stdin)
+ parser = optparse.OptionParser()
+ parser.add_option(
+ "-f", "--force", dest='force', action='store_true', default=False)
+ self.logger(args, stdin)
+ try:
+ options, args = parser.parse_args(args[1:])
+ except SystemExit, e:
+ self.logger("LVREMOVE OPTION PARSING FAILED")
+ return (1, '', str(e))
+
+ lvpath, = args
+
+ for vg in self._volume_groups:
+ for lv in vg.volumes:
+ if '/'.join([vg.name, lv.name]) == lvpath:
+ vg.delete_volume(lv)
+
+ return 0, '', ''
+
+ def fake_dmsetup(self, args, stdin):
+ return 0, '', ''
diff --git a/tests/test_ISCSISR.py b/tests/test_ISCSISR.py
index 88d299ce..13730534 100644
--- a/tests/test_ISCSISR.py
+++ b/tests/test_ISCSISR.py
@@ -59,6 +59,33 @@ def __init__(self, extra_dconf=None):
self.dconf.update(extra_dconf or {})
+class NonInitingMultiLUNISCSISR(ISCSISR.ISCSISR):
+ def __init__(self, node1, node2):
+ self.mpath = "false"
+ self.dconf = {
+ 'target': node1['ip'],
+ 'localIQN': 'localIQN',
+ 'targetIQN': node1['iqn']
+ }
+
+ self.dconf.update({})
+ self.target = node1['ip']
+ self.port = node1['port']
+ self.targetIQN = node1['iqn']
+ self.attached = True
+ self.multihomed = True
+ extra_adapter = "%s:%d" % (node2['ip'], node2['port'])
+ self.adapter = {
+ extra_adapter: None
+ }
+
+ def _synchroniseAddrList(self, *args, **kwargs):
+ pass
+
+ def _init_adapters(self):
+ pass
+
+
class TestVdiTypeSetting(TestBase):
@mock.patch('ISCSISR.iscsilib.discovery')
@@ -82,3 +109,68 @@ def test_vdi_type_modified_by_force_tapdisk(self):
self.load_iscsi_sr(iscsi_sr=iscsi_sr)
self.assertEquals('aio', iscsi_sr.sr_vditype)
+
+
+class TestMultiLUNISCSISR(unittest.TestCase):
+
+ def setUp(self):
+ self.node1 = {
+ 'ip': '127.0.0.1',
+ 'port': 3260,
+ 'iqn': 'IQN'
+ }
+ self.node2 = {
+ 'ip': '127.0.0.2',
+ 'port': 8080,
+ 'iqn': 'IQN',
+ 'tpgt': 'TPGT'
+ }
+ self.node_records = [(
+ "%s:%d" % (self.node2['ip'], self.node2['port']),
+ self.node2['tpgt'],
+ self.node2['iqn']
+ )]
+
+ def assertActiveNodeEquals(self, node, iscsi_sr):
+ node_ip_port = "%s:%d" % (node['ip'], node['port'])
+ node_path = '/dev/iscsi/%s/%s' % (node['iqn'], node_ip_port)
+
+ self.assertEquals(node_path, iscsi_sr.path)
+ self.assertEquals(node_ip_port, iscsi_sr.tgtidx)
+ self.assertEquals(node_ip_port, iscsi_sr.address)
+
+ @mock.patch('ISCSISR.os.path.exists')
+ @mock.patch('ISCSISR.iscsilib.get_node_records')
+ def test_initPaths_actual_path_is_active(
+ self,
+ mock_get_node_records,
+ mock_exists):
+ mock_get_node_records.return_value = self.node_records
+ mock_exists.return_value = True
+
+ iscsi_sr = NonInitingMultiLUNISCSISR(self.node1, self.node2)
+
+ iscsi_sr._initPaths()
+
+ self.assertActiveNodeEquals(self.node1, iscsi_sr)
+
+ @mock.patch('ISCSISR.os.path.exists')
+ @mock.patch('ISCSISR.iscsilib.get_node_records')
+ def test_initPaths_active_path_detection(
+ self,
+ mock_get_node_records,
+ mock_exists):
+ mock_get_node_records.return_value = self.node_records
+
+ def fake_exists(path):
+ if self.node1['ip'] in path:
+ return False
+ return True
+
+ mock_exists.side_effect = fake_exists
+
+ iscsi_sr = NonInitingMultiLUNISCSISR(self.node1, self.node2)
+
+ iscsi_sr._initPaths()
+
+ self.assertActiveNodeEquals(self.node2, iscsi_sr)
diff --git a/tests/test_ISOSR.py b/tests/test_ISOSR.py
new file mode 100644
index 00000000..b132152d
--- /dev/null
+++ b/tests/test_ISOSR.py
@@ -0,0 +1,83 @@
+import mock
+import nfs
+import ISOSR
+import unittest
+
+
+class FakeISOSR(ISOSR.ISOSR):
+ uuid = None
+ sr_ref = None
+ session = None
+ srcmd = None
+
+ def __init__(self, srcmd, none):
+ self.dconf = srcmd.dconf
+ self.srcmd = srcmd
+
+
+class TestISOSR(unittest.TestCase):
+
+ def create_isosr(self, location='aServer:/aLocation', atype=None,
+ sr_uuid='asr_uuid', nfsversion=None):
+ srcmd = mock.Mock()
+ srcmd.dconf = {
+ 'location': location
+ }
+ if atype:
+ srcmd.dconf.update({'type': atype})
+ if nfsversion:
+ srcmd.dconf.update({'nfsversion': nfsversion})
+ srcmd.params = {
+ 'command': 'some_command'
+ }
+ isosr = FakeISOSR(srcmd, None)
+ isosr.load(sr_uuid)
+ return isosr
+
+ def test_load(self):
+ self.create_isosr()
+
+ @mock.patch('nfs.validate_nfsversion')
+ def test_load_validate_nfsversion_called(self, validate_nfsversion):
+ isosr = self.create_isosr(nfsversion='aNfsversion')
+
+ validate_nfsversion.assert_called_once_with('aNfsversion')
+
+ @mock.patch('NFSSR.Lock')
+ @mock.patch('nfs.validate_nfsversion')
+ def test_load_validate_nfsversion_returnused(self, validate_nfsversion,
+ Lock):
+ validate_nfsversion.return_value = 'aNfsversion'
+
+ self.assertEquals(self.create_isosr().nfsversion, 'aNfsversion')
+
+ @mock.patch('NFSSR.Lock')
+ @mock.patch('nfs.validate_nfsversion')
+ def test_load_validate_nfsversion_exceptionraised(self,
+ validate_nfsversion,
+ Lock):
+ validate_nfsversion.side_effect = nfs.NfsException('aNfsException')
+
+ self.assertRaises(nfs.NfsException, self.create_isosr)
+
+ @mock.patch('util.gen_uuid')
+ @mock.patch('nfs.soft_mount')
+ @mock.patch('util._convertDNS')
+ @mock.patch('nfs.validate_nfsversion')
+ @mock.patch('util.makedirs')
+ @mock.patch('ISOSR.ISOSR._checkmount')
+ def test_attach_nfs(self, _checkmount, makedirs, validate_nfsversion,
+ convertDNS, soft_mount, gen_uuid):
+ validate_nfsversion.return_value = 'aNfsversionChanged'
+ isosr = self.create_isosr(location='aServer:/aLocation', atype='nfs',
+ sr_uuid='asr_uuid')
+ _checkmount.side_effect = [False, True]
+ gen_uuid.return_value = 'aUuid'
+
+ isosr.attach(None)
+
+ soft_mount.assert_called_once_with('/var/run/sr-mount/asr_uuid',
+ 'aServer',
+ '/aLocation',
+ 'tcp',
+ nfsversion='aNfsversionChanged')
diff --git a/tests/test_LVHDSR.py b/tests/test_LVHDSR.py
new file mode 100644
index 00000000..015012cd
--- /dev/null
+++ b/tests/test_LVHDSR.py
@@ -0,0 +1,78 @@
+import unittest
+import mock
+import LVHDSR
+import journaler
+import lvhdutil
+
+
+class SMLog(object):
+ def __call__(self, *args):
+ print args
+
+
+class Stubs(object):
+ def init_stubs(self):
+ self._stubs = []
+
+ def stubout(self, *args, **kwargs):
+ patcher = mock.patch(*args, **kwargs)
+ self._stubs.append(patcher)
+ patcher.start()
+
+ def remove_stubs(self):
+ for patcher in self._stubs:
+ patcher.stop()
+
+
+class TestLVHDSR(unittest.TestCase, Stubs):
+
+ def setUp(self):
+ self.init_stubs()
+
+ def tearDown(self):
+ self.remove_stubs()
+
+ def create_LVHDSR(self):
+ srcmd = mock.Mock()
+ srcmd.dconf = {'device': '/dev/bar'}
+ srcmd.params = {'command': 'foo', 'session_ref': 'some session ref'}
+ return LVHDSR.LVHDSR(srcmd, "some SR UUID")
+
+ @mock.patch('lvhdutil.getVDIInfo')
+ def test_loadvids(self, mock_getVDIInfo):
+ """sr.allVDIs populated by _loadvdis"""
+
+ vdi_uuid = 'some VDI UUID'
+ mock_getVDIInfo.return_value = {vdi_uuid: lvhdutil.VDIInfo(vdi_uuid)}
+ sr = self.create_LVHDSR()
+
+ sr._loadvdis()
+
+ self.assertEquals([vdi_uuid], sr.allVDIs.keys())
+
+ @mock.patch('lvhdutil.lvRefreshOnAllSlaves')
+ @mock.patch('lvhdutil.getVDIInfo')
+ @mock.patch('journaler.Journaler.getAll')
+ def test_undoAllInflateJournals(
+ self,
+ mock_getAll,
+ mock_getVDIInfo,
+ mock_lvhdutil_lvRefreshOnAllSlaves):
+ """No LV refresh on slaves when Cleaning up local LVHD SR's journal"""
+
+ self.stubout('XenAPI.xapi_local')
+ self.stubout('journaler.Journaler.remove')
+ self.stubout('util.zeroOut')
+ self.stubout('lvhdutil.deflate')
+ self.stubout('util.SMlog', new_callable=SMLog)
+ self.stubout('lvmcache.LVMCache')
+
+ vdi_uuid = 'some VDI UUID'
+
+ mock_getAll.return_value = {vdi_uuid: '0'}
+ mock_getVDIInfo.return_value = {vdi_uuid: lvhdutil.VDIInfo(vdi_uuid)}
+
+ sr = self.create_LVHDSR()
+
+ sr._undoAllInflateJournals()
+ self.assertEquals(0, mock_lvhdutil_lvRefreshOnAllSlaves.call_count)
diff --git a/tests/test_NFSSR.py b/tests/test_NFSSR.py
new file mode 100644
index 00000000..49524ff7
--- /dev/null
+++ b/tests/test_NFSSR.py
@@ -0,0 +1,85 @@
+import mock
+import nfs
+import NFSSR
+import unittest
+
+
+class FakeNFSSR(NFSSR.NFSSR):
+ uuid = None
+ sr_ref = None
+ session = None
+ srcmd = None
+
+ def __init__(self, srcmd, none):
+ self.dconf = srcmd.dconf
+ self.srcmd = srcmd
+
+
+class TestNFSSR(unittest.TestCase):
+
+ def create_nfssr(self, server='aServer', serverpath='/aServerpath',
+ sr_uuid='asr_uuid', nfsversion=None):
+ srcmd = mock.Mock()
+ srcmd.dconf = {
+ 'server': server,
+ 'serverpath': serverpath
+ }
+ if nfsversion:
+ srcmd.dconf.update({'nfsversion': nfsversion})
+ srcmd.params = {
+ 'command': 'some_command'
+ }
+ nfssr = FakeNFSSR(srcmd, None)
+ nfssr.load(sr_uuid)
+ return nfssr
+
+ @mock.patch('NFSSR.Lock')
+ def test_load(self, Lock):
+ self.create_nfssr()
+
+ @mock.patch('NFSSR.Lock')
+ @mock.patch('nfs.validate_nfsversion')
+ def test_load_validate_nfsversion_called(self, validate_nfsversion, Lock):
+ nfssr = self.create_nfssr(nfsversion='aNfsversion')
+
+ validate_nfsversion.assert_called_once_with('aNfsversion')
+
+ @mock.patch('NFSSR.Lock')
+ @mock.patch('nfs.validate_nfsversion')
+ def test_load_validate_nfsversion_returnused(self, validate_nfsversion,
+ Lock):
+ validate_nfsversion.return_value = 'aNfsversion'
+
+ self.assertEquals(self.create_nfssr().nfsversion, "aNfsversion")
+
+ @mock.patch('NFSSR.Lock')
+ @mock.patch('nfs.validate_nfsversion')
+ def test_load_validate_nfsversion_exceptionraised(self,
+ validate_nfsversion,
+ Lock):
+ validate_nfsversion.side_effect = nfs.NfsException('aNfsException')
+
+ self.assertRaises(nfs.NfsException, self.create_nfssr)
+
+ @mock.patch('util.makedirs')
+ @mock.patch('NFSSR.Lock')
+ @mock.patch('nfs.soft_mount')
+ @mock.patch('util._testHost')
+ @mock.patch('nfs.check_server_tcp')
+ @mock.patch('nfs.validate_nfsversion')
+ def test_attach(self, validate_nfsversion, check_server_tcp, _testhost,
+ soft_mount, Lock, makedirs):
+ validate_nfsversion.return_value = "aNfsversionChanged"
+ nfssr = self.create_nfssr(server='aServer', serverpath='/aServerpath',
+ sr_uuid='UUID')
+
+ nfssr.attach(None)
+
+ check_server_tcp.assert_called_once_with('aServer',
+ 'aNfsversionChanged')
+ soft_mount.assert_called_once_with('/var/run/sr-mount/UUID',
+ 'aServer',
+ '/aServerpath/UUID',
+ 'tcp',
+ timeout=0,
+ nfsversion='aNfsversionChanged')
diff --git a/tests/test_SRCommand.py b/tests/test_SRCommand.py
new file mode 100644
index 00000000..f0313534
--- /dev/null
+++ b/tests/test_SRCommand.py
@@ -0,0 +1,127 @@
+import unittest
+import mock
+
+import SRCommand
+
+
+class SomeException(Exception):
+ pass
+
+
+class TestStandaloneFunctions(unittest.TestCase):
+
+ @mock.patch('util.SMlog')
+ @mock.patch('__builtin__.reduce')
+ @mock.patch('SRCommand.SRCommand.run_statics')
+ @mock.patch('SRCommand.SRCommand.parse')
+ def test_run_correctly_log_all_exceptions(
+ self,
+ mock_parse,
+ mock_run_statics,
+ mock_reduce,
+ mock_SMlog):
+
+        """ Assert that an arbitrary exception raised with a large message length is logged to SMlog. Only the first line of the message is asserted (traceback omitted).
+ """
+
+ from random import choice
+ from string import ascii_letters
+ from DummySR import DRIVER_INFO
+
+ MSG_LEN = 2048
+
+ # TestSRCommand data member to hold SMlog output.
+ self.smlog_out = None
+
+ # Generate random exception message of MSG_LEN characters
+ rand_huge_msg = ''.join(choice(ascii_letters) for _ in range(MSG_LEN))
+
+ # Create function to raise exception in SRCommand.run()
+ mock_driver = mock.Mock(side_effect=SomeException(rand_huge_msg))
+
+ # MockSMlog replaces util.SMlog. Instead of printing to
+ # /var/log/SMlog, it writes the output to self.smlog_out.
+ def MockSMlog(str_arg):
+ self.smlog_out = str_arg.strip()
+
+ mock_reduce.return_value = ''
+ mock_SMlog.side_effect = MockSMlog
+
+ try:
+ SRCommand.run(mock_driver, DRIVER_INFO)
+ except SomeException:
+ # SomeException needs to be suppressed for this
+ # test, as it is re-raised after it is logged.
+ pass
+
+ self.assertTrue(rand_huge_msg in self.smlog_out)
+
+ @mock.patch('util.logException')
+ @mock.patch('SRCommand.SRCommand.run_statics')
+ @mock.patch('SRCommand.SRCommand.parse')
+ def test_run_print_xml_error_if_SRException(
+ self,
+ mock_parse,
+ mock_run_statics,
+ mock_logException):
+
+        """ If an SR.SRException is thrown, assert that "print .toxml()" is called.
+ """
+
+ import sys
+ from StringIO import StringIO
+ from SR import SRException
+ from DummySR import DRIVER_INFO
+
+ # Save original sys.stdout file object.
+ saved_stdout = sys.stdout
+
+ # Create a mock_stdout object and assign it to sys.stdout
+ mock_stdout = StringIO()
+ sys.stdout = mock_stdout
+
+ # Create function to raise exception in SRCommand.run()
+ mock_driver = mock.Mock(side_effect=SRException(
+ "[UnitTest] SRException thrown"))
+
+ try:
+ SRCommand.run(mock_driver, DRIVER_INFO)
+ except SystemExit:
+ pass
+
+ # Write SRCommand.run() output to variable.
+ actual_out = mock_stdout.getvalue()
+
+ # Restore the original sys.stdout object.
+ sys.stdout = saved_stdout
+
+ expected_out = ("\n\n\n"
+ "\n\nfaultCode\n"
+ "22\n\n\n"
+ "faultString\n[UnitTest] "
+ "SRException thrown\n\n"
+ "\n\n\n\n")
+
+ self.assertEqual(actual_out, expected_out)
+
+ @mock.patch('util.logException')
+ @mock.patch('SRCommand.SRCommand.run_statics')
+ @mock.patch('SRCommand.SRCommand.parse')
+ def test_run_reraise_if_not_SRException(
+ self,
+ mock_parse,
+ mock_run_statics,
+ mock_logException):
+
+ """ If an exception other than SR.SRException is thrown, assert that it is re-raised.
+ """
+
+ from DummySR import DRIVER_INFO
+
+ # Create function to raise exception in SRCommand.run()
+ mock_driver = mock.Mock(side_effect=SomeException)
+
+ try:
+ SRCommand.run(mock_driver, DRIVER_INFO)
+ except Exception, e:
+ self.assertTrue(isinstance(e, SomeException))
diff --git a/tests/test_blktap2.py b/tests/test_blktap2.py
new file mode 100644
index 00000000..45d1598c
--- /dev/null
+++ b/tests/test_blktap2.py
@@ -0,0 +1,27 @@
+import unittest
+import blktap2
+import mock
+
+
+class TestVDI(unittest.TestCase):
+ @mock.patch('blktap2.VDI.TargetDriver')
+ def setUp(self, mock_target):
+ mock_target.get_vdi_type.return_value = 'phy'
+
+ def mock_handles(type_str):
+ return type_str == 'udev'
+
+ mock_target.vdi.sr.handles = mock_handles
+
+ self.vdi = blktap2.VDI('uuid', mock_target, None)
+ self.vdi.target = mock_target
+
+ def test_tap_wanted_returns_true_for_udev_device(self):
+ result = self.vdi.tap_wanted()
+
+ self.assertEquals(True, result)
+
+ def test_get_tap_type_returns_aio_for_udev_device(self):
+ result = self.vdi.get_tap_type()
+
+ self.assertEquals('aio', result)
diff --git a/tests/test_cleanup.py b/tests/test_cleanup.py
new file mode 100644
index 00000000..f35ce36d
--- /dev/null
+++ b/tests/test_cleanup.py
@@ -0,0 +1,141 @@
+import unittest
+import mock
+
+import cleanup
+
+import util
+
+
+class FakeXapi(object):
+ def __init__(self):
+ self.srRecord = {
+ 'name_label': 'dummy'
+ }
+
+ def isPluggedHere(self):
+ return True
+
+ def isMaster(self):
+ return True
+
+
+class AlwaysLockedLock(object):
+ def acquireNoblock(self):
+ return False
+
+
+class AlwaysFreeLock(object):
+ def acquireNoblock(self):
+ return True
+
+
+class IrrelevantLock(object):
+ pass
+
+
+def create_cleanup_sr():
+ xapi = FakeXapi()
+ return cleanup.SR(uuid=None, xapi=xapi, createLock=False, force=False)
+
+
+class TestSR(unittest.TestCase):
+ def setUp(self):
+ self.sleep_patcher = mock.patch('cleanup.time.sleep')
+ self.sleep_patcher.start()
+
+ def tearDown(self):
+ self.sleep_patcher.stop()
+
+ def setup_abort_flag(self, ipc_mock, should_abort=False):
+ flag = mock.Mock()
+ flag.test = mock.Mock(return_value=should_abort)
+
+ ipc_mock.return_value = flag
+
+ def test_lock_if_already_locked(self):
+ """
+ Given an already locked SR, a lock call increments the lock counter
+ """
+
+ sr = create_cleanup_sr()
+ sr._srLock = IrrelevantLock()
+ sr._locked = 1
+
+ sr.lock()
+
+ self.assertEquals(2, sr._locked)
+
+ def test_lock_if_no_locking_is_used(self):
+ """
+ Given no srLock present, the lock operations don't touch the counter
+ """
+
+ sr = create_cleanup_sr()
+ sr._srLock = None
+
+ sr.lock()
+
+ self.assertEquals(0, sr._locked)
+
+ @mock.patch('cleanup.IPCFlag')
+ def test_lock_succeeds_if_lock_is_acquired(
+ self,
+ mock_ipc_flag):
+ """
+ After performing a lock, the counter equals to 1
+ """
+
+ self.setup_abort_flag(mock_ipc_flag)
+ sr = create_cleanup_sr()
+ sr._srLock = AlwaysFreeLock()
+
+ sr.lock()
+
+ self.assertEquals(1, sr._locked)
+
+ @mock.patch('cleanup.IPCFlag')
+ def test_lock_raises_exception_if_abort_requested(
+ self,
+ mock_ipc_flag):
+ """
+ If IPC abort was requested, lock raises AbortException
+ """
+
+ self.setup_abort_flag(mock_ipc_flag, should_abort=True)
+ sr = create_cleanup_sr()
+ sr._srLock = AlwaysLockedLock()
+
+ self.assertRaises(cleanup.AbortException, sr.lock)
+
+ @mock.patch('cleanup.IPCFlag')
+ def test_lock_raises_exception_if_unable_to_acquire_lock(
+ self,
+ mock_ipc_flag):
+ """
+ If the lock is busy, SMException is raised
+ """
+
+ self.setup_abort_flag(mock_ipc_flag)
+ sr = create_cleanup_sr()
+ sr._srLock = AlwaysLockedLock()
+
+ self.assertRaises(util.SMException, sr.lock)
+
+ @mock.patch('cleanup.IPCFlag')
+ def test_lock_leaves_sr_consistent_if_unable_to_acquire_lock(
+ self,
+ mock_ipc_flag):
+ """
+ If the lock is busy, the lock counter is not incremented
+ """
+
+ self.setup_abort_flag(mock_ipc_flag)
+ sr = create_cleanup_sr()
+ sr._srLock = AlwaysLockedLock()
+
+ try:
+ sr.lock()
+ except:
+ pass
+
+ self.assertEquals(0, sr._locked)
diff --git a/tests/test_devscan.py b/tests/test_devscan.py
index 49504e12..b7786b96 100644
--- a/tests/test_devscan.py
+++ b/tests/test_devscan.py
@@ -45,7 +45,7 @@ def test_scanning_empty_sr(self, context):
@testlib.with_context
def test_scanning_sr_with_devices(self, context):
sr = create_hba_sr()
- adapter = context.adapter()
+ adapter = context.add_adapter(testlib.SCSIAdapter())
adapter.add_disk()
sr._init_hbadict()
@@ -66,7 +66,7 @@ def test_scanning_sr_with_devices(self, context):
@testlib.with_context
def test_scanning_sr_includes_parameters(self, context):
sr = create_hba_sr()
- adapter = context.adapter()
+ adapter = context.add_adapter(testlib.SCSIAdapter())
adapter.add_disk()
sr._init_hbadict()
adapter.add_parameter('fc_host', dict(port_name='VALUE'))
@@ -96,9 +96,27 @@ def test_no_adapters(self, context):
self.assertEquals({'devs': {}, 'adt': {}}, result)
+ @mock.patch('devscan.match_hbadevs')
+ @testlib.with_context
+ def test_exotic_adapter_with_security_device(self, context, match_hbadevs):
+ adapter = context.add_adapter(testlib.AdapterWithNonBlockDevice())
+ adapter.add_disk()
+
+ match_hbadevs.return_value = 'lpfc'
+ result = devscan.adapters()
+
+ self.assertEquals(
+ {
+ 'devs': {},
+ 'adt': {
+ 'host0': 'lpfc'
+ }
+ },
+ result)
+
@testlib.with_context
def test_adapter_and_disk_added(self, context):
- adapter = context.adapter()
+ adapter = context.add_adapter(testlib.SCSIAdapter())
adapter.add_disk()
result = devscan.adapters()
@@ -117,3 +135,51 @@ def test_adapter_and_disk_added(self, context):
}
},
result)
+
+
+class TestExtractDevName(unittest.TestCase):
+ @testlib.with_context
+ def test_26_kernel(self, context):
+ context.kernel_version = '2.6'
+ context.fake_makedirs('/somepath/block:sde')
+ result = devscan._extract_dev_name('/somepath')
+
+ self.assertEquals('sde', result)
+
+ @testlib.with_context
+ def test_3x_kernel(self, context):
+ context.kernel_version = '3.2'
+ context.fake_makedirs('/somepath/block/sde')
+ result = devscan._extract_dev_name('/somepath')
+
+ self.assertEquals('sde', result)
+
+ @testlib.with_context
+ def test_extract_dev_name_from_directory_without_block_device(
+ self,
+ context):
+ context.kernel_version = '3.10'
+
+ result = devscan._extract_dev_name('/nonexisting')
+
+ self.assertEquals(devscan.INVALID_DEVICE_NAME, result)
+
+
+class TestUpdateDevsDict(unittest.TestCase):
+ def test_whencalled_updates_dict(self):
+ devices = {}
+ dev = 'dev'
+ entry = 'entry'
+
+ devscan.update_devs_dict(devices, dev, entry)
+
+ self.assertEquals({'dev': 'entry'}, devices)
+
+ def test_whencalled_with_empty_key_does_not_update_dict(self):
+ devices = {}
+ dev = devscan.INVALID_DEVICE_NAME
+ entry = 'entry'
+
+ devscan.update_devs_dict(devices, dev, entry)
+
+ self.assertEquals({}, devices)
diff --git a/tests/test_lock.py b/tests/test_lock.py
new file mode 100644
index 00000000..7e065687
--- /dev/null
+++ b/tests/test_lock.py
@@ -0,0 +1,73 @@
+import unittest
+import mock
+import os
+import gc
+import errno
+
+import testlib
+
+import lock
+
+
+class FailingOpenContext(testlib.TestContext):
+ def fake_open(self, fname, mode='r'):
+ raise IOError()
+
+
+class TestLock(unittest.TestCase):
+ @testlib.with_context
+ def test_lock_without_namespace_creates_nil_namespace(self, context):
+ lck = lock.Lock('somename')
+
+ self.assertTrue(
+ os.path.exists(
+ os.path.join(lck.BASE_DIR, '.nil')))
+
+ @testlib.with_context
+ def test_lock_with_namespace_creates_namespace(self, context):
+ lck = lock.Lock('somename', ns='namespace')
+
+ self.assertTrue(
+ os.path.exists(
+ os.path.join(lck.BASE_DIR, 'namespace')))
+
+ @testlib.with_context
+ def test_lock_without_namespace_creates_file(self, context):
+ lck = lock.Lock('somename')
+
+ self.assertTrue(
+ os.path.exists(
+ os.path.join(lck.BASE_DIR, '.nil', 'somename')))
+
+ @testlib.with_context
+ def test_lock_with_namespace_creates_file(self, context):
+ lck = lock.Lock('somename', ns='namespace')
+
+ self.assertTrue(
+ os.path.exists(
+ os.path.join(lck.BASE_DIR, 'namespace', 'somename')))
+
+ @testlib.with_context
+ def test_lock_file_create_fails_retried(self, context):
+ Lock = create_lock_class_that_fails_to_create_file(1)
+ lck = Lock('somename', ns='namespace')
+
+ self.assertTrue(
+ os.path.exists(
+ os.path.join(lck.BASE_DIR, 'namespace', 'somename')))
+
+
+def create_lock_class_that_fails_to_create_file(number_of_failures):
+
+ class LockThatFailsToCreateFile(lock.Lock):
+ _failures = number_of_failures
+
+ def _open_lockfile(self):
+ if self._failures > 0:
+ error = IOError('No such file')
+ error.errno = errno.ENOENT
+ self._failures -= 1
+ raise error
+ return lock.Lock._open_lockfile(self)
+
+ return LockThatFailsToCreateFile
diff --git a/tests/test_lvmlib.py b/tests/test_lvmlib.py
new file mode 100644
index 00000000..36b783e9
--- /dev/null
+++ b/tests/test_lvmlib.py
@@ -0,0 +1,161 @@
+import unittest
+import mock
+
+import lvmlib
+
+
+class ExecResultMixIn(object):
+ def assertExecutionSucceeded(self, exec_result):
+ returncode, stdout, stderr = exec_result
+
+ self.assertEquals(0, returncode)
+
+ def assertExecutionFailed(self, exec_result):
+ returncode, stdout, stderr = exec_result
+
+ self.assertEquals(1, returncode)
+
+
+class TestLVSubSystem(unittest.TestCase, ExecResultMixIn):
+ def test_lvcreate_is_mocked(self):
+ executable_injector = mock.Mock()
+
+ lvsubsystem = lvmlib.LVSubsystem(None, executable_injector)
+
+ self.assertTrue(
+ mock.call('/usr/sbin/lvcreate', lvsubsystem.fake_lvcreate)
+ in executable_injector.mock_calls
+ )
+
+ def test_lvremove_is_mocked(self):
+ executable_injector = mock.Mock()
+
+ lvsubsystem = lvmlib.LVSubsystem(None, executable_injector)
+
+ self.assertTrue(
+ mock.call('/usr/sbin/lvremove', lvsubsystem.fake_lvremove)
+ in executable_injector.mock_calls
+ )
+
+ def test_dmsetup_is_mocked(self):
+ executable_injector = mock.Mock()
+
+ lvsubsystem = lvmlib.LVSubsystem(None, executable_injector)
+
+ self.assertTrue(
+ mock.call('/sbin/dmsetup', lvsubsystem.fake_dmsetup)
+ in executable_injector.mock_calls
+ )
+
+ def test_add_volume_group(self):
+ lvsubsystem = lvmlib.LVSubsystem(None, mock.Mock())
+
+ lvsubsystem.add_volume_group('vg')
+ vg = lvsubsystem.get_volume_group('vg')
+
+ self.assertEquals('vg', vg.name)
+
+ def test_fake_lvcreate_creates_volume(self):
+ lvsubsystem = lvmlib.LVSubsystem(mock.Mock(), mock.Mock())
+ vg = lvsubsystem.add_volume_group('vg')
+
+ exec_result = lvsubsystem.fake_lvcreate(
+ "someprog -n name -L 100 vg".split(), '')
+
+ lv, = lvsubsystem.get_logical_volumes_with_name('name')
+
+ self.assertEquals('name', lv.name)
+ self.assertEquals(lvsubsystem.get_volume_group('vg'), lv.volume_group)
+ self.assertTrue(lv.active)
+ self.assertTrue(lv.zeroed)
+ self.assertEquals(None, lv.tag)
+ self.assertEquals(100, lv.size_mb)
+
+ def test_fake_lvcreate_with_tags(self):
+ lvsubsystem = lvmlib.LVSubsystem(mock.Mock(), mock.Mock())
+ lvsubsystem.add_volume_group('vg')
+
+ exec_result = lvsubsystem.fake_lvcreate(
+ "someprog -n name --addtag tagg -L 100 vg".split(), '')
+
+ lv, = lvsubsystem.get_logical_volumes_with_name('name')
+ self.assertEquals('tagg', lv.tag)
+
+ def test_fake_lvcreate_inactive(self):
+ lvsubsystem = lvmlib.LVSubsystem(mock.Mock(), mock.Mock())
+ lvsubsystem.add_volume_group('vg')
+
+ exec_result = lvsubsystem.fake_lvcreate(
+ "someprog -n name --inactive -L 100 vg".split(), '')
+
+ lv, = lvsubsystem.get_logical_volumes_with_name('name')
+ self.assertFalse(lv.active)
+
+ def test_fake_lvcreate_non_zeroed(self):
+ lvsubsystem = lvmlib.LVSubsystem(mock.Mock(), mock.Mock())
+ lvsubsystem.add_volume_group('vg')
+
+ exec_result = lvsubsystem.fake_lvcreate(
+ "someprog -n name --zero n -L 100 vg".split(), '')
+
+ lv, = lvsubsystem.get_logical_volumes_with_name('name')
+
+ self.assertFalse(lv.zeroed)
+ self.assertExecutionSucceeded(exec_result)
+
+ def test_fake_lvcreate_called_with_wrong_params(self):
+ lvsubsystem = lvmlib.LVSubsystem(mock.Mock(), mock.Mock())
+ lvsubsystem.add_volume_group('vg')
+
+ exec_result = lvsubsystem.fake_lvcreate(
+ "someprog --something-stupid -n name n -L 100 vg".split(), '')
+
+ self.assertExecutionFailed(exec_result)
+
+ def test_fake_lvcreate_fails_if_no_volume_group_found(self):
+ lvsubsystem = lvmlib.LVSubsystem(mock.Mock(), mock.Mock())
+
+ exec_result = lvsubsystem.fake_lvcreate(
+ "someprog -n name -L 100 nonexisting".split(), '')
+
+ self.assertExecutionFailed(exec_result)
+
+ def test_fake_lvremove(self):
+ lvsubsystem = lvmlib.LVSubsystem(mock.Mock(), mock.Mock())
+ lvsubsystem.add_volume_group('vg')
+ lvsubsystem.get_volume_group('vg').add_volume('lv', 100)
+
+ exec_result = lvsubsystem.fake_lvremove(
+ "someprog vg/lv".split(), '')
+
+ self.assertExecutionSucceeded(exec_result)
+
+ def test_fake_lvremove_with_force(self):
+ lvsubsystem = lvmlib.LVSubsystem(mock.Mock(), mock.Mock())
+ lvsubsystem.add_volume_group('vg')
+ lvsubsystem.get_volume_group('vg').add_volume('lv', 100)
+
+ exec_result = lvsubsystem.fake_lvremove(
+ "someprog -f vg/lv".split(), '')
+
+ self.assertExecutionSucceeded(exec_result)
+
+ def test_fake_lvremove_with_bad_params(self):
+ lvsubsystem = lvmlib.LVSubsystem(mock.Mock(), mock.Mock())
+ lvsubsystem.add_volume_group('vg')
+ lvsubsystem.get_volume_group('vg').add_volume('lv', 100)
+
+ exec_result = lvsubsystem.fake_lvremove(
+ "someprog -f vg/lv --stupid-parameter".split(), '')
+
+ self.assertExecutionFailed(exec_result)
+
+ def test_fake_dmsetup_status_returns_zero(self):
+ lvsubsystem = lvmlib.LVSubsystem(mock.Mock(), mock.Mock())
+
+ exec_result = lvsubsystem.fake_dmsetup(
+ "someprog status".split(), '')
+
+ self.assertExecutionSucceeded(exec_result)
+
+
diff --git a/tests/test_lvutil.py b/tests/test_lvutil.py
new file mode 100644
index 00000000..ab3e4898
--- /dev/null
+++ b/tests/test_lvutil.py
@@ -0,0 +1,107 @@
+import unittest
+import testlib
+import lvmlib
+import mock
+
+import lvutil
+
+
+ONE_MEGABYTE=1*1024*1024
+
+
+def with_lvm_subsystem(func):
+ @testlib.with_context
+ def decorated(self, context, *args, **kwargs):
+ lvsystem = lvmlib.LVSubsystem(context.log, context.add_executable)
+ return func(self, lvsystem, *args, **kwargs)
+
+ decorated.__name__ = func.__name__
+ return decorated
+
+
+class TestCreate(unittest.TestCase):
+ @with_lvm_subsystem
+ def test_create_volume_size(self, lvsystem):
+ lvsystem.add_volume_group('vgroup')
+
+ lvutil.create('volume', 100 * ONE_MEGABYTE, 'vgroup')
+
+ created_lv, = lvsystem.get_logical_volumes_with_name('volume')
+
+ self.assertEquals(100, created_lv.size_mb)
+
+ @with_lvm_subsystem
+ def test_create_volume_is_in_the_right_volume_group(self, lvsystem):
+ lvsystem.add_volume_group('vgroup')
+
+ lvutil.create('volume', 100 * ONE_MEGABYTE, 'vgroup')
+
+ created_lv, = lvsystem.get_logical_volumes_with_name('volume')
+
+ self.assertEquals(100, created_lv.size_mb)
+
+ self.assertEquals('vgroup', created_lv.volume_group.name)
+ self.assertTrue(created_lv.active)
+ self.assertTrue(created_lv.zeroed)
+
+ @with_lvm_subsystem
+ def test_create_volume_is_active(self, lvsystem):
+ lvsystem.add_volume_group('vgroup')
+
+ lvutil.create('volume', 100 * ONE_MEGABYTE, 'vgroup')
+
+ created_lv, = lvsystem.get_logical_volumes_with_name('volume')
+
+ self.assertEquals(100, created_lv.size_mb)
+
+ self.assertTrue(created_lv.active)
+ self.assertTrue(created_lv.zeroed)
+
+ @with_lvm_subsystem
+ def test_create_volume_is_zeroed(self, lvsystem):
+ lvsystem.add_volume_group('vgroup')
+
+ lvutil.create('volume', 100 * ONE_MEGABYTE, 'vgroup')
+
+ created_lv, = lvsystem.get_logical_volumes_with_name('volume')
+
+ self.assertEquals(100, created_lv.size_mb)
+
+ self.assertTrue(created_lv.zeroed)
+
+ @with_lvm_subsystem
+ def test_create_creates_logical_volume_with_tags(self, lvsystem):
+ lvsystem.add_volume_group('vgroup')
+
+ lvutil.create('volume', ONE_MEGABYTE, 'vgroup', tag='hello')
+
+ created_lv, = lvsystem.get_logical_volumes_with_name('volume')
+ self.assertEquals('hello', created_lv.tag)
+
+ @mock.patch('util.pread2')
+ def test_create_percentage_has_precedence_over_size(self, mock_pread2):
+ lvutil.create('volume', ONE_MEGABYTE, 'vgroup',
+ size_in_percentage="10%F")
+
+ mock_pread2.assert_called_once_with(
+ [lvutil.CMD_LVCREATE] + "-n volume -l 10%F vgroup".split())
+
+
+class TestRemove(unittest.TestCase):
+ @with_lvm_subsystem
+ def test_remove_removes_volume(self, lvsystem):
+ lvsystem.add_volume_group('vgroup')
+ lvsystem.get_volume_group('vgroup').add_volume('volume', 100)
+
+ lvutil.remove('vgroup/volume')
+
+ self.assertEquals([], lvsystem.get_logical_volumes_with_name('volume'))
+
+ @mock.patch('lvutil._lvmBugCleanup')
+ @mock.patch('util.pread2')
+ def test_remove_additional_config_param(self, mock_pread2, _bugCleanup):
+ lvutil.remove('vgroup/volume', config_param="blah")
+ mock_pread2.assert_called_once_with(
+ [lvutil.CMD_LVREMOVE]
+ + "-f vgroup/volume --config devices{blah}".split()
+ )
diff --git a/tests/test_nfs.py b/tests/test_nfs.py
new file mode 100644
index 00000000..ac683f98
--- /dev/null
+++ b/tests/test_nfs.py
@@ -0,0 +1,64 @@
+import unittest
+import nfs
+import mock
+import sys
+
+
+class Test_nfs(unittest.TestCase):
+
+ @mock.patch('util.pread')
+ def test_check_server_tcp(self, pread):
+ nfs.check_server_tcp('aServer')
+
+ pread.assert_called_once_with(['/usr/sbin/rpcinfo', '-t', 'aServer',
+ 'nfs', '3'])
+
+ @mock.patch('util.pread')
+ def test_check_server_tcp_nfsversion(self, pread):
+ nfs.check_server_tcp('aServer', 'aNfsversion')
+
+ pread.assert_called_once_with(['/usr/sbin/rpcinfo', '-t', 'aServer',
+ 'nfs', 'aNfsversion'])
+
+ def get_soft_mount_pread(self, binary):
+ return ([binary, 'remoteserver:remotepath', 'mountpoint', '-o',
+ 'soft,timeo=600,retrans=2147483647,proto=transport,acdirmin=0'
+ ',acdirmax=0'])
+
+ @mock.patch('util.makedirs')
+ @mock.patch('util.pread')
+ def test_soft_mount(self, pread, makedirs):
+ nfs.soft_mount('mountpoint', 'remoteserver', 'remotepath', 'transport',
+ timeout=0)
+
+ pread.assert_called_once_with(self.get_soft_mount_pread('mount.nfs'))
+
+ @mock.patch('util.makedirs')
+ @mock.patch('util.pread')
+ def test_soft_mount_nfsversion_3(self, pread, makedirs):
+ nfs.soft_mount('mountpoint', 'remoteserver', 'remotepath', 'transport',
+ timeout=0, nfsversion='3')
+
+ pread.assert_called_once_with(self.get_soft_mount_pread('mount.nfs'))
+
+ @mock.patch('util.makedirs')
+ @mock.patch('util.pread')
+ def test_soft_mount_nfsversion_4(self, pread, makedirs):
+ nfs.soft_mount('mountpoint', 'remoteserver', 'remotepath', 'transport',
+ timeout=0, nfsversion='4')
+
+ pread.assert_called_once_with(self.get_soft_mount_pread('mount.nfs4'))
+
+ def test_validate_nfsversion_invalid(self):
+ for thenfsversion in ['2', '4.1']:
+ self.assertRaises(nfs.NfsException, nfs.validate_nfsversion,
+ thenfsversion)
+
+ def test_validate_nfsversion_default(self):
+ for thenfsversion in ['', None]:
+ self.assertEquals(nfs.validate_nfsversion(thenfsversion), '3')
+
+ def test_validate_nfsversion_valid(self):
+ for thenfsversion in ['3', '4']:
+ self.assertEquals(nfs.validate_nfsversion(thenfsversion),
+ thenfsversion)
diff --git a/tests/test_refcounter.py b/tests/test_refcounter.py
new file mode 100644
index 00000000..c52b0cf7
--- /dev/null
+++ b/tests/test_refcounter.py
@@ -0,0 +1,105 @@
+import unittest
+import testlib
+import os
+import mock
+import errno
+
+import refcounter
+
+
+class TestRefCounter(unittest.TestCase):
+ @testlib.with_context
+ def test_get_whencalled_creates_namespace(self, context):
+ os.makedirs(refcounter.RefCounter.BASE_DIR)
+
+ refcounter.RefCounter.get('not-important', False, 'somenamespace')
+
+ self.assertEquals(
+ ['somenamespace'],
+ os.listdir(os.path.join(refcounter.RefCounter.BASE_DIR)))
+
+ @testlib.with_context
+ def test_get_whencalled_returns_counters(self, context):
+ os.makedirs(refcounter.RefCounter.BASE_DIR)
+
+ result = refcounter.RefCounter.get(
+ 'not-important', False, 'somenamespace')
+
+ self.assertEquals(1, result)
+
+ @testlib.with_context
+ def test_get_whencalled_creates_refcounter_file(self, context):
+ os.makedirs(refcounter.RefCounter.BASE_DIR)
+
+ refcounter.RefCounter.get('someobject', False, 'somenamespace')
+
+ self.assertEquals(
+ ['someobject'],
+ os.listdir(os.path.join(
+ refcounter.RefCounter.BASE_DIR, 'somenamespace')))
+
+ @testlib.with_context
+ def test_get_whencalled_refcounter_file_contents(self, context):
+ os.makedirs(refcounter.RefCounter.BASE_DIR)
+
+ refcounter.RefCounter.get('someobject', False, 'somenamespace')
+
+ path_to_refcounter = os.path.join(
+ refcounter.RefCounter.BASE_DIR, 'somenamespace', 'someobject')
+
+ refcounter_file = open(path_to_refcounter, 'r')
+ contents = refcounter_file.read()
+ refcounter_file.close()
+
+ self.assertEquals('1 0\n', contents)
+
+ @testlib.with_context
+ def test_put_is_noop_if_already_zero(self, context):
+ os.makedirs(refcounter.RefCounter.BASE_DIR)
+
+ result = refcounter.RefCounter.put(
+ 'someobject', False, 'somenamespace')
+
+ self.assertEquals(0, result)
+
+ @testlib.with_context
+ def test_writeCount_returns_true_if_file_found(self, context):
+ os.makedirs('/existing')
+
+ result = refcounter.RefCounter._writeCount('/existing/file', 1, 1)
+
+ self.assertTrue(result)
+
+ @testlib.with_context
+ def test_writeCount_returns_false_if_file_not_found(self, context):
+ result = refcounter.RefCounter._writeCount('/nonexisting/file', 1, 1)
+
+ self.assertFalse(result)
+
+ @mock.patch('os.rmdir')
+ @mock.patch('os.unlink')
+ @mock.patch('util.pathexists')
+ def test_removeObject_ignores_if_directory_already_removed(self,
+ pathexists,
+ unlink,
+ rmdir):
+ rmdir.side_effect = OSError(errno.ENOENT, 'ignored')
+
+ refcounter.RefCounter._removeObject('namespace', 'obj')
+
+ rmdir.assert_called_once_with(
+ os.path.join(refcounter.RefCounter.BASE_DIR, 'namespace'))
+
+ @mock.patch('os.rmdir')
+ @mock.patch('os.unlink')
+ @mock.patch('util.pathexists')
+ def test_removeObject_ignores_if_directory_not_empty(self,
+ pathexists,
+ unlink,
+ rmdir):
+ rmdir.side_effect = OSError(errno.ENOTEMPTY, 'ignored')
+
+ refcounter.RefCounter._removeObject('namespace', 'obj')
+
+ rmdir.assert_called_once_with(
+ os.path.join(refcounter.RefCounter.BASE_DIR, 'namespace'))
diff --git a/tests/test_refcouter.py b/tests/test_refcouter.py
new file mode 100644
index 00000000..396efb0b
--- /dev/null
+++ b/tests/test_refcouter.py
@@ -0,0 +1,60 @@
+import unittest
+import testlib
+import os
+
+import refcounter
+
+
+class TestRefCounter(unittest.TestCase):
+ @testlib.with_context
+ def test_get_whencalled_creates_namespace(self, context):
+ os.makedirs(refcounter.RefCounter.BASE_DIR)
+
+ refcounter.RefCounter.get('not-important', False, 'somenamespace')
+
+ self.assertEquals(
+ ['somenamespace'],
+ os.listdir(os.path.join(refcounter.RefCounter.BASE_DIR)))
+
+ @testlib.with_context
+ def test_get_whencalled_returns_counters(self, context):
+ os.makedirs(refcounter.RefCounter.BASE_DIR)
+
+ result = refcounter.RefCounter.get('not-important', False, 'somenamespace')
+
+ self.assertEquals(1, result)
+
+ @testlib.with_context
+ def test_get_whencalled_creates_refcounter_file(self, context):
+ os.makedirs(refcounter.RefCounter.BASE_DIR)
+
+ refcounter.RefCounter.get('someobject', False, 'somenamespace')
+
+ self.assertEquals(
+ ['someobject'],
+ os.listdir(os.path.join(
+ refcounter.RefCounter.BASE_DIR, 'somenamespace')))
+
+ @testlib.with_context
+ def test_get_whencalled_refcounter_file_contents(self, context):
+ os.makedirs(refcounter.RefCounter.BASE_DIR)
+
+ refcounter.RefCounter.get('someobject', False, 'somenamespace')
+
+ path_to_refcounter = os.path.join(
+ refcounter.RefCounter.BASE_DIR, 'somenamespace', 'someobject')
+
+ refcounter_file = open(path_to_refcounter, 'r')
+ contents = refcounter_file.read()
+ refcounter_file.close()
+
+ self.assertEquals('1 0\n', contents)
+
+ @testlib.with_context
+ def test_put_is_noop_if_already_zero(self, context):
+ os.makedirs(refcounter.RefCounter.BASE_DIR)
+
+ result = refcounter.RefCounter.put(
+ 'someobject', False, 'somenamespace')
+
+ self.assertEquals(0, result)
diff --git a/tests/test_testlib.py b/tests/test_testlib.py
index 881bd6b3..e562ce28 100644
--- a/tests/test_testlib.py
+++ b/tests/test_testlib.py
@@ -1,6 +1,7 @@
import unittest
import os
import mock
+import errno
import testlib
@@ -14,14 +15,14 @@ def test_generate_inventory_file(self):
@testlib.with_context
def test_adapter_adds_scsi_host_entry(self, context):
- context.adapter()
+ context.add_adapter(testlib.SCSIAdapter())
self.assertEquals(['host0'], os.listdir('/sys/class/scsi_host'))
@testlib.with_context
def test_add_disk_adds_scsi_disk_entry(self, context):
import glob
- adapter = context.adapter()
+ adapter = context.add_adapter(testlib.SCSIAdapter())
adapter.add_disk()
self.assertEquals(
@@ -31,7 +32,7 @@ def test_add_disk_adds_scsi_disk_entry(self, context):
@testlib.with_context
def test_add_disk_adds_scsibus_entry(self, context):
import glob
- adapter = context.adapter()
+ adapter = context.add_adapter(testlib.SCSIAdapter())
adapter.long_id = 'HELLO'
adapter.add_disk()
@@ -41,7 +42,7 @@ def test_add_disk_adds_scsibus_entry(self, context):
@testlib.with_context
def test_add_disk_adds_device(self, context):
- adapter = context.adapter()
+ adapter = context.add_adapter(testlib.SCSIAdapter())
adapter.add_disk()
self.assertEquals(
@@ -50,7 +51,7 @@ def test_add_disk_adds_device(self, context):
@testlib.with_context
def test_add_disk_adds_disk_by_id_entry(self, context):
- adapter = context.adapter()
+ adapter = context.add_adapter(testlib.SCSIAdapter())
disk = adapter.add_disk()
disk.long_id = 'SOMEID'
@@ -59,21 +60,21 @@ def test_add_disk_adds_disk_by_id_entry(self, context):
@testlib.with_context
def test_add_disk_adds_glob(self, context):
import glob
- adapter = context.adapter()
+ adapter = context.add_adapter(testlib.SCSIAdapter())
disk = adapter.add_disk()
self.assertEquals(['/dev/disk/by-id'], glob.glob('/dev/disk/by-id'))
@testlib.with_context
def test_add_disk_path_exists(self, context):
- adapter = context.adapter()
+ adapter = context.add_adapter(testlib.SCSIAdapter())
disk = adapter.add_disk()
self.assertTrue(os.path.exists('/dev/disk/by-id'))
@testlib.with_context
def test_add_parameter_parameter_file_exists(self, context):
- adapter = context.adapter()
+ adapter = context.add_adapter(testlib.SCSIAdapter())
disk = adapter.add_disk()
adapter.add_parameter('fc_host', {'node_name': 'ignored'})
@@ -81,7 +82,7 @@ def test_add_parameter_parameter_file_exists(self, context):
@testlib.with_context
def test_add_parameter_parameter_file_contents(self, context):
- adapter = context.adapter()
+ adapter = context.add_adapter(testlib.SCSIAdapter())
disk = adapter.add_disk()
adapter.add_parameter('fc_host', {'node_name': 'value'})
@@ -134,6 +135,18 @@ def test_exists_returns_false_for_non_existing(self, context):
def test_exists_returns_true_for_root(self, context):
self.assertTrue(os.path.exists('/'))
+ @testlib.with_context
+ def test_stat_nonexistent_file_throws_oserror(self, context):
+ self.assertRaises(
+ OSError,
+ lambda: os.stat('/nonexistingstuff'))
+
+ @testlib.with_context
+ def test_stat_does_not_fail_with_existing_file(self, context):
+ os.makedirs('/existingstuff')
+
+ os.stat('/existingstuff')
+
@testlib.with_context
def test_error_codes_read(self, context):
context.setup_error_codes()
@@ -225,6 +238,16 @@ def test_write_a_file(self, context):
('/blah/subdir/somefile', 'hello')
in list(context.generate_path_content()))
+ @testlib.with_context
+ def test_write_a_file_in_non_existing_dir(self, context):
+ import os
+
+ try:
+ open('/blah/subdir/somefile', 'w')
+ raise AssertionError('No exception raised')
+ except IOError, e:
+ self.assertEquals(errno.ENOENT, e.errno)
+
@testlib.with_context
def test_file_returns_an_object_with_fileno_callable(self, context):
f = file('/file', 'w+')
@@ -314,6 +337,39 @@ def somefunction(firstparam, context):
self.assertEquals(original_open, os.open)
+ @testlib.with_context
+ def test_rmdir_is_replaced_with_a_fake(self, context):
+ self.assertEquals(context.fake_rmdir, os.rmdir)
+
+ def test_rmdir_raises_error_if_dir_not_found(self):
+ context = testlib.TestContext()
+
+ try:
+ context.fake_rmdir('nonexisting')
+ raise AssertionError('No Exception raised')
+ except OSError, e:
+ self.assertEquals(errno.ENOENT, e.errno)
+
+ def test_rmdir_removes_dir_if_found(self):
+ context = testlib.TestContext()
+
+ context.fake_makedirs('/existing_dir')
+
+ context.fake_rmdir('/existing_dir')
+
+ self.assertFalse(context.fake_exists('/existing_dir'))
+
+ def test_rmdir_raises_exception_if_dir_is_not_empty(self):
+ context = testlib.TestContext()
+
+ context.fake_makedirs('/existing_dir/somefile')
+
+ try:
+ context.fake_rmdir('/existing_dir')
+ raise AssertionError('No Exception raised')
+ except OSError, e:
+ self.assertEquals(errno.ENOTEMPTY, e.errno)
+
class TestFilesystemFor(unittest.TestCase):
def test_returns_single_item_for_root(self):
diff --git a/tests/test_trim_util.py b/tests/test_trim_util.py
new file mode 100644
index 00000000..000d6149
--- /dev/null
+++ b/tests/test_trim_util.py
@@ -0,0 +1,289 @@
+import unittest
+import trim_util
+import testlib
+
+import mock
+
+
+class AlwaysBusyLock(object):
+ def acquireNoblock(self):
+ return False
+
+
+class AlwaysFreeLock(object):
+ def __init__(self):
+ self.acquired = False
+
+ def acquireNoblock(self):
+ self.acquired = True
+ return True
+
+ def release(self):
+ self.acquired = False
+
+
+class TestTrimUtil(unittest.TestCase, testlib.XmlMixIn):
+ @mock.patch('util.sr_get_capability')
+ @testlib.with_context
+ def test_do_trim_error_code_trim_not_supported(self,
+ context,
+ sr_get_capability):
+ sr_get_capability.return_value = []
+ context.setup_error_codes()
+
+ result = trim_util.do_trim(None, {'sr_uuid': 'some-uuid'})
+
+ self.assertXML("""
+
+
+
+ errcode
+ UnsupportedSRForTrim
+
+
+ errmsg
+ Trim on [some-uuid] not supported
+
+
+ """, result)
+
+ @mock.patch('time.sleep')
+ @mock.patch('lock.Lock')
+ @mock.patch('util.sr_get_capability')
+ @testlib.with_context
+ def test_do_trim_unable_to_obtain_lock_on_sr(self,
+ context,
+ sr_get_capability,
+ MockLock,
+ sleep):
+ MockLock.return_value = AlwaysBusyLock()
+ sr_get_capability.return_value = [trim_util.TRIM_CAP]
+ context.setup_error_codes()
+
+ result = trim_util.do_trim(None, {'sr_uuid': 'some-uuid'})
+
+ self.assertXML("""
+
+
+
+ errcode
+ SRUnavailable
+
+
+ errmsg
+ Unable to get SR lock [some-uuid]
+
+
+ """, result)
+
+ @mock.patch('time.sleep')
+ @mock.patch('lock.Lock')
+ @mock.patch('util.sr_get_capability')
+ @testlib.with_context
+ def test_do_trim_sleeps_a_sec_and_retries_three_times(self,
+ context,
+ sr_get_capability,
+ MockLock,
+ sleep):
+ MockLock.return_value = AlwaysBusyLock()
+ sr_get_capability.return_value = [trim_util.TRIM_CAP]
+ context.setup_error_codes()
+
+ trim_util.do_trim(None, {'sr_uuid': 'some-uuid'})
+
+ self.assertEquals([
+ mock.call(1),
+ mock.call(1),
+ mock.call(1)
+ ],
+ sleep.mock_calls
+ )
+
+ @mock.patch('trim_util.lvutil')
+ @mock.patch('lock.Lock')
+ @mock.patch('util.sr_get_capability')
+ @testlib.with_context
+ def test_do_trim_creates_an_lv(self,
+ context,
+ sr_get_capability,
+ MockLock,
+ lvutil):
+ MockLock.return_value = AlwaysFreeLock()
+ sr_get_capability.return_value = [trim_util.TRIM_CAP]
+ context.setup_error_codes()
+
+ trim_util.do_trim(None, {'sr_uuid': 'some-uuid'})
+
+ lvutil.create.assert_called_once_with(
+ 'some-uuid_trim_lv', 0, 'VG_XenStorage-some-uuid',
+ size_in_percentage='100%F'
+ )
+
+ @mock.patch('trim_util.lvutil')
+ @mock.patch('lock.Lock')
+ @mock.patch('util.sr_get_capability')
+ @testlib.with_context
+ def test_do_trim_removes_lv_no_leftover_trim_vol(self,
+ context,
+ sr_get_capability,
+ MockLock,
+ lvutil):
+ lvutil.exists.return_value = False
+ MockLock.return_value = AlwaysFreeLock()
+ sr_get_capability.return_value = [trim_util.TRIM_CAP]
+ context.setup_error_codes()
+
+ trim_util.do_trim(None, {'sr_uuid': 'some-uuid'})
+
+ lvutil.remove.assert_called_once_with(
+ '/dev/VG_XenStorage-some-uuid/some-uuid_trim_lv',
+ config_param='issue_discards=1')
+
+ @mock.patch('trim_util.lvutil')
+ @mock.patch('lock.Lock')
+ @mock.patch('util.sr_get_capability')
+ @testlib.with_context
+ def test_do_trim_releases_lock(self,
+ context,
+ sr_get_capability,
+ MockLock,
+ lvutil):
+ lvutil.exists.return_value = False
+ sr_lock = MockLock.return_value = AlwaysFreeLock()
+ sr_get_capability.return_value = [trim_util.TRIM_CAP]
+ context.setup_error_codes()
+
+ trim_util.do_trim(None, {'sr_uuid': 'some-uuid'})
+
+ self.assertFalse(sr_lock.acquired)
+
+ @mock.patch('trim_util.lvutil')
+ @mock.patch('lock.Lock')
+ @mock.patch('util.sr_get_capability')
+ @testlib.with_context
+ def test_do_trim_removes_lv_with_leftover_trim_vol(self,
+ context,
+ sr_get_capability,
+ MockLock,
+ lvutil):
+ lvutil.exists.return_value = True
+ MockLock.return_value = AlwaysFreeLock()
+ sr_get_capability.return_value = [trim_util.TRIM_CAP]
+ context.setup_error_codes()
+
+ trim_util.do_trim(None, {'sr_uuid': 'some-uuid'})
+
+ self.assertEquals([
+ mock.call('/dev/VG_XenStorage-some-uuid/some-uuid_trim_lv'),
+ mock.call(
+ '/dev/VG_XenStorage-some-uuid/some-uuid_trim_lv',
+ config_param='issue_discards=1')
+ ], lvutil.remove.mock_calls)
+
+ @mock.patch('trim_util.lvutil')
+ @mock.patch('lock.Lock')
+ @mock.patch('util.sr_get_capability')
+ @testlib.with_context
+ def test_do_trim_lock_released_even_if_exception_raised(self,
+ context,
+ sr_get_capability,
+ MockLock,
+ lvutil):
+ lvutil.exists.side_effect = Exception('blah')
+ srlock = AlwaysFreeLock()
+ MockLock.return_value = srlock
+ sr_get_capability.return_value = [trim_util.TRIM_CAP]
+ context.setup_error_codes()
+
+ trim_util.do_trim(None, {'sr_uuid': 'some-uuid'})
+
+ self.assertFalse(srlock.acquired)
+
+ @mock.patch('trim_util.lvutil')
+ @mock.patch('lock.Lock')
+ @mock.patch('util.sr_get_capability')
+ @testlib.with_context
+ def test_do_trim_when_exception_then_returns_generic_err(self,
+ context,
+ sr_get_capability,
+ MockLock,
+ lvutil):
+ lvutil.exists.side_effect = Exception('blah')
+ srlock = AlwaysFreeLock()
+ MockLock.return_value = srlock
+ sr_get_capability.return_value = [trim_util.TRIM_CAP]
+ context.setup_error_codes()
+
+ result = trim_util.do_trim(None, {'sr_uuid': 'some-uuid'})
+
+        self.assertXML("""
+            <?xml version="1.0" ?>
+            <trim_response>
+                <key_value_pair>
+                    <key>errcode</key>
+                    <value>UnknownTrimException</value>
+                </key_value_pair>
+                <key_value_pair>
+                    <key>errmsg</key>
+                    <value>Unknown Exception: trim failed on SR [some-uuid]</value>
+                </key_value_pair>
+            </trim_response>
+        """, result)
+
+ @mock.patch('trim_util.lvutil')
+ @mock.patch('lock.Lock')
+ @mock.patch('util.sr_get_capability')
+ @testlib.with_context
+ def test_do_trim_when_trim_succeeded_returns_true(self,
+ context,
+ sr_get_capability,
+ MockLock,
+ lvutil):
+ MockLock.return_value = AlwaysFreeLock()
+ sr_get_capability.return_value = [trim_util.TRIM_CAP]
+ context.setup_error_codes()
+
+ result = trim_util.do_trim(None, {'sr_uuid': 'some-uuid'})
+
+ self.assertEquals('True', result)
+
+ @mock.patch('trim_util.time.time')
+ def test_log_last_triggered_no_key(self, mock_time):
+ session = mock.Mock()
+ mock_time.return_value = 0
+ session.xenapi.SR.get_by_uuid.return_value = 'sr_ref'
+ session.xenapi.SR.get_other_config.return_value = {}
+
+ trim_util._log_last_triggered(session, 'sr_uuid')
+
+ session.xenapi.SR.add_to_other_config.assert_called_with(
+ 'sr_ref', trim_util.TRIM_LAST_TRIGGERED_KEY, '0')
+ self.assertEqual(0, session.xenapi.SR.remove_from_other_config.call_count)
+
+ @mock.patch('trim_util.time.time')
+ def test_log_last_triggered_has_key(self, mock_time):
+ session = mock.Mock()
+ mock_time.return_value = 0
+ session.xenapi.SR.get_by_uuid.return_value = 'sr_ref'
+ other_config = {trim_util.TRIM_LAST_TRIGGERED_KEY: '0'}
+ session.xenapi.SR.get_other_config.return_value = other_config
+
+ trim_util._log_last_triggered(session, 'sr_uuid')
+
+ session.xenapi.SR.remove_from_other_config.assert_called_with(
+ 'sr_ref', trim_util.TRIM_LAST_TRIGGERED_KEY)
+ session.xenapi.SR.add_to_other_config.assert_called_with(
+ 'sr_ref', trim_util.TRIM_LAST_TRIGGERED_KEY, '0')
+
+ @mock.patch('trim_util.time.time')
+ @mock.patch('trim_util.util.logException')
+ def test_log_last_triggered_exc_logged(self, mock_log_exc, mock_time):
+ session = mock.Mock()
+ mock_time.return_value = 0
+ session.xenapi.SR.get_by_uuid.side_effect = Exception()
+
+ # This combination ensures that an exception does not cause the log
+ # function to throw, but the exception is still logged
+ trim_util._log_last_triggered(session, 'sr_uuid')
+
+ self.assertEqual(1, mock_log_exc.call_count)
diff --git a/tests/test_udevSR.py b/tests/test_udevSR.py
new file mode 100644
index 00000000..5935ebdd
--- /dev/null
+++ b/tests/test_udevSR.py
@@ -0,0 +1,28 @@
+import unittest
+import udevSR
+import SRCommand
+import mock
+
+
+VDI_LOCATION = '/path/to/vdi'
+
+
+class TestVdi(unittest.TestCase):
+
+ @mock.patch('udevSR.udevSR.get_vdi_location')
+ @mock.patch('udevSR.udevSR.load')
+ def test_vdi_succeeds_if_vdi_location_not_in_params_dictionary(
+ self,
+ mock_load,
+ mock_get_vdi_location):
+ mock_get_vdi_location.return_value = VDI_LOCATION
+ srcmd = SRCommand.SRCommand('driver_info')
+ srcmd.params = {'command': 'cmd'}
+ sr_uuid = 'sr_uuid'
+ udev_sr = udevSR.udevSR(srcmd, sr_uuid)
+
+ self.assertEquals(None, udev_sr.srcmd.params.get('vdi_location'))
+
+ udev_vdi = udev_sr.vdi('vdi_uuid')
+
+ self.assertEquals(VDI_LOCATION, udev_vdi.location)
diff --git a/tests/testlib.py b/tests/testlib.py
index acbbae05..3d6ea6aa 100644
--- a/tests/testlib.py
+++ b/tests/testlib.py
@@ -6,6 +6,7 @@
import string
import random
import textwrap
+import errno
PATHSEP = '/'
@@ -26,10 +27,19 @@ def get_error_codes():
class SCSIDisk(object):
- def __init__(self):
+ def __init__(self, adapter):
+ self.adapter = adapter
self.long_id = ''.join(
random.choice(string.digits) for _ in range(33))
+ def disk_device_paths(self, host_id, disk_id, actual_disk_letter):
+ yield '/sys/class/scsi_disk/%s:0:%s:0' % (host_id, disk_id)
+ yield '/sys/class/scsi_disk/%s:0:%s:0/device/block/sd%s' % (
+ host_id, disk_id, actual_disk_letter)
+ yield '/dev/disk/by-scsibus/%s-%s:0:%s:0' % (
+ self.adapter.long_id, host_id, disk_id)
+ yield '/dev/disk/by-id/%s' % (self.long_id)
+
class SCSIAdapter(object):
def __init__(self):
@@ -39,13 +49,24 @@ def __init__(self):
self.parameters = []
def add_disk(self):
- disk = SCSIDisk()
+ disk = SCSIDisk(self)
self.disks.append(disk)
return disk
def add_parameter(self, host_class, values):
self.parameters.append((host_class, values))
+ def adapter_device_paths(self, host_id):
+ yield '/sys/class/scsi_host/host%s' % host_id
+
+
+class AdapterWithNonBlockDevice(SCSIAdapter):
+ def adapter_device_paths(self, host_id):
+ for adapter_device_path in super(AdapterWithNonBlockDevice,
+ self).adapter_device_paths(host_id):
+ yield adapter_device_path
+ yield '/sys/class/fc_transport/target7:0:0/device/7:0:0:0'
+
class Executable(object):
def __init__(self, function_to_call):
@@ -106,10 +127,27 @@ def start(self):
mock.patch('glob.glob', new=self.fake_glob),
mock.patch('os.uname', new=self.fake_uname),
mock.patch('subprocess.Popen', new=self.fake_popen),
+ mock.patch('os.rmdir', new=self.fake_rmdir),
+ mock.patch('os.stat', new=self.fake_stat),
]
map(lambda patcher: patcher.start(), self.patchers)
self.setup_modinfo()
+ def fake_rmdir(self, path):
+ if path not in self.get_filesystem():
+ raise OSError(errno.ENOENT, 'No such file %s' % path)
+
+ if self.fake_glob(os.path.join(path, '*')):
+ raise OSError(errno.ENOTEMPTY, 'Directory is not empty %s' % path)
+
+ assert path in self._created_directories
+ self._created_directories = [
+ d for d in self._created_directories if d != path]
+
+ def fake_stat(self, path):
+ if not self.fake_exists(path):
+ raise OSError()
+
def fake_makedirs(self, path):
if path in self.get_filesystem():
raise OSError(path + " Already exists")
@@ -164,10 +202,13 @@ def fake_open(self, fname, mode='r'):
if fpath == fname:
return StringIO.StringIO(contents)
- if mode == 'w+':
+ if 'w' in mode:
if os.path.dirname(fname) in self.get_created_directories():
self._path_content[fname] = ''
return WriteableFile(self, fname, self._get_inc_fileno())
+ error = IOError('No such file %s' % fname)
+ error.errno = errno.ENOENT
+ raise error
self.log('tried to open file', fname)
raise IOError(fname)
@@ -222,21 +263,14 @@ def generate_path_content(self):
def generate_device_paths(self):
actual_disk_letter = 'a'
for host_id, adapter in enumerate(self.scsi_adapters):
- yield '/sys/class/scsi_host/host%s' % host_id
+ for adapter_device_path in adapter.adapter_device_paths(host_id):
+ yield adapter_device_path
for disk_id, disk in enumerate(adapter.disks):
- yield '/sys/class/scsi_disk/%s:0:%s:0' % (
- host_id, disk_id)
-
- yield '/sys/class/scsi_disk/%s:0:%s:0/device/block/sd%s' % (
- host_id, disk_id, actual_disk_letter)
-
+ for path in disk.disk_device_paths(host_id, disk_id,
+ actual_disk_letter):
+ yield path
actual_disk_letter = chr(ord(actual_disk_letter) + 1)
- yield '/dev/disk/by-scsibus/%s-%s:0:%s:0' % (
- adapter.long_id, host_id, disk_id)
-
- yield '/dev/disk/by-id/%s' % (disk.long_id)
-
for path, _content in self.generate_path_content():
yield path
@@ -263,7 +297,7 @@ def log(self, *args):
WARNING = '\033[93m'
ENDC = '\033[0m'
import sys
- sys.stderr.write(
+ sys.stdout.write(
WARNING
+ ' '.join(str(arg) for arg in args)
+ ENDC
@@ -272,26 +306,32 @@ def log(self, *args):
def stop(self):
map(lambda patcher: patcher.stop(), self.patchers)
- def adapter(self):
- adapter = SCSIAdapter()
+ def add_adapter(self, adapter):
self.scsi_adapters.append(adapter)
return adapter
+def with_custom_context(context_class):
+ def _with_context(func):
+ def decorated(self, *args, **kwargs):
+ context = context_class()
+ context.start()
+ try:
+ result = func(self, context, *args, **kwargs)
+ context.stop()
+ return result
+ except:
+ context.stop()
+ raise
+
+ decorated.__name__ = func.__name__
+ return decorated
+ return _with_context
+
+
def with_context(func):
- def decorated(self, *args, **kwargs):
- context = TestContext()
- context.start()
- try:
- result = func(self, context, *args, **kwargs)
- context.stop()
- return result
- except:
- context.stop()
- raise
-
- decorated.__name__ = func.__name__
- return decorated
+ decorator = with_custom_context(TestContext)
+ return decorator(func)
def xml_string(text):