Fix for #51
A registry with references to NBD devices has been added to the SR sm_config
rposudnevskiy committed Jul 3, 2017
1 parent 9533307 commit 7bc57c5
Showing 3 changed files with 159 additions and 53 deletions.
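The change keeps a per-host registry of rbd-nbd device slots in the SR's sm_config under a "dev_instances" key. Judging from the diff below, the value is a JSON document holding one fixed-size list of NBDS_MAX entries per host: index 0 is kept "reserved" (attach_from_config hard-codes /dev/nbd0), any other slot holds the UUID of the VDI currently mapped to /dev/nbd<index>, and None marks a free slot. A minimal sketch of that layout, with made-up host and VDI UUIDs:

# Illustrative layout of the JSON stored under the SR sm_config key "dev_instances"
# (UUIDs are invented; each host that has mapped an image gets NBDS_MAX entries).
dev_instances = {
    "hosts": {
        "9a1c1e7f-1111-2222-3333-444455556666": [
            "reserved",                               # index 0 -> /dev/nbd0, left for attach_from_config
            "c3d1b2a4-7777-8888-9999-aaaabbbbcccc",   # index 1 -> /dev/nbd1
            None,                                     # free slot
            None,                                     # ... and so on up to NBDS_MAX - 1
        ],
    },
}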
134 changes: 93 additions & 41 deletions bins/RBDSR.py
@@ -28,6 +28,8 @@
import xml.dom.minidom
import blktap2
import vhdutil
import json
import inventory

CAPABILITIES = ["VDI_CREATE", "VDI_DELETE", "VDI_ATTACH", "VDI_DETACH", "VDI_CLONE", "VDI_SNAPSHOT",
"VDI_INTRODUCE", "VDI_RESIZE", "VDI_RESIZE_ONLINE", "VDI_UPDATE", "VDI_MIRROR",
@@ -219,6 +221,10 @@ def attach(self, sr_uuid):
if not self.RBDPOOLs.has_key(self.uuid):
raise xs_errors.XenError('SRUnavailable',opterr='no pool with uuid: %s' % sr_uuid)

sr_sm_config = self.session.xenapi.SR.get_sm_config(self.sr_ref)
if sr_sm_config.has_key("dev_instances"):
self.session.xenapi.SR.remove_from_sm_config(self.sr_ref, "dev_instances")

cephutils.SR.attach(self, sr_uuid)

def update(self, sr_uuid):
@@ -227,6 +233,11 @@ def update(self, sr_uuid):

def detach(self, sr_uuid):
util.SMlog("RBDSR.detach: sr_uuid=%s" % sr_uuid)

sr_sm_config = self.session.xenapi.SR.get_sm_config(self.sr_ref)
if sr_sm_config.has_key("dev_instances"):
self.session.xenapi.SR.remove_from_sm_config(self.sr_ref, "dev_instances")

cephutils.SR.detach(self, sr_uuid)

def scan(self, sr_uuid):
@@ -402,13 +413,35 @@ def attach(self, sr_uuid, vdi_uuid):

vdi_ref = self.session.xenapi.VDI.get_by_uuid(vdi_uuid)
sm_config = self.session.xenapi.VDI.get_sm_config(vdi_ref)
sr_sm_config = self.session.xenapi.SR.get_sm_config(self.sr.sr_ref)
host_uuid = inventory.get_localhost_uuid()
self.size = int(self.session.xenapi.VDI.get_virtual_size(vdi_ref))

#if sm_config.has_key("snapshot-of"):
# base_uuid = sm_config["snapshot-of"]
# # it's a snapshot VDI
# self.path = self.sr._get_snap_path(base_uuid, vdi_uuid)
#else:
if sr_sm_config.has_key("dev_instances"):
sr_dev_instances = json.loads(sr_sm_config["dev_instances"])
self.session.xenapi.SR.remove_from_sm_config(self.sr.sr_ref, "dev_instances")
else:
sr_dev_instances={"hosts":{}}

first_free_instance = -1
if sr_dev_instances["hosts"].has_key(host_uuid):
for i in range(cephutils.NBDS_MAX):
if sr_dev_instances["hosts"][host_uuid][i] == None:
first_free_instance = i
break
sr_dev_instances["hosts"][host_uuid][first_free_instance] = vdi_uuid
else:
#sr_dev_instances["hosts"].append({host_uuid:[None]*cephutils.NBDS_MAX})
sr_dev_instances["hosts"][host_uuid] = [None]*cephutils.NBDS_MAX
sr_dev_instances["hosts"][host_uuid][0] = "reserved"
sr_dev_instances["hosts"][host_uuid][1] = vdi_uuid
first_free_instance = 1

self.session.xenapi.SR.add_to_sm_config(self.sr.sr_ref, "dev_instances", json.dumps(sr_dev_instances))
if sm_config.has_key("dev_instance"):
self.session.xenapi.VDI.remove_from_sm_config(vdi_ref, "dev_instance")
self.session.xenapi.VDI.add_to_sm_config(vdi_ref, "dev_instance", str(first_free_instance))

self.path = self.sr._get_path(vdi_uuid)

if not hasattr(self,'xenstore_data'):
@@ -419,52 +452,61 @@ def attach(self, sr_uuid, vdi_uuid):
self.xenstore_data['storage-type']='rbd'
self.xenstore_data['vdi-type']=self.vdi_type

##########
vdis = self.session.xenapi.SR.get_VDIs(self.sr.sr_ref)
has_a_snapshot = False
for tmp_vdi in vdis:
tmp_vdi_uuid = self.session.xenapi.VDI.get_uuid(tmp_vdi)
tmp_sm_config = self.session.xenapi.VDI.get_sm_config(tmp_vdi)
if tmp_sm_config.has_key("snapshot-of"):
if tmp_sm_config["snapshot-of"] == vdi_uuid:
has_a_snapshot = True
# if tmp_sm_config.has_key("sxm_mirror"):
# sxm_mirror_vdi = vdi_uuid
########## SXM VDIs
if sm_config.has_key("snapshot-of"):
base_uuid = sm_config["snapshot-of"]
# it's a snapshot VDI, attach it as snapshot
self._map_SNAP(base_uuid, vdi_uuid, self.size, "none")
elif sm_config.has_key("base_mirror"):
if has_a_snapshot:
# it's a mirror vdi of storage migrating VM
# it's attached first
self.session.xenapi.VDI.add_to_sm_config(vdi_ref, 'sxm_mirror', 'true')
# creating dm snapshot dev
self._map_sxm_mirror(vdi_uuid, self.size)
try:
##########
vdis = self.session.xenapi.SR.get_VDIs(self.sr.sr_ref)
has_a_snapshot = False
for tmp_vdi in vdis:
tmp_vdi_uuid = self.session.xenapi.VDI.get_uuid(tmp_vdi)
tmp_sm_config = self.session.xenapi.VDI.get_sm_config(tmp_vdi)
if tmp_sm_config.has_key("snapshot-of"):
if tmp_sm_config["snapshot-of"] == vdi_uuid:
has_a_snapshot = True
# if tmp_sm_config.has_key("sxm_mirror"):
# sxm_mirror_vdi = vdi_uuid
########## SXM VDIs
if sm_config.has_key("snapshot-of"):
base_uuid = sm_config["snapshot-of"]
# it's a snapshot VDI, attach it as snapshot
self._map_SNAP(base_uuid, vdi_uuid, self.size, "none")
elif sm_config.has_key("base_mirror"):
if has_a_snapshot:
# it's a mirror vdi of storage migrating VM
# it's attached first
self.session.xenapi.VDI.add_to_sm_config(vdi_ref, 'sxm_mirror', 'true')
# creating dm snapshot dev
self._map_sxm_mirror(vdi_uuid, self.size)
else:
# it's a base vdi of storage migrating VM
# it's attached after mirror VDI and mirror snapshot VDI has been created
self._map_VHD(vdi_uuid, self.size, "none")
########## not SXM VDIs
else:
# it's a base vdi of storage migrating VM
# it's attached after mirror VDI and mirror snapshot VDI has been created
# it's not SXM VDI, just attach it
self._map_VHD(vdi_uuid, self.size, "none")
########## not SXM VDIs
else:
# it's not SXM VDI, just attach it
self._map_VHD(vdi_uuid, self.size, "none")

if not util.pathexists(self.path):
raise xs_errors.XenError('VDIUnavailable', opterr='Could not find: %s' % self.path)
if not util.pathexists(self.path):
raise xs_errors.XenError('VDIUnavailable', opterr='Could not find: %s' % self.path)

self.attached = True
if sm_config.has_key("attached"):
self.session.xenapi.VDI.remove_from_sm_config(vdi_ref, 'attached')
self.session.xenapi.VDI.add_to_sm_config(vdi_ref, 'attached', 'true')
self.attached = True
if sm_config.has_key("attached"):
self.session.xenapi.VDI.remove_from_sm_config(vdi_ref, 'attached')
self.session.xenapi.VDI.add_to_sm_config(vdi_ref, 'attached', 'true')

except:
self.session.xenapi.SR.remove_from_sm_config(self.sr.sr_ref, "dev_instances")
sr_dev_instances["hosts"][host_uuid][first_free_instance] = None
self.session.xenapi.SR.add_to_sm_config(self.sr.sr_ref, "dev_instances", json.dumps(sr_dev_instances))
self.session.xenapi.VDI.remove_from_sm_config(vdi_ref, "dev_instance")

return VDI.VDI.attach(self, self.sr.uuid, self.uuid)

def detach(self, sr_uuid, vdi_uuid):
util.SMlog("RBDVDI.detach: sr_uuid=%s, vdi_uuid=%s" % (sr_uuid, vdi_uuid))
vdi_ref = self.sr.srcmd.params['vdi_ref']
sm_config = self.session.xenapi.VDI.get_sm_config(vdi_ref)
sr_sm_config = self.session.xenapi.SR.get_sm_config(self.sr.sr_ref)
host_uuid = inventory.get_localhost_uuid()

self.size = int(self.session.xenapi.VDI.get_virtual_size(vdi_ref))

@@ -480,6 +522,16 @@ def detach(self, sr_uuid, vdi_uuid):
self.attached = False
self.session.xenapi.VDI.remove_from_sm_config(vdi_ref, 'attached')

sr_dev_instances = json.loads(sr_sm_config["dev_instances"])
self.session.xenapi.SR.remove_from_sm_config(self.sr.sr_ref, "dev_instances")
if sr_dev_instances["hosts"].has_key(host_uuid):
for i in range(cephutils.NBDS_MAX):
if sr_dev_instances["hosts"][host_uuid][i] == vdi_uuid:
sr_dev_instances["hosts"][host_uuid][i] = None
break
self.session.xenapi.SR.add_to_sm_config(self.sr.sr_ref, "dev_instances", json.dumps(sr_dev_instances))
self.session.xenapi.VDI.remove_from_sm_config(vdi_ref, "dev_instance")

def clone(self, sr_uuid, snap_uuid):
util.SMlog("RBDVDI.clone: sr_uuid=%s, snap_uuid=%s" % (sr_uuid, snap_uuid))

@@ -756,7 +808,7 @@ def attach_from_config(self, sr_uuid, vdi_uuid):
pass
elif self.mode == "nbd":
self._disable_rbd_caching()
cmdout = util.pread2(["rbd-nbd", "--nbds_max", str(cephutils.NBDS_MAX), "-c", "/etc/ceph/ceph.conf.nocaching", "map", "%s/%s" % (self.sr.CEPH_POOL_NAME, _vdi_name), "--name", self.sr.CEPH_USER]).rstrip('\n')
cmdout = util.pread2(["rbd-nbd", "--device", "/dev/nbd0", "--nbds_max", str(cephutils.NBDS_MAX), "-c", "/etc/ceph/ceph.conf.nocaching", "map", "%s/%s" % (self.sr.CEPH_POOL_NAME, _vdi_name), "--name", self.sr.CEPH_USER]).rstrip('\n')
util.pread2(["ln", "-s", cmdout, _dev_name])
util.pread2(["ln", "-s", cmdout, dev_name])

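In RBDVDI.attach above the allocation boils down to: load the registry from SR sm_config (or start an empty one), take the first None slot in the local host's list, record the VDI UUID there, and write the chosen index into the VDI's sm_config as "dev_instance"; RBDVDI.detach clears the slot again. A standalone sketch of that logic, decoupled from the XenAPI calls (helper names and the NBDS_MAX default are illustrative, not part of the driver):

import json

NBDS_MAX = 64  # illustrative default; the driver takes the real value from cephutils.NBDS_MAX

def allocate_nbd_slot(registry_json, host_uuid, vdi_uuid, nbds_max=NBDS_MAX):
    """Record vdi_uuid in the first free slot for this host; return (updated_json, slot)."""
    registry = json.loads(registry_json) if registry_json else {"hosts": {}}
    slots = registry["hosts"].get(host_uuid)
    if slots is None:
        # First mapping on this host: slot 0 stays "reserved" for attach_from_config.
        slots = [None] * nbds_max
        slots[0] = "reserved"
        registry["hosts"][host_uuid] = slots
    try:
        slot = next(i for i, entry in enumerate(slots) if entry is None)
    except StopIteration:
        raise RuntimeError("no free nbd device slot on host %s" % host_uuid)
    slots[slot] = vdi_uuid
    return json.dumps(registry), slot

def release_nbd_slot(registry_json, host_uuid, vdi_uuid):
    """Clear the slot that holds vdi_uuid (the detach-side counterpart)."""
    registry = json.loads(registry_json)
    for i, entry in enumerate(registry["hosts"].get(host_uuid, [])):
        if entry == vdi_uuid:
            registry["hosts"][host_uuid][i] = None
            break
    return json.dumps(registry)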
10 changes: 6 additions & 4 deletions bins/ceph_plugin.py
@@ -74,11 +74,12 @@ def _map(session, arg_dict):
elif mode == "fuse":
pass
elif mode == "nbd":
dev = "%s%s" % ("/dev/nbd", arg_dict['dev'])
if sharable == "true":
_disable_rbd_caching()
dev = util.pread2(["rbd-nbd", "--nbds_max", NBDS_MAX, "-c", "/etc/ceph/ceph.conf.nocaching", "map", "%s/%s" % (CEPH_POOL_NAME, _vdi_name), "--name", CEPH_USER]).rstrip('\n')
util.pread2(["rbd-nbd", "--device", dev, "--nbds_max", NBDS_MAX, "-c", "/etc/ceph/ceph.conf.nocaching", "map", "%s/%s" % (CEPH_POOL_NAME, _vdi_name), "--name", CEPH_USER]).rstrip('\n')
else:
dev = util.pread2(["rbd-nbd", "--nbds_max", NBDS_MAX, "map", "%s/%s" % (CEPH_POOL_NAME, _vdi_name), "--name", CEPH_USER]).rstrip('\n')
util.pread2(["rbd-nbd", "--device", dev, "--nbds_max", NBDS_MAX, "map", "%s/%s" % (CEPH_POOL_NAME, _vdi_name), "--name", CEPH_USER]).rstrip('\n')
util.pread2(["ln", "-fs", dev, _dev_name])

if dm == "linear":
@@ -154,11 +155,12 @@ def __map(session, arg_dict):
elif mode == "fuse":
pass
elif mode == "nbd":
dev = "%s%s" % ("/dev/nbd", arg_dict['dev'])
if sharable == "true":
_disable_rbd_caching()
dev = util.pread2(["rbd-nbd", "--nbds_max", NBDS_MAX, "-c", "/etc/ceph/ceph.conf.nocaching", "map", "%s/%s" % (CEPH_POOL_NAME, _vdi_name), "--name", CEPH_USER]).rstrip('\n')
dev = util.pread2(["rbd-nbd", "--device", dev, "--nbds_max", NBDS_MAX, "-c", "/etc/ceph/ceph.conf.nocaching", "map", "%s/%s" % (CEPH_POOL_NAME, _vdi_name), "--name", CEPH_USER]).rstrip('\n')
else:
dev = util.pread2(["rbd-nbd", "--nbds_max", NBDS_MAX, "map", "%s/%s" % (CEPH_POOL_NAME, _vdi_name), "--name", CEPH_USER]).rstrip('\n')
dev = util.pread2(["rbd-nbd", "--device", dev, "--nbds_max", NBDS_MAX, "map", "%s/%s" % (CEPH_POOL_NAME, _vdi_name), "--name", CEPH_USER]).rstrip('\n')

if dm != "none":
util.pread2(["dmsetup", "resume", _dm_name])
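With this change the plugin no longer lets rbd-nbd pick a free device on its own: it builds the device path from the slot index handed over in arg_dict['dev'] and passes it explicitly through rbd-nbd's --device option, so the mapping lands on the /dev/nbdX that was reserved in the registry. An illustrative reconstruction of the resulting command (pool, image, user and index are all made up; the plugin reads the real values from arg_dict and the SR configuration):

# All values invented for illustration.
arg_dict = {"dev": "1"}
dev = "%s%s" % ("/dev/nbd", arg_dict["dev"])   # -> /dev/nbd1
cmd = ["rbd-nbd", "--device", dev, "--nbds_max", "64",
       "map", "rbd_pool/rbd_image", "--name", "client.admin"]
print(" ".join(cmd))
# rbd-nbd --device /dev/nbd1 --nbds_max 64 map rbd_pool/rbd_image --name client.admin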
68 changes: 60 additions & 8 deletions bins/cephutils.py
@@ -474,9 +474,11 @@ def __map_VHD(self, vdi_uuid):
vdi_ref = self.session.xenapi.VDI.get_by_uuid(vdi_uuid)
sm_config = self.session.xenapi.VDI.get_sm_config(vdi_ref)
if sm_config.has_key("dm"):
dm=sm_config["dm"]
dm = sm_config["dm"]
else:
dm="none"
dm = "none"

dev = sm_config["dev_instance"]

if self.session.xenapi.VDI.get_sharable(vdi_ref):
sharable="true"
@@ -491,7 +493,7 @@ def __map_VHD(self, vdi_uuid):
"CEPH_POOL_NAME":self.sr.CEPH_POOL_NAME,
"NBDS_MAX":str(NBDS_MAX),
"CEPH_USER":self.sr.CEPH_USER,"sharable":sharable,
"dm":dm}
"dm":dm, "dev":dev}
self._call_plugin('_map',args)

def __unmap_VHD(self, vdi_uuid):
@@ -532,6 +534,8 @@ def _map_VHD(self, vdi_uuid, size, dm):
dev_name = "%s/%s" % (self.sr.SR_ROOT, vdi_name)

vdi_ref = self.session.xenapi.VDI.get_by_uuid(vdi_uuid)
sm_config = self.session.xenapi.VDI.get_sm_config(vdi_ref)
dev = sm_config["dev_instance"]

if self.session.xenapi.VDI.get_sharable(vdi_ref):
sharable="true"
@@ -547,7 +551,7 @@ def _map_VHD(self, vdi_uuid, size, dm):
"CEPH_POOL_NAME":self.sr.CEPH_POOL_NAME,
"NBDS_MAX":str(NBDS_MAX),
"CEPH_USER":self.sr.CEPH_USER,"sharable":sharable,
"dm":dm,
"dm":dm, "dev":dev,
"size":str(size)}
self._call_plugin('map',args)
self.session.xenapi.VDI.add_to_sm_config(vdi_ref, 'dm', dm)
@@ -596,6 +600,8 @@ def _map_SNAP(self, vdi_uuid, snap_uuid, size, dm):
dev_name = "%s/%s" % (self.sr.SR_ROOT, vdi_name)

snap_ref = self.session.xenapi.VDI.get_by_uuid(snap_uuid)
sm_config = self.session.xenapi.VDI.get_sm_config(snap_ref)
dev = sm_config["dev_instance"]

if self.session.xenapi.VDI.get_sharable(snap_ref):
sharable="true"
@@ -612,7 +618,7 @@ def _map_SNAP(self, vdi_uuid, snap_uuid, size, dm):
"CEPH_POOL_NAME":self.sr.CEPH_POOL_NAME,
"NBDS_MAX":str(NBDS_MAX),
"CEPH_USER":self.sr.CEPH_USER,"sharable":sharable,
"dm":dm,
"dm":dm, "dev":dev,
"size":str(size)}
self._call_plugin('map',args)
self.session.xenapi.VDI.add_to_sm_config(snap_ref, 'dm', dm)
@@ -662,7 +668,10 @@ def _map_sxm_mirror(self, vdi_uuid, size):
dev_name = "%s/%s" % (self.sr.SR_ROOT, vdi_name)

vdi_ref = self.session.xenapi.VDI.get_by_uuid(vdi_uuid)
sm_config = self.session.xenapi.VDI.get_sm_config(vdi_ref)
dm="mirror"
dev = sm_config["dev_instance"]

if self.session.xenapi.VDI.get_sharable(vdi_ref):
sharable="true"
else:
@@ -677,7 +686,7 @@ def _map_sxm_mirror(self, vdi_uuid, size):
"CEPH_POOL_NAME":self.sr.CEPH_POOL_NAME,
"NBDS_MAX":str(NBDS_MAX),
"CEPH_USER":self.sr.CEPH_USER,"sharable":sharable,
"dm":dm,
"dm":dm, "dev":dev,
"size":str(size)}
self._call_plugin('map',args)
self.session.xenapi.VDI.add_to_sm_config(vdi_ref, 'dm', dm)
@@ -706,7 +715,7 @@ def _unmap_sxm_mirror(self, vdi_uuid, size):
"CEPH_POOL_NAME":self.sr.CEPH_POOL_NAME,
"NBDS_MAX":str(NBDS_MAX),
"CEPH_USER":self.sr.CEPH_USER,"sharable":sharable,
"dm":"mirror",
"dm":dm,
"size":str(size)}
self._call_plugin('unmap',args)
self.session.xenapi.VDI.remove_from_sm_config(vdi_ref, 'dm')
@@ -720,7 +729,31 @@ def _map_sxm_base(self, vdi_uuid, size):
dev_name = "%s/%s" % (self.sr.SR_ROOT, vdi_name)

vdi_ref = self.session.xenapi.VDI.get_by_uuid(vdi_uuid)
sm_config = self.session.xenapi.VDI.get_sm_config(vdi_ref)
sr_sm_config = self.session.xenapi.SR.get_sm_config(self.sr.sr_ref)
dm="base"

if sr_sm_config.has_key("dev_instances"):
sr_dev_instances = json.loads(sr_sm_config["dev_instances"])
else:
sr_dev_instances={"hosts":{}}

first_free_instance = -1
if sr_dev_instances["hosts"].has_key(host_uuid):
for i in range(NBDS_MAX):
if sr_dev_instances["hosts"][host_uuid][i] == None:
first_free_instance = i
break
sr_dev_instances["hosts"][host_uuid][first_free_instance] = vdi_uuid
else:
#sr_dev_instances["hosts"].append({host_uuid:[None]*NBDS_MAX})
sr_dev_instances["hosts"][host_uuid] = [None]*NBDS_MAX
sr_dev_instances["hosts"][host_uuid][0] = "reserved"
sr_dev_instances["hosts"][host_uuid][1] = vdi_uuid
first_free_instance = 1

dev = str(first_free_instance)

if self.session.xenapi.VDI.get_sharable(vdi_ref):
sharable="true"
else:
@@ -735,11 +768,17 @@ def _map_sxm_base(self, vdi_uuid, size):
"CEPH_POOL_NAME":self.sr.CEPH_POOL_NAME,
"NBDS_MAX":str(NBDS_MAX),
"CEPH_USER":self.sr.CEPH_USER,"sharable":sharable,
"dm":dm,
"dm":dm, "dev":dev,
"size":str(size)}
self._call_plugin('map',args)
self.session.xenapi.VDI.add_to_sm_config(vdi_ref, 'dm', dm)

self.session.xenapi.SR.remove_from_sm_config(self.sr.sr_ref, "dev_instances")
self.session.xenapi.SR.add_to_sm_config(self.sr.sr_ref, "dev_instances", json.dumps(sr_dev_instances))
if sm_config.has_key("dev_instance"):
self.session.xenapi.VDI.remove_from_sm_config(vdi_ref, "dev_instance")
self.session.xenapi.VDI.add_to_sm_config(vdi_ref, "dev_instance", str(first_free_instance))

def _unmap_sxm_base(self, vdi_uuid, size):
_vdi_name = "%s%s" % (VDI_PREFIX, vdi_uuid)
_dev_name = "%s/%s" % (self.sr.DEV_ROOT, _vdi_name)
@@ -749,6 +788,9 @@ def _unmap_sxm_base(self, vdi_uuid, size):
dev_name = "%s/%s" % (self.sr.SR_ROOT, vdi_name)

vdi_ref = self.session.xenapi.VDI.get_by_uuid(vdi_uuid)
sm_config = self.session.xenapi.VDI.get_sm_config(vdi_ref)
sr_sm_config = self.session.xenapi.SR.get_sm_config(self.sr.sr_ref)

dm="base"
if self.session.xenapi.VDI.get_sharable(vdi_ref):
sharable="true"
@@ -769,6 +811,16 @@ def _unmap_sxm_base(self, vdi_uuid, size):
self._call_plugin('unmap',args)
self.session.xenapi.VDI.remove_from_sm_config(vdi_ref, 'dm')

sr_dev_instances = json.loads(sr_sm_config["dev_instances"])
self.session.xenapi.SR.remove_from_sm_config(self.sr.sr_ref, "dev_instances")
if sr_dev_instances["hosts"].has_key(host_uuid):
for i in range(NBDS_MAX):
if sr_dev_instances["hosts"][host_uuid][i] == vdi_uuid:
sr_dev_instances["hosts"][host_uuid][i] = None
break
self.session.xenapi.SR.add_to_sm_config(self.sr.sr_ref, "dev_instances", json.dumps(sr_dev_instances))
self.session.xenapi.VDI.remove_from_sm_config(vdi_ref, "dev_instance")

def _merge_sxm_diffs(self, mirror_uuid, base_uuid, size):
util.SMlog("Calling cephutills.VDI._merge_sxm_diffs: mirror_uuid=%s, base_uuid=%s, size=%s" % (mirror_uuid, base_uuid, size))
_mirror_vdi_name = "%s%s" % (VDI_PREFIX, mirror_uuid)
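_map_sxm_base and _unmap_sxm_base repeat the same allocate/release pattern directly inside cephutils.py, since the base VDI of a storage-migrating VM is mapped without going through RBDVDI.attach, while the other _map_* helpers simply forward the VDI's stored "dev_instance" to the plugin as "dev". A hypothetical walk-through of one map/unmap cycle, reusing the illustrative helpers sketched after the RBDSR.py diff above:

# Invented host and VDI identifiers; assumes the allocate/release helpers sketched earlier.
registry_json = None                                    # SR sm_config has no "dev_instances" yet
registry_json, slot = allocate_nbd_slot(registry_json, "host-A", "vdi-1")
print(slot)                                             # 1 -- slot 0 was set to "reserved"
# ... ceph_plugin maps the image onto /dev/nbd1 and the VDI gets dev_instance = "1" ...
registry_json = release_nbd_slot(registry_json, "host-A", "vdi-1")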
