Browse files

Merge Connected_vertex_lines features into glrework-merged (#270)

* Minor removal of comments print statements etc
* Filtering based on Dijkstra
* Refinements to A-star, plus option to exclude a small border region
* caching of shared voxel array, better mapping of verts to image
* Example added for pycortex gallery
  • Loading branch information...
sutkarsh authored and marklescroart committed Jun 1, 2018
1 parent 17e559b commit 4f8b76b040cd9144fdebc3d8a7354807737b570d
@@ -274,22 +274,22 @@ def get_overlay(self, subject, **kwargs):
pts, polys = self.get_surf(subject, "flat", merge=True, nudge=True)
paths = self.get_paths(subject)
# NOTE: This try loop is broken, in that it does nothing for the intended
# use case (loading an overlay from a packed subject) - needs fixing.
# This hasn't come up yet due to very rare use of packed subjects.
if self.auxfile is not None:
#print("I FOUND AN AUXFILE! HOLY MOLY, WHAT IS THAT??") # still unclear what this does
# Prob should be something like:
#self.auxfile.get_overlay(subject, **kwargs)
tf = self.auxfile.get_overlay(subject) # kwargs??
svgfile =
except (AttributeError, IOError):
# NOTE: This is better error handling, but does not account for
# case in which self.auxfile is None - when is that?? I (ML) think
# it only comes up with new svg layer variants in extra_layers branch...
# svgfile = self.get_paths(subject)["rois"]
# Layer type does not exist or has been temporarily removed
# I think the rest of the code should be here (as in other functions...)
if 'pts' in kwargs:
pts = kwargs['pts']
del kwargs['pts']
# ... and `svgfile` variable should be used here...
if os.path.exists(paths['rois']) and not os.path.exists(paths['overlays']):
svgoverlay.import_roi(paths['rois'], paths['overlays'])
@@ -480,6 +480,32 @@ def get_mask(self, subject, xfmname, type='thick'):
self.save_mask(subject, xfmname, type, mask)
return mask
def get_shared_voxels(self, subject, xfmname, hemi="both", merge=True, use_astar=True, recache=False):
"""Get an array indicating which vertices are inappropriately mapped to the same voxel.
For a given transform and surface, returns an array containing a list of vertices which
are spatially distant on the cortical surface but that map to the same voxels (this occurs
at sulcal crossings)
# Test for packed subjects
voxels = self.auxfile.get_shared_voxels(subject, xfmname, hemi=hemi, merge=merge, use_astar=use_astar)
return voxels
except (AttributeError, IOError):
# Proceed w/ potential load
shared_voxel_file = os.path.join(self.get_cache(subject), 'shared_vertices_{xfmname}_{hemi}.npy'.format(xfmname=xfmname, hemi=hemi))
if not os.path.exists(shared_voxel_file) or recache:
print('Shared voxel array not found, generating...')
from .utils import get_shared_voxels as _get_shared_voxels
voxels = _get_shared_voxels(subject, xfmname, hemi=hemi, merge=merge, use_astar=use_astar), voxels)
return voxels
voxels = np.load(shared_voxel_file)
return voxels
def get_coords(self, subject, xfmname, hemisphere="both", magnet=None):
"""Calculate the coordinates of each vertex in the epi space by transforming the fiducial to the coordinate space
@@ -83,6 +83,13 @@ def _to_raw(self, data1, data2):
# Preserve nan values as alpha = 0
aidx = np.logical_or(np.isnan(data1),np.isnan(data2))
a[aidx] = 0
# Code from master, to handle alpha input, prob better here but not tested.
# # Possibly move this above setting nans to alpha = 0;
# # Possibly multiply specified alpha by alpha in colormap??
# if 'alpha' in self.attrs:
# # Over-write alpha from colormap / nans with alpha arg if provided.
# # Question: Might it be important to keep alpha as an attr?
# a = self.attrs.pop('alpha')
return r, g, b, a
@@ -3,8 +3,12 @@
from .. import dataset
from ..database import db
from ..options import config
from ..svgoverlay import get_overlay
from ..utils import get_shared_voxels, get_mapper
from .utils import _get_height, _get_extents, _convert_svg_kwargs, _has_cmap, _get_images, _parse_defaults
from .utils import make_flatmap_image, _make_hatch_image
from .utils import make_flatmap_image, _make_hatch_image, _return_pixel_pairs, get_flatmask, get_flatcache
import time
### --- Individual compositing functions --- ###
@@ -77,6 +81,9 @@ def add_curvature(fig, dataview, extents=None, height=None, threshold=True, cont
curv_vertices = db.get_surfinfo(dataview.subject, smooth=smooth)
curv, _ = make_flatmap_image(curv_vertices, recache=recache, height=height)
# First, limit to sensible range for flatmap curvature
norm = Normalize(vmin=-0.5, vmax=0.5)
curv_im = norm(curv)
# Option to use thresholded curvature
default_threshold = config.get('curvature','threshold').lower() in ('true','t','1','y','yes')
use_threshold_curvature = default_threshold if threshold is None else threshold
@@ -212,7 +219,7 @@ def add_rois(fig, dataview, extents=None, height=None, with_labels=True, roi_lis
return img
def add_sulci(fig, dataview, extents=None, height=1024, with_labels=True, **kwargs):
def add_sulci(fig, dataview, extents=None, height=None, with_labels=True, **kwargs):
"""Add sulci layer to figure
@@ -386,7 +393,7 @@ def add_custom(fig, dataview, svgfile, layer, extents=None, height=None, with_la
extents : array-like
4 values for [Left, Right, Bottom, Top] extents of image plotted. If None, defaults to
extents of images already present in figure.
height : scalar
height : scalar
Height of image. if None, defaults to height of images already present in figure.
with_labels : bool
Whether to display text labels on ROIs
@@ -431,6 +438,102 @@ def add_custom(fig, dataview, svgfile, layer, extents=None, height=None, with_la
return img
def add_connected_vertices(fig, dataview, exclude_border_width=None,
height=None, extents=None, recache=False,
color=(1.0, 0.5, 0.1, 0.6), linewidth=2,
alpha=1.0, **kwargs):
"""Plot lines btw distant vertices that are within the same voxel
fig : matplotlib figure
Figure into which to plot the hatches. Should have pycortex flatmap
image in it already.
dataview : cortex.Volume
cortex.Volume object containing data used to determine which vertices
are connected.
exclude_border_width : scalar or None
if not None, width from edge of flatmap for which crossover lines are
not computed.
height : scalar
Height of image. if None, defaults to height of images already present
in figure.
extents : array-like
4 values for [Left, Right, Bottom, Top] extents of image plotted. If
None, defaults to extents of images already present in figure.
color : rgba tuple
color of lines
linewidth : scalar
width of plotted lines
alpha : scalar, [0-1]
alpha value for plotted lines
kwargs are mapped to cortex.db.get_shared_voxels
The process of drawing all the connected vertices is graphically intensive
because of the sheer number of lines to draw. This is already partly sped
up by using a LineCollection object instead of plotting each line,
but it's still an expensive step, and takes quite a while on some systems.
`extents` is currently unused, but probably should be to scale pix_array
As a result, this may be brittle to some figure transformations.
from matplotlib.collections import LineCollection
from scipy.ndimage import binary_dilation
if extents is None:
extents = _get_extents(fig)
if height is None:
height = _get_height(fig)
subject = dataview.subject
xfmname = dataview.xfmname
if xfmname is None:
raise ValueError("Dataview for add_connected_vertices must be a Volume! You seem to have provided vertex data.")
print('computing shared voxels')
shared_voxels = db.get_shared_voxels(subject, xfmname, recache=recache, **kwargs)
print('Finished computing shared voxels')
mask, extents = get_flatmask(subject)
pixmap = get_flatcache(subject, None)
n_pixels, n_verts = pixmap.shape
if exclude_border_width:
# Finding vertices that map to the border of the flatmap
img = np.nan * np.ones(mask.shape)
img[mask] = pixmap * np.arange(n_verts) # mapper.nverts
border_mask = binary_dilation(~mask, iterations=exclude_border_width) ^ (~mask)
border_vertices = set(img[border_mask].astype(int))
shared_voxels = np.array([a for a in shared_voxels if ((a[1] not in border_vertices) and (a[2] not in border_vertices))])
valid_vert_mask = np.array(pixmap.sum(0) > 0).flatten()
valid_verts = np.arange(n_verts)[valid_vert_mask] # mapper.nverts
# Assure both vertices in each pair are not in the medial wall
vtx1valid = np.in1d(shared_voxels[:, 1], valid_verts)
vtx2valid = np.in1d(shared_voxels[:, 2], valid_verts)
va, vb = shared_voxels[vtx1valid & vtx2valid, 1:].T
# Get X, Y coordinates per vertex, scale to 0-1 range
[lpt, lpoly], [rpt, rpoly] = db.get_surf(subject, "flat", nudge=True)
vert_xyz = np.vstack([lpt, rpt])
vert_xyz -= vert_xyz.min(0)
vert_xyz /= vert_xyz.max(0)
x, y = vert_xyz[:, :2].T
# Map vertices to X, Y coordinates suitable for LineCollection input
pix_array_x = np.vstack([x[va], x[vb]]).T
pix_array_y = np.vstack([y[va], y[vb]]).T
pix_array_scaled = np.dstack([pix_array_x, pix_array_y])
# Add line collection
# (This is the most time consuming step, as it draws many lines)
print('plotting lines...')
lc = LineCollection(pix_array_scaled,
ax = fig.gca()
lc_object = ax.add_collection(lc)
return lc_object
def add_cutout(fig, name, dataview, layers=None, height=None, extents=None):
"""Apply a cutout mask to extant layers in flatmap figure
@@ -157,6 +157,20 @@ def get_flatcache(subject, xfmname, pixelwise=True, thick=32, sampler='nearest',
return pixmap
def _return_pixel_pairs(vert_pair_list, x_dict, y_dict):
"""Janky and probably unnecessary"""
pix_list = []
vert_pairs_valid = []
for (vert1, vert2) in vert_pair_list:
if vert1 in x_dict and vert2 in x_dict:
pix1 = np.array((x_dict[vert1], y_dict[vert1]))
pix2 = np.array((x_dict[vert2], y_dict[vert2]))
pix_list.append(np.array([pix1, pix2]))
vert_pairs_valid.append((vert1, vert2))
#These are vertex pairs not represented in the flatmap. I have found them to belong to the middle brain are that is deleted while creating the flat map.
return np.array(pix_list), np.array(vert_pairs_valid)
### --- Hidden helper functions --- ###
@@ -14,6 +14,7 @@ def make_figure(braindata, recache=False, pixelwise=True, thick=32, sampler='nea
height=1024, dpi=100, depth=0.5, with_rois=True, with_sulci=False,
with_labels=True, with_colorbar=True, with_borders=False,
with_dropout=False, with_curvature=False, extra_disp=None,
linewidth=None, linecolor=None, roifill=None, shadow=None,
labelsize=None, labelcolor=None, cutout=None, curvature_brightness=None,
curvature_contrast=None, curvature_threshold=None, fig=None, extra_hatch=None,
@@ -43,8 +44,8 @@ def make_figure(braindata, recache=False, pixelwise=True, thick=32, sampler='nea
depth : float
Value between 0 and 1 for how deep to sample the surface for the flatmap (0 = gray/white matter
boundary, 1 = pial surface)
with_rois, with_labels, with_colorbar, with_borders, with_dropout, with_curvature : bool, optional
Display the rois, labels, colorbar, annotated flatmap borders, or cross-hatch dropout?
with_rois, with_labels, with_colorbar, with_borders, with_dropout, with_curvature, etc : bool, optional
Display the rois, labels, colorbar, annotated flatmap borders, etc
cutout : str
Name of flatmap cutout with which to clip the full flatmap. Should be the name
of a sub-layer of the 'cutouts' layer in <filestore>/<subject>/overlays.svg
@@ -93,7 +94,6 @@ def make_figure(braindata, recache=False, pixelwise=True, thick=32, sampler='nea
dataview = dataset.normalize(braindata)
if not isinstance(dataview, dataset.Dataview):
raise TypeError('Please provide a Dataview (e.g. an instance of cortex.Volume, cortex.Vertex, etc), not a Dataset')
if fig is None:
fig_resize = True
fig = plt.figure()
@@ -171,6 +171,9 @@ def make_figure(braindata, recache=False, pixelwise=True, thick=32, sampler='nea
linewidth=linewidth, linecolor=linecolor, shadow=shadow, labelsize=labelsize, labelcolor=labelcolor,
layers['custom'] = custom_im
# Add connector lines btw connected vertices
if with_connected_vertices:
vertex_lines = composite.add_connected_vertices(fig, dataview)
ax.set_xlim(extents[0], extents[1])
Oops, something went wrong.

0 comments on commit 4f8b76b

Please sign in to comment.