/
utils.py
440 lines (374 loc) · 16.2 KB
/
utils.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
"""Makes flattened views of volumetric data on the cortical surface.
"""
from six import string_types
from functools import reduce
import os
import glob
import numpy as np
import string
from .. import utils
from .. import dataset
from ..database import db
from ..options import config
def make_flatmap_image(braindata, height=1024, recache=False, **kwargs):
    """Generate flatmap image from volumetric brain data

    Projects the data in `braindata` onto the subject's flattened cortical
    surface and rasterizes it into a 2D image.

    Parameters
    ----------
    braindata : one of: {cortex.Volume, cortex.Vertex, cortex.Dataview)
        Object containing containing data to be plotted, subject (surface identifier),
        and transform.
    height : scalar
        Height of image. None defaults to height of images already present in figure.
    recache : boolean
        Whether or not to recache intermediate files. Takes longer to plot this way, potentially
        resolves some errors. Useful if you've made changes to the alignment.
    kwargs : dict
        Extra keyword arguments passed through to `get_flatcache`
        (e.g. sampler, thick, depth).

    Returns
    -------
    image : 2D or 3D numpy array
        The flatmap image: (height, width, 4) uint8 for raw RGBA data,
        otherwise (height, width) scalars with NaN outside the surface mask.
    extents : array
        [xmin, xmax, ymin, ymax] of the flatmap in surface coordinates.
    """
    mask, extents = get_flatmask(braindata.subject, height=height, recache=recache)
    if not hasattr(braindata, "xfmname"):
        # Vertex data: map surface vertices directly to flatmap pixels
        pixmap = get_flatcache(braindata.subject,
                               None,
                               height=height,
                               recache=recache,
                               **kwargs)
        if isinstance(braindata, dataset.Vertex2D):
            data = braindata.raw.vertices
        else:
            data = braindata.vertices
    else:
        # Volume data: sample the volume through the subject's transform
        pixmap = get_flatcache(braindata.subject,
                               braindata.xfmname,
                               height=height,
                               recache=recache,
                               **kwargs)
        if isinstance(braindata, dataset.Volume2D):
            data = braindata.raw.volume
        else:
            data = braindata.volume
    if data.shape[0] > 1:
        raise ValueError("Input data was not the correct dimensionality - please provide 3D Volume or 2D Vertex data")
    # NOTE: np.bool / np.float were deprecated aliases removed in NumPy >= 1.24;
    # the builtin types are the correct spelling and behave identically here.
    if data.dtype == bool:
        # Convert data to float to avoid image artifacts with booleans
        data = data.astype(float)
    if data.dtype == np.uint8:
        # Raw RGBA data: composite each of the 4 channels through the pixel map
        img = np.zeros(mask.shape+(4,), dtype=np.uint8)
        img[mask] = pixmap * data.reshape(-1, 4)
        return img.transpose(1,0,2)[::-1], extents
    else:
        # Scalar data: pixels with no contributing voxels/vertices stay NaN
        badmask = np.array(pixmap.sum(1) > 0).ravel()
        img = (np.nan*np.ones(mask.shape)).astype(data.dtype)
        mimg = (np.nan*np.ones(badmask.shape)).astype(data.dtype)
        mimg[badmask] = (pixmap*data.ravel())[badmask].astype(mimg.dtype)
        img[mask] = mimg
        return img.T[::-1], extents
def get_flatmask(subject, height=1024, recache=False):
    """Return the flatmap boolean mask and extents for a subject, via the cache.

    Parameters
    ----------
    subject : str
        Name of subject in pycortex store
    height : int
        Height in pixels to generate the image
    recache : bool
        Recache the intermediate files? Can resolve some issues but is slower.
    """
    cache_path = os.path.join(db.get_cache(subject),
                              "flatmask_{h}.npz".format(h=height))
    if recache or not os.path.exists(cache_path):
        # No usable cache: rasterize the mask and store it for next time
        mask, extents = _make_flatmask(subject, height=height)
        np.savez(cache_path, mask=mask, extents=extents)
    else:
        archive = np.load(cache_path)
        mask = archive['mask']
        extents = archive['extents']
        archive.close()
    return mask, extents
def get_flatcache(subject, xfmname, pixelwise=True, thick=32, sampler='nearest',
                  recache=False, height=1024, depth=0.5):
    """Return the cached sparse pixel map projecting data onto the flatmap.

    Parameters
    ----------
    subject : str
        Subject name in pycortex db
    xfmname : str
        Name of transform for subject (None for vertex data)
    pixelwise : bool
        Sample the volume per-pixel (True) or per-vertex (False)
    thick : int
        Number of cortical-depth layers to sample when pixelwise
    sampler : str
        Name of the sampler function used to pull values from the volume
    recache : bool
        Recache intermediate files? Doing so is slower but can resolve some errors.
    height : int
        Height in pixels of image to generated
    depth : float
        Fractional cortical depth to sample at when thick == 1

    Returns
    -------
    pixmap : scipy.sparse.csr_matrix
        Sparse matrix mapping input data onto flatmap pixels.
    """
    use_pixel_cache = pixelwise and xfmname is not None
    cachedir = db.get_cache(subject)
    if use_pixel_cache:
        extra = "l%d"%thick if thick > 1 else "d%g"%depth
        template = os.path.join(cachedir, "flatpixel_{xfmname}_{height}_{sampler}_{extra}.npz")
        cachefile = template.format(height=height, xfmname=xfmname,
                                    sampler=sampler, extra=extra)
    else:
        cachefile = os.path.join(cachedir, "flatverts_{height}.npz").format(height=height)

    if recache or not os.path.exists(cachefile):
        print("Generating a flatmap cache")
        if use_pixel_cache:
            pixmap = _make_pixel_cache(subject, xfmname, height=height,
                                       sampler=sampler, thick=thick, depth=depth)
        else:
            pixmap = _make_vertex_cache(subject, height=height)
        # CSR internals are saved directly so the matrix can be rebuilt cheaply
        np.savez(cachefile, data=pixmap.data, indices=pixmap.indices,
                 indptr=pixmap.indptr, shape=pixmap.shape)
    else:
        from scipy import sparse
        archive = np.load(cachefile)
        pixmap = sparse.csr_matrix(
            (archive['data'], archive['indices'], archive['indptr']),
            shape=archive['shape'])
        archive.close()

    if not pixelwise and xfmname is not None:
        # Vertex cache + transform: compose with the vertex-to-voxel mapper
        from scipy import sparse
        mapper = utils.get_mapper(subject, xfmname, sampler)
        pixmap = pixmap * sparse.vstack(mapper.masks)
    return pixmap
### --- Hidden helper functions --- ###
def _color2hex(color):
    """Return the hex string for any matplotlib-parseable color specification."""
    from matplotlib import colors
    rgba = colors.ColorConverter().to_rgba(color)
    return colors.rgb2hex(rgba)
def _convert_svg_kwargs(kwargs):
"""Convert matplotlib-like plotting property names/values to svg object property names/values"""
svg_style_key_mapping = dict(
linewidth='stroke-width',
lw='stroke-width',
linecolor='stroke',
lc='stroke',
#labelcolor='', # FIX ME
#labelsize='', # FIX ME
linealpha='stroke-opacity',
roifill='fill',
fillcolor='fill',
fillalpha='fill-opacity',
dashes='stroke-dasharray'
#dash_capstyle # ADD ME?
#dash_joinstyle # ADD ME?
)
svg_style_value_mapping = dict(
linewidth=lambda x: x,
lw=lambda x: x,
linecolor=lambda x: _color2hex(x),
lc=lambda x: _color2hex(x),
labelcolor=lambda x: _color2hex(x),
labelsize=lambda x: x,
linealpha=lambda x: x,
roifill=lambda x: _color2hex(x),
fillcolor=lambda x: _color2hex(x),
fillalpha=lambda x: x,
dashes=lambda x: '{}, {}'.format(*x),
#dash_capstyle # ADD ME?
#dash_joinstyle # ADD ME?
)
out = dict((svg_style_key_mapping[k], svg_style_value_mapping[k](v))
for k,v in kwargs.items() if v is not None)
return out
def _parse_defaults(section):
    """Read a config section into a dict, coercing numbers, 'None', and colors.

    String values that look numeric become floats (comma-separated ones become
    lists of floats); the literal 'None' becomes None; stroke/fill values are
    normalized to hex colors; dasharray sequences become SVG strings.
    """
    defaults = dict(config.items(section))
    for key in defaults.keys():
        raw = defaults[key]
        # Convert numbers to floating point numbers
        if raw[0] in string.digits + '.':
            if ',' in raw:
                defaults[key] = [float(piece) for piece in raw.split(',')]
            else:
                defaults[key] = float(raw)
        # Convert 'None' to None
        if defaults[key] == 'None':
            defaults[key] = None
        # Special case formatting
        if key in ('stroke', 'fill'):
            defaults[key] = _color2hex(defaults[key])
        elif key == 'stroke-dasharray' and isinstance(defaults[key], (list, tuple)):
            defaults[key] = '{}, {}'.format(*defaults[key])
    return defaults
def _get_images(fig):
    """Get all images in a given matplotlib axis, keyed by their label."""
    from matplotlib.image import AxesImage
    children = fig.gca().get_children()
    return {child.get_label(): child
            for child in children if isinstance(child, AxesImage)}
def _get_extents(fig):
    """Get extents of the 'data' image currently in a given matplotlib figure."""
    data_image = _get_images(fig).get('data')
    if data_image is None:
        raise ValueError("You must specify `extents` argument if you have not yet plotted a data flatmap!")
    return data_image.get_extent()
def _get_height(fig):
    """Get pixel height of the data image in a given matplotlib figure."""
    images = _get_images(fig)
    if 'data_cutout' in images:
        raise Exception("Can't add plots once cutout has been performed! Do cutouts last!")
    data_image = images.get('data')
    if data_image is None:
        # No images yet: fall back to the default flatmap height
        return 1024
    return data_image.get_array().shape[0]
def _make_hatch_image(hatch_data, height, sampler='nearest', hatch_space=4, recache=False):
    """Make an RGBA cross-hatch image whose alpha follows `hatch_data`.

    Parameters
    ----------
    hatch_data : cortex.Dataview
        brain data with values ranging from 0-1, specifying where to show hatch marks (data value
        will be mapped to alpha value of hatch marks)
    height : scalar
        height of image to display
    sampler : string
        pycortex sampler string, {'nearest', ...} (FILL ME IN ??)
    hatch_space : scalar
        space between hatch lines (in pixels)
    recache : boolean
    """
    dmap, _ = make_flatmap_image(hatch_data, height=height, sampler=sampler, recache=recache)
    cols, rows = np.meshgrid(range(dmap.shape[1]), range(dmap.shape[0]))
    # Diagonal stripes, 2 px wide, repeating every 2*hatch_space pixels
    diagonal = (cols + rows) % (2 * hatch_space) < 2
    # Overlay the mirrored stripes to produce a cross-hatch pattern
    pattern = np.logical_or(diagonal, diagonal[:, ::-1]).astype(float)
    # RGB is the pattern's inverse (dark lines); alpha is the pattern itself
    hatchim = np.dstack([1 - pattern] * 3 + [pattern])
    # Modulate alpha by the (clipped) data values
    hatchim[:, :, 3] *= np.clip(dmap, 0, 1).astype(float)
    return hatchim
def _make_flatmask(subject, height=1024):
    """Rasterize the subject's flat surface outline into a boolean pixel mask.

    Traces the boundary of each hemisphere of the flattened surface and fills
    the two outlines into a (width, height) image.

    Parameters
    ----------
    subject : str
        Name of subject in the pycortex store.
    height : int
        Height in pixels of the rasterized mask.

    Returns
    -------
    mask : 2D boolean array, shape (width, height)
        True for pixels inside either hemisphere outline.
    extents : array
        [xmin, xmax, ymin, ymax] of the flat surface coordinates.
    """
    from .. import polyutils
    from PIL import Image, ImageDraw
    pts, polys = db.get_surf(subject, "flat", merge=True, nudge=True)
    # Boundary edges traced into closed polygons; first two are the hemispheres
    bounds = polyutils.trace_poly(polyutils.boundary_edges(polys))
    try:
        left, right = bounds.next(), bounds.next() # python 2.X
    except AttributeError:
        left, right = next(bounds), next(bounds) # python 3.X
    # Pixels per surface-coordinate unit, fixed by the requested height
    aspect = (height / (pts.max(0) - pts.min(0))[1])
    # Shift boundary points to a zero origin and scale to pixel coordinates
    lpts = (pts[left] - pts.min(0)) * aspect
    rpts = (pts[right] - pts.min(0)) * aspect
    im = Image.new('L', (int(aspect * (pts.max(0) - pts.min(0))[0]), height))
    draw = ImageDraw.Draw(im)
    draw.polygon(lpts[:,:2].ravel().tolist(), fill=255)
    draw.polygon(rpts[:,:2].ravel().tolist(), fill=255)
    # Reorder [xmin, ymin, zmin, xmax, ymax, zmax] -> [xmin, xmax, ymin, ymax]
    extents = np.hstack([pts.min(0), pts.max(0)])[[0,3,1,4]]
    return np.array(im).T > 0, extents
def _make_vertex_cache(subject, height=1024):
    """Build a sparse (pixel x vertex) matrix mapping flatmap pixels to vertices.

    Each in-mask pixel is assigned the value of the nearest flat-surface
    vertex (nearest-neighbor lookup via a KD-tree).

    Parameters
    ----------
    subject : str
        Name of subject in the pycortex store.
    height : int
        Height in pixels of the flatmap image.

    Returns
    -------
    scipy.sparse.csr_matrix, shape (mask.sum(), n_vertices)
        One entry of 1.0 per in-mask pixel, in the column of its nearest vertex.
    """
    from scipy import sparse
    from scipy.spatial import cKDTree
    flat, polys = db.get_surf(subject, "flat", merge=True, nudge=True)
    # Only vertices referenced by the flat mesh polygons are candidates
    valid = np.unique(polys)
    fmax, fmin = flat.max(0), flat.min(0)
    size = fmax - fmin
    aspect = size[0] / size[1]
    width = int(aspect * height)
    # Pixel-center coordinates spanning the flatmap bounding box
    grid = np.mgrid[fmin[0]:fmax[0]:width*1j, fmin[1]:fmax[1]:height*1j].reshape(2,-1)
    mask, extents = get_flatmask(subject, height=height)
    assert mask.shape[0] == width and mask.shape[1] == height
    kdt = cKDTree(flat[valid,:2])
    # Query only the pixels inside the mask; vert indexes into `valid`
    dist, vert = kdt.query(grid.T[mask.ravel()])
    dataij = (np.ones((len(vert),)), np.array([np.arange(len(vert)), valid[vert]]))
    return sparse.csr_matrix(dataij, shape=(mask.sum(), len(flat)))
def _make_pixel_cache(subject, xfmname, height=1024, thick=32, depth=0.5, sampler='nearest'):
    """Build a sparse (pixel x voxel) matrix sampling a volume onto the flatmap.

    For each in-mask flatmap pixel, barycentric coordinates on the flat mesh
    are used to interpolate the corresponding 3D position on the cortical
    surface(s), which is then transformed into voxel space and sampled.

    Parameters
    ----------
    subject : str
        Name of subject in the pycortex store.
    xfmname : str
        Name of the subject's transform (surface -> volume).
    height : int
        Height in pixels of the flatmap image.
    thick : int
        Number of equally spaced cortical-depth layers to average over
        (requires pia and wm surfaces; thick == 1 samples a single depth).
    depth : float
        Fractional depth between wm (0) and pia (1) used when thick == 1.
    sampler : str
        Name of the sampler function in ..mapper.samplers.

    Returns
    -------
    scipy.sparse.csr_matrix, shape (mask.sum(), n_voxels)
        Maps raveled volume data to flatmap pixels.
    """
    from scipy import sparse
    from scipy.spatial import Delaunay
    flat, polys = db.get_surf(subject, "flat", merge=True, nudge=True)
    valid = np.unique(polys)
    fmax, fmin = flat.max(0), flat.min(0)
    size = fmax - fmin
    aspect = size[0] / size[1]
    width = int(aspect * height)
    # Pixel-center coordinates spanning the flatmap bounding box
    grid = np.mgrid[fmin[0]:fmax[0]:width*1j, fmin[1]:fmax[1]:height*1j].reshape(2,-1)
    mask, extents = get_flatmask(subject, height=height)
    assert mask.shape[0] == width and mask.shape[1] == height
    ## Get barycentric coordinates
    dl = Delaunay(flat[valid,:2])
    simps = dl.find_simplex(grid.T[mask.ravel()])
    # find_simplex returns -1 for pixels outside the triangulation
    missing = simps == -1
    tfms = dl.transform[simps]
    l1, l2 = (tfms[:,:2].transpose(1,2,0) * (grid.T[mask.ravel()] - tfms[:,2]).T).sum(1)
    l3 = 1 - l1 - l2
    ll = np.vstack([l1, l2, l3])
    # Zero out weights for pixels with no containing simplex
    ll[:,missing] = 0
    from ..mapper import samplers
    xfm = db.get_xfm(subject, xfmname, xfmtype='coord')
    sampclass = getattr(samplers, sampler)
    ## Transform fiducial vertex locations to pixel locations using barycentric xfm
    try:
        # Preferred path: sample between the pial and white-matter surfaces
        pia, polys = db.get_surf(subject, "pia", merge=True, nudge=False)
        wm, polys = db.get_surf(subject, "wm", merge=True, nudge=False)
        # Barycentric interpolation of each pixel's 3D position, then -> voxels
        piacoords = xfm((pia[valid][dl.vertices][simps] * ll[np.newaxis].T).sum(1))
        wmcoords = xfm((wm[valid][dl.vertices][simps] * ll[np.newaxis].T).sum(1))
        # Keep only coordinates that fall inside the volume on both surfaces
        valid_p = np.array([np.all((0 <= piacoords), axis=1),
                            piacoords[:,0] < xfm.shape[2],
                            piacoords[:,1] < xfm.shape[1],
                            piacoords[:,2] < xfm.shape[0]])
        valid_p = np.all(valid_p, axis=0)
        valid_w = np.array([np.all((0 <= wmcoords), axis=1),
                            wmcoords[:,0] < xfm.shape[2],
                            wmcoords[:,1] < xfm.shape[1],
                            wmcoords[:,2] < xfm.shape[0]])
        valid_w = np.all(valid_w, axis=0)
        valid = np.logical_and(valid_p, valid_w)
        vidx = np.nonzero(valid)[0]
        mapper = sparse.csr_matrix((mask.sum(), np.prod(xfm.shape)))
        if thick == 1:
            # Single layer at fractional depth between wm (0) and pia (1)
            i, j, data = sampclass(piacoords[valid]*depth + wmcoords[valid]*(1-depth), xfm.shape)
            mapper = mapper + sparse.csr_matrix((data / float(thick), (vidx[i], j)),
                                                shape=mapper.shape)
            return mapper
        # Average `thick` layers spaced evenly through the cortical thickness
        # (endpoints excluded by the [1:-1] slice)
        for t in np.linspace(0, 1, thick+2)[1:-1]:
            i, j, data = sampclass(piacoords[valid]*t + wmcoords[valid]*(1-t), xfm.shape)
            mapper = mapper + sparse.csr_matrix((data / float(thick), (vidx[i], j)),
                                                shape=mapper.shape)
        return mapper
    except IOError:
        # Fallback: no pia/wm surfaces; sample at the single fiducial surface
        fid, polys = db.get_surf(subject, "fiducial", merge=True)
        fidcoords = xfm((fid[valid][dl.vertices][simps] * ll[np.newaxis].T).sum(1))
        valid = reduce(np.logical_and,
                       [reduce(np.logical_and, (0 <= fidcoords).T),
                        fidcoords[:,0] < xfm.shape[2],
                        fidcoords[:,1] < xfm.shape[1],
                        fidcoords[:,2] < xfm.shape[0]])
        vidx = np.nonzero(valid)[0]
        i, j, data = sampclass(fidcoords[valid], xfm.shape)
        csrshape = mask.sum(), np.prod(xfm.shape)
        return sparse.csr_matrix((data, (vidx[i], j)), shape=csrshape)
def _has_cmap(dataview):
    """Checks whether a given dataview has colormap (cmap) information as an
    instance or is an RGB volume and does not have a cmap.
    Returns a dictionary with cmap information for non RGB volumes"""
    from matplotlib import colors, cm, pyplot as plt
    cmapdict = dict()
    if not isinstance(dataview, (dataset.VolumeRGB, dataset.VertexRGB)):
        # Get colormap from matplotlib or pycortex colormaps
        ## -- redundant code, here and in cortex/dataset/views.py -- ##
        if isinstance(dataview.cmap, string_types):
            if dataview.cmap not in cm.__dict__:
                # unknown colormap, test whether it's in pycortex colormaps
                cmapdir = config.get('webgl', 'colormaps')
                colormaps = glob.glob(os.path.join(cmapdir, "*.png"))
                # Map colormap name (png filename without extension) -> path
                colormaps = dict(((os.path.split(c)[1][:-4],c) for c in colormaps))
                if dataview.cmap not in colormaps:
                    # Fixed typo in user-facing message ("Unkown" -> "Unknown")
                    raise Exception('Unknown color map!')
                I = plt.imread(colormaps[dataview.cmap])
                cmap = colors.ListedColormap(np.squeeze(I))
                # Register colormap while we're at it
                cm.register_cmap(dataview.cmap,cmap)
            else:
                cmap = dataview.cmap
        elif isinstance(dataview.cmap, colors.Colormap):
            # Allow input of matplotlib colormap class
            cmap = dataview.cmap
        else:
            raise TypeError('{} type not handled'.format(type(dataview.cmap)))
        cmapdict.update(cmap=cmap,
                        vmin=dataview.vmin,
                        vmax=dataview.vmax)
    return cmapdict