bugfix in drawing kdtree-based map with autoscaling
François Laurent committed Feb 28, 2018
1 parent 20de488 commit 44ae6ed
Showing 7 changed files with 56 additions and 43 deletions.
4 changes: 2 additions & 2 deletions tests/test_commandline.py
@@ -17,9 +17,9 @@
 import random
 
 
-py2_hash, py3_hash = 'y7DkJVGe', 'PypAyFiw'
+py2_hash, py3_hash = 'PGiGV4YY', 'wmkqLTc4'
 data_server = 'http://dl.pasteur.fr/fop/{}/'.format(py2_hash if sys.version_info[0] == 2 else py3_hash)
-data_update = '180227'
+data_update = '180228'
 data_file = 'glycine_receptor.trxyt'
 
 data_dir = '{}_py{}_{}'.format('test_commandline', sys.version_info[0], data_update)
42 changes: 27 additions & 15 deletions tramway/core/scaler.py
@@ -50,7 +50,7 @@ class Scaler(object):
         function (callable):
             A function that takes a data matrix as input and returns `center` and `factor`.
             `function` is called once during the first call to :meth:`scale_point`.
-        euclidian (list):
+        euclidean (list):
             Sequence of names or indices of the columns to be scaled by a common factor.
     """
     __slots__ = ['init', 'center', 'factor', 'columns', 'function', 'euclidean']
@@ -61,7 +61,7 @@ def __init__(self, scale=None, euclidean=None):
             scale (callable):
                 A function that takes a data matrix as input and returns `center` and
                 `factor`. `scale` becomes the :attr:`function` attribute.
-            euclidian (list):
+            euclidean (list):
                 Sequence of names or indices of the columns to be scaled by a common
                 factor.
         """
@@ -89,10 +89,10 @@ def scaled(self, points, asarray=False):
                 points = points[self.columns]
             else:
                 if self.center is not None and isstructured(self.center):
-                    raise TypeError("input data are not structured whereas scaler' is")
+                    raise TypeError("input data are not structured whereas scaler's internal data are")
                 points = points[:, self.columns]
         elif isstructured(points):
-            raise ValueError("input data are structured whereas scaler' is not")
+            raise ValueError("input data are structured whereas scaler's internal data are not")
         else:
             scaler_data = self.center
             if scaler_data is None:
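
The reworded messages guard against mixing the two data layouts `Scaler` understands; a small illustration of the distinction:

    import numpy as np

    # a structured array carries named fields, like DataFrame columns
    structured = np.array([(0.5, 1.0)], dtype=[('x', float), ('y', float)])
    unstructured = np.array([[0.5, 1.0]])

    # a scaler whose internal data (center/factor) are structured refuses
    # unstructured input, and vice versa, with the errors reworded above
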
@@ -153,12 +153,12 @@ def scale_point(self, points, inplace=True, scaledonly=False, asarray=False):
             if self.function:
                 # calculate centers and factors
                 self.center, self.factor = self.function(points)
-                # equalize factor for euclidian variables
+                # equalize factor for euclidean variables
                 if self.euclidean:
                     if isinstance(points, pd.DataFrame):
                         xyz = points[self.euclidean].values
                     elif points.dtype.names:
-                        xyz = np.asarray(points[self.euclidian])
+                        xyz = np.asarray(points[self.euclidean])
                     else:
                         xyz = points[:,self.euclidean]
                     _, self.factor[self.euclidean] = self.function(xyz.flatten())
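
A standalone illustration of the equalization step, mirroring the `self.function(xyz.flatten())` call above with a range-based scale function:

    import numpy as np

    xy = np.array([[0., 0.], [10., 2.]])
    # per-column factors [10., 2.] would stretch y relative to x;
    # flattening first yields one factor shared by both coordinates
    per_column = xy.max(axis=0) - xy.min(axis=0)      # array([10., 2.])
    common = xy.flatten().max() - xy.flatten().min()  # 10.0
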
@@ -177,10 +177,16 @@ def scale_point(self, points, inplace=True, scaledonly=False, asarray=False):
         if not (self.center is None and self.factor is None):
             if not inplace:
                 points = points.copy()
-            if self.center is not None:
-                points -= self.center
-            if self.factor is not None:
-                points /= self.factor
+            if isinstance(points, np.ndarray):
+                if self.center is not None:
+                    points -= np.asarray(self.center)
+                if self.factor is not None:
+                    points /= np.asarray(self.factor)
+            else:
+                if self.center is not None:
+                    points -= self.center
+                if self.factor is not None:
+                    points /= self.factor
         if scaledonly:
             points = self.scaled(points, asarray)
         elif asarray:
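
The new `isinstance` branch is the core of this fix: after fitting on a DataFrame, `center` and `factor` are pandas Series, and applying them directly to a plain ndarray triggers label alignment rather than positional broadcasting (or fails outright, depending on the pandas version). A minimal sketch of the failure mode:

    import numpy as np
    import pandas as pd

    points = np.array([[1., 2.], [3., 4.]])          # ndarray input
    center = pd.Series([1., 2.], index=['x', 'y'])   # state fitted on a DataFrame

    # points -= center           # aligns on labels, not on positions
    points -= np.asarray(center) # plain positional broadcasting
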
@@ -207,11 +213,17 @@ def unscale_point(self, points, inplace=True):
             raise AttributeError('scaler has not been initialized')
         if not (self.center is None and self.factor is None):
             if not inplace:
-                points = points.copy(deep=False)
-            if self.factor is not None:
-                points *= self.factor
-            if self.center is not None:
-                points += self.center
+                points = points.copy(False)
+            if isinstance(points, np.ndarray):
+                if self.factor is not None:
+                    points *= np.asarray(self.factor)
+                if self.center is not None:
+                    points += np.asarray(self.center)
+            else:
+                if self.factor is not None:
+                    points *= self.factor
+                if self.center is not None:
+                    points += self.center
         return points
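
`unscale_point` is the exact inverse of `scale_point`; a self-contained sketch of the round trip the two methods implement:

    import numpy as np

    center, factor = np.r_[1., 2.], np.r_[2., 4.]
    raw = np.array([[1., 2.], [5., 10.]])
    scaled = (raw - center) / factor     # what scale_point applies
    restored = scaled * factor + center  # what unscale_point undoes
    assert np.allclose(restored, raw)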


1 change: 1 addition & 0 deletions tramway/helper/inference.py
@@ -289,6 +289,7 @@ def map_plot(maps, output_file=None, fig_format=None, \
     input_file = None
     if isinstance(maps, tuple):
+        warn('`maps` as (CellStats, str, DataFrame) tuple are deprecated', FutureWarning)
         cells, mode, maps = maps
     elif isinstance(maps, (pd.DataFrame, Maps)):
         if cells is None:
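
With the tuple form deprecated, the maps are passed directly; in the sketch below `cells` is assumed to be an accepted keyword (the `if cells is None` branch above suggests so), and `maps`/`cells` are placeholders:

    from tramway.helper.inference import map_plot

    # deprecated (now emits a FutureWarning):
    #   map_plot((cells, mode, maps), output_file='maps.png')
    # preferred:
    map_plot(maps, cells=cells, output_file='maps.png')
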
26 changes: 12 additions & 14 deletions tramway/helper/tessellation.py
@@ -188,10 +188,16 @@ def tessellate(xyt_data, method='gwr', output_file=None, verbose=False, \
 
     no_nesting_error = ValueError('nesting tessellations does not apply to translocation data')
     multiple_files = lambda a: isinstance(a, (tuple, list, frozenset, set))
-    if isinstance(xyt_data, six.string_types) or multiple_files(xyt_data):
+    xyt_files = []
+    if isinstance(xyt_data, six.string_types):
+        # file path
+        xyt_files = [xyt_data]
+    elif multiple_files(xyt_data):
         # file path(s)
-        if multiple_files(xyt_data) and not xyt_data[1:]:
+        xyt_files = list(xyt_data)
+        if not xyt_data[1:]:
             xyt_data = xyt_data[0]
+    if xyt_files:
         if multiple_files(xyt_data):
             xyt_file = xyt_data
         else:
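
A standalone sketch of the normalization above: file paths are collected into `xyt_files`, and a single-element sequence falls back to the scalar case (`str` stands in for `six.string_types`):

    def normalize(xyt_data):
        multiple_files = lambda a: isinstance(a, (tuple, list, frozenset, set))
        xyt_files = []
        if isinstance(xyt_data, str):
            xyt_files = [xyt_data]         # one file path
        elif multiple_files(xyt_data):
            xyt_files = list(xyt_data)     # several file paths
            if not xyt_data[1:]:
                xyt_data = xyt_data[0]     # unwrap a single-element sequence
        return xyt_data, xyt_files

    assert normalize('a.trxyt') == ('a.trxyt', ['a.trxyt'])
    assert normalize(('a.trxyt',)) == ('a.trxyt', ['a.trxyt'])
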
@@ -207,12 +213,11 @@
             input_partition, = find_artefacts(analyses, CellStats, input_label)
         xyt_data = analyses.data
     if xyt_file:
-        xyt_data, xyt_path = load_xyt(xyt_data, return_paths=True, verbose=verbose)
+        xyt_data, xyt_files = load_xyt(xyt_files, return_paths=True, verbose=verbose)
         analyses = Analyses(xyt_data)
         if input_label is not None:
             raise no_nesting_error
     else:
-        xyt_path = []
         if isinstance(xyt_data, Analyses):
             analyses = xyt_data
             xyt_data = analyses.data
@@ -221,7 +226,7 @@
         #warn('TODO: test direct data input', UseCaseWarning)
         if input_label is not None:
             raise no_nesting_error
-    input_files = xyt_path
+    input_files = xyt_files
 
     try:
         setup, module = plugins[method]
@@ -439,13 +444,6 @@ def _filter_i(voronoi, cell, x):
         stats.param['avg_distance'] = avg_distance
     if max_distance:
         stats.param['max_distance'] = max_distance
-    #if not plugin:
-    #    if min_location_count:
-    #        stats.param['min_location_count'] = min_location_count
-    #    if avg_location_count:
-    #        stats.param['avg_location_count'] = avg_location_count
-    #    if max_location_count:
-    #        stats.param['max_location_count'] = min_location_count
     if knn:
         stats.param['knn'] = knn
     stats.param.update(kwargs)
@@ -472,9 +470,9 @@ def _filter_i(voronoi, cell, x):
     analyses.add(Analyses(stats), label=label, comment=comment)
 
     # save the analysis tree (`analyses`)
-    if output_file or xyt_path:
+    if output_file or xyt_files:
         if output_file is None:
-            output_file = os.path.splitext(xyt_path[0])[0] + hdf_extensions[0]
+            output_file = os.path.splitext(xyt_files[0])[0] + hdf_extensions[0]
 
         save_rwa(output_file, analyses, verbose, \
             force=len(input_files)==1 and input_files[0]==output_file)
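
The renamed `xyt_files` list also feeds the default output path; a sketch of the derivation, with `hdf_extensions` given an illustrative value:

    import os

    xyt_files = ['glycine_receptor.trxyt']
    hdf_extensions = ['.rwa']  # illustrative value
    output_file = os.path.splitext(xyt_files[0])[0] + hdf_extensions[0]
    assert output_file == 'glycine_receptor.rwa'
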
15 changes: 10 additions & 5 deletions tramway/plot/map.py
@@ -46,7 +46,7 @@ def scalar_map_2d(cells, values, aspect=None, clim=None, figure=None, axes=None,
 
     polygons = []
     if isinstance(cells, Distributed):
-        ix, xy, ok = zip(*[ (i, c.center, 0 < c.tcount) for i, c in cells.cells.items() ])
+        ix, xy, ok = zip(*[ (i, c.center, bool(c)) for i, c in cells.items() ])
         ix, xy, ok = np.array(ix), np.array(xy), np.array(ok)
         voronoi = scipy.spatial.Voronoi(xy)
         for c, r in enumerate(voronoi.point_region):
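
For readers unfamiliar with `scipy.spatial.Voronoi`, a minimal sketch of the polygon extraction pattern this loop relies on: `point_region` maps each input point to a region, and any region listing vertex index -1 is unbounded:

    import numpy as np
    import scipy.spatial

    xy = np.random.rand(20, 2)
    voronoi = scipy.spatial.Voronoi(xy)
    polygons = []
    for c, r in enumerate(voronoi.point_region):
        region = voronoi.regions[r]
        if -1 in region:
            continue  # unbounded cell; no finite polygon
        polygons.append(voronoi.vertices[region])
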
@@ -95,7 +95,13 @@ def scalar_map_2d(cells, values, aspect=None, clim=None, figure=None, axes=None,
     except AttributeError:
         raise TypeError('wrong type for `cells`: {}'.format(_type))
 
-    xy_min, xy_max = xy.min(axis=0), xy.max(axis=0)
+    try:
+        bounding_box = cells.descriptors(cells.bounding_box, asarray=True)
+        xy_min, xy_max = bounding_box
+    except (KeyboardInterrupt, SystemExit):
+        raise
+    except:
+        xy_min, xy_max = xy.min(axis=0), xy.max(axis=0)
 
     scalar_map = values.loc[ix[ok]].values
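
This is the autoscaling fix named in the commit title: with a kd-tree tessellation the outermost cell centers sit well inside the area they cover, so the min/max of the centers underestimates the map extent; the tessellation's bounding box, when available, gives the true limits. A minimal illustration:

    import numpy as np

    # centers of a unit square split into four quadrants
    xy = np.array([[.25, .25], [.75, .25], [.25, .75], [.75, .75]])
    xy.min(axis=0), xy.max(axis=0)  # (0.25, 0.25)..(0.75, 0.75): too tight
    # the bounding box restores the true extent: (0., 0.)..(1., 1.)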

@@ -127,6 +133,7 @@ def scalar_map_2d(cells, values, aspect=None, clim=None, figure=None, axes=None,
         patches.set_clim(clim)
     axes.add_collection(patches)
 
+    obj = None
     if delaunay:
         try:
             import tramway.plot.mesh as mesh
@@ -136,7 +143,6 @@ def scalar_map_2d(cells, values, aspect=None, clim=None, figure=None, axes=None,
         except:
             import traceback
             print(traceback.format_exc())
-            obj = None
 
     axes.set_xlim(xy_min[0], xy_max[0])
     axes.set_ylim(xy_min[1], xy_max[1])
@@ -149,8 +155,7 @@ def scalar_map_2d(cells, values, aspect=None, clim=None, figure=None, axes=None,
     except AttributeError as e:
         warn(e.args[0], RuntimeWarning)
 
-    if delaunay:
-        return obj
+    return obj



6 changes: 1 addition & 5 deletions tramway/tessellation/base.py
@@ -833,11 +833,7 @@ def vertices(self):
         """Unscaled coordinates of the Voronoi vertices (numpy.ndarray)."""
         if self._cell_centers is not None and self._vertices is None:
             self._postprocess()
-        if isinstance(self.scaler.factor, pd.Series):
-            return np.asarray(self.scaler.unscale_point(pd.DataFrame(self._vertices, \
-                columns=self.scaler.factor.index)))
-        else:
-            return self.scaler.unscale_point(self._vertices)
+        return self.scaler.unscale_point(self._vertices, inplace=False)
 
     @vertices.setter
     def vertices(self, vertices):
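
A sketch of why the branches collapse: `unscale_point` now handles pandas and NumPy scaler state itself, and `inplace=False` guarantees the cached scaled vertices survive repeated reads. The class below is a minimal stand-in, not the library's `Voronoi`:

    class VerticesSketch(object):
        def __init__(self, scaler, scaled_vertices):
            self.scaler = scaler
            self._vertices = scaled_vertices

        @property
        def vertices(self):
            # unscale a copy; the cached scaled coordinates stay intact
            return self.scaler.unscale_point(self._vertices, inplace=False)
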
5 changes: 3 additions & 2 deletions tramway/tessellation/kdtree/__init__.py
@@ -127,7 +127,7 @@ def tessellate(self, points, **kwargs):
             self.dichotomy.split()
         self.dichotomy.subset = {} # clear memory
         self.dichotomy.subset_counter = 0
 
         origin, level = zip(*[ self.dichotomy.cell[c][:2] for c in range(self.dichotomy.cell_counter) ])
         origin = np.vstack(origin)
         level = np.array(level)
@@ -164,7 +164,8 @@ def unique_rows(data, *args, **kwargs):
                 np.arange(j * n, (j+1) * n)[:,np.newaxis]))))
         vertices, I = unique_rows(np.concatenate(vertices, axis=0), \
             return_inverse=True)
-        self.vertices = vertices
+        self._vertices = vertices
+        self._lazy['vertices'] = False
         ridge_vertices = I[np.concatenate(ridge_vertices, axis=0)]
         u, v = ridge_vertices.T
         nverts = vertices.shape[0]
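
Assigning `self._vertices` directly, instead of going through the `vertices` property setter, avoids re-scaling coordinates that are already in scaled units; the `_lazy` flag is then cleared by hand so later reads do not recompute them. A minimal sketch of this lazy-attribute pattern, as the diff suggests it works:

    class LazySketch(object):
        def __init__(self):
            self._vertices = None
            self._lazy = {'vertices': True}

        @property
        def vertices(self):
            if self._lazy['vertices']:
                self._vertices = self._compute()  # hypothetical
                self._lazy['vertices'] = False
            return self._vertices

        @vertices.setter
        def vertices(self, vertices):
            # the real setter would scale its input; storing an
            # already-scaled array must bypass it, as done above
            self._vertices = vertices
            self._lazy['vertices'] = False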
