Merge pull request #419 from matthew-brett/np-12-compat
MRG: compatibility with numpy 1.12


Numpy 1.12 started raising errors for float indices in a variety of
circumstances, causing lots of test errors - e.g.
https://travis-ci.org/nipy/nipy/jobs/193952659
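For context (not part of the commit): NumPy 1.12 turned the old deprecation
warnings for float indices and float slice bounds into hard errors. A minimal
sketch of the failing pattern and the fix used throughout this diff:

    import numpy as np

    a = np.arange(10)
    n = len(a)
    # a[:.3 * n] and a[:np.ceil(n / 3)] raise errors on numpy >= 1.12,
    # because .3 * n and np.ceil(...) produce floats, not integers
    b = a[:int(.3 * n)]            # explicit cast: valid everywhere
    c = a[:int(np.ceil(n / 3))]    # same for ceil-derived bounds

Most of the changes below are variations on this cast.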
matthew-brett committed Jan 26, 2017
2 parents 438d51f + b148483 commit e93e1aa
Showing 33 changed files with 219 additions and 123 deletions.
10 changes: 9 additions & 1 deletion .travis.yml
@@ -32,7 +32,8 @@ matrix:
     - python: 2.7
       env:
         # Definitive source for these in nipy/info.py
-        - DEPENDS="numpy==1.6.0 scipy==0.9.0 sympy==0.7.0 nibabel==1.2.0"
+        - PRE_DEPENDS="numpy==1.6.0"
+        - DEPENDS="scipy==0.9.0 sympy==0.7.0 nibabel==1.2.0"
     # Test compiling against external lapack
     - python: 3.4
       env:
@@ -49,14 +50,21 @@ matrix:
       env:
         - INSTALL_TYPE=requirements
         - DEPENDS=
+  allow_failures:
+    - python: 3.6
 
 before_install:
     - source tools/travis_tools.sh
+    - python -m pip install --upgrade pip
+    - pip install --upgrade virtualenv
     - virtualenv --python=python venv
     - source venv/bin/activate
     - python --version # just to check
     - pip install -U pip
     - pip install nose mock # always
+    - if [ -n "$PRE_DEPENDS" ]; then
+        pip install $PRE_DEPENDS;
+      fi
    - pip install $DEPENDS
    - if [ "${COVERAGE}" == "1" ]; then
        pip install coverage;
23 changes: 12 additions & 11 deletions nipy/algorithms/clustering/hierarchical_clustering.py
@@ -21,6 +21,7 @@
 """
 from __future__ import print_function
 from __future__ import absolute_import
+from __future__ import division
 
 #---------------------------------------------------------------------------
 # ------ Routines for Agglomerative Hierarchical Clustering ----------------
@@ -553,20 +554,20 @@ def _remap(K, i, j, k, Features, linc, rinc):
     K.edges[idxj, 1] = k
 
     #------
-    # update linc,rinc
+    # update linc, rinc
     #------
-    lidxk = list(np.concatenate((linc[j], linc[i])))
-    for l in lidxk:
-        if K.edges[l, 1] == - 1:
-            lidxk.remove(l)
+    lidxk = list(linc[j]) + list(linc[i])
+    for L in lidxk:
+        if K.edges[L, 1] == -1:
+            lidxk.remove(L)
 
     linc[k] = lidxk
     linc[i] = []
     linc[j] = []
-    ridxk = list(np.concatenate((rinc[j], rinc[i])))
-    for l in ridxk:
-        if K.edges[l, 0] == - 1:
-            ridxk.remove(l)
+    ridxk = list(rinc[j]) + list(rinc[i])
+    for L in ridxk:
+        if K.edges[L, 0] == -1:
+            ridxk.remove(L)
 
     rinc[k] = ridxk
     rinc[i] = []
@@ -695,7 +696,7 @@ def ward_quick(G, feature, verbose=False):
 
         ml = linc[j]
         if np.sum(K.edges[ml, 1] == i) > 0:
-            m = ml[np.flatnonzero(K.edges[ml, 1] == i)]
+            m = ml[int(np.flatnonzero(K.edges[ml, 1] == i))]
             K.edges[m] = -1
             K.weights[m] = np.inf
             linc[j].remove(m)
@@ -957,7 +958,7 @@ def ward(G, feature, verbose=False):
 
         ml = linc[j]
         if np.sum(K.edges[ml, 1] == i) > 0:
-            m = ml[np.flatnonzero(K.edges[ml, 1] == i)]
+            m = ml[int(np.flatnonzero(K.edges[ml, 1] == i))]
             K.edges[m] = -1
             K.weights[m] = np.inf
             linc[j].remove(m)
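A note on the two identical ward hunks above: `ml` is a plain Python list, and
`np.flatnonzero` returns a one-element integer array, so `ml[...]` relied on an
implicit array-to-index conversion that newer numpy rejects. An illustrative
sketch (not from the commit):

    import numpy as np

    ml = [7, 8, 9]
    hit = np.flatnonzero(np.array([0, 1, 0]) == 1)  # array([1])
    # ml[hit] needs the array to be converted to an index implicitly;
    # casting the one-element array to a scalar int is unambiguous
    m = ml[int(hit)]                                # 8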
7 changes: 5 additions & 2 deletions nipy/algorithms/clustering/imm.py
@@ -4,6 +4,9 @@
 """
 from __future__ import print_function
 from __future__ import absolute_import
+from __future__ import division
+
+import math
 
 import numpy as np
 
@@ -234,7 +237,7 @@ def cross_validated_update(self, x, z, plike, kfold=10):
         if np.isscalar(kfold):
             aux = np.argsort(np.random.rand(n_samples))
             idx = - np.ones(n_samples).astype(np.int)
-            j = np.ceil(n_samples / kfold)
+            j = int(math.ceil(n_samples / kfold))
             kmax = kfold
             for k in range(kmax):
                 idx[aux[k * j:min(n_samples, j * (k + 1))]] = k
@@ -599,7 +602,7 @@ def cross_validated_update(self, x, z, plike, null_class_proba, kfold=10):
         if np.isscalar(kfold):
             aux = np.argsort(np.random.rand(n_samples))
             idx = - np.ones(n_samples).astype(np.int)
-            j = np.ceil(n_samples / kfold)
+            j = int(math.ceil(n_samples / kfold))
             kmax = kfold
             for k in range(kmax):
                 idx[aux[k * j:min(n_samples, j * (k + 1))]] = k
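The `int(math.ceil(...))` pattern above pairs with the new
`from __future__ import division`: `/` becomes true division on Python 2
(matching Python 3), and the result is cast to a plain int so it can serve as
a slice bound. A sketch of the difference (illustrative only):

    from __future__ import division
    import math
    import numpy as np

    n_samples, kfold = 100, 7
    fj = np.ceil(n_samples / kfold)          # 15.0: a float, which fails
                                             # as a slice bound on >= 1.12
    j = int(math.ceil(n_samples / kfold))    # 15: plain int, always valid
    idx = np.zeros(n_samples)
    idx[0 * j:j * 1] = 1                     # ok: bounds are ints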
@@ -7,7 +7,9 @@
 Author : Bertrand Thirion, 2008-2009
 """
-from __future__ import absolute_import
+from __future__ import absolute_import, division
+
+import math
 
 import numpy as np
 from numpy.random import randn
@@ -157,7 +159,7 @@ def ward_test_more(n=100, k=5, verbose=0):
     # Check that two implementations give the same result
     np.random.seed(0)
     X = randn(n,2)
-    X[:np.ceil(n/3)] += 5
+    X[:int(math.ceil(n / 3))] += 5
     G = knn(X, 5)
     u,c = ward_segment(G, X, stop=-1, qmax=1, verbose=verbose)
     u1,c = ward_segment(G, X, stop=-1, qmax=k, verbose=verbose)
16 changes: 8 additions & 8 deletions nipy/algorithms/clustering/tests/test_imm.py
@@ -152,8 +152,8 @@ def test_imm_wnc():
     alpha = .5
     g0 = 1.
     x = np.random.rand(n, dim)
-    x[:.3*n] *= .2
-    x[:.1*n] *= .3
+    x[:int(.3 * n)] *= .2
+    x[:int(.1 * n)] *= .3
 
     # instantiate
     migmm = MixedIMM(alpha, dim)
@@ -184,8 +184,8 @@ def test_imm_wnc1():
     alpha = .5
     g0 = 1.
     x = np.random.rand(n, dim)
-    x[:.3*n] *= .2
-    x[:.1*n] *= .3
+    x[:int(.3 * n)] *= .2
+    x[:int(.1 * n)] *= .3
 
     # instantiate
     migmm = MixedIMM(alpha, dim)
@@ -216,8 +216,8 @@ def test_imm_wnc2():
     alpha = .5
     g0 = 1.
     x = np.random.rand(n, dim)
-    x[:.3*n] *= .2
-    x[:.1*n] *= .3
+    x[:int(.3 * n)] *= .2
+    x[:int(.1 * n)] *= .3
 
     # instantiate
     migmm = MixedIMM(alpha, dim)
@@ -242,8 +242,8 @@ def test_imm_wnc3():
     alpha = .5
     g0 = 1.
     x = np.random.rand(n, dim)
-    x[:.3*n] *= .2
-    x[:.1*n] *= .3
+    x[:int(.3 * n)] *= .2
+    x[:int(.1 * n)] *= .3
 
     # instantiate
     migmm = MixedIMM(alpha, dim)
3 changes: 2 additions & 1 deletion nipy/algorithms/graph/graph.py
@@ -373,7 +373,8 @@ def knn(X, k=1):
     # neighbour system
     bool_knn = dist < sorted_dist[k + 1]
     bool_knn += bool_knn.T
-    bool_knn -= np.diag(np.diag(bool_knn))
+    # xor diagonal
+    bool_knn ^= np.diag(np.diag(bool_knn))
     dist *= (bool_knn > 0)
     return wgraph_from_adjacency(dist)
 
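In-place subtraction on boolean arrays is what numpy started rejecting here;
exclusive-or clears the diagonal with the same effect on a symmetric boolean
adjacency matrix. An illustrative sketch (not from the commit):

    import numpy as np

    b = np.ones((3, 3), dtype=bool)       # symmetric boolean adjacency
    # b -= np.diag(np.diag(b)) fails on newer numpy: boolean subtract
    # is not defined; xor with the diagonal flips True -> False there
    b ^= np.diag(np.diag(b))
    assert not b.diagonal().any()         # diagonal cleared, rest intact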
4 changes: 2 additions & 2 deletions nipy/algorithms/graph/tests/test_graph.py
@@ -416,7 +416,7 @@ def test_subgraph_2(n=10):
     x = nr.randn(n, 2)
     G = knn(x, 5)
     valid = np.zeros(n)
-    valid[:n / 2] = 1
+    valid[:n // 2] = 1
     assert_true(G.subgraph(valid).edges.max() < n / 2)
 
 
@@ -427,7 +427,7 @@ def test_graph_create_from_array():
     wg = wgraph_from_adjacency(a)
     b = wg.to_coo_matrix()
     assert_array_equal(a, b.todense())
-
+
 def test_graph_create_from_coo_matrix():
     """Test the creation of a graph from a sparse coo_matrix
4 changes: 3 additions & 1 deletion nipy/algorithms/interpolation.py
@@ -12,6 +12,8 @@
 from scipy import ndimage
 
 from ..fixes.scipy.ndimage import map_coordinates
+from ..utils import seq_prod
+
 
 class ImageInterpolator(object):
     """ Interpolate Image instance at arbitrary points in world space
@@ -88,7 +90,7 @@ def evaluate(self, points):
         """
         points = np.array(points, np.float64)
         output_shape = points.shape[1:]
-        points.shape = (points.shape[0], np.product(output_shape))
+        points.shape = (points.shape[0], seq_prod(output_shape))
         cmapi = self.image.coordmap.inverse()
         voxels = cmapi(points.T).T
         V = map_coordinates(self.data,
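`np.product` returns a numpy scalar, and for an empty shape tuple it returns
the float 1.0, which numpy 1.12 no longer accepts inside an array shape.
`seq_prod` appears to be a pure-Python sequence product in `nipy.utils`; a
sketch of the distinction, with the helper body assumed for illustration:

    import numpy as np
    from functools import reduce
    from operator import mul

    def seq_prod(seq, initial=1):
        # assumed equivalent of nipy.utils.seq_prod: plain-int product
        return reduce(mul, seq, initial)

    output_shape = ()            # e.g. evaluating at a single point
    np.product(output_shape)     # 1.0: a float, invalid in arr.shape
    seq_prod(output_shape)       # 1: a plain int, always valid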
13 changes: 8 additions & 5 deletions nipy/algorithms/kernel_smooth.py
@@ -11,6 +11,7 @@
 import numpy.fft as fft
 import numpy.linalg as npl
 
+from nipy.utils import seq_prod
 from nipy.core.api import Image, AffineTransform
 from nipy.core.reference.coordinate_map import product
 
@@ -59,7 +60,7 @@ def _setup_kernel(self):
         # reshape to (N coordinates, -1). We appear to need to assign
         # to shape instead of doing a reshape, in order to avoid memory
         # copies
-        voxels.shape = (voxels.shape[0], np.product(voxels.shape[1:]))
+        voxels.shape = (voxels.shape[0], seq_prod(voxels.shape[1:]))
         # physical coordinates relative to center
         X = (self.coordmap(voxels.T) - phys_center).T
         X.shape = (self.coordmap.ndims[1],) + tuple(self.bshape)
@@ -70,8 +71,9 @@
                    'l1':np.fabs(kernel).sum(),
                    'l1sum':kernel.sum()}
         self._kernel = kernel
-        self.shape = (np.ceil((np.asarray(self.bshape) +
-                      np.asarray(kernel.shape))/2)*2+2)
+        self.shape = (np.ceil(
+            (np.asarray(self.bshape) + np.asarray(kernel.shape)) / 2)
+            * 2 + 2).astype(np.intp)
         self.fkernel = np.zeros(self.shape)
         slices = [slice(0, kernel.shape[i]) for i in range(len(kernel.shape))]
         self.fkernel[slices] = kernel
@@ -179,8 +181,9 @@ def smooth(self, inimage, clean=False, is_fft=False):
                 _out = data
             _slice += 1
         gc.collect()
-        _out = _out[[slice(self._kernel.shape[i]/2, self.bshape[i] +
-                    self._kernel.shape[i]/2) for i in range(len(self.bshape))]]
+        _out = _out[[slice(self._kernel.shape[i] // 2,
+                           self.bshape[i] + self._kernel.shape[i] // 2)
+                     for i in range(len(self.bshape))]]
         if inimage.ndim == 3:
             return Image(_out, coordmap=self.coordmap)
         else:
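Two fixes combine in this file: `//` keeps slice bounds integral, and the
computed FFT buffer shape, built with `np.ceil`, is explicitly cast to an
integer dtype before being used as an array shape. A small sketch with
illustrative values:

    import numpy as np

    bshape = np.array([64, 64, 30])
    kshape = np.array([9, 9, 9])
    shape = np.ceil((bshape + kshape) / 2) * 2 + 2   # float64 array
    buf = np.zeros(shape.astype(np.intp))            # ints: ok as a shape
    half = kshape[0] // 2                            # 4: int slice bound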
19 changes: 13 additions & 6 deletions nipy/algorithms/registration/histogram_registration.py
@@ -140,6 +140,13 @@ def _set_interp(self, interp):
 
     interp = property(_get_interp, _set_interp)
 
+    def _slicer(self, corner, size, spacing):
+        return tuple(
+            slice(int(corner[i]),
+                  int(size[i] + corner[i]),
+                  int(spacing[i]))
+            for i in range(3))
+
     def set_fov(self, spacing=None, corner=(0, 0, 0), size=None,
                 npoints=None):
         """
@@ -164,20 +171,20 @@ def set_fov(self, spacing=None, corner=(0, 0, 0), size=None,
             spacing = [1, 1, 1]
         if size is None:
             size = self._from_img.shape
-        slicer = lambda c, s, sp:\
-            tuple([slice(c[i], s[i] + c[i], sp[i]) for i in range(3)])
         # Adjust spacing to match desired field of view size
         if spacing is not None:
-            fov_data = self._from_img.get_data()[slicer(corner, size, spacing)]
+            fov_data = self._from_img.get_data()[
+                self._slicer(corner, size, spacing)]
         else:
             fov_data = self._from_img.get_data()[
-                slicer(corner, size, [1, 1, 1])]
+                self._slicer(corner, size, [1, 1, 1])]
             spacing = ideal_spacing(fov_data, npoints=npoints)
-            fov_data = self._from_img.get_data()[slicer(corner, size, spacing)]
+            fov_data = self._from_img.get_data()[
+                self._slicer(corner, size, spacing)]
         self._from_data = fov_data
         self._from_npoints = (fov_data >= 0).sum()
         self._from_affine = subgrid_affine(xyz_affine(self._from_img),
-                                           slicer(corner, size, spacing))
+                                           self._slicer(corner, size, spacing))
         # We cache the voxel coordinates of the clamped image
         self._vox_coords =\
             np.indices(self._from_data.shape).transpose((1, 2, 3, 0))
9 changes: 5 additions & 4 deletions nipy/algorithms/statistics/empirical_pvalue.py
@@ -202,13 +202,14 @@ def learn(self, left=0.2, right=0.8):
 
         # generate the histogram
         step = 3.5 * np.std(self.x) / np.exp(np.log(self.n) / 3)
-        bins = max(10, (self.x.max() - self.x.min()) // step)
+        bins = max(10, int((self.x.max() - self.x.min()) // step))
         hist, ledge = np.histogram(x, bins=bins)
         step = ledge[1] - ledge[0]
         medge = ledge + 0.5 * step
 
         # remove null bins
-        hist, medge = hist[hist > 0].astype(np.float), medge[hist > 0]
+        hist = hist[hist > 0].astype(np.float)
+        medge = medge[:-1][hist > 0]  # edges include rightmost outer
 
         # fit the histogram
         dmtx = np.ones((3, len(hist)))
@@ -449,8 +450,8 @@ def three_classes_GMM_fit(x, test=None, alpha=0.01, prior_strength=100,
 
     # set the priors from a reasonable model of the data (!)
     # prior means
-    mb0 = np.mean(sx[ : alpha * nvox])
-    mb2 = np.mean(sx[(1 - alpha) * nvox:])
+    mb0 = np.mean(sx[:int(alpha * nvox)])
+    mb2 = np.mean(sx[int((1 - alpha) * nvox):])
     prior_means = np.reshape(np.array([mb0, 0, mb2]), (nclasses, 1))
     if fixed_scale:
         prior_scale = np.ones((nclasses, 1, 1)) * 1. / (prior_strength)
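`np.histogram` requires an integer `bins` under numpy 1.12, and its returned
edges array has `bins + 1` entries, so the midpoint array must drop the final
outer edge before being masked against the `bins`-long counts. A sketch of the
shapes involved (illustrative):

    import numpy as np

    x = np.random.randn(1000)
    step = 0.1
    bins = max(10, int((x.max() - x.min()) // step))
    hist, ledge = np.histogram(x, bins=bins)   # bins must be an int
    medge = ledge + 0.5 * (ledge[1] - ledge[0])
    assert len(medge) == len(hist) + 1         # includes rightmost edge
    medge = medge[:-1]                         # now aligned with hist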
27 changes: 16 additions & 11 deletions nipy/algorithms/statistics/formula/formulae.py
@@ -91,7 +91,7 @@
 The I is the "intercept" term, I have explicity not used R's default of
 adding it to everything.
->>> f.design(r)
+>>> f.design(r) #doctest: +STRUCTARR_EQUAL
 array([(51.0, 39.0, 1989.0, 1.0), (64.0, 54.0, 3456.0, 1.0),
        (70.0, 69.0, 4830.0, 1.0), (63.0, 47.0, 2961.0, 1.0),
        (78.0, 66.0, 5148.0, 1.0), (55.0, 44.0, 2420.0, 1.0),
@@ -402,20 +402,25 @@ def make_recarray(rows, names, dtypes=None, drop_name_dim=_NoValue):
 The following tests depend on machine byte order for their exact output.
 >>> arr = np.array([[3, 4], [4, 6], [6, 8]])
->>> make_recarray(arr, ['x', 'y']) #doctest: +ELLIPSIS
+>>> make_recarray(arr, ['x', 'y'],
+...               drop_name_dim=True) #doctest: +STRUCTARR_EQUAL
+array([(3, 4), (4, 6), (6, 8)],
+      dtype=[('x', '<i8'), ('y', '<i8')])
+>>> make_recarray(arr, ['x', 'y'],
+...               drop_name_dim=False) #doctest: +STRUCTARR_EQUAL
 array([[(3, 4)],
        [(4, 6)],
        [(6, 8)]],
-      dtype=[('x', '...'), ('y', '...')])
->>> r = make_recarray(arr, ['w', 'u'])
->>> make_recarray(r, ['x', 'y']) #doctest: +ELLIPSIS
-array([[(3, 4)],
-       [(4, 6)],
-       [(6, 8)]],
-      dtype=[('x', '...'), ('y', '...')])
->>> make_recarray([[3, 4], [4, 6], [7, 9]], 'wv', [np.float, np.int]) #doctest: +ELLIPSIS
+      dtype=[('x', '<i8'), ('y', '<i8')])
+>>> r = make_recarray(arr, ['w', 'u'], drop_name_dim=True)
+>>> make_recarray(r, ['x', 'y'],
+...               drop_name_dim=True) #doctest: +STRUCTARR_EQUAL
+array([(3, 4), (4, 6), (6, 8)],
+      dtype=[('x', '<i8'), ('y', '<i8')])
+>>> make_recarray([[3, 4], [4, 6], [7, 9]], 'wv',
+...               [np.float, np.int]) #doctest: +STRUCTARR_EQUAL
 array([(3.0, 4), (4.0, 6), (7.0, 9)],
-      dtype=[('w', '...'), ('v', '...')])
+      dtype=[('w', '<f8'), ('v', '<i8')])
 Raises
 ------
