
Python 3 compatibility fixes

1 parent f960792 · commit 1953c173a2a684735818c88e3c01109c56e173c7 · tyarkoni committed on Sep 1, 2015
@@ -19,7 +19,7 @@
import sys
import os
-from version import __version__
+from .version import __version__
logger = logging.getLogger("neurosynth")
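Python 3 removed implicit relative imports (PEP 328): inside a package, a bare "from version import __version__" no longer resolves against the package's own directory, only against sys.path. The leading dot makes the relative lookup explicit and works on both major versions. A minimal sketch, assuming a package layout like neurosynth's:

    # Package layout (assumed for illustration):
    #   neurosynth/
    #       __init__.py    <- importing module
    #       version.py     <- defines __version__

    # Python 2 only; raises ImportError on Python 3:
    #   from version import __version__

    # Explicit relative import; works on Python 2 and Python 3:
    from .version import __version__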
@@ -96,7 +96,7 @@ def get_studies_by_regions(dataset, masks, threshold=0.08,
try:
loaded_masks = [nib.load(os.path.relpath(m)) for m in masks]
except OSError:
- print 'Error loading masks. Check the path'
+ print('Error loading masks. Check the path')
# Get a list of studies that activate for each mask file--i.e., a list of
# lists
@@ -64,7 +64,7 @@ def transform(self, transformer, transpose=False):
''' Apply a transformation to the Clusterable instance. Accepts any
scikit-learn-style class that implements a fit_transform() method. '''
data = self.data.T if transpose else self.data
- self.data = transformer.fit_transform(data)
+ self.data = transformer.fit_transform(data).T
return self
@@ -160,8 +160,6 @@ def magic(dataset, method='coactivation', roi_mask=None,
transpose = (method == 'coactivation')
reference = reference.transform(reduce_reference, transpose=transpose)
- if transpose:
- reference.data = reference.data.T
distances = pairwise_distances(roi.data, reference.data,
metric=distance_metric)
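These two hunks work together: fit_transform() always returns samples on the rows, so a transpose applied on the way in has to be undone on the way out. Moving the back-transpose into transform() itself (and deleting the manual flip in magic()) keeps Clusterable.data in one consistent orientation for the pairwise_distances() call that follows. A standalone sketch of the round-trip, with the shapes and the PCA reducer assumed for illustration:

    import numpy as np
    from sklearn.decomposition import PCA

    X = np.random.rand(200, 50)   # 200 samples x 50 features (shapes assumed)

    # Normal orientation: reduce the columns (features).
    reduced_cols = PCA(n_components=10).fit_transform(X)       # shape (200, 10)

    # Transpose in and back out to reduce along the rows instead.
    reduced_rows = PCA(n_components=10).fit_transform(X.T).T   # shape (10, 50)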
@@ -7,6 +7,7 @@
from os import path
import pandas as pd
import matplotlib.pyplot as plt
+from six import string_types
class Decoder:
@@ -82,7 +83,7 @@ def decode(self, images, save=None, round=4, names=None, **kwargs):
each image is a column. The meaning of the values depends on the
decoding method used. """
- if isinstance(images, basestring):
+ if isinstance(images, string_types):
images = [images]
if isinstance(images, list):
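basestring was removed in Python 3, where all text is str. six.string_types papers over the difference: it is (basestring,) on Python 2 and (str,) on Python 3, so a single isinstance check covers both. The idiom used throughout this commit, in isolation:

    from six import string_types

    def as_list(value):
        # Wrap a bare string in a list; leave lists untouched.
        if isinstance(value, string_types):
            value = [value]
        return value

    as_list("pain.nii.gz")              # -> ["pain.nii.gz"]
    as_list(["a.nii.gz", "b.nii.gz"])   # -> unchanged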
@@ -270,9 +271,9 @@ def plot_polar(self, data, n_top=3, overplot=False, labels=None,
fig, ax = plt.subplots(1, 1, subplot_kw=dict(polar=True))
fig.set_size_inches(10, 10)
else:
- fig, axes = plt.subplots(n_panels, 1, sharex=False, sharey=False,
+ fig, axes = plt.subplots(1, n_panels, sharex=False, sharey=False,
subplot_kw=dict(polar=True))
- fig.set_size_inches((6, 6 * n_panels))
+ fig.set_size_inches((6 * n_panels, 6))
# A bit silly to import seaborn just for this...
# should extract just the color_palette functionality.
import seaborn as sns
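This hunk is a layout fix rather than a Python 3 change: the polar panels now sit side by side in a single row instead of stacked in a column, and the figure scales in width rather than height to match. In isolation (panel count assumed):

    import matplotlib.pyplot as plt

    n_panels = 3   # value assumed for illustration

    # One row of n_panels polar axes, widened to fit them.
    fig, axes = plt.subplots(1, n_panels, sharex=False, sharey=False,
                             subplot_kw=dict(polar=True))
    fig.set_size_inches(6 * n_panels, 6)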
@@ -285,8 +286,10 @@ def plot_polar(self, data, n_top=3, overplot=False, labels=None,
alpha = 0.8
ax.set_ylim(data.values.min(), data.values.max())
d = data.iloc[:, i].values
- ax.fill(
- theta, d, ec='k', alpha=alpha, color=colors[i], linewidth=2)
+ ax.fill(theta, d, color=colors[i], alpha=alpha, ec='k',
+ linewidth=0)
+ ax.fill(theta, d, alpha=1.0, ec=colors[i],
+ linewidth=2, fill=False)
ax.set_xticks(theta)
ax.set_xticklabels(labels, fontsize=18)
[lab.set_fontsize(18) for lab in ax.get_yticklabels()]
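The single fill() call becomes two passes: the first draws the translucent patch with no edge (linewidth=0), the second draws only an opaque outline (fill=False) over it. Without the split, the alpha applied to the patch also fades its edge. A standalone sketch with toy data:

    import numpy as np
    import matplotlib.pyplot as plt

    theta = np.linspace(0, 2 * np.pi, 8, endpoint=False)
    d = np.random.rand(8)   # toy values, assumed

    fig, ax = plt.subplots(subplot_kw=dict(polar=True))
    ax.fill(theta, d, color='steelblue', alpha=0.8, linewidth=0)   # translucent patch
    ax.fill(theta, d, ec='steelblue', linewidth=2, fill=False)     # opaque outline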
@@ -4,7 +4,7 @@
import numpy as np
from scipy.stats import norm
from neurosynth.base import imageutils
-import stats
+from neurosynth.analysis import stats
from os.path import join, exists
from os import makedirs
@@ -1,6 +1,7 @@
""" Network analysis-related tools"""
from neurosynth.analysis import meta
+from six import string_types
def coactivation(dataset, seed, threshold=0.0, output_dir='.', prefix='', r=6):
@@ -29,7 +30,7 @@ def coactivation(dataset, seed, threshold=0.0, output_dir='.', prefix='', r=6):
meta.MetaAnalysis.
"""
- if isinstance(seed, basestring):
+ if isinstance(seed, string_types):
ids = dataset.get_studies(mask=seed, activation_threshold=threshold)
else:
ids = dataset.get_studies(peaks=seed, r=r,
@@ -7,11 +7,22 @@
import numpy as np
import pandas as pd
from scipy import sparse
-import mappable
+from neurosynth.base import mappable
from neurosynth.base import mask, imageutils, transformations
from neurosynth.base import lexparser as lp
from neurosynth.utils import deprecated
-import urllib2
+
+# For Python 2/3 compatibility
+from six import string_types
+from functools import reduce
+try:
+ from urllib2 import urlopen
+except ImportError:
+ from urllib.request import urlopen
+try:
+ import cPickle as pickle
+except ImportError:
+ import pickle
logger = logging.getLogger('neurosynth.dataset')
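The block above is the standard shim for modules that moved or merged in Python 3: try the Python 2 name first and fall back on ImportError. urllib2.urlopen now lives at urllib.request.urlopen, and cPickle was folded into pickle, which selects the C implementation automatically on Python 3. The same pattern covers any relocated module, for example:

    # Generic Python 2/3 import shim: old name first, new location on failure.
    try:
        from StringIO import StringIO   # Python 2
    except ImportError:
        from io import StringIO        # Python 3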
@@ -34,9 +45,8 @@ def download(path='.', url=None, unpack=False):
f = open(filename, 'wb')
- u = urllib2.urlopen(url)
- meta = u.info()
- file_size = int(meta.getheaders("Content-Length")[0])
+ u = urlopen(url)
+ file_size = int(u.headers["Content-Length"])
print("Downloading the latest Neurosynth files: {0} bytes: {1}".format(
url, file_size))
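In Python 3, the response's .headers attribute is a mapping-like message object, replacing the old u.info().getheaders(name) list API; indexing it by name yields the complete header value as one string, ready for int(). A minimal check, with the URL assumed:

    from six.moves.urllib.request import urlopen

    u = urlopen("https://example.com")             # URL assumed for illustration
    file_size = int(u.headers["Content-Length"])   # one string, e.g. "1048576"
    print("{0} bytes".format(file_size))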
@@ -514,8 +524,7 @@ def get_feature_counts(self, threshold=0.001):
@classmethod
def load(cls, filename):
""" Load a pickled Dataset instance from file. """
- import cPickle
- dataset = cPickle.load(open(filename, 'rb'))
+ dataset = pickle.load(open(filename, 'rb'))
if hasattr(dataset, 'feature_table'):
dataset.feature_table._csr_to_sdf()
return dataset
@@ -534,8 +543,7 @@ def save(self, filename, keep_mappables=False):
if hasattr(self, 'feature_table'):
self.feature_table._sdf_to_csr()
- import cPickle
- cPickle.dump(self, open(filename, 'wb'), -1)
+ pickle.dump(self, open(filename, 'wb'), -1)
if hasattr(self, 'feature_table'):
self.feature_table._csr_to_sdf()
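With pickle imported once at module level via the shim above, the local import cPickle statements can go. The -1 protocol argument asks for the highest protocol the running interpreter supports, i.e. its most compact binary format. Roughly:

    import pickle

    obj = {"ids": [101, 102]}            # any picklable object (toy data)
    with open("dataset.pkl", "wb") as f:
        pickle.dump(obj, f, -1)          # -1 selects the highest protocol
    with open("dataset.pkl", "rb") as f:
        restored = pickle.load(f)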
@@ -660,8 +668,7 @@ def save_images_to_file(self, ids, outroot='./'):
pass
def save(self, filename):
- import cPickle
- cPickle.dump(self, open(filename, 'wb'), -1)
+ pickle.dump(self, open(filename, 'wb'), -1)
class FeatureTable(object):
@@ -703,7 +710,7 @@ def add_features(self, features, merge='outer', duplicates='ignore',
threshold (float): minimum frequency threshold each study must
exceed in order to count towards min_studies.
"""
- if isinstance(features, basestring):
+ if isinstance(features, string_types):
if not os.path.exists(features):
raise ValueError("%s cannot be found." % features)
try:
@@ -840,7 +847,7 @@ def search_features(self, search):
Returns:
A list of matching feature names.
'''
- if isinstance(search, basestring):
+ if isinstance(search, string_types):
search = [search]
search = [s.replace('*', '.*') for s in search]
cols = list(self.data.columns)
@@ -5,6 +5,7 @@
import nibabel as nb
from nibabel import nifti1
import numpy as np
+from six import string_types
logger = logging.getLogger('neurosynth.imageutils')
@@ -50,7 +51,7 @@ def load_imgs(filenames, masker, nan_to_num=True):
An m x n 2D numpy array, where m = number of voxels in mask and
n = number of images passed.
"""
- if isinstance(filenames, basestring):
+ if isinstance(filenames, string_types):
filenames = [filenames]
data = np.zeros((masker.n_vox_in_mask, len(filenames)))
for i, f in enumerate(filenames):
@@ -113,7 +114,7 @@ def create_grid(image, scale=4, apply_mask=True, save_file=None):
A nibabel image with the same dimensions as the input image. All voxels
in each cell in the 3D grid are assigned the same non-zero label.
"""
- if isinstance(image, basestring):
+ if isinstance(image, string_types):
image = nb.load(image)
# create a list of cluster centers
@@ -128,17 +129,17 @@ def create_grid(image, scale=4, apply_mask=True, save_file=None):
# factor
grid = np.zeros(image.shape)
for (i, (x, y, z)) in enumerate(centers):
- for mov_x in range((-scale+1)/2, (scale+1)/2):
- for mov_y in range((-scale+1)/2, (scale+1)/2):
- for mov_z in range((-scale+1)/2, (scale+1)/2):
+ for mov_x in range((-scale+1)//2, (scale+1)//2):
+ for mov_y in range((-scale+1)//2, (scale+1)//2):
+ for mov_z in range((-scale+1)//2, (scale+1)//2):
try: # Ignore voxels outside bounds of image
grid[x+mov_x, y+mov_y, z+mov_z] = i+1
except:
pass
if apply_mask:
mask = image
- if isinstance(mask, basestring):
+ if isinstance(mask, string_types):
mask = nb.load(mask)
if type(mask).__module__ != np.__name__:
mask = mask.get_data()
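On Python 3, / performs true division even on integers, so (scale+1)/2 yields a float and range() then raises TypeError; // restores the floor division that Python 2's / applied to ints implicitly. Concretely:

    scale = 4
    # Python 2: (-scale + 1) / 2 == -2 (ints floor-divide)
    # Python 3: (-scale + 1) / 2 == -1.5, and range(-1.5, ...) is a TypeError
    lo, hi = (-scale + 1) // 2, (scale + 1) // 2
    print(list(range(lo, hi)))   # [-2, -1, 0, 1] on both versions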
@@ -50,7 +50,7 @@ def test(self, data):
tok = self.lexer.token()
if not tok:
break # No more input
- print tok
+ print(tok)
class Parser(object):
@@ -109,7 +109,7 @@ def build(self, **kwargs):
self.parser = yacc.yacc(module=self, **kwargs)
def p_error(self, p):
- print p
+ print(p)
def parse(self, input):
return self.parser.parse(input)
@@ -0,0 +1,10 @@
+# lextab.py. This file automatically created by PLY (version 3.6). Don't edit!
+_tabversion = '3.5'
+_lextokens = {'ANDNOT', 'LT', 'RPAR', 'LPAR', 'OR', 'WORD', 'FLOAT', 'AND', 'RT'}
+_lexreflags = 0
+_lexliterals = ''
+_lexstateinfo = {'INITIAL': 'inclusive'}
+_lexstatere = {'INITIAL': [('(?P<t_WORD>[a-zA-Z\\_\\-\\*]+)|(?P<t_FLOAT>[0-9\\.]+)|(?P<t_ANDNOT>\\&\\~)|(?P<t_AND>\\&)|(?P<t_LPAR>\\()|(?P<t_RPAR>\\))|(?P<t_RT>\\>)|(?P<t_LT>\\<)|(?P<t_OR>\\|)', [None, ('t_WORD', 'WORD'), ('t_FLOAT', 'FLOAT'), (None, 'ANDNOT'), (None, 'AND'), (None, 'LPAR'), (None, 'RPAR'), (None, 'RT'), (None, 'LT'), (None, 'OR')])]}
+_lexstateignore = {'INITIAL': ' \t'}
+_lexstateerrorf = {'INITIAL': 't_error'}
+_lexstateeoff = {}
@@ -1,6 +1,6 @@
import numpy as np
import nibabel as nb
-
+from six import string_types
class Masker(object):
@@ -15,7 +15,7 @@ def __init__(self, volume, layers=None):
an image filename or a NiBabel image.
layers: Optional masking layers to add; see docstring for add().
"""
- if isinstance(volume, basestring):
+ if isinstance(volume, string_types):
volume = nb.load(volume)
self.volume = volume
data = self.volume.get_data()
@@ -77,7 +77,7 @@ def remove(self, layers):
if not isinstance(layers, list):
layers = [layers]
for l in layers:
- if isinstance(l, basestring):
+ if isinstance(l, string_types):
if l not in self.layers:
raise ValueError("There's no image/layer named '%s' in "
"the masking stack!" % l)
@@ -103,7 +103,7 @@ def get_image(self, image, output='vector'):
'image': A NiBabel image
Returns: An object containing image data; see output options above.
"""
- if isinstance(image, basestring):
+ if isinstance(image, string_types):
image = nb.load(image)
if type(image).__module__.startswith('nibabel'):
@@ -210,7 +210,7 @@ def get_mask(self, layers=None, output='vector', in_global_mask=True):
elif not isinstance(layers, list):
layers = [layers]
- layers = map(lambda x: x if isinstance(x, basestring)
+ layers = map(lambda x: x if isinstance(x, string_types)
else self.stack[x], layers)
layers = [self.layers[l] for l in layers if l in self.layers]
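One subtlety left implicit here: on Python 3, map() returns a lazy one-shot iterator rather than a list. That is harmless in this hunk, because the result is consumed exactly once by the list comprehension that follows, but callers that index it or iterate twice must materialize it. A toy illustration (names and stack assumed):

    # Stand-ins for the masker's layer names and stack:
    stack = ["layer_b"]
    layers = ["layer_a", 0]   # a mix of names and stack indices

    # The commit uses six.string_types where str appears below.
    resolved = map(lambda x: x if isinstance(x, str) else stack[x], layers)

    # Python 3: 'resolved' is a one-shot iterator; wrap it to reuse it.
    resolved = list(resolved)   # ['layer_a', 'layer_b']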