diff --git a/scipy/__init__.py b/scipy/__init__.py index 008ae2dfc743..2c48fcd10925 100644 --- a/scipy/__init__.py +++ b/scipy/__init__.py @@ -58,6 +58,7 @@ __numpy_version__ --- Numpy version string """ +from __future__ import division, print_function, absolute_import __all__ = ['test'] diff --git a/scipy/cluster/__init__.py b/scipy/cluster/__init__.py index 0a0e82ccca79..5defee81deb0 100644 --- a/scipy/cluster/__init__.py +++ b/scipy/cluster/__init__.py @@ -20,10 +20,11 @@ to generate flat clusters, and visualizing clusters with dendrograms. """ +from __future__ import division, print_function, absolute_import __all__ = ['vq', 'hierarchy'] -import vq, hierarchy +from . import vq, hierarchy from numpy.testing import Tester test = Tester().test diff --git a/scipy/cluster/doc/ex1.py b/scipy/cluster/doc/ex1.py index 22121d2bc13a..38923aaf90df 100755 --- a/scipy/cluster/doc/ex1.py +++ b/scipy/cluster/doc/ex1.py @@ -1,3 +1,5 @@ +from __future__ import division, print_function, absolute_import + from scipy import * from scipy.cluster import vq @@ -33,5 +35,5 @@ def cluster_data(data,cluster_cnt,iter=20,thresh=1e-5): clusters = cluster_data(data,2) for i in range(len(clusters)): - print 'cluster %d:' % i - print clusters[i] + print('cluster %d:' % i) + print(clusters[i]) diff --git a/scipy/cluster/hierarchy.py b/scipy/cluster/hierarchy.py index 451134223ea7..f43618e64233 100644 --- a/scipy/cluster/hierarchy.py +++ b/scipy/cluster/hierarchy.py @@ -130,6 +130,7 @@ * Mathematica is a registered trademark of The Wolfram Research, Inc. """ +from __future__ import division, print_function, absolute_import # Copyright (C) Damian Eads, 2007-2008. New BSD License. @@ -172,9 +173,11 @@ import warnings import numpy as np -import _hierarchy_wrap +from . import _hierarchy_wrap import scipy.spatial.distance as distance +from scipy.lib.six import string_types +from scipy.lib.six.moves import xrange _cpy_non_euclid_methods = {'single': 0, 'complete': 1, 'average': 2, 'weighted': 6} @@ -1231,7 +1234,7 @@ def is_valid_im(R, warning=False, throw=False, name=None): else: raise ValueError('Inconsistency matrix contains negative ' 'link counts.') - except Exception, e: + except Exception as e: if throw: raise if warning: @@ -1345,7 +1348,7 @@ def is_valid_linkage(Z, warning=False, throw=False, name=None): # % name) # else: # raise ValueError('Linkage does not use all clusters.') - except Exception, e: + except Exception as e: if throw: raise if warning: @@ -1811,7 +1814,7 @@ def _plot_dendrogram(icoords, dcoords, ivl, p, n, mh, orientation, for color in colors_used: color_to_lines[color] = [] for (xline, yline, color) in zip(xlines, ylines, color_list): - color_to_lines[color].append(zip(xline, yline)) + color_to_lines[color].append(list(zip(xline, yline))) colors_to_collections = {} # Construct the collections. 
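(Aside, not part of the patch: a minimal sketch of the two idioms the hierarchy.py hunks above apply throughout, namely the "except ... as ..." spelling that Python 3 requires, and wrapping lazy zip() results in list() wherever the old code relied on getting a real list back. All names below are illustrative only.)

try:
    value = int("not a number")
except ValueError as exc:                # Py2-only "except ValueError, exc" is a SyntaxError on Py3
    value = 0

pairs = list(zip([1, 2, 3], "abc"))      # zip() is lazy on Py3; list() restores the eager Py2 result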
@@ -1874,9 +1877,9 @@ def set_link_color_palette(palette): """ - if type(palette) not in (types.ListType, types.TupleType): + if type(palette) not in (list, tuple): raise TypeError("palette must be a list or tuple") - _ptypes = [type(p) == types.StringType for p in palette] + _ptypes = [isinstance(p, string_types) for p in palette] if False in _ptypes: raise TypeError("all palette list elements must be color strings") @@ -2088,7 +2091,7 @@ def llf(id): is_valid_linkage(Z, throw=True, name='Z') Zs = Z.shape n = Zs[0] + 1 - if type(p) in (types.IntType, types.FloatType): + if type(p) in (int, float): p = int(p) else: raise TypeError('The second argument must be a number') @@ -2117,7 +2120,7 @@ def llf(id): else: ivl = [] if color_threshold is None or \ - (type(color_threshold) == types.StringType and + (isinstance(color_threshold, string_types) and color_threshold == 'default'): color_threshold = max(Z[:, 2]) * 0.7 R = {'icoord': icoord_list, 'dcoord': dcoord_list, 'ivl': ivl, @@ -2454,7 +2457,7 @@ def _dendrogram_calculate_info(Z, p, truncate_mode, \ dcoord_list.append([uah, h, h, ubh]) if link_color_func is not None: v = link_color_func(int(i)) - if type(v) != types.StringType: + if not isinstance(v, string_types): raise TypeError("link_color_func must return a matplotlib " "color string!") color_list.append(v) @@ -2501,7 +2504,7 @@ def is_isomorphic(T1, T2): n = T1S[0] d = {} for i in xrange(0, n): - if T1[i] in d.keys(): + if T1[i] in list(d.keys()): if d[T1[i]] != T2[i]: return False else: @@ -2602,7 +2605,7 @@ def maxRstat(Z, R, i): R = np.asarray(R, order='c') is_valid_linkage(Z, throw=True, name='Z') is_valid_im(R, throw=True, name='R') - if type(i) is not types.IntType: + if type(i) is not int: raise TypeError('The third argument must be an integer.') if i < 0 or i > 3: raise ValueError('i must be an integer between 0 and 3 inclusive.') @@ -2711,13 +2714,13 @@ def _leader_identify(tr, T): right = tr.get_right() lfid = _leader_identify(left, T) rfid = _leader_identify(right, T) - print 'ndid: %d lid: %d lfid: %d rid: %d rfid: %d' \ - % (tr.get_id(), left.get_id(), lfid, right.get_id(), rfid) + print('ndid: %d lid: %d lfid: %d rid: %d rfid: %d' \ + % (tr.get_id(), left.get_id(), lfid, right.get_id(), rfid)) if lfid != rfid: if lfid != -1: - print 'leader: %d with tag %d' % (left.id, lfid) + print('leader: %d with tag %d' % (left.id, lfid)) if rfid != -1: - print 'leader: %d with tag %d' % (right.id, rfid) + print('leader: %d with tag %d' % (right.id, rfid)) return -1 else: return lfid diff --git a/scipy/cluster/setup.py b/scipy/cluster/setup.py index 715bbac5de80..d16f3f19ae6d 100755 --- a/scipy/cluster/setup.py +++ b/scipy/cluster/setup.py @@ -1,4 +1,6 @@ #!/usr/bin/env python +from __future__ import division, print_function, absolute_import + import sys from os.path import join diff --git a/scipy/cluster/setupscons.py b/scipy/cluster/setupscons.py index 069449d2a178..8e2462f58d08 100755 --- a/scipy/cluster/setupscons.py +++ b/scipy/cluster/setupscons.py @@ -1,4 +1,5 @@ #!/usr/bin/env python +from __future__ import division, print_function, absolute_import from os.path import join diff --git a/scipy/cluster/tests/test_hierarchy.py b/scipy/cluster/tests/test_hierarchy.py index 1333a4f94e2c..dcd9d88d8338 100644 --- a/scipy/cluster/tests/test_hierarchy.py +++ b/scipy/cluster/tests/test_hierarchy.py @@ -32,12 +32,15 @@ # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE 
POSSIBILITY OF SUCH DAMAGE. +from __future__ import division, print_function, absolute_import import os.path import numpy as np from numpy.testing import TestCase, run_module_suite +from scipy.lib.six.moves import xrange + from scipy.cluster.hierarchy import linkage, from_mlab_linkage, to_mlab_linkage,\ num_obs_linkage, inconsistent, cophenet, fclusterdata, fcluster, \ is_isomorphic, single, complete, weighted, centroid, leaders, \ @@ -790,7 +793,7 @@ def test_correspond_2_and_up(self): def test_correspond_4_and_up(self): "Tests correspond(Z, y) on linkage and CDMs over observation sets of different sizes. Correspondance should be false." - for (i, j) in zip(range(2, 4), range(3, 5)) + zip(range(3, 5), range(2, 4)): + for (i, j) in list(zip(list(range(2, 4)), list(range(3, 5)))) + list(zip(list(range(3, 5)), list(range(2, 4)))): y = np.random.rand(i*(i-1)/2) y2 = np.random.rand(j*(j-1)/2) Z = linkage(y) @@ -800,7 +803,7 @@ def test_correspond_4_and_up(self): def test_correspond_4_and_up_2(self): "Tests correspond(Z, y) on linkage and CDMs over observation sets of different sizes. Correspondance should be false." - for (i, j) in zip(range(2, 7), range(16, 21)) + zip(range(2, 7), range(16, 21)): + for (i, j) in list(zip(list(range(2, 7)), list(range(16, 21)))) + list(zip(list(range(2, 7)), list(range(16, 21)))): y = np.random.rand(i*(i-1)/2) y2 = np.random.rand(j*(j-1)/2) Z = linkage(y) diff --git a/scipy/cluster/tests/test_vq.py b/scipy/cluster/tests/test_vq.py index 4bc007503f8f..5d23a9054279 100644 --- a/scipy/cluster/tests/test_vq.py +++ b/scipy/cluster/tests/test_vq.py @@ -2,6 +2,7 @@ # David Cournapeau # Last Change: Wed Nov 05 07:00 PM 2008 J +from __future__ import division, print_function, absolute_import import os.path import warnings @@ -16,7 +17,7 @@ from scipy.cluster import _vq TESTC=True except ImportError: - print "== Error while importing _vq, not testing C imp of vq ==" + print("== Error while importing _vq, not testing C imp of vq ==") TESTC=False #Optional: @@ -59,7 +60,7 @@ def test_vq(self): assert_array_equal(label1, LABEL1) tlabel1, tdist = vq(X, initc) else: - print "== not testing C imp of vq ==" + print("== not testing C imp of vq ==") #def test_py_vq_1d(self): # """Test special rank 1 vq algo, python implementation.""" @@ -82,7 +83,7 @@ def test_vq_1d(self): assert_array_equal(a, ta) assert_array_equal(b, tb) else: - print "== not testing C imp of vq (rank 1) ==" + print("== not testing C imp of vq (rank 1) ==") def test__vq_sametype(self): if TESTC: diff --git a/scipy/cluster/tests/vq_test.py b/scipy/cluster/tests/vq_test.py index f215a5c10138..8cfbb41ce519 100755 --- a/scipy/cluster/tests/vq_test.py +++ b/scipy/cluster/tests/vq_test.py @@ -1,3 +1,5 @@ +from __future__ import division, print_function, absolute_import + import numpy as np from scipy.cluster import vq @@ -28,7 +30,7 @@ def read_data(name): f = open(name,'r') data = [] for line in f.readlines(): - data.append(map(float,string.split(line))) + data.append(list(map(float,string.split(line)))) f.close() return array(data) diff --git a/scipy/cluster/vq.py b/scipy/cluster/vq.py index 420ad18a5a47..80192e7b95f6 100644 --- a/scipy/cluster/vq.py +++ b/scipy/cluster/vq.py @@ -67,6 +67,8 @@ code book. """ +from __future__ import division, print_function, absolute_import + __docformat__ = 'restructuredtext' __all__ = ['whiten', 'vq', 'kmeans', 'kmeans2'] @@ -188,7 +190,7 @@ def vq(obs, code_book): """ try: - import _vq + from . 
import _vq ct = common_type(obs, code_book) c_obs = obs.astype(ct) c_code_book = code_book.astype(ct) @@ -289,7 +291,7 @@ def _py_vq_1d(obs, code_book): dist = np.zeros((n, nc)) for i in range(nc): dist[:, i] = np.sum(obs - code_book[i]) - print dist + print(dist) code = argmin(dist) min_dist = dist[code] diff --git a/scipy/constants/__init__.py b/scipy/constants/__init__.py index dcfb865156b7..aea9df2c2937 100644 --- a/scipy/constants/__init__.py +++ b/scipy/constants/__init__.py @@ -282,11 +282,12 @@ http://physics.nist.gov/cuu/Constants/index.html """ +from __future__ import division, print_function, absolute_import # Modules contributed by BasSw (wegwerp@gmail.com) -from codata import * -from constants import * -from codata import _obsolete_constants +from .codata import * +from .constants import * +from .codata import _obsolete_constants _constant_names = [(_k.lower(), _k, _v) for _k, _v in physical_constants.items() @@ -297,6 +298,6 @@ __doc__ = __doc__ % dict(constant_names=_constant_names) del _constant_names -__all__ = filter(lambda s:not s.startswith('_'),dir()) +__all__ = [s for s in dir() if not s.startswith('_')] from numpy.testing import Tester test = Tester().test diff --git a/scipy/constants/codata.py b/scipy/constants/codata.py index af1d58e7d89a..d63a35398cad 100644 --- a/scipy/constants/codata.py +++ b/scipy/constants/codata.py @@ -21,6 +21,8 @@ http://physics.nist.gov/cuu/Constants/ """ +from __future__ import division, print_function, absolute_import + import warnings from math import pi, sqrt @@ -801,16 +803,16 @@ def parse_constants(d): # check obsolete values _obsolete_constants = {} -for k in physical_constants.iterkeys(): +for k in physical_constants.keys(): if k not in _current_constants: _obsolete_constants[k] = True # generate some additional aliases _aliases = {} -for k in _physical_constants_2002.iterkeys(): +for k in _physical_constants_2002.keys(): if 'magn.' in k: _aliases[k] = k.replace('magn.', 'mag.') -for k in _physical_constants_2006.iterkeys(): +for k in _physical_constants_2006.keys(): if 'momentum' in k: _aliases[k] = k.replace('momentum', 'mom.um') @@ -935,7 +937,7 @@ def find(sub=None, disp=False): """ if sub is None: - result = _current_constants.keys() + result = list(_current_constants.keys()) else: result = [key for key in _current_constants \ if sub.lower() in key.lower()] @@ -943,7 +945,7 @@ def find(sub=None, disp=False): result.sort() if disp: for key in result: - print key + print(key) return else: return result diff --git a/scipy/constants/constants.py b/scipy/constants/constants.py index e1cb7acab81d..599ba4e013a4 100644 --- a/scipy/constants/constants.py +++ b/scipy/constants/constants.py @@ -6,6 +6,7 @@ The list is not meant to be comprehensive, but just a convenient list for everyday use. 
""" +from __future__ import division, print_function, absolute_import """ BasSw 2006 @@ -17,7 +18,7 @@ """ import math as _math -from codata import value as _cd +from .codata import value as _cd import numpy as _np #mathematical constants diff --git a/scipy/constants/setup.py b/scipy/constants/setup.py index 5c1e2d806868..adc42a8fddb2 100644 --- a/scipy/constants/setup.py +++ b/scipy/constants/setup.py @@ -1,3 +1,4 @@ +from __future__ import division, print_function, absolute_import def configuration(parent_package='', top_path=None): diff --git a/scipy/constants/tests/test_codata.py b/scipy/constants/tests/test_codata.py index 9cffbee79ee9..63086cafed49 100644 --- a/scipy/constants/tests/test_codata.py +++ b/scipy/constants/tests/test_codata.py @@ -1,3 +1,5 @@ +from __future__ import division, print_function, absolute_import + import warnings from scipy.constants import constants, codata, find, value diff --git a/scipy/constants/tests/test_constants.py b/scipy/constants/tests/test_constants.py index bd6a10227191..a40e60be9c94 100644 --- a/scipy/constants/tests/test_constants.py +++ b/scipy/constants/tests/test_constants.py @@ -1,3 +1,5 @@ +from __future__ import division, print_function, absolute_import + from numpy.testing import run_module_suite, assert_equal import scipy.constants as sc diff --git a/scipy/fftpack/__init__.py b/scipy/fftpack/__init__.py index 96263cbb0e9b..2af338b593ad 100644 --- a/scipy/fftpack/__init__.py +++ b/scipy/fftpack/__init__.py @@ -80,6 +80,8 @@ destroy_zfftnd_cache """ +from __future__ import division, print_function, absolute_import + __all__ = ['fft','ifft','fftn','ifftn','rfft','irfft', 'fft2','ifft2', @@ -90,18 +92,18 @@ 'rfftfreq' ] -from fftpack_version import fftpack_version as __version__ +from .fftpack_version import fftpack_version as __version__ -from basic import * -from pseudo_diffs import * -from helper import * +from .basic import * +from .pseudo_diffs import * +from .helper import * from numpy.dual import register_func for k in ['fft', 'ifft', 'fftn', 'ifftn', 'fft2', 'ifft2']: register_func(k, eval(k)) del k, register_func -from realtransforms import * +from .realtransforms import * __all__.extend(['dct', 'idct', 'dst', 'idst']) from numpy.testing import Tester diff --git a/scipy/fftpack/basic.py b/scipy/fftpack/basic.py index a4eca06635c9..a798f12b6a38 100644 --- a/scipy/fftpack/basic.py +++ b/scipy/fftpack/basic.py @@ -2,13 +2,14 @@ Discrete Fourier Transforms - basic.py """ # Created by Pearu Peterson, August,September 2002 +from __future__ import division, print_function, absolute_import __all__ = ['fft','ifft','fftn','ifftn','rfft','irfft', 'fft2','ifft2'] from numpy import zeros, swapaxes import numpy -import _fftpack +from . 
import _fftpack import atexit atexit.register(_fftpack.destroy_zfft_cache) @@ -75,7 +76,7 @@ def _fake_rfft(x, n, *a, **kw): return _fftpack.drfft(x, n, *a, **kw).astype(numpy.float32) def _fake_cfftnd(x, shape, *a, **kw): - if numpy.all(map(_is_safe_size, shape)): + if numpy.all(list(map(_is_safe_size, shape))): return _fftpack.cfftnd(x, shape, *a, **kw) else: return _fftpack.zfftnd(x, shape, *a, **kw).astype(numpy.complex64) @@ -421,7 +422,7 @@ def _raw_fftnd(x, s, axes, direction, overwrite_x, work_function): s = tuple(s) if axes is None: noaxes = True - axes = range(-x.ndim, 0) + axes = list(range(-x.ndim, 0)) else: noaxes = False if len(axes) != len(s): @@ -450,7 +451,7 @@ def _raw_fftnd(x, s, axes, direction, overwrite_x, work_function): # We can now operate on the axes waxes, the p last axes (p = len(axes)), by # fixing the shape of the input array to 1 for any axis the fft is not # carried upon. - waxes = range(x.ndim - len(axes), x.ndim) + waxes = list(range(x.ndim - len(axes), x.ndim)) shape = numpy.ones(x.ndim) shape[waxes] = s diff --git a/scipy/fftpack/benchmarks/bench_basic.py b/scipy/fftpack/benchmarks/bench_basic.py index 16d6d9716bad..7b090f2f72bc 100644 --- a/scipy/fftpack/benchmarks/bench_basic.py +++ b/scipy/fftpack/benchmarks/bench_basic.py @@ -1,5 +1,8 @@ """ Test functions for fftpack.basic module """ + +from __future__ import division, print_function, absolute_import + import sys from numpy.testing import * from scipy.fftpack import ifft, fft, fftn, irfft, rfft @@ -8,6 +11,7 @@ import numpy.fft from numpy.random import rand + def random(size): return rand(*size) @@ -34,13 +38,13 @@ class TestFft(TestCase): def bench_random(self): from numpy.fft import fft as numpy_fft - print - print ' Fast Fourier Transform' - print '=================================================' - print ' | real input | complex input ' - print '-------------------------------------------------' - print ' size | scipy | numpy | scipy | numpy ' - print '-------------------------------------------------' + print() + print(' Fast Fourier Transform') + print('=================================================') + print(' | real input | complex input ') + print('-------------------------------------------------') + print(' size | scipy | numpy | scipy | numpy ') + print('-------------------------------------------------') for size,repeat in [(100,7000),(1000,2000), (256,10000), (512,10000), @@ -49,7 +53,7 @@ def bench_random(self): (2048*2,500), (2048*4,500), ]: - print '%5s' % size, + print('%5s' % size, end=' ') sys.stdout.flush() for x in [random([size]).astype(double), @@ -58,27 +62,27 @@ def bench_random(self): if size > 500: y = fft(x) else: y = direct_dft(x) assert_array_almost_equal(fft(x),y) - print '|%8.2f' % measure('fft(x)',repeat), + print('|%8.2f' % measure('fft(x)',repeat), end=' ') sys.stdout.flush() assert_array_almost_equal(numpy_fft(x),y) - print '|%8.2f' % measure('numpy_fft(x)',repeat), + print('|%8.2f' % measure('numpy_fft(x)',repeat), end=' ') sys.stdout.flush() - print ' (secs for %s calls)' % (repeat) + print(' (secs for %s calls)' % (repeat)) sys.stdout.flush() class TestIfft(TestCase): def bench_random(self): from numpy.fft import ifft as numpy_ifft - print - print ' Inverse Fast Fourier Transform' - print '===============================================' - print ' | real input | complex input ' - print '-----------------------------------------------' - print ' size | scipy | numpy | scipy | numpy ' - print '-----------------------------------------------' + print() + 
print(' Inverse Fast Fourier Transform') + print('===============================================') + print(' | real input | complex input ') + print('-----------------------------------------------') + print(' size | scipy | numpy | scipy | numpy ') + print('-----------------------------------------------') for size,repeat in [(100,7000),(1000,2000), (256,10000), (512,10000), @@ -87,7 +91,7 @@ def bench_random(self): (2048*2,500), (2048*4,500), ]: - print '%5s' % size, + print('%5s' % size, end=' ') sys.stdout.flush() for x in [random([size]).astype(double), @@ -96,25 +100,25 @@ def bench_random(self): if size > 500: y = ifft(x) else: y = direct_idft(x) assert_array_almost_equal(ifft(x),y) - print '|%8.2f' % measure('ifft(x)',repeat), + print('|%8.2f' % measure('ifft(x)',repeat), end=' ') sys.stdout.flush() assert_array_almost_equal(numpy_ifft(x),y) - print '|%8.2f' % measure('numpy_ifft(x)',repeat), + print('|%8.2f' % measure('numpy_ifft(x)',repeat), end=' ') sys.stdout.flush() - print ' (secs for %s calls)' % (repeat) + print(' (secs for %s calls)' % (repeat)) sys.stdout.flush() class TestRfft(TestCase): def bench_random(self): from numpy.fft import rfft as numpy_rfft - print - print 'Fast Fourier Transform (real data)' - print '==================================' - print ' size | scipy | numpy ' - print '----------------------------------' + print() + print('Fast Fourier Transform (real data)') + print('==================================') + print(' size | scipy | numpy ') + print('----------------------------------') for size,repeat in [(100,7000),(1000,2000), (256,10000), (512,10000), @@ -123,17 +127,17 @@ def bench_random(self): (2048*2,500), (2048*4,500), ]: - print '%5s' % size, + print('%5s' % size, end=' ') sys.stdout.flush() x = random([size]).astype(double) - print '|%8.2f' % measure('rfft(x)',repeat), + print('|%8.2f' % measure('rfft(x)',repeat), end=' ') sys.stdout.flush() - print '|%8.2f' % measure('numpy_rfft(x)',repeat), + print('|%8.2f' % measure('numpy_rfft(x)',repeat), end=' ') sys.stdout.flush() - print ' (secs for %s calls)' % (repeat) + print(' (secs for %s calls)' % (repeat)) sys.stdout.flush() class TestIrfft(TestCase): @@ -141,11 +145,11 @@ class TestIrfft(TestCase): def bench_random(self): from numpy.fft import irfft as numpy_irfft - print - print 'Inverse Fast Fourier Transform (real data)' - print '==================================' - print ' size | scipy | numpy ' - print '----------------------------------' + print() + print('Inverse Fast Fourier Transform (real data)') + print('==================================') + print(' size | scipy | numpy ') + print('----------------------------------') for size,repeat in [(100,7000),(1000,2000), (256,10000), (512,10000), @@ -154,7 +158,7 @@ def bench_random(self): (2048*2,500), (2048*4,500), ]: - print '%5s' % size, + print('%5s' % size, end=' ') sys.stdout.flush() x = random([size]).astype(double) @@ -166,14 +170,14 @@ def bench_random(self): x1[-1] = x[-1] y = irfft(x) - print '|%8.2f' % measure('irfft(x)',repeat), + print('|%8.2f' % measure('irfft(x)',repeat), end=' ') sys.stdout.flush() assert_array_almost_equal(numpy_irfft(x1,size),y) - print '|%8.2f' % measure('numpy_irfft(x1,size)',repeat), + print('|%8.2f' % measure('numpy_irfft(x1,size)',repeat), end=' ') sys.stdout.flush() - print ' (secs for %s calls)' % (repeat) + print(' (secs for %s calls)' % (repeat)) sys.stdout.flush() @@ -181,18 +185,18 @@ class TestFftn(TestCase): def bench_random(self): from numpy.fft import fftn as numpy_fftn - print - print ' 
Multi-dimensional Fast Fourier Transform' - print '===================================================' - print ' | real input | complex input ' - print '---------------------------------------------------' - print ' size | scipy | numpy | scipy | numpy ' - print '---------------------------------------------------' + print() + print(' Multi-dimensional Fast Fourier Transform') + print('===================================================') + print(' | real input | complex input ') + print('---------------------------------------------------') + print(' size | scipy | numpy | scipy | numpy ') + print('---------------------------------------------------') for size,repeat in [((100,100),100),((1000,100),7), ((256,256),10), ((512,512),3), ]: - print '%9s' % ('%sx%s'%size), + print('%9s' % ('%sx%s'%size), end=' ') sys.stdout.flush() for x in [random(size).astype(double), @@ -202,14 +206,14 @@ def bench_random(self): #if size > 500: y = fftn(x) #else: y = direct_dft(x) assert_array_almost_equal(fftn(x),y) - print '|%8.2f' % measure('fftn(x)',repeat), + print('|%8.2f' % measure('fftn(x)',repeat), end=' ') sys.stdout.flush() assert_array_almost_equal(numpy_fftn(x),y) - print '|%8.2f' % measure('numpy_fftn(x)',repeat), + print('|%8.2f' % measure('numpy_fftn(x)',repeat), end=' ') sys.stdout.flush() - print ' (secs for %s calls)' % (repeat) + print(' (secs for %s calls)' % (repeat)) sys.stdout.flush() diff --git a/scipy/fftpack/benchmarks/bench_pseudo_diffs.py b/scipy/fftpack/benchmarks/bench_pseudo_diffs.py index ddb1fbc669ee..1519c3e6f796 100644 --- a/scipy/fftpack/benchmarks/bench_pseudo_diffs.py +++ b/scipy/fftpack/benchmarks/bench_pseudo_diffs.py @@ -1,5 +1,8 @@ """ Benchmark functions for fftpack.pseudo_diffs module """ + +from __future__ import division, print_function, absolute_import + import sys from numpy import arange, sin, cos, pi, exp, tanh, sign @@ -55,11 +58,11 @@ def direct_shift(x,a,period=None): class TestDiff(TestCase): def bench_random(self): - print - print 'Differentiation of periodic functions' - print '=====================================' - print ' size | convolve | naive' - print '-------------------------------------' + print() + print('Differentiation of periodic functions') + print('=====================================') + print(' size | convolve | naive') + print('-------------------------------------') for size,repeat in [(100,1500),(1000,300), (256,1500), (512,1000), @@ -68,7 +71,7 @@ def bench_random(self): (2048*2,100), (2048*4,50), ]: - print '%6s' % size, + print('%6s' % size, end=' ') sys.stdout.flush() x = arange (size)*2*pi/size if size<2000: @@ -77,21 +80,21 @@ def bench_random(self): f = sin(x)*cos(4*x) assert_array_almost_equal(diff(f,1),direct_diff(f,1)) assert_array_almost_equal(diff(f,2),direct_diff(f,2)) - print '| %9.2f' % measure('diff(f,3)',repeat), + print('| %9.2f' % measure('diff(f,3)',repeat), end=' ') sys.stdout.flush() - print '| %9.2f' % measure('direct_diff(f,3)',repeat), + print('| %9.2f' % measure('direct_diff(f,3)',repeat), end=' ') sys.stdout.flush() - print ' (secs for %s calls)' % (repeat) + print(' (secs for %s calls)' % (repeat)) class TestTilbert(TestCase): def bench_random(self): - print - print ' Tilbert transform of periodic functions' - print '=========================================' - print ' size | optimized | naive' - print '-----------------------------------------' + print() + print(' Tilbert transform of periodic functions') + print('=========================================') + print(' size | optimized | naive') + 
print('-----------------------------------------') for size,repeat in [(100,1500),(1000,300), (256,1500), (512,1000), @@ -100,7 +103,7 @@ def bench_random(self): (2048*2,100), (2048*4,50), ]: - print '%6s' % size, + print('%6s' % size, end=' ') sys.stdout.flush() x = arange (size)*2*pi/size if size<2000: @@ -108,21 +111,21 @@ def bench_random(self): else: f = sin(x)*cos(4*x) assert_array_almost_equal(tilbert(f,1),direct_tilbert(f,1)) - print '| %9.2f' % measure('tilbert(f,1)',repeat), + print('| %9.2f' % measure('tilbert(f,1)',repeat), end=' ') sys.stdout.flush() - print '| %9.2f' % measure('direct_tilbert(f,1)',repeat), + print('| %9.2f' % measure('direct_tilbert(f,1)',repeat), end=' ') sys.stdout.flush() - print ' (secs for %s calls)' % (repeat) + print(' (secs for %s calls)' % (repeat)) class TestHilbert(TestCase): def bench_random(self): - print - print ' Hilbert transform of periodic functions' - print '=========================================' - print ' size | optimized | naive' - print '-----------------------------------------' + print() + print(' Hilbert transform of periodic functions') + print('=========================================') + print(' size | optimized | naive') + print('-----------------------------------------') for size,repeat in [(100,1500),(1000,300), (256,1500), (512,1000), @@ -131,7 +134,7 @@ def bench_random(self): (2048*2,100), (2048*4,50), ]: - print '%6s' % size, + print('%6s' % size, end=' ') sys.stdout.flush() x = arange (size)*2*pi/size if size<2000: @@ -139,21 +142,21 @@ def bench_random(self): else: f = sin(x)*cos(4*x) assert_array_almost_equal(hilbert(f),direct_hilbert(f)) - print '| %9.2f' % measure('hilbert(f)',repeat), + print('| %9.2f' % measure('hilbert(f)',repeat), end=' ') sys.stdout.flush() - print '| %9.2f' % measure('direct_hilbert(f)',repeat), + print('| %9.2f' % measure('direct_hilbert(f)',repeat), end=' ') sys.stdout.flush() - print ' (secs for %s calls)' % (repeat) + print(' (secs for %s calls)' % (repeat)) class TestShift(TestCase): def bench_random(self): - print - print ' Shifting periodic functions' - print '==============================' - print ' size | optimized | naive' - print '------------------------------' + print() + print(' Shifting periodic functions') + print('==============================') + print(' size | optimized | naive') + print('------------------------------') for size,repeat in [(100,1500),(1000,300), (256,1500), (512,1000), @@ -162,7 +165,7 @@ def bench_random(self): (2048*2,100), (2048*4,50), ]: - print '%6s' % size, + print('%6s' % size, end=' ') sys.stdout.flush() x = arange (size)*2*pi/size a = 1 @@ -174,11 +177,11 @@ def bench_random(self): sf = sin(x+a)*cos(4*(x+a)) assert_array_almost_equal(direct_shift(f,1),sf) assert_array_almost_equal(shift(f,1),sf) - print '| %9.2f' % measure('shift(f,a)',repeat), + print('| %9.2f' % measure('shift(f,a)',repeat), end=' ') sys.stdout.flush() - print '| %9.2f' % measure('direct_shift(f,a)',repeat), + print('| %9.2f' % measure('direct_shift(f,a)',repeat), end=' ') sys.stdout.flush() - print ' (secs for %s calls)' % (repeat) + print(' (secs for %s calls)' % (repeat)) if __name__ == "__main__": run_module_suite() diff --git a/scipy/fftpack/fftpack_version.py b/scipy/fftpack/fftpack_version.py index 1c9e5ddebb7e..a8716ff960cd 100644 --- a/scipy/fftpack/fftpack_version.py +++ b/scipy/fftpack/fftpack_version.py @@ -1,3 +1,5 @@ +from __future__ import division, print_function, absolute_import + major = 0 minor = 4 micro = 3 diff --git a/scipy/fftpack/helper.py 
b/scipy/fftpack/helper.py index 6d7eeab9ac0c..e167be97c02e 100644 --- a/scipy/fftpack/helper.py +++ b/scipy/fftpack/helper.py @@ -1,3 +1,5 @@ +from __future__ import division, print_function, absolute_import + __all__ = ['fftshift', 'ifftshift', 'fftfreq', 'rfftfreq'] from numpy import arange diff --git a/scipy/fftpack/pseudo_diffs.py b/scipy/fftpack/pseudo_diffs.py index 9e37fff57573..5a7bf6b30fa0 100644 --- a/scipy/fftpack/pseudo_diffs.py +++ b/scipy/fftpack/pseudo_diffs.py @@ -2,6 +2,8 @@ Differential and pseudo-differential operators. """ # Created by Pearu Peterson, September 2002 +from __future__ import division, print_function, absolute_import + __all__ = ['diff', 'tilbert','itilbert','hilbert','ihilbert', @@ -9,7 +11,7 @@ 'shift'] from numpy import pi, asarray, sin, cos, sinh, cosh, tanh, iscomplexobj -import convolve +from . import convolve from scipy.fftpack.basic import _datacopied diff --git a/scipy/fftpack/realtransforms.py b/scipy/fftpack/realtransforms.py index 656997cf6189..3e9d4c2ef47c 100644 --- a/scipy/fftpack/realtransforms.py +++ b/scipy/fftpack/realtransforms.py @@ -1,6 +1,8 @@ """ Real spectrum tranforms (DCT, DST, MDCT) """ +from __future__ import division, print_function, absolute_import + __all__ = ['dct', 'idct', 'dst', 'idst'] diff --git a/scipy/fftpack/setup.py b/scipy/fftpack/setup.py index 97a9f9553aa0..92d681b7d825 100755 --- a/scipy/fftpack/setup.py +++ b/scipy/fftpack/setup.py @@ -1,5 +1,7 @@ #!/usr/bin/env python # Created by Pearu Peterson, August 2002 +from __future__ import division, print_function, absolute_import + from os.path import join diff --git a/scipy/fftpack/setupscons.py b/scipy/fftpack/setupscons.py index ebe3b535fa4e..d5e8494f58ca 100755 --- a/scipy/fftpack/setupscons.py +++ b/scipy/fftpack/setupscons.py @@ -1,5 +1,7 @@ #!/usr/bin/env python # Created by Pearu Peterson, August 2002 +from __future__ import division, print_function, absolute_import + from os.path import join @@ -15,7 +17,7 @@ def configuration(parent_package='',top_path=None): if __name__ == '__main__': from numpy.distutils.core import setup - from fftpack_version import fftpack_version + from .fftpack_version import fftpack_version setup(version=fftpack_version, description='fftpack - Discrete Fourier Transform package', author='Pearu Peterson', diff --git a/scipy/fftpack/tests/gen_fftw_ref.py b/scipy/fftpack/tests/gen_fftw_ref.py index ceba0dcc4813..eedf491f5477 100644 --- a/scipy/fftpack/tests/gen_fftw_ref.py +++ b/scipy/fftpack/tests/gen_fftw_ref.py @@ -1,3 +1,5 @@ +from __future__ import division, print_function, absolute_import + from subprocess import Popen, PIPE, STDOUT import numpy as np diff --git a/scipy/fftpack/tests/gendata.py b/scipy/fftpack/tests/gendata.py index 981b958017c6..0a59c63e56fd 100644 --- a/scipy/fftpack/tests/gendata.py +++ b/scipy/fftpack/tests/gendata.py @@ -1,3 +1,5 @@ +from __future__ import division, print_function, absolute_import + import numpy as np from scipy.io import loadmat diff --git a/scipy/fftpack/tests/test_basic.py b/scipy/fftpack/tests/test_basic.py index 4322757aac0a..fb808bbfa985 100644 --- a/scipy/fftpack/tests/test_basic.py +++ b/scipy/fftpack/tests/test_basic.py @@ -2,6 +2,8 @@ # Created by Pearu Peterson, September 2002 """ Test functions for fftpack.basic module """ +from __future__ import division, print_function, absolute_import + __usage__ = """ Build fftpack: python setup_fftpack.py build @@ -149,7 +151,7 @@ def _test_n_argument_complex(self): def test_djbfft(self): for i in range(2,14): n = 2**i - x = range(n) + x = 
list(range(n)) y = fftpack.zfft(x) y2 = numpy.fft.fft(x) assert_array_almost_equal(y,y2) @@ -201,7 +203,7 @@ def test_definition_real(self): def test_djbfft(self): for i in range(2,14): n = 2**i - x = range(n) + x = list(range(n)) y = fftpack.zfft(x,direction=-1) y2 = numpy.fft.ifft(x) assert_array_almost_equal(y,y2) @@ -285,7 +287,7 @@ def test_djbfft(self): from numpy.fft import fft as numpy_fft for i in range(2,14): n = 2**i - x = range(n) + x = list(range(n)) y2 = numpy_fft(x) y1 = zeros((n,),dtype=double) y1[0] = y2[0].real @@ -331,7 +333,7 @@ def test_djbfft(self): from numpy.fft import ifft as numpy_ifft for i in range(2,14): n = 2**i - x = range(n) + x = list(range(n)) x1 = zeros((n,),dtype=cdouble) x1[0] = x[0] for k in range(1, int(n/2)): diff --git a/scipy/fftpack/tests/test_helper.py b/scipy/fftpack/tests/test_helper.py index 19b7b53d63ce..efbfd0fb3ae9 100644 --- a/scipy/fftpack/tests/test_helper.py +++ b/scipy/fftpack/tests/test_helper.py @@ -2,6 +2,8 @@ # Created by Pearu Peterson, September 2002 """ Test functions for fftpack.helper module """ +from __future__ import division, print_function, absolute_import + __usage__ = """ Build fftpack: python setup_fftpack.py build diff --git a/scipy/fftpack/tests/test_pseudo_diffs.py b/scipy/fftpack/tests/test_pseudo_diffs.py index a39d0b03920d..cf4b87efb8dc 100644 --- a/scipy/fftpack/tests/test_pseudo_diffs.py +++ b/scipy/fftpack/tests/test_pseudo_diffs.py @@ -2,6 +2,8 @@ # Created by Pearu Peterson, September 2002 """ Test functions for fftpack.pseudo_diffs module """ +from __future__ import division, print_function, absolute_import + __usage__ = """ Build fftpack: python setup_fftpack.py build diff --git a/scipy/fftpack/tests/test_real_transforms.py b/scipy/fftpack/tests/test_real_transforms.py index d547d18e1c64..3fdb451047d8 100644 --- a/scipy/fftpack/tests/test_real_transforms.py +++ b/scipy/fftpack/tests/test_real_transforms.py @@ -1,4 +1,6 @@ #!/usr/bin/env python +from __future__ import division, print_function, absolute_import + from os.path import join, dirname import numpy as np diff --git a/scipy/integrate/__init__.py b/scipy/integrate/__init__.py index 38e50484341b..202eb759796c 100644 --- a/scipy/integrate/__init__.py +++ b/scipy/integrate/__init__.py @@ -46,12 +46,13 @@ complex_ode -- Convert a complex-valued ODE to real-valued and integrate. """ +from __future__ import division, print_function, absolute_import -from quadrature import * -from odepack import * -from quadpack import * -from _ode import * +from .quadrature import * +from .odepack import * +from .quadpack import * +from ._ode import * -__all__ = filter(lambda s:not s.startswith('_'),dir()) +__all__ = [s for s in dir() if not s.startswith('_')] from numpy.testing import Tester test = Tester().test diff --git a/scipy/integrate/_ode.py b/scipy/integrate/_ode.py index 8b0d3a6614e9..42bb39af891f 100644 --- a/scipy/integrate/_ode.py +++ b/scipy/integrate/_ode.py @@ -37,6 +37,8 @@ class complex_ode real valued system. It supports the real valued solvers (i.e not zvode) and is an alternative to ode with the zvode solver, sometimes performing better. """ +from __future__ import division, print_function, absolute_import + # XXX: Integrators must have: # =========================== @@ -88,9 +90,9 @@ class complex_ode from numpy import asarray, array, zeros, int32, isscalar, real, imag -import vode as _vode -import _dop -import lsoda as _lsoda +from . import vode as _vode +from . import _dop +from . 
import lsoda as _lsoda #------------------------------------------------------------------------------ diff --git a/scipy/integrate/odepack.py b/scipy/integrate/odepack.py index 0fdf197a5ec5..77adfd8c8496 100644 --- a/scipy/integrate/odepack.py +++ b/scipy/integrate/odepack.py @@ -1,8 +1,9 @@ # Author: Travis Oliphant +from __future__ import division, print_function, absolute_import __all__ = ['odeint'] -import _odepack +from . import _odepack from copy import copy _msgs = {2: "Integration successful.", @@ -143,11 +144,11 @@ def odeint(func, y0, t, args=(), Dfun=None, col_deriv=0, full_output=0, full_output, rtol, atol, tcrit, h0, hmax, hmin, ixpr, mxstep, mxhnil, mxordn, mxords) if output[-1] < 0: - print _msgs[output[-1]] - print "Run with full_output = 1 to get quantitative information." + print(_msgs[output[-1]]) + print("Run with full_output = 1 to get quantitative information.") else: if printmessg: - print _msgs[output[-1]] + print(_msgs[output[-1]]) if full_output: output[1]['message'] = _msgs[output[-1]] diff --git a/scipy/integrate/quadpack.py b/scipy/integrate/quadpack.py index 88b4de7f8ad6..58a7016d81f9 100644 --- a/scipy/integrate/quadpack.py +++ b/scipy/integrate/quadpack.py @@ -1,6 +1,7 @@ # Author: Travis Oliphant 2001 +from __future__ import division, print_function, absolute_import -import _quadpack +from . import _quadpack import sys import numpy from numpy import inf, Inf @@ -347,13 +348,13 @@ def thefunc(x,*myargs): y = -x func = myargs[0] myargs = (y,) + myargs[1:] - return apply(func,myargs) + return func(*myargs) else: def thefunc(x,*myargs): y = -x func = myargs[0] myargs = (y,) + myargs[1:] - return -apply(func,myargs) + return -func(*myargs) args = (func,) + args return _quadpack._qawfe(thefunc,-b,wvar,integr,args,full_output,epsabs,limlst,limit,maxp1) else: diff --git a/scipy/integrate/quadrature.py b/scipy/integrate/quadrature.py index e6c51d84430f..7ce392a5a046 100644 --- a/scipy/integrate/quadrature.py +++ b/scipy/integrate/quadrature.py @@ -1,3 +1,4 @@ +from __future__ import division, print_function, absolute_import __all__ = ['fixed_quad','quadrature','romberg','trapz','simps','romb', 'cumtrapz','newton_cotes'] @@ -10,6 +11,8 @@ import math import warnings +from scipy.lib.six.moves import xrange + class AccuracyWarning(Warning): pass @@ -451,8 +454,8 @@ def romb(y, dx=1.0, axis=-1, show=False): if show: if not isscalar(R[(1,1)]): - print "*** Printing table only supported for integrals" + \ - " of a single data set." + print("*** Printing table only supported for integrals" + \ + " of a single data set.") else: try: precis = show[0] @@ -464,13 +467,13 @@ def romb(y, dx=1.0, axis=-1, show=False): width = 8 formstr = "%" + str(width) + '.' + str(precis)+'f' - print "\n Richardson Extrapolation Table for Romberg Integration " - print "====================================================================" + print("\n Richardson Extrapolation Table for Romberg Integration ") + print("====================================================================") for i in range(1,k+1): for j in range(1,i+1): - print formstr % R[(i,j)], - print - print "====================================================================\n" + print(formstr % R[(i,j)], end=' ') + print() + print("====================================================================\n") return R[(k,k)] @@ -523,18 +526,18 @@ def _romberg_diff(b, c, k): def _printresmat(function, interval, resmat): # Print the Romberg result matrix. 
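(Aside, not part of the patch: the integrate hunks above rewrite Py2 print statements and the removed apply() builtin. A minimal runnable sketch of both idioms, with illustrative names only:)

from __future__ import print_function

def call(func, args):
    return func(*args)                   # replaces the Py2 builtin apply(func, args), gone in Py3

for value in (1, 2, 3):
    print('%6d' % value, end=' ')        # Py2's trailing-comma print becomes end=' '
print()                                  # newline to finish the row
print(call(max, (3, 1, 2)))              # -> 3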
i = j = 0 - print 'Romberg integration of', `function`, - print 'from', interval - print '' - print '%6s %9s %9s' % ('Steps', 'StepSize', 'Results') + print('Romberg integration of', repr(function), end=' ') + print('from', interval) + print('') + print('%6s %9s %9s' % ('Steps', 'StepSize', 'Results')) for i in range(len(resmat)): - print '%6d %9f' % (2**i, (interval[1]-interval[0])/(2.**i)), + print('%6d %9f' % (2**i, (interval[1]-interval[0])/(2.**i)), end=' ') for j in range(i+1): - print '%9f' % (resmat[i][j]), - print '' - print '' - print 'The final result is', resmat[i][j], - print 'after', 2**(len(resmat)-1)+1, 'function evaluations.' + print('%9f' % (resmat[i][j]), end=' ') + print('') + print('') + print('The final result is', resmat[i][j], end=' ') + print('after', 2**(len(resmat)-1)+1, 'function evaluations.') def romberg(function, a, b, args=(), tol=1.48e-8, rtol=1.48e-8, show=False, divmax=10, vec_func=False): diff --git a/scipy/integrate/setup.py b/scipy/integrate/setup.py index 477423d5f2c5..2816a5233333 100755 --- a/scipy/integrate/setup.py +++ b/scipy/integrate/setup.py @@ -1,4 +1,5 @@ #!/usr/bin/env python +from __future__ import division, print_function, absolute_import from os.path import join diff --git a/scipy/integrate/setupscons.py b/scipy/integrate/setupscons.py index 085c7affc3f7..aed47c822d55 100755 --- a/scipy/integrate/setupscons.py +++ b/scipy/integrate/setupscons.py @@ -1,4 +1,6 @@ #!/usr/bin/env python +from __future__ import division, print_function, absolute_import + from os.path import join diff --git a/scipy/integrate/tests/test_integrate.py b/scipy/integrate/tests/test_integrate.py index b2ca753603b2..550362f0c257 100644 --- a/scipy/integrate/tests/test_integrate.py +++ b/scipy/integrate/tests/test_integrate.py @@ -2,11 +2,14 @@ """ Tests for numerical integration. 
""" +from __future__ import division, print_function, absolute_import import numpy from numpy import arange, zeros, array, dot, sqrt, cos, sin, eye, pi, exp, \ allclose +from scipy.lib.six.moves import xrange + from numpy.testing import assert_, TestCase, run_module_suite, \ assert_array_almost_equal, assert_raises, assert_allclose, \ assert_array_equal diff --git a/scipy/integrate/tests/test_quadpack.py b/scipy/integrate/tests/test_quadpack.py index 62a32cf14fbe..dd40da4455fa 100644 --- a/scipy/integrate/tests/test_quadpack.py +++ b/scipy/integrate/tests/test_quadpack.py @@ -1,6 +1,9 @@ +from __future__ import division, print_function, absolute_import + from numpy import sqrt, cos, sin, arctan, exp, log, pi, Inf from numpy.testing import assert_, TestCase, run_module_suite, dec from scipy.integrate import quad, dblquad, tplquad +from scipy.lib.six.moves import xrange import sys import math @@ -10,7 +13,8 @@ except ImportError: _ctypes_missing = True -def assert_quad((value, err), tabledValue, errTol=1.5e-8): +def assert_quad(value_and_err, tabledValue, errTol=1.5e-8): + value, err = value_and_err assert_(abs(value-tabledValue) < err, (value, tabledValue, err)) if errTol is not None: assert_(err < errTol, (err, errTol)) diff --git a/scipy/integrate/tests/test_quadrature.py b/scipy/integrate/tests/test_quadrature.py index c2fb7f7e4601..6f660104184e 100644 --- a/scipy/integrate/tests/test_quadrature.py +++ b/scipy/integrate/tests/test_quadrature.py @@ -1,3 +1,5 @@ +from __future__ import division, print_function, absolute_import + import numpy as np from numpy import cos, sin, pi from numpy.testing import TestCase, run_module_suite, assert_equal, \ diff --git a/scipy/interpolate/__init__.py b/scipy/interpolate/__init__.py index a123ce5139c7..92aec2054a7e 100644 --- a/scipy/interpolate/__init__.py +++ b/scipy/interpolate/__init__.py @@ -144,19 +144,20 @@ `scipy.signal.cspline2d`. """ +from __future__ import division, print_function, absolute_import -from interpolate import * -from fitpack import * +from .interpolate import * +from .fitpack import * # New interface to fitpack library: -from fitpack2 import * +from .fitpack2 import * -from rbf import Rbf +from .rbf import Rbf -from polyint import * +from .polyint import * -from ndgriddata import * +from .ndgriddata import * -__all__ = filter(lambda s:not s.startswith('_'),dir()) +__all__ = [s for s in dir() if not s.startswith('_')] from numpy.testing import Tester test = Tester().test diff --git a/scipy/interpolate/fitpack.py b/scipy/interpolate/fitpack.py index 1a8ce87638da..84e2326072c5 100644 --- a/scipy/interpolate/fitpack.py +++ b/scipy/interpolate/fitpack.py @@ -21,18 +21,20 @@ For univariate splines: cocosp, concon, fourco, insert For bivariate splines: profil, regrid, parsur, surev """ +from __future__ import division, print_function, absolute_import + __all__ = ['splrep', 'splprep', 'splev', 'splint', 'sproot', 'spalde', 'bisplrep', 'bisplev', 'insert'] __version__ = "$Revision$"[10:-1] -import _fitpack +from . import _fitpack from numpy import atleast_1d, array, ones, zeros, sqrt, ravel, transpose, \ dot, sin, cos, pi, arange, empty, int32, asarray myasarray = atleast_1d # Try to replace _fitpack interface with # f2py-generated version -import dfitpack +from . 
import dfitpack _iermess = {0:["""\ The spline has a residual sum of squares fp such that abs(fp-s)/s<=0.001""",None], @@ -211,7 +213,7 @@ def splprep(x,w=None,u=None,ub=None,ue=None,k=3,task=0,s=None,t=None, if per: for i in range(idim): if x[i][0]!=x[i][-1]: - if quiet<2:print 'Warning: Setting x[%d][%d]=x[%d][0]'%(i,m,i) + if quiet<2:print('Warning: Setting x[%d][%d]=x[%d][0]'%(i,m,i)) x[i][-1]=x[i][0] if not 0 < idim < 11: raise TypeError('0 < idim < 11 must hold') @@ -268,11 +270,11 @@ def splprep(x,w=None,u=None,ub=None,ue=None,k=3,task=0,s=None,t=None, c.shape=idim,n-k-1 tcku = [t,list(c),k],u if ier<=0 and not quiet: - print _iermess[ier][0] - print "\tk=%d n=%d m=%d fp=%f s=%f"%(k,len(t),m,fp,s) + print(_iermess[ier][0]) + print("\tk=%d n=%d m=%d fp=%f s=%f"%(k,len(t),m,fp,s)) if ier>0 and not full_output: if ier in [1,2,3]: - print "Warning: "+_iermess[ier][0] + print("Warning: "+_iermess[ier][0]) else: try: raise _iermess[ier][1](_iermess[ier][0]) @@ -456,11 +458,11 @@ def splrep(x,y,w=None,xb=None,xe=None,k=3,task=0,s=None,t=None, n,c,fp,ier = dfitpack.percur(task, x, y, w, t, wrk, iwrk, k, s) tck = (t[:n],c[:n],k) if ier<=0 and not quiet: - print _iermess[ier][0] - print "\tk=%d n=%d m=%d fp=%f s=%f"%(k,len(t),m,fp,s) + print(_iermess[ier][0]) + print("\tk=%d n=%d m=%d fp=%f s=%f"%(k,len(t),m,fp,s)) if ier>0 and not full_output: if ier in [1,2,3]: - print "Warning: "+_iermess[ier][0] + print("Warning: "+_iermess[ier][0]) else: try: raise _iermess[ier][1](_iermess[ier][0]) @@ -538,7 +540,7 @@ def splev(x, tck, der=0, ext=0): except: parametric = False if parametric: - return map(lambda c, x=x, t=t, k=k, der=der : splev(x, [t,c,k], der, ext), c) + return list(map(lambda c, x=x, t=t, k=k, der=der : splev(x, [t,c,k], der, ext), c)) else: if not (0 <= der <= k): raise ValueError("0<=der=%d<=k=%d must hold"%(der,k)) @@ -605,7 +607,7 @@ def splint(a,b,tck,full_output=0): except: parametric = False if parametric: - return _ntlist(map(lambda c,a=a,b=b,t=t,k=k:splint(a,b,[t,c,k]),c)) + return _ntlist(list(map(lambda c,a=a,b=b,t=t,k=k:splint(a,b,[t,c,k]),c))) else: aint,wrk=_fitpack._splint(t,c,k,a,b) if full_output: return aint,wrk @@ -659,7 +661,7 @@ def sproot(tck,mest=10): except: parametric = False if parametric: - return _ntlist(map(lambda c,t=t,k=k,mest=mest:sproot([t,c,k],mest),c)) + return _ntlist(list(map(lambda c,t=t,k=k,mest=mest:sproot([t,c,k],mest),c))) else: if len(t)<8: raise TypeError("The number of knots %d>=8" % len(t)) @@ -668,7 +670,7 @@ def sproot(tck,mest=10): raise TypeError("Invalid input data. 
t1<=..<=t41: - return map(lambda x,tck=tck:spalde(x,tck),x) + return list(map(lambda x,tck=tck:spalde(x,tck),x)) d,ier=_fitpack._spalde(t,c,k,x[0]) if ier==0: return d if ier==10: @@ -882,14 +884,14 @@ def bisplrep(x,y,z,w=None,xb=None,xe=None,yb=None,ye=None,kx=3,ky=3,task=0, ierm=min(11,max(-3,ier)) if ierm<=0 and not quiet: - print _iermess2[ierm][0] - print "\tkx,ky=%d,%d nx,ny=%d,%d m=%d fp=%f s=%f"%(kx,ky,len(tx), - len(ty),m,fp,s) + print(_iermess2[ierm][0]) + print("\tkx,ky=%d,%d nx,ny=%d,%d m=%d fp=%f s=%f"%(kx,ky,len(tx), + len(ty),m,fp,s)) if ierm>0 and not full_output: if ier in [1,2,3,4,5]: - print "Warning: "+_iermess2[ierm][0] - print "\tkx,ky=%d,%d nx,ny=%d,%d m=%d fp=%f s=%f"%(kx,ky,len(tx), - len(ty),m,fp,s) + print("Warning: "+_iermess2[ierm][0]) + print("\tkx,ky=%d,%d nx,ny=%d,%d m=%d fp=%f s=%f"%(kx,ky,len(tx), + len(ty),m,fp,s)) else: try: raise _iermess2[ierm][1](_iermess2[ierm][0]) diff --git a/scipy/interpolate/fitpack2.py b/scipy/interpolate/fitpack2.py index 1a3ac9cbba26..7a3e68715cef 100644 --- a/scipy/interpolate/fitpack2.py +++ b/scipy/interpolate/fitpack2.py @@ -6,6 +6,7 @@ to double routines by Pearu Peterson. """ # Created by Pearu Peterson, June,August 2003 +from __future__ import division, print_function, absolute_import __all__ = [ 'UnivariateSpline', @@ -26,8 +27,8 @@ from numpy import zeros, concatenate, alltrue, ravel, all, diff, array, ones import numpy as np -import fitpack -import dfitpack +from . import fitpack +from . import dfitpack ################ Univariate spline #################### diff --git a/scipy/interpolate/generate_interpnd.py b/scipy/interpolate/generate_interpnd.py index 93ca3dbba640..61424b6ab2d7 100755 --- a/scipy/interpolate/generate_interpnd.py +++ b/scipy/interpolate/generate_interpnd.py @@ -1,4 +1,6 @@ #!/usr/bin/env python +from __future__ import division, print_function, absolute_import + import tempfile import subprocess import os diff --git a/scipy/interpolate/interpnd_info.py b/scipy/interpolate/interpnd_info.py index 864d227f3991..e3314f4a0ff1 100644 --- a/scipy/interpolate/interpnd_info.py +++ b/scipy/interpolate/interpnd_info.py @@ -3,6 +3,8 @@ interpolation routines in `interpnd.pyx`. """ +from __future__ import division, print_function, absolute_import + from sympy import * def _estimate_gradients_2d_global(): @@ -28,9 +30,9 @@ def _estimate_gradients_2d_global(): B = Matrix([[intwpp2.coeff(df1).subs(df2, 0)], [intwpp2.coeff(df2).subs(df1, 0)]]) / 2 - print "A" - print A - print "B" - print B - print "solution" - print A.inv() * B + print("A") + print(A) + print("B") + print(B) + print("solution") + print(A.inv() * B) diff --git a/scipy/interpolate/interpolate.py b/scipy/interpolate/interpolate.py index 70409dcd9747..5ad8d1fe0749 100644 --- a/scipy/interpolate/interpolate.py +++ b/scipy/interpolate/interpolate.py @@ -1,6 +1,6 @@ - """ Classes for interpolating values. """ +from __future__ import division, print_function, absolute_import __all__ = ['interp1d', 'interp2d', 'spline', 'spleval', 'splmake', 'spltopp', 'ppform', 'lagrange'] @@ -12,9 +12,11 @@ import scipy.special as spec import math -import fitpack -import _fitpack -import dfitpack +from scipy.lib.six.moves import xrange + +from . import fitpack +from . import _fitpack +from . import dfitpack def reduce_sometrue(a): all = a @@ -312,7 +314,7 @@ def __init__(self, x, y, kind='linear', axis=-1, if kind in ('linear', 'nearest'): # Make a "view" of the y array that is rotated to the interpolation # axis. 
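(Aside, not part of the patch: with absolute_import in effect, intra-package imports such as "import fitpack" stop resolving, which is why the hunks above spell them "from . import fitpack". Likewise xrange exists only on Python 2; the scipy.lib.six.moves alias imported above behaves like the small shim sketched here:)

try:
    xrange                               # Python 2: the builtin already exists
except NameError:
    xrange = range                       # Python 3: range is lazy and plays the same role

total = sum(i for i in xrange(5))        # 0+1+2+3+4 == 10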
- axes = range(y.ndim) + axes = list(range(y.ndim)) del axes[self.axis] axes.append(self.axis) oriented_y = y.transpose(axes) @@ -324,7 +326,7 @@ def __init__(self, x, y, kind='linear', axis=-1, self.x_bds = (x[1:] + x[:-1]) / 2.0 self._call = self._call_nearest else: - axes = range(y.ndim) + axes = list(range(y.ndim)) del axes[self.axis] axes.insert(0, self.axis) oriented_y = y.transpose(axes) @@ -439,12 +441,12 @@ def __call__(self, x_new): return asarray(y_new) elif self._kind in ('linear', 'nearest'): y_new[..., out_of_bounds] = self.fill_value - axes = range(ny - nx) + axes = list(range(ny - nx)) axes[self.axis:self.axis] = range(ny - nx, ny) return y_new.transpose(axes) else: y_new[out_of_bounds] = self.fill_value - axes = range(nx, ny) + axes = list(range(nx, ny)) axes[self.axis:self.axis] = range(nx) return y_new.transpose(axes) @@ -534,7 +536,7 @@ def _dot0(a, b): if b.ndim <= 2: return dot(a, b) else: - axes = range(b.ndim) + axes = list(range(b.ndim)) axes.insert(-1, 0) axes.pop(0) return dot(a, b.transpose(axes)) @@ -832,7 +834,7 @@ def splmake(xk,yk,order=3,kind='smoothest',conds=None): coefs = func(xk, yk, order, conds, B) return xk, coefs, order -def spleval((xj,cvals,k),xnew,deriv=0): +def spleval(xck,xnew,deriv=0): """Evaluate a fixed spline represented by the given tuple at the new x-values. The xj values are the interior knot points. The approximation region is xj[0] to xj[-1]. If N+1 is the length of xj, then cvals should @@ -845,6 +847,7 @@ def spleval((xj,cvals,k),xnew,deriv=0): N-d, then the result is xnew.shape + cvals.shape[1:] providing the interpolation of multiple curves. """ + (xj,cvals,k) = xck oldshape = np.shape(xnew) xx = np.ravel(xnew) sh = cvals.shape[1:] diff --git a/scipy/interpolate/interpolate_wrapper.py b/scipy/interpolate/interpolate_wrapper.py index 39253e7a5541..c09720a44733 100644 --- a/scipy/interpolate/interpolate_wrapper.py +++ b/scipy/interpolate/interpolate_wrapper.py @@ -1,9 +1,10 @@ """ helper_funcs.py. scavenged from enthought,interpolate """ +from __future__ import division, print_function, absolute_import import numpy as np -import _interpolate # C extension. Does all the real work. +from . import _interpolate # C extension. Does all the real work. def atleast_1d_and_contiguous(ary, dtype = np.float64): return np.atleast_1d( np.ascontiguousarray(ary, dtype) ) diff --git a/scipy/interpolate/ndgriddata.py b/scipy/interpolate/ndgriddata.py index 31f907da4e79..51c2aad40d4f 100644 --- a/scipy/interpolate/ndgriddata.py +++ b/scipy/interpolate/ndgriddata.py @@ -4,9 +4,10 @@ .. 
versionadded:: 0.9 """ +from __future__ import division, print_function, absolute_import import numpy as np -from interpnd import LinearNDInterpolator, NDInterpolatorBase, \ +from .interpnd import LinearNDInterpolator, NDInterpolatorBase, \ CloughTocher2DInterpolator, _ndim_coords_from_arrays from scipy.spatial import cKDTree @@ -163,7 +164,7 @@ def griddata(points, values, xi, method='linear', fill_value=np.nan): ndim = points.shape[-1] if ndim == 1 and method in ('nearest', 'linear', 'cubic'): - from interpolate import interp1d + from .interpolate import interp1d points = points.ravel() if isinstance(xi, tuple): if len(xi) != 1: diff --git a/scipy/interpolate/polyint.py b/scipy/interpolate/polyint.py index f6a024452602..f717f4717d60 100644 --- a/scipy/interpolate/polyint.py +++ b/scipy/interpolate/polyint.py @@ -1,6 +1,10 @@ +from __future__ import division, print_function, absolute_import + import numpy as np from scipy.misc import factorial +from scipy.lib.six.moves import xrange + __all__ = ["KroghInterpolator", "krogh_interpolate", "BarycentricInterpolator", "barycentric_interpolate", "PiecewisePolynomial", "piecewise_polynomial_interpolate","approximate_taylor_polynomial", "pchip"] class KroghInterpolator(object): @@ -968,4 +972,4 @@ def pchip(x, y): """ derivs = _find_derivatives(x,y) - return PiecewisePolynomial(x, zip(y, derivs), orders=3, direction=None) + return PiecewisePolynomial(x, list(zip(y, derivs)), orders=3, direction=None) diff --git a/scipy/interpolate/rbf.py b/scipy/interpolate/rbf.py index 5e5b43879e64..f13cc679aecc 100644 --- a/scipy/interpolate/rbf.py +++ b/scipy/interpolate/rbf.py @@ -42,11 +42,15 @@ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
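(Aside, not part of the patch: Python 3 removed tuple parameters (PEP 3113), which is why signatures such as assert_quad((value, err), ...) and spleval((xj, cvals, k), ...) in the hunks above now take a single argument and unpack it in the body. A hypothetical example of the same rewrite:)

def midpoint(point_pair):
    (x0, y0), (x1, y1) = point_pair      # unpack inside the body instead of in the signature
    return ((x0 + x1) / 2.0, (y0 + y1) / 2.0)

print(midpoint(((0.0, 0.0), (2.0, 4.0))))    # -> (1.0, 2.0)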
""" +from __future__ import division, print_function, absolute_import + import sys from numpy import (sqrt, log, asarray, newaxis, all, dot, exp, eye, float_) from scipy import linalg +from scipy.lib.six import callable, get_method_function, \ + get_function_code __all__ = ['Rbf'] @@ -151,13 +155,13 @@ def _init_function(self, r): val = self.function allow_one = True elif hasattr(self.function, "im_func"): - val = self.function.im_func + val = get_method_function(self.function) elif hasattr(self.function, "__call__"): - val = self.function.__call__.im_func + val = get_method_function(self.function.__call__) else: raise ValueError("Cannot determine number of arguments to function") - argcount = val.func_code.co_argcount + argcount = get_function_code(val).co_argcount if allow_one and argcount == 1: self._function = self.function elif argcount == 2: diff --git a/scipy/interpolate/setup.py b/scipy/interpolate/setup.py index 8b3604207387..442d7e112cfc 100755 --- a/scipy/interpolate/setup.py +++ b/scipy/interpolate/setup.py @@ -1,4 +1,5 @@ #!/usr/bin/env python +from __future__ import division, print_function, absolute_import from os.path import join diff --git a/scipy/interpolate/setupscons.py b/scipy/interpolate/setupscons.py index e3d5e1a6a397..b8e83d58b783 100755 --- a/scipy/interpolate/setupscons.py +++ b/scipy/interpolate/setupscons.py @@ -1,4 +1,5 @@ #!/usr/bin/env python +from __future__ import division, print_function, absolute_import from os.path import join diff --git a/scipy/interpolate/tests/test_fitpack.py b/scipy/interpolate/tests/test_fitpack.py index 122c7336b7d1..daa6d9b1b048 100644 --- a/scipy/interpolate/tests/test_fitpack.py +++ b/scipy/interpolate/tests/test_fitpack.py @@ -1,3 +1,5 @@ +from __future__ import division, print_function, absolute_import + from numpy.testing import assert_equal, assert_almost_equal, assert_array_equal, \ assert_array_almost_equal, assert_allclose, assert_, TestCase from numpy import array, diff, shape, asarray, pi, sin, cos, arange, dot, \ @@ -71,13 +73,13 @@ def err_est(k, d): nd.append((err, tol)) nk.append(nd) put("\nf = %s s=S_k(x;t,c) x in [%s, %s] > [%s, %s]"%(f(None), - `round(xb,3)`,`round(xe,3)`, - `round(a,3)`,`round(b,3)`)) + repr(round(xb,3)),repr(round(xe,3)), + repr(round(a,3)),repr(round(b,3)))) if at: str="at knots" else: str="at the middle of nodes" - put(" per=%d s=%s Evaluation %s"%(per,`s`,str)) + put(" per=%d s=%s Evaluation %s"%(per,repr(s),str)) put(" k : |f-s|^2 |f'-s'| |f''-.. |f'''-. |f''''- |f'''''") k=1 for l in nk: @@ -107,9 +109,9 @@ def err_est(k, d): tck=splrep(x,v,s=s,per=per,k=k,xe=xe) nk.append([splint(ia,ib,tck),spalde(dx,tck)]) put("\nf = %s s=S_k(x;t,c) x in [%s, %s] > [%s, %s]"%(f(None), - `round(xb,3)`,`round(xe,3)`, - `round(a,3)`,`round(b,3)`)) - put(" per=%d s=%s N=%d [a, b] = [%s, %s] dx=%s"%(per,`s`,N,`round(ia,3)`,`round(ib,3)`,`round(dx,3)`)) + repr(round(xb,3)),repr(round(xe,3)), + repr(round(a,3)),repr(round(b,3)))) + put(" per=%d s=%s N=%d [a, b] = [%s, %s] dx=%s"%(per,repr(s),N,repr(round(ia,3)),repr(round(ib,3)),repr(round(dx,3)))) put(" k : int(s,[a,b]) Int.Error Rel. 
error of s^(d)(dx) d = 0, .., k") k=1 for r in nk: @@ -135,14 +137,14 @@ def check_3(self,f=f1,per=0,s=0,a=0,b=2*pi,N=20,xb=None,xe=None, v=f(x) nk=[] put(" k : Roots of s(x) approx %s x in [%s,%s]:"%\ - (f(None),`round(a,3)`,`round(b,3)`)) + (f(None),repr(round(a,3)),repr(round(b,3)))) for k in range(1,6): tck=splrep(x,v,s=s,per=per,k=k,xe=xe) roots = sproot(tck) if k == 3: assert_allclose(roots, pi*array([1, 2, 3, 4]), rtol=1e-3) - put(' %d : %s'%(k,`roots.tolist()`)) + put(' %d : %s'%(k,repr(roots.tolist()))) def check_4(self,f=f1,per=0,s=0,a=0,b=2*pi,N=20,xb=None,xe=None, ia=0,ib=2*pi,dx=0.2*pi): @@ -152,7 +154,7 @@ def check_4(self,f=f1,per=0,s=0,a=0,b=2*pi,N=20,xb=None,xe=None, x1=a+(b-a)*arange(1,N,dtype=float)/float(N-1) # middle points of the nodes v,v1=f(x),f(x1) nk=[] - put(" u = %s N = %d"%(`round(dx,3)`,N)) + put(" u = %s N = %d"%(repr(round(dx,3)),N)) put(" k : [x(u), %s(x(u))] Error of splprep Error of splrep "%(f(0,None))) for k in range(1,6): tckp,u=splprep([x,v],s=s,per=per,k=k,nest=-1) @@ -163,7 +165,7 @@ def check_4(self,f=f1,per=0,s=0,a=0,b=2*pi,N=20,xb=None,xe=None, assert_(err1 < 1e-2) assert_(err2 < 1e-2) put(" %d : %s %.1e %.1e"%\ - (k,`map(lambda x:round(x,3),uv)`, + (k,repr([round(z,3) for z in uv]), err1, err2)) put("Derivatives of parametric cubic spline at u (first function):") @@ -171,7 +173,7 @@ def check_4(self,f=f1,per=0,s=0,a=0,b=2*pi,N=20,xb=None,xe=None, tckp,u=splprep([x,v],s=s,per=per,k=k,nest=-1) for d in range(1,k+1): uv=splev(dx,tckp,d) - put(" %s "%(`uv[0]`)) + put(" %s "%(repr(uv[0]))) def check_5(self,f=f2,kx=3,ky=3,xb=0,xe=2*pi,yb=0,ye=2*pi,Nx=20,Ny=20,s=0): x=xb+(xe-xb)*arange(Nx+1,dtype=float)/float(Nx) diff --git a/scipy/interpolate/tests/test_fitpack2.py b/scipy/interpolate/tests/test_fitpack2.py index 34a4d9221711..bf8def161157 100644 --- a/scipy/interpolate/tests/test_fitpack2.py +++ b/scipy/interpolate/tests/test_fitpack2.py @@ -1,5 +1,7 @@ #!/usr/bin/env python # Created by Pearu Peterson, June 2003 +from __future__ import division, print_function, absolute_import + import warnings from numpy.testing import assert_equal, assert_almost_equal, assert_array_equal, \ diff --git a/scipy/interpolate/tests/test_interpnd.py b/scipy/interpolate/tests/test_interpnd.py index 31baed6e38ff..fbb3c626c742 100644 --- a/scipy/interpolate/tests/test_interpnd.py +++ b/scipy/interpolate/tests/test_interpnd.py @@ -1,3 +1,5 @@ +from __future__ import division, print_function, absolute_import + import numpy as np from numpy.testing import assert_equal, assert_allclose, assert_almost_equal, \ run_module_suite, assert_raises @@ -144,8 +146,8 @@ def _check_accuracy(self, func, x=None, tol=1e-6, alternate=False, **kw): try: assert_allclose(a, b, **kw) except AssertionError: - print abs(a - b) - print ip.grad + print(abs(a - b)) + print(ip.grad) raise def test_linear_smoketest(self): diff --git a/scipy/interpolate/tests/test_interpolate.py b/scipy/interpolate/tests/test_interpolate.py index 93cd2e3367c0..3900a619a08c 100644 --- a/scipy/interpolate/tests/test_interpolate.py +++ b/scipy/interpolate/tests/test_interpolate.py @@ -1,3 +1,5 @@ +from __future__ import division, print_function, absolute_import + from numpy.testing import assert_, assert_equal, assert_almost_equal, \ assert_array_almost_equal, assert_raises, assert_array_equal, \ dec, TestCase, run_module_suite diff --git a/scipy/interpolate/tests/test_interpolate_wrapper.py b/scipy/interpolate/tests/test_interpolate_wrapper.py index 2675f6da2db7..d1188af032e5 100644 --- 
a/scipy/interpolate/tests/test_interpolate_wrapper.py +++ b/scipy/interpolate/tests/test_interpolate_wrapper.py @@ -1,5 +1,6 @@ """ module to test interpolate_wrapper.py """ +from __future__ import division, print_function, absolute_import # Unit Test import unittest diff --git a/scipy/interpolate/tests/test_ndgriddata.py b/scipy/interpolate/tests/test_ndgriddata.py index c29e9ce8b736..48995f96af70 100644 --- a/scipy/interpolate/tests/test_ndgriddata.py +++ b/scipy/interpolate/tests/test_ndgriddata.py @@ -1,3 +1,5 @@ +from __future__ import division, print_function, absolute_import + import numpy as np from numpy.testing import assert_equal, assert_array_equal, assert_allclose, \ run_module_suite diff --git a/scipy/interpolate/tests/test_polyint.py b/scipy/interpolate/tests/test_polyint.py index 74db358a9f1f..ff2306c2d962 100644 --- a/scipy/interpolate/tests/test_polyint.py +++ b/scipy/interpolate/tests/test_polyint.py @@ -1,3 +1,4 @@ +from __future__ import division, print_function, absolute_import from numpy.testing import assert_almost_equal, assert_array_equal, \ TestCase, run_module_suite @@ -5,6 +6,7 @@ BarycentricInterpolator, barycentric_interpolate, \ PiecewisePolynomial, piecewise_polynomial_interpolate, \ approximate_taylor_polynomial +from scipy.lib.six.moves import xrange import scipy import numpy as np from scipy.interpolate import splrep, splev diff --git a/scipy/interpolate/tests/test_rbf.py b/scipy/interpolate/tests/test_rbf.py index a4045dc708bb..81b075562809 100644 --- a/scipy/interpolate/tests/test_rbf.py +++ b/scipy/interpolate/tests/test_rbf.py @@ -1,6 +1,8 @@ #!/usr/bin/env python # Created by John Travers, Robert Hetland, 2007 """ Test functions for rbf module """ +from __future__ import division, print_function, absolute_import + import numpy as np from numpy.testing import (assert_, assert_array_almost_equal, diff --git a/scipy/interpolate/tests/test_regression.py b/scipy/interpolate/tests/test_regression.py index 08ad96f796ad..42507cb8fba8 100644 --- a/scipy/interpolate/tests/test_regression.py +++ b/scipy/interpolate/tests/test_regression.py @@ -1,3 +1,5 @@ +from __future__ import division, print_function, absolute_import + import numpy as np import scipy.interpolate as interp from numpy.testing import assert_almost_equal, TestCase diff --git a/scipy/io/__init__.py b/scipy/io/__init__.py index 49e502a01d80..531fb5e871f8 100644 --- a/scipy/io/__init__.py +++ b/scipy/io/__init__.py @@ -80,18 +80,20 @@ netcdf_variable - A data object for the netcdf module """ +from __future__ import division, print_function, absolute_import + # matfile read and write -from matlab import loadmat, savemat, whosmat, byteordercodes +from .matlab import loadmat, savemat, whosmat, byteordercodes # netCDF file support -from netcdf import netcdf_file, netcdf_variable +from .netcdf import netcdf_file, netcdf_variable -from data_store import save_as_module -from mmio import mminfo, mmread, mmwrite -from idl import readsav -from harwell_boeing import hb_read, hb_write +from .data_store import save_as_module +from .mmio import mminfo, mmread, mmwrite +from .idl import readsav +from .harwell_boeing import hb_read, hb_write -__all__ = filter(lambda s:not s.startswith('_'),dir()) +__all__ = [s for s in dir() if not s.startswith('_')] from numpy.testing import Tester test = Tester().test diff --git a/scipy/io/arff/__init__.py b/scipy/io/arff/__init__.py index e8e34e419f21..30855518c301 100644 --- a/scipy/io/arff/__init__.py +++ b/scipy/io/arff/__init__.py @@ -35,9 +35,10 @@ \tcolor's type is 
nominal, range is ('red', 'green', 'blue', 'yellow', 'black') """ +from __future__ import division, print_function, absolute_import -from arffread import * -import arffread +from .arffread import * +from . import arffread __all__ = arffread.__all__ diff --git a/scipy/io/arff/arffread.py b/scipy/io/arff/arffread.py index cf1e10dc633d..b835fc9a472c 100644 --- a/scipy/io/arff/arffread.py +++ b/scipy/io/arff/arffread.py @@ -1,11 +1,14 @@ #! /usr/bin/env python # Last Change: Mon Aug 20 08:00 PM 2007 J +from __future__ import division, print_function, absolute_import + import re import itertools import numpy as np from scipy.io.arff.utils import partial +from scipy.lib.six import next """A module to read arff files.""" @@ -86,7 +89,7 @@ def get_nominal(attribute): def read_data_list(ofile): """Read each line of the iterable and put it in a list.""" - data = [ofile.next()] + data = [next(ofile)] if data[0].strip()[0] == '{': raise ValueError("This looks like a sparse ARFF: not supported yet") data.extend([i for i in ofile]) @@ -95,7 +98,7 @@ def read_data_list(ofile): def get_ndata(ofile): """Read the whole file to get number of data attributes.""" - data = [ofile.next()] + data = [next(ofile)] loc = 1 if data[0].strip()[0] == '{': raise ValueError("This looks like a sparse ARFF: not supported yet") @@ -219,10 +222,10 @@ def tokenize_attribute(iterable, attribute): atrv = mattr.group(1) if r_comattrval.match(atrv): name, type = tokenize_single_comma(atrv) - next_item = iterable.next() + next_item = next(iterable) elif r_wcomattrval.match(atrv): name, type = tokenize_single_wcomma(atrv) - next_item = iterable.next() + next_item = next(iterable) else: # Not sure we should support this, as it does not seem supported by # weka. @@ -242,13 +245,13 @@ def tokenize_multilines(iterable, val): # line with meta character, and try to parse everything up to there. if not r_mcomattrval.match(val): all = [val] - i = iterable.next() + i = next(iterable) while not r_meta.match(i): all.append(i) - i = iterable.next() + i = next(iterable) if r_mend.search(i): raise ValueError("relational attribute not supported yet") - print "".join(all[:-1]) + print("".join(all[:-1])) m = r_comattrval.match("".join(all[:-1])) return m.group(1), m.group(2), i else: @@ -288,11 +291,11 @@ def tokenize_single_wcomma(val): def read_header(ofile): """Read the header of the iterable ofile.""" - i = ofile.next() + i = next(ofile) # Pass first comments while r_comment.match(i): - i = ofile.next() + i = next(ofile) # Header is everything up to DATA attribute ? 
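The arffread hunks above and below systematically replace the Python-2-only iterator method call ofile.next() with the next() builtin, which exists on Python 2.6+ and Python 3 (where the method is spelled __next__). A small self-contained sketch, using made-up ARFF-like lines:

    lines = iter(["@relation weather", "@attribute temp real", "@data"])
    first = next(lines)            # portable replacement for lines.next()
    assert first.startswith("@relation")
    assert next(lines).startswith("@attribute")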
relation = None @@ -310,9 +313,9 @@ def read_header(ofile): relation = isrel.group(1) else: raise ValueError("Error parsing line %s" % i) - i = ofile.next() + i = next(ofile) else: - i = ofile.next() + i = next(ofile) return relation, attributes @@ -513,7 +516,7 @@ def _loadarff(ofile): # Parse the header file try: rel, attr = read_header(ofile) - except ValueError, e: + except ValueError as e: msg = "Error while parsing header, error was: " + str(e) raise ParseArffError(msg) @@ -562,24 +565,24 @@ def _loadarff(ofile): # Get the delimiter from the first line of data: def next_data_line(row_iter): """Assumes we are already in the data part (eg after @data).""" - raw = row_iter.next() + raw = next(row_iter) while r_empty.match(raw): - raw = row_iter.next() + raw = next(row_iter) while r_comment.match(raw): - raw = row_iter.next() + raw = next(row_iter) return raw try: try: dtline = next_data_line(ofile) delim = get_delim(dtline) - except ValueError, e: + except ValueError as e: raise ParseArffError("Error while parsing delimiter: " + str(e)) finally: ofile.seek(0, 0) ofile = go_data(ofile) # skip the @data line - ofile.next() + next(ofile) def generator(row_iter, delim = ','): # TODO: this is where we are spending times (~80%). I think things @@ -595,24 +598,24 @@ def generator(row_iter, delim = ','): # We do not abstract skipping comments and empty lines for performances # reason. - raw = row_iter.next() + raw = next(row_iter) while r_empty.match(raw): - raw = row_iter.next() + raw = next(row_iter) while r_comment.match(raw): - raw = row_iter.next() + raw = next(row_iter) # 'compiling' the range since it does not change # Note, I have already tried zipping the converters and # row elements and got slightly worse performance. - elems = range(ni) + elems = list(range(ni)) row = raw.split(delim) yield tuple([convertors[i](row[i]) for i in elems]) for raw in row_iter: while r_comment.match(raw): - raw = row_iter.next() + raw = next(row_iter) while r_empty.match(raw): - raw = row_iter.next() + raw = next(row_iter) row = raw.split(delim) yield tuple([convertors[i](row[i]) for i in elems]) @@ -634,20 +637,20 @@ def print_attribute(name, tp, data): type = tp[0] if type == 'numeric' or type == 'real' or type == 'integer': min, max, mean, std = basic_stats(data) - print "%s,%s,%f,%f,%f,%f" % (name, type, min, max, mean, std) + print("%s,%s,%f,%f,%f,%f" % (name, type, min, max, mean, std)) else: msg = name + ",{" for i in range(len(tp[1])-1): msg += tp[1][i] + "," msg += tp[1][-1] msg += "}" - print msg + print(msg) def test_weka(filename): data, meta = loadarff(filename) - print len(data.dtype) - print data.size + print(len(data.dtype)) + print(data.size) for i in meta: print_attribute(i,meta[i],data[i]) @@ -659,10 +662,10 @@ def floupi(filename): data, meta = loadarff(filename) from attrselect import print_dataset_info print_dataset_info(data) - print "relation %s, has %d instances" % (meta.name, data.size) + print("relation %s, has %d instances" % (meta.name, data.size)) itp = iter(types) for i in data.dtype.names: - print_attribute(i,itp.next(),data[i]) + print_attribute(i,next(itp),data[i]) #tp = itp.next() #if tp == 'numeric' or tp == 'real' or tp == 'integer': # min, max, mean, std = basic_stats(data[i]) diff --git a/scipy/io/arff/myfunctools.py b/scipy/io/arff/myfunctools.py index 445ed9ab6cde..661838fa8e6c 100644 --- a/scipy/io/arff/myfunctools.py +++ b/scipy/io/arff/myfunctools.py @@ -1,6 +1,7 @@ # Last Change: Mon Aug 20 01:00 PM 2007 J # Implement partial application (should only be used 
if functools is not # available (eg python < 2.5) +from __future__ import division, print_function, absolute_import class partial: def __init__(self, fun, *args, **kwargs): diff --git a/scipy/io/arff/setup.py b/scipy/io/arff/setup.py index 824108517e72..af468458a826 100755 --- a/scipy/io/arff/setup.py +++ b/scipy/io/arff/setup.py @@ -1,4 +1,5 @@ #!/usr/bin/env python +from __future__ import division, print_function, absolute_import def configuration(parent_package='io',top_path=None): from numpy.distutils.misc_util import Configuration diff --git a/scipy/io/arff/tests/test_arffread.py b/scipy/io/arff/tests/test_arffread.py index f5d962430ba8..a4b690d27dc4 100644 --- a/scipy/io/arff/tests/test_arffread.py +++ b/scipy/io/arff/tests/test_arffread.py @@ -1,7 +1,14 @@ #!/usr/bin/env python +from __future__ import division, print_function, absolute_import + import os +import sys from os.path import join as pjoin -from cStringIO import StringIO + +if sys.version_info[0] >= 3: + from io import StringIO +else: + from cStringIO import StringIO import numpy as np diff --git a/scipy/io/arff/utils.py b/scipy/io/arff/utils.py index 3c7f4418c06b..91b08e9fe400 100644 --- a/scipy/io/arff/utils.py +++ b/scipy/io/arff/utils.py @@ -1,7 +1,8 @@ #! /usr/bin/env python # Last Change: Mon Aug 20 02:00 PM 2007 J +from __future__ import division, print_function, absolute_import try: from functools import partial except ImportError: - from myfunctools import partial + from .myfunctools import partial diff --git a/scipy/io/data_store.py b/scipy/io/data_store.py index 9b2ec317f37d..fbc0fe3bddf7 100644 --- a/scipy/io/data_store.py +++ b/scipy/io/data_store.py @@ -25,10 +25,11 @@ 1 """ +from __future__ import division, print_function, absolute_import __all__ = ['save_as_module'] -import dumb_shelve +from . import dumb_shelve import os import numpy as np diff --git a/scipy/io/dumb_shelve.py b/scipy/io/dumb_shelve.py index e22e02ca6b81..f67caf307343 100644 --- a/scipy/io/dumb_shelve.py +++ b/scipy/io/dumb_shelve.py @@ -1,3 +1,5 @@ +from __future__ import division, print_function, absolute_import + from shelve import Shelf try: import zlib @@ -5,7 +7,7 @@ # Some python installations don't have zlib. pass -import cPickle +import pickle class DbfilenameShelf(Shelf): """Shelf implementation using the "anydbm" generic dbm interface. @@ -15,7 +17,7 @@ class DbfilenameShelf(Shelf): """ def __init__(self, filename, flag='c'): - import dumbdbm_patched + from . 
import dumbdbm_patched Shelf.__init__(self, dumbdbm_patched.open(filename, flag)) def __getitem__(self, key): @@ -27,10 +29,10 @@ def __getitem__(self, key): except NameError: r = compressed - return cPickle.loads(r) + return pickle.loads(r) def __setitem__(self, key, value): - s = cPickle.dumps(value,1) + s = pickle.dumps(value,1) try: self.dict[key] = zlib.compress(s) except NameError: diff --git a/scipy/io/dumbdbm_patched.py b/scipy/io/dumbdbm_patched.py index 5cc440ee5aee..1bc9828c0735 100644 --- a/scipy/io/dumbdbm_patched.py +++ b/scipy/io/dumbdbm_patched.py @@ -20,11 +20,14 @@ - support opening for read-only (flag = 'm') """ +from __future__ import division, print_function, absolute_import _os = __import__('os') -import __builtin__ -_open = __builtin__.open +from scipy.lib.six.moves import builtins +from scipy.lib.six import string_types + +_open = builtins.open _BLOCKSIZE = 512 @@ -66,7 +69,7 @@ def _commit(self): except _os.error: pass f = _open(self._dirfile, 'w') for key, (pos, siz) in self._index.items(): - f.write("%s, (%s, %s)\n" % (`key`, `pos`, `siz`)) + f.write("%s, (%s, %s)\n" % (repr(key), repr(pos), repr(siz))) f.close() def __getitem__(self, key): @@ -102,16 +105,17 @@ def _setval(self, pos, val): f.close() return (pos, len(val)) - def _addkey(self, key, (pos, siz)): + def _addkey(self, key, pos_and_siz): + (pos, siz) = pos_and_siz self._index[key] = (pos, siz) f = _open(self._dirfile, 'a') - f.write("%s, (%s, %s)\n" % (`key`, `pos`, `siz`)) + f.write("%s, (%s, %s)\n" % (repr(key), repr(pos), repr(siz))) f.close() def __setitem__(self, key, val): - if not isinstance(key, str) or not isinstance(val, str): + if not isinstance(key, string_types) or not isinstance(val, string_types): raise TypeError("keys and values must be strings") - if not self._index.has_key(key): + if key not in self._index: (pos, siz) = self._addval(val) self._addkey(key, (pos, siz)) else: @@ -131,10 +135,10 @@ def __delitem__(self, key): self._commit() def keys(self): - return self._index.keys() + return list(self._index.keys()) def has_key(self, key): - return self._index.has_key(key) + return key in self._index def __len__(self): return len(self._index) diff --git a/scipy/io/harwell_boeing/__init__.py b/scipy/io/harwell_boeing/__init__.py index ba4641bc3f56..a14a024dcf74 100644 --- a/scipy/io/harwell_boeing/__init__.py +++ b/scipy/io/harwell_boeing/__init__.py @@ -1,3 +1,5 @@ +from __future__ import division, print_function, absolute_import + from scipy.io.harwell_boeing.hb import MalformedHeader, HBInfo, HBFile, \ HBMatrixType, hb_read, hb_write diff --git a/scipy/io/harwell_boeing/_fortran_format_parser.py b/scipy/io/harwell_boeing/_fortran_format_parser.py index 4cb129a271d5..ce6c92d28005 100644 --- a/scipy/io/harwell_boeing/_fortran_format_parser.py +++ b/scipy/io/harwell_boeing/_fortran_format_parser.py @@ -6,6 +6,7 @@ FortranFormatParser can create *Format instances from raw fortran format strings (e.g. '(3I4)', '(10I3)', etc...) 
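A short usage sketch for the parser whose docstring ends here: FortranFormatParser turns format strings such as '(3I4)' into *Format objects. The module is private, so the no-argument constructor and the returned type are assumptions inferred from the imports in hb.py, not a documented API:

    from scipy.io.harwell_boeing._fortran_format_parser import (
        FortranFormatParser, IntFormat)

    parser = FortranFormatParser()
    fmt = parser.parse("(3I4)")     # example string taken from the docstring
    assert isinstance(fmt, IntFormat)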
""" +from __future__ import division, print_function, absolute_import import re import warnings @@ -182,7 +183,7 @@ def __repr__(self): class Tokenizer(object): def __init__(self): - self.tokens = TOKENS.keys() + self.tokens = list(TOKENS.keys()) self.res = [re.compile(TOKENS[i]) for i in self.tokens] def input(self, s): @@ -247,7 +248,7 @@ def parse(self, s): else: tokens.append(t) return self._parse_format(tokens) - except SyntaxError, e: + except SyntaxError as e: raise BadFortranFormat(str(e)) def _get_min(self, tokens): diff --git a/scipy/io/harwell_boeing/hb.py b/scipy/io/harwell_boeing/hb.py index 5284faba647f..b6ca887533a3 100644 --- a/scipy/io/harwell_boeing/hb.py +++ b/scipy/io/harwell_boeing/hb.py @@ -9,6 +9,7 @@ - exponential format for float values, and int format """ +from __future__ import division, print_function, absolute_import # TODO: # - Add more support (symmetric/complex matrices, non-assembled matrices ?) @@ -26,6 +27,7 @@ from scipy.io.harwell_boeing._fortran_format_parser import \ FortranFormatParser, IntFormat, ExpFormat +from scipy.lib.six import string_types __all__ = ["MalformedHeader", "read_hb", "write", "HBInfo", "HBFile", "HBMatrixType"] @@ -326,7 +328,7 @@ def _read_hb_data(content, header): try: return csc_matrix((val, ind-1, ptr-1), shape=(header.nrows, header.ncols)) - except ValueError, e: + except ValueError as e: raise e @@ -493,7 +495,7 @@ def _get_matrix(fid): hb = HBFile(fid) return hb.read_matrix() - if isinstance(file, basestring): + if isinstance(file, string_types): fid = open(file) try: return _get_matrix(fid) @@ -537,7 +539,7 @@ def _set_matrix(fid): hb = HBFile(fid, hb_info) return hb.write_matrix(m) - if isinstance(file, basestring): + if isinstance(file, string_types): fid = open(file, "w") try: return _set_matrix(fid) diff --git a/scipy/io/harwell_boeing/setup.py b/scipy/io/harwell_boeing/setup.py index 36447ec3699d..d4054fe41aff 100755 --- a/scipy/io/harwell_boeing/setup.py +++ b/scipy/io/harwell_boeing/setup.py @@ -1,4 +1,5 @@ #!/usr/bin/env python +from __future__ import division, print_function, absolute_import def configuration(parent_package='',top_path=None): from numpy.distutils.misc_util import Configuration diff --git a/scipy/io/harwell_boeing/setupscons.py b/scipy/io/harwell_boeing/setupscons.py index 1c7ce65cd37a..925cf0d8bc8f 100755 --- a/scipy/io/harwell_boeing/setupscons.py +++ b/scipy/io/harwell_boeing/setupscons.py @@ -1,4 +1,5 @@ #!/usr/bin/env python +from __future__ import division, print_function, absolute_import def configuration(parent_package='',top_path=None): from numpy.distutils.misc_util import Configuration diff --git a/scipy/io/harwell_boeing/tests/test_fortran_format.py b/scipy/io/harwell_boeing/tests/test_fortran_format.py index 1d887fac6d80..2fd040fcc26e 100644 --- a/scipy/io/harwell_boeing/tests/test_fortran_format.py +++ b/scipy/io/harwell_boeing/tests/test_fortran_format.py @@ -1,3 +1,5 @@ +from __future__ import division, print_function, absolute_import + import numpy as np from numpy.testing import TestCase, assert_equal, assert_raises diff --git a/scipy/io/harwell_boeing/tests/test_hb.py b/scipy/io/harwell_boeing/tests/test_hb.py index e69b8008da48..b01b6caee64c 100644 --- a/scipy/io/harwell_boeing/tests/test_hb.py +++ b/scipy/io/harwell_boeing/tests/test_hb.py @@ -1,5 +1,11 @@ +from __future__ import division, print_function, absolute_import + import os -from cStringIO import StringIO +import sys +if sys.version_info[0] >= 3: + from io import StringIO +else: + from StringIO import StringIO import 
tempfile import numpy as np diff --git a/scipy/io/idl.py b/scipy/io/idl.py index 4f19de1044a7..f78f734b12c0 100644 --- a/scipy/io/idl.py +++ b/scipy/io/idl.py @@ -26,10 +26,11 @@ # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # DEALINGS IN THE SOFTWARE. +from __future__ import division, print_function, absolute_import import struct import numpy as np -from numpy.compat import asbytes, asstr +from numpy.compat import asstr import tempfile import zlib import warnings @@ -716,20 +717,20 @@ def readsav(file_name, idict=None, python_dict=False, # Read the signature, which should be 'SR' signature = _read_bytes(f, 2) - if signature != asbytes('SR'): + if signature != b'SR': raise Exception("Invalid SIGNATURE: %s" % signature) # Next, the record format, which is '\x00\x04' for normal .sav # files, and '\x00\x06' for compressed .sav files. recfmt = _read_bytes(f, 2) - if recfmt == asbytes('\x00\x04'): + if recfmt == b'\x00\x04': pass - elif recfmt == asbytes('\x00\x06'): + elif recfmt == b'\x00\x06': if verbose: - print "IDL Save file is compressed" + print("IDL Save file is compressed") if uncompressed_file_name: fout = open(uncompressed_file_name, 'w+b') @@ -737,10 +738,10 @@ def readsav(file_name, idict=None, python_dict=False, fout = tempfile.NamedTemporaryFile(suffix='.sav') if verbose: - print " -> expanding to %s" % fout.name + print(" -> expanding to %s" % fout.name) # Write header - fout.write(asbytes('SR\x00\x04')) + fout.write(b'SR\x00\x04') # Cycle through records while True: @@ -818,48 +819,48 @@ def readsav(file_name, idict=None, python_dict=False, # Print out timestamp info about the file for record in records: if record['rectype'] == "TIMESTAMP": - print "-"*50 - print "Date: %s" % record['date'] - print "User: %s" % record['user'] - print "Host: %s" % record['host'] + print("-"*50) + print("Date: %s" % record['date']) + print("User: %s" % record['user']) + print("Host: %s" % record['host']) break # Print out version info about the file for record in records: if record['rectype'] == "VERSION": - print "-"*50 - print "Format: %s" % record['format'] - print "Architecture: %s" % record['arch'] - print "Operating System: %s" % record['os'] - print "IDL Version: %s" % record['release'] + print("-"*50) + print("Format: %s" % record['format']) + print("Architecture: %s" % record['arch']) + print("Operating System: %s" % record['os']) + print("IDL Version: %s" % record['release']) break # Print out identification info about the file for record in records: if record['rectype'] == "IDENTIFICATON": - print "-"*50 - print "Author: %s" % record['author'] - print "Title: %s" % record['title'] - print "ID Code: %s" % record['idcode'] + print("-"*50) + print("Author: %s" % record['author']) + print("Title: %s" % record['title']) + print("ID Code: %s" % record['idcode']) break - print "-"*50 - print "Successfully read %i records of which:" % \ - (len(records)) + print("-"*50) + print("Successfully read %i records of which:" % \ + (len(records))) # Create convenience list of record types rectypes = [r['rectype'] for r in records] for rt in set(rectypes): if rt != 'END_MARKER': - print " - %i are of type %s" % (rectypes.count(rt), rt) - print "-"*50 + print(" - %i are of type %s" % (rectypes.count(rt), rt)) + print("-"*50) if 'VARIABLE' in rectypes: - print "Available variables:" + print("Available variables:") for var in variables: - print " - %s [%s]" % (var, type(variables[var])) - print "-"*50 + 
print(" - %s [%s]" % (var, type(variables[var]))) + print("-"*50) if idict: for var in variables: diff --git a/scipy/io/matlab/__init__.py b/scipy/io/matlab/__init__.py index a5b1e724c05c..f6ac1a0b43a7 100644 --- a/scipy/io/matlab/__init__.py +++ b/scipy/io/matlab/__init__.py @@ -7,9 +7,11 @@ Drive, Natick, MA 01760-2098, USA. """ +from __future__ import division, print_function, absolute_import + # Matlab file read and write utilities -from mio import loadmat, savemat, whosmat -import byteordercodes +from .mio import loadmat, savemat, whosmat +from . import byteordercodes __all__ = ['loadmat', 'savemat', 'whosmat', 'byteordercodes'] diff --git a/scipy/io/matlab/benchmarks/bench_structarr.py b/scipy/io/matlab/benchmarks/bench_structarr.py index c3a86c1f226a..7837f69e26eb 100644 --- a/scipy/io/matlab/benchmarks/bench_structarr.py +++ b/scipy/io/matlab/benchmarks/bench_structarr.py @@ -1,7 +1,8 @@ -from __future__ import division +from __future__ import division, print_function, absolute_import + from numpy.testing import * -from cStringIO import StringIO +from io import StringIO import numpy as np import scipy.io as sio @@ -19,24 +20,24 @@ def make_structarr(n_vars, n_fields, n_structs): def bench_run(): str_io = StringIO() - print - print 'Read / writing matlab structs' - print '='*60 - print ' write | read | vars | fields | structs ' - print '-'*60 - print + print() + print('Read / writing matlab structs') + print('='*60) + print(' write | read | vars | fields | structs ') + print('-'*60) + print() for n_vars, n_fields, n_structs in ( (10, 10, 20),): var_dict = make_structarr(n_vars, n_fields, n_structs) str_io = StringIO() write_time = measure('sio.savemat(str_io, var_dict)') read_time = measure('sio.loadmat(str_io)') - print '%.5f | %.5f | %5d | %5d | %5d ' % ( + print('%.5f | %.5f | %5d | %5d | %5d ' % ( write_time, read_time, n_vars, n_fields, - n_structs) + n_structs)) if __name__ == '__main__' : diff --git a/scipy/io/matlab/byteordercodes.py b/scipy/io/matlab/byteordercodes.py index 40ab4aa50e46..461a5a92d032 100644 --- a/scipy/io/matlab/byteordercodes.py +++ b/scipy/io/matlab/byteordercodes.py @@ -5,6 +5,7 @@ codes - one of '<' (little endian) or '>' (big endian) ''' +from __future__ import division, print_function, absolute_import import sys diff --git a/scipy/io/matlab/mio.py b/scipy/io/matlab/mio.py index 1181a71e1b33..fd5739a94923 100644 --- a/scipy/io/matlab/mio.py +++ b/scipy/io/matlab/mio.py @@ -1,19 +1,19 @@ -"""Module for reading and writing MATLAB .mat files""" -# Authors: Travis Oliphant, Matthew Brett - """ Module for reading and writing matlab (TM) .mat files """ +# Authors: Travis Oliphant, Matthew Brett + +from __future__ import division, print_function, absolute_import import os import sys import warnings -from numpy.compat import asbytes +from scipy.lib.six import string_types -from miobase import get_matfile_version, docfiller -from mio4 import MatFile4Reader, MatFile4Writer -from mio5 import MatFile5Reader, MatFile5Writer +from .miobase import get_matfile_version, docfiller +from .mio4 import MatFile4Reader, MatFile4Writer +from .mio5 import MatFile5Reader, MatFile5Writer __all__ = ['find_mat_file', 'mat_reader_factory', 'loadmat', 'savemat', 'whosmat'] @@ -61,7 +61,7 @@ def find_mat_file(file_name, appendmat=True): def _open_file(file_like, appendmat): ''' Open `file_like` and return as file-like object ''' - if isinstance(file_like, basestring): + if isinstance(file_like, string_types): try: return open(file_like, 'rb') except IOError: @@ -178,7 +178,7 @@ def 
loadmat(file_name, mdict=None, appendmat=True, **kwargs): mdict.update(matfile_dict) else: mdict = matfile_dict - if isinstance(file_name, basestring): + if isinstance(file_name, string_types): MR.mat_stream.close() return mdict @@ -242,14 +242,14 @@ def savemat(file_name, mdict, place. """ - file_is_string = isinstance(file_name, basestring) + file_is_string = isinstance(file_name, string_types) if file_is_string: if appendmat and file_name[-4:] != ".mat": file_name = file_name + ".mat" file_stream = open(file_name, 'wb') else: try: - file_name.write(asbytes('')) + file_name.write(b'') except AttributeError: raise IOError('Writer needs file name or writeable ' 'file-like object') @@ -305,7 +305,7 @@ def whosmat(file_name, appendmat=True, **kwargs): """ ML = mat_reader_factory(file_name, **kwargs) variables = ML.list_variables() - if isinstance(file_name, basestring): + if isinstance(file_name, string_types): ML.mat_stream.close() return variables diff --git a/scipy/io/matlab/mio4.py b/scipy/io/matlab/mio4.py index fe09f98bfe11..b827f823c258 100644 --- a/scipy/io/matlab/mio4.py +++ b/scipy/io/matlab/mio4.py @@ -1,5 +1,7 @@ ''' Classes for read / write of matlab (TM) 4 files ''' +from __future__ import division, print_function, absolute_import + import sys import warnings @@ -8,11 +10,14 @@ import scipy.sparse -from miobase import MatFileReader, docfiller, matdims, \ +from scipy.lib.six import string_types + +from .miobase import MatFileReader, docfiller, matdims, \ read_dtype, convert_dtypes, arr_to_chars, arr_dtype_number, \ MatWriteError -from mio_utils import squeeze_element, chars_to_strings +from .mio_utils import squeeze_element, chars_to_strings +from functools import reduce SYS_LITTLE_ENDIAN = sys.byteorder == 'little' @@ -103,7 +108,7 @@ def __init__(self, file_reader): def read_header(self): ''' Read and return header for variable ''' data = read_dtype(self.mat_stream, self.dtypes['header']) - name = self.mat_stream.read(int(data['namlen'])).strip(asbytes('\x00')) + name = self.mat_stream.read(int(data['namlen'])).strip(b'\x00') if data['mopt'] < 0 or data['mopt'] > 5000: raise ValueError('Mat 4 mopt wrong format, byteswapping problem?') M, rest = divmod(data['mopt'], 1000) # order code @@ -372,7 +377,7 @@ def get_variables(self, variable_names=None): variable name, or sequence of variable names to get from Mat file / file stream. If None, then get all variables in file ''' - if isinstance(variable_names, basestring): + if isinstance(variable_names, string_types): variable_names = [variable_names] self.mat_stream.seek(0) # set up variable reader diff --git a/scipy/io/matlab/mio5.py b/scipy/io/matlab/mio5.py index 98bb7392722f..0019a0edf716 100644 --- a/scipy/io/matlab/mio5.py +++ b/scipy/io/matlab/mio5.py @@ -6,6 +6,7 @@ (as of December 5 2008) ''' +from __future__ import division, print_function, absolute_import ''' ================================= @@ -75,10 +76,9 @@ import time import sys import zlib -if sys.version_info[0] >= 3: - from io import BytesIO -else: - from cStringIO import StringIO as BytesIO + +from io import BytesIO + import warnings import numpy as np @@ -86,17 +86,19 @@ import scipy.sparse -import byteordercodes as boc +from scipy.lib.six import string_types + +from . 
import byteordercodes as boc -from miobase import MatFileReader, docfiller, matdims, \ +from .miobase import MatFileReader, docfiller, matdims, \ read_dtype, arr_to_chars, arr_dtype_number, \ MatWriteError, MatReadError, MatReadWarning # Reader object for matlab 5 format variables -from mio5_utils import VarReader5 +from .mio5_utils import VarReader5 # Constants and helper objects -from mio5_params import MatlabObject, MatlabFunction, \ +from .mio5_params import MatlabObject, MatlabFunction, \ MDTYPES, NP_TO_MTYPES, NP_TO_MXTYPES, \ miCOMPRESSED, miMATRIX, miINT8, miUTF8, miUINT32, \ mxCELL_CLASS, mxSTRUCT_CLASS, mxOBJECT_CLASS, mxCHAR_CLASS, \ @@ -166,14 +168,14 @@ def guess_byte_order(self): self.mat_stream.seek(126) mi = self.mat_stream.read(2) self.mat_stream.seek(0) - return mi == asbytes('IM') and '<' or '>' + return mi == b'IM' and '<' or '>' def read_file_header(self): ''' Read in mat 5 file header ''' hdict = {} hdr_dtype = MDTYPES[self.byte_order]['dtypes']['file_header'] hdr = read_dtype(self.mat_stream, hdr_dtype) - hdict['__header__'] = hdr['description'].item().strip(asbytes(' \t\n\000')) + hdict['__header__'] = hdr['description'].item().strip(b' \t\n\000') v_major = hdr['version'] >> 8 v_minor = hdr['version'] & 0xFF hdict['__version__'] = '%d.%d' % (v_major, v_minor) @@ -223,7 +225,7 @@ def read_var_header(self): dcor = zlib.decompressobj() stream = BytesIO(dcor.decompress(data)) # Check the stream is not so broken as to leave cruft behind - if not dcor.flush() == asbytes(''): + if not dcor.flush() == b'': raise ValueError("Something wrong with byte stream.") del data self._matrix_reader.set_stream(stream) @@ -261,7 +263,7 @@ def get_variables(self, variable_names=None): If variable_names is None, then get all variables in file ''' - if isinstance(variable_names, basestring): + if isinstance(variable_names, string_types): variable_names = [variable_names] self.mat_stream.seek(0) # Here we pass all the parameters in self to the reading objects @@ -290,7 +292,7 @@ def get_variables(self, variable_names=None): continue try: res = self.read_var_array(hdr, process) - except MatReadError, err: + except MatReadError as err: warnings.warn( 'Unreadable variable "%s", because "%s"' % \ (name, err), @@ -470,7 +472,7 @@ def to_writeable(source): dtype = [] values = [] for field, value in source.items(): - if (isinstance(field, basestring) and + if (isinstance(field, string_types) and not field[0] in '_0123456789'): dtype.append((field,object)) values.append(value) @@ -542,7 +544,7 @@ def write_regular_element(self, arr, mdtype, byte_count): # pad to next 64-bit boundary bc_mod_8 = byte_count % 8 if bc_mod_8: - self.file_stream.write(asbytes('\x00') * (8-bc_mod_8)) + self.file_stream.write(b'\x00' * (8-bc_mod_8)) def write_header(self, shape, diff --git a/scipy/io/matlab/mio5_params.py b/scipy/io/matlab/mio5_params.py index 4d060e315062..ed2f1d7d92fc 100644 --- a/scipy/io/matlab/mio5_params.py +++ b/scipy/io/matlab/mio5_params.py @@ -4,10 +4,11 @@ If you make changes in this file, don't forget to change mio5_utils.pyx ''' +from __future__ import division, print_function, absolute_import import numpy as np -from miobase import convert_dtypes +from .miobase import convert_dtypes miINT8 = 1 miUINT8 = 2 diff --git a/scipy/io/matlab/miobase.py b/scipy/io/matlab/miobase.py index 442154f93927..ba9cfbd9c58c 100644 --- a/scipy/io/matlab/miobase.py +++ b/scipy/io/matlab/miobase.py @@ -5,9 +5,10 @@ MATLAB is a registered trademark of the Mathworks inc. 
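Throughout the mio readers and writers above, isinstance(x, basestring) becomes isinstance(x, string_types) so that filename-versus-file-object checks keep working on Python 3, where basestring is gone. A rough standalone equivalent of the scipy.lib.six shim (the exact definition there may differ):

    try:
        string_types = (str, unicode)   # Python 2: covers str and unicode
    except NameError:
        string_types = (str,)           # Python 3: str is already unicode

    import io

    def looks_like_filename(file_like):
        # illustrative helper, not from scipy
        return isinstance(file_like, string_types)

    assert looks_like_filename("data.mat")
    assert not looks_like_filename(io.BytesIO(b""))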
""" +from __future__ import division, print_function, absolute_import + import sys import numpy as np -from numpy.compat import asbytes if sys.version_info[0] >= 3: byteord = int @@ -16,7 +17,7 @@ from scipy.misc import doccer -import byteordercodes as boc +from . import byteordercodes as boc class MatReadError(Exception): pass @@ -211,7 +212,7 @@ def get_matfile_version(fileobj): fileobj.seek(124) tst_str = fileobj.read(4) fileobj.seek(0) - maj_ind = int(tst_str[2] == asbytes('I')[0]) + maj_ind = int(tst_str[2] == b'I'[0]) maj_val = byteord(tst_str[maj_ind]) min_val = byteord(tst_str[1-maj_ind]) ret = (maj_val, min_val) diff --git a/scipy/io/matlab/setup.py b/scipy/io/matlab/setup.py index d11d5c99ced7..b161ffa7782c 100755 --- a/scipy/io/matlab/setup.py +++ b/scipy/io/matlab/setup.py @@ -1,4 +1,5 @@ #!/usr/bin/env python +from __future__ import division, print_function, absolute_import def configuration(parent_package='io',top_path=None): from numpy.distutils.misc_util import Configuration diff --git a/scipy/io/matlab/setupscons.py b/scipy/io/matlab/setupscons.py index c95ccf929c61..040fdee602e8 100755 --- a/scipy/io/matlab/setupscons.py +++ b/scipy/io/matlab/setupscons.py @@ -1,4 +1,5 @@ #!/usr/bin/env python +from __future__ import division, print_function, absolute_import def configuration(parent_package='io',top_path=None): from numpy.distutils.misc_util import Configuration diff --git a/scipy/io/matlab/tests/test_byteordercodes.py b/scipy/io/matlab/tests/test_byteordercodes.py index 5a70758e9ef0..81b08319ef9e 100644 --- a/scipy/io/matlab/tests/test_byteordercodes.py +++ b/scipy/io/matlab/tests/test_byteordercodes.py @@ -1,5 +1,7 @@ ''' Tests for byteorder module ''' +from __future__ import division, print_function, absolute_import + import sys from numpy.testing import assert_raises, assert_, run_module_suite diff --git a/scipy/io/matlab/tests/test_mio.py b/scipy/io/matlab/tests/test_mio.py index 304cf203aa12..bb33a292126c 100644 --- a/scipy/io/matlab/tests/test_mio.py +++ b/scipy/io/matlab/tests/test_mio.py @@ -4,14 +4,13 @@ Need function load / save / roundtrip tests ''' +from __future__ import division, print_function, absolute_import + import sys import os from os.path import join as pjoin, dirname from glob import glob -if sys.version_info[0] >= 3: - from io import BytesIO -else: - from StringIO import StringIO as BytesIO +from io import BytesIO from tempfile import mkdtemp # functools is only available in Python >=2.5 try: @@ -19,6 +18,8 @@ except ImportError: from scipy.io.arff.myfunctools import partial +from scipy.lib.six import u, text_type, string_types + import warnings import shutil import gzip @@ -74,7 +75,7 @@ def mlarr(*args, **kwargs): {'name': 'string', 'classes': {'teststring': 'char'}, 'expected': {'teststring': - array([u'"Do nine men interpret?" "Nine men," I nod.'])}, + array([u('"Do nine men interpret?" 
"Nine men," I nod.')])}, }) case_table4.append( {'name': 'complex', @@ -82,8 +83,8 @@ def mlarr(*args, **kwargs): 'expected': {'testcomplex': np.cos(theta) + 1j*np.sin(theta)} }) A = np.zeros((3,5)) -A[0] = range(1,6) -A[:,0] = range(1,4) +A[0] = list(range(1,6)) +A[:,0] = list(range(1,4)) case_table4.append( {'name': 'matrix', 'classes': {'testmatrix': 'double'}, @@ -115,7 +116,7 @@ def mlarr(*args, **kwargs): case_table4.append( {'name': 'onechar', 'classes': {'testonechar': 'char'}, - 'expected': {'testonechar': array([u'r'])}, + 'expected': {'testonechar': array([u('r')])}, }) # Cell arrays stored as object arrays CA = mlarr(( # tuple for object array creation @@ -124,7 +125,7 @@ def mlarr(*args, **kwargs): mlarr([[1,2]]), mlarr([[1,2,3]])), dtype=object).reshape(1,-1) CA[0,0] = array( - [u'This cell contains this string and 3 arrays of increasing length']) + [u('This cell contains this string and 3 arrays of increasing length')]) case_table5 = [ {'name': 'cell', 'classes': {'testcell': 'cell'}, @@ -150,18 +151,18 @@ def mlarr(*args, **kwargs): {'name': 'stringarray', 'classes': {'teststringarray': 'char'}, 'expected': {'teststringarray': array( - [u'one ', u'two ', u'three'])}, + [u('one '), u('two '), u('three')])}, }) case_table5.append( {'name': '3dmatrix', 'classes': {'test3dmatrix': 'double'}, 'expected': { - 'test3dmatrix': np.transpose(np.reshape(range(1,25), (4,3,2)))} + 'test3dmatrix': np.transpose(np.reshape(list(range(1,25)), (4,3,2)))} }) st_sub_arr = array([np.sqrt(2),np.exp(1),np.pi]).reshape(1,3) dtype = [(n, object) for n in ['stringfield', 'doublefield', 'complexfield']] st1 = np.zeros((1,1), dtype) -st1['stringfield'][0,0] = array([u'Rats live on no evil star.']) +st1['stringfield'][0,0] = array([u('Rats live on no evil star.')]) st1['doublefield'][0,0] = st_sub_arr st1['complexfield'][0,0] = st_sub_arr * (1 + 1j) case_table5.append( @@ -185,7 +186,7 @@ def mlarr(*args, **kwargs): st2 = np.empty((1,1), dtype=[(n, object) for n in ['one', 'two']]) st2[0,0]['one'] = mlarr(1) st2[0,0]['two'] = np.empty((1,1), dtype=[('three', object)]) -st2[0,0]['two'][0,0]['three'] = array([u'number 3']) +st2[0,0]['two'][0,0]['three'] = array([u('number 3')]) case_table5.append( {'name': 'structnest', 'classes': {'teststructnest': 'struct'}, @@ -194,8 +195,8 @@ def mlarr(*args, **kwargs): a = np.empty((1,2), dtype=[(n, object) for n in ['one', 'two']]) a[0,0]['one'] = mlarr(1) a[0,0]['two'] = mlarr(2) -a[0,1]['one'] = array([u'number 1']) -a[0,1]['two'] = array([u'number 2']) +a[0,1]['one'] = array([u('number 1')]) +a[0,1]['two'] = array([u('number 2')]) case_table5.append( {'name': 'structarr', 'classes': {'teststructarr': 'struct'}, @@ -206,9 +207,9 @@ def mlarr(*args, **kwargs): 'isEmpty', 'numArgs', 'version']]) MO = MatlabObject(np.zeros((1,1), dtype=ODT), 'inline') m0 = MO[0,0] -m0['expr'] = array([u'x']) -m0['inputExpr'] = array([u' x = INLINE_INPUTS_{1};']) -m0['args'] = array([u'x']) +m0['expr'] = array([u('x')]) +m0['inputExpr'] = array([u(' x = INLINE_INPUTS_{1};')]) +m0['args'] = array([u('x')]) m0['isEmpty'] = mlarr(0) m0['numArgs'] = mlarr(1) m0['version'] = mlarr(1) @@ -299,7 +300,7 @@ def _check_level(label, expected, actual): _check_level(level_label, expected[fn], actual[fn]) return - if ex_dtype.type in (np.unicode, # string + if ex_dtype.type in (text_type, # string np.unicode_): assert_equal(actual, expected, err_msg=label) return @@ -678,8 +679,8 @@ def test_skip_variable(): # Prove that it loads with loadmat # d = loadmat(filename, struct_as_record=True) - yield 
assert_true, d.has_key('first') - yield assert_true, d.has_key('second') + yield assert_true, 'first' in d + yield assert_true, 'second' in d # # Make the factory # @@ -688,7 +689,7 @@ def test_skip_variable(): # This is where the factory breaks with an error in MatMatrixGetter.to_next # d = factory.get_variables('second') - yield assert_true, d.has_key('second') + yield assert_true, 'second' in d factory.mat_stream.close() @@ -894,7 +895,7 @@ def test_scalar_squeeze(): savemat_future(stream, in_d) out_d = loadmat(stream, squeeze_me=True) assert_true(isinstance(out_d['scalar'], float)) - assert_true(isinstance(out_d['string'], basestring)) + assert_true(isinstance(out_d['string'], string_types)) assert_true(isinstance(out_d['st'], np.ndarray)) diff --git a/scipy/io/matlab/tests/test_mio5_utils.py b/scipy/io/matlab/tests/test_mio5_utils.py index c760f6282e41..a8fcb45f9d2d 100644 --- a/scipy/io/matlab/tests/test_mio5_utils.py +++ b/scipy/io/matlab/tests/test_mio5_utils.py @@ -1,25 +1,23 @@ """ Testing """ +from __future__ import division, print_function, absolute_import + import sys -if sys.version_info[0] >= 3: - from io import BytesIO - cStringIO = BytesIO -else: - from cStringIO import StringIO as cStringIO - from StringIO import StringIO as BytesIO +from io import BytesIO +cStringIO = BytesIO import numpy as np -from numpy.compat import asbytes - from nose.tools import assert_true, assert_false, \ assert_equal, assert_raises from numpy.testing import assert_array_equal, assert_array_almost_equal, \ run_module_suite +from scipy.lib.six import u + import scipy.io.matlab.byteordercodes as boc import scipy.io.matlab.streams as streams import scipy.io.matlab.mio5_params as mio5p @@ -173,21 +171,21 @@ def test_zero_byte_string(): hdr = m5u.VarHeader5() # Try when string is 1 length hdr.set_dims([1,]) - _write_stream(str_io, tag.tostring() + asbytes(' ')) + _write_stream(str_io, tag.tostring() + b' ') str_io.seek(0) val = c_reader.read_char(hdr) - assert_equal(val, u' ') + assert_equal(val, u(' ')) # Now when string has 0 bytes 1 length tag['byte_count'] = 0 _write_stream(str_io, tag.tostring()) str_io.seek(0) val = c_reader.read_char(hdr) - assert_equal(val, u' ') + assert_equal(val, u(' ')) # Now when string has 0 bytes 4 length str_io.seek(0) hdr.set_dims([4,]) val = c_reader.read_char(hdr) - assert_array_equal(val, [u' '] * 4) + assert_array_equal(val, [u(' ')] * 4) if __name__ == "__main__": diff --git a/scipy/io/matlab/tests/test_mio_funcs.py b/scipy/io/matlab/tests/test_mio_funcs.py index b12416aca30e..f13c68b9bdf7 100644 --- a/scipy/io/matlab/tests/test_mio_funcs.py +++ b/scipy/io/matlab/tests/test_mio_funcs.py @@ -3,13 +3,12 @@ of mat file. ''' +from __future__ import division, print_function, absolute_import + from os.path import join as pjoin, dirname import sys -if sys.version_info[0] >= 3: - from io import BytesIO -else: - from cStringIO import StringIO as BytesIO +from io import BytesIO from numpy.testing import \ assert_array_equal, \ @@ -20,7 +19,7 @@ from nose.tools import assert_true import numpy as np -from numpy.compat import asbytes, asstr +from numpy.compat import asstr from scipy.io.matlab.mio5 import MatlabObject, MatFile5Writer, \ MatFile5Reader, MatlabFunction @@ -54,7 +53,7 @@ def read_workspace_vars(fname): rdr.mat_stream = ws_bs # Guess byte order. 
mi = rdr.mat_stream.read(2) - rdr.byte_order = mi == asbytes('IM') and '<' or '>' + rdr.byte_order = mi == b'IM' and '<' or '>' rdr.mat_stream.read(4) # presumably byte padding mdict = read_minimat_vars(rdr) fp.close() diff --git a/scipy/io/matlab/tests/test_mio_utils.py b/scipy/io/matlab/tests/test_mio_utils.py index 96dc0cb492d0..ca7a143fb05d 100644 --- a/scipy/io/matlab/tests/test_mio_utils.py +++ b/scipy/io/matlab/tests/test_mio_utils.py @@ -2,6 +2,8 @@ """ +from __future__ import division, print_function, absolute_import + import numpy as np from numpy.testing import assert_array_equal, assert_array_almost_equal, \ diff --git a/scipy/io/matlab/tests/test_pathological.py b/scipy/io/matlab/tests/test_pathological.py index 1ab7855a2e53..7b36076b00a8 100644 --- a/scipy/io/matlab/tests/test_pathological.py +++ b/scipy/io/matlab/tests/test_pathological.py @@ -2,6 +2,8 @@ We try and read any file that matlab reads, these files included """ +from __future__ import division, print_function, absolute_import + from os.path import dirname, join as pjoin import sys @@ -9,8 +11,8 @@ from io import BytesIO cStringIO = BytesIO else: - from cStringIO import StringIO as cStringIO - from StringIO import StringIO as BytesIO + from io import StringIO as cStringIO + from io import StringIO as BytesIO import numpy as np diff --git a/scipy/io/matlab/tests/test_streams.py b/scipy/io/matlab/tests/test_streams.py index 57a0f6629cac..60d026fd0ab1 100644 --- a/scipy/io/matlab/tests/test_streams.py +++ b/scipy/io/matlab/tests/test_streams.py @@ -2,21 +2,22 @@ """ +from __future__ import division, print_function, absolute_import + import os import sys +from io import BytesIO + if sys.version_info[0] >= 3: - from io import BytesIO cStringIO = BytesIO else: from cStringIO import StringIO as cStringIO - from StringIO import StringIO as BytesIO from tempfile import mkstemp import numpy as np -from numpy.compat import asbytes from nose.tools import assert_true, assert_false, \ assert_equal, assert_raises @@ -30,7 +31,7 @@ def setup(): - val = asbytes('a\x00string') + val = b'a\x00string' global fs, gs, cs, fname fd, fname = mkstemp() fs = os.fdopen(fd, 'wb') @@ -81,23 +82,23 @@ def test_read(): st = make_stream(s) st.seek(0) res = st.read(-1) - yield assert_equal, res, asbytes('a\x00string') + yield assert_equal, res, b'a\x00string' st.seek(0) res = st.read(4) - yield assert_equal, res, asbytes('a\x00st') + yield assert_equal, res, b'a\x00st' # read into st.seek(0) res = _read_into(st, 4) - yield assert_equal, res, asbytes('a\x00st') + yield assert_equal, res, b'a\x00st' res = _read_into(st, 4) - yield assert_equal, res, asbytes('ring') + yield assert_equal, res, b'ring' yield assert_raises, IOError, _read_into, st, 2 # read alloc st.seek(0) res = _read_string(st, 4) - yield assert_equal, res, asbytes('a\x00st') + yield assert_equal, res, b'a\x00st' res = _read_string(st, 4) - yield assert_equal, res, asbytes('ring') + yield assert_equal, res, b'ring' yield assert_raises, IOError, _read_string, st, 2 if __name__ == "__main__": diff --git a/scipy/io/mmio.py b/scipy/io/mmio.py index dcd940371d20..8d57ec618dab 100644 --- a/scipy/io/mmio.py +++ b/scipy/io/mmio.py @@ -9,6 +9,8 @@ # http://math.nist.gov/MatrixMarket/ # +from __future__ import division, print_function, absolute_import + import os from numpy import asarray, real, imag, conj, zeros, ndarray, concatenate, \ ones, ascontiguousarray, vstack, savetxt, fromfile, fromstring @@ -195,7 +197,7 @@ def info(self, source): format = self.FORMAT_COORDINATE # skip comments - 
while line.startswith(asbytes('%')): line = source.readline() + while line.startswith(b'%'): line = source.readline() line = line.split() if format == self.FORMAT_ARRAY: @@ -362,7 +364,7 @@ def _parse_body(self, stream): i,j = 0,0 while line: line = stream.readline() - if not line or line.startswith(asbytes('%')): + if not line or line.startswith(b'%'): continue if is_complex: aij = complex(*map(float,line.split())) @@ -394,7 +396,7 @@ def _parse_body(self, stream): k = 0 while line: line = stream.readline() - if not line or line.startswith(asbytes('%')): + if not line or line.startswith(b'%'): continue l = line.split() i,j = map(int,l[:2]) @@ -610,8 +612,8 @@ def _write(self, stream, a, comment='', field=None, precision=None): import sys import time for filename in sys.argv[1:]: - print 'Reading',filename,'...', + print('Reading',filename,'...', end=' ') sys.stdout.flush() t = time.time() mmread(filename) - print 'took %s seconds' % (time.time() - t) + print('took %s seconds' % (time.time() - t)) diff --git a/scipy/io/netcdf.py b/scipy/io/netcdf.py index 072b6d35606d..86ea90bf93d2 100644 --- a/scipy/io/netcdf.py +++ b/scipy/io/netcdf.py @@ -12,6 +12,8 @@ with NetCDF files. """ +from __future__ import division, print_function, absolute_import + #TODO: # * properly implement ``_FillValue``. # * implement Jeff Whitaker's patch for masked variables. @@ -37,19 +39,22 @@ from numpy.compat import asbytes, asstr from numpy import fromstring, ndarray, dtype, empty, array, asarray from numpy import little_endian as LITTLE_ENDIAN +from functools import reduce + +from scipy.lib.six import integer_types -ABSENT = asbytes('\x00\x00\x00\x00\x00\x00\x00\x00') -ZERO = asbytes('\x00\x00\x00\x00') -NC_BYTE = asbytes('\x00\x00\x00\x01') -NC_CHAR = asbytes('\x00\x00\x00\x02') -NC_SHORT = asbytes('\x00\x00\x00\x03') -NC_INT = asbytes('\x00\x00\x00\x04') -NC_FLOAT = asbytes('\x00\x00\x00\x05') -NC_DOUBLE = asbytes('\x00\x00\x00\x06') -NC_DIMENSION = asbytes('\x00\x00\x00\n') -NC_VARIABLE = asbytes('\x00\x00\x00\x0b') -NC_ATTRIBUTE = asbytes('\x00\x00\x00\x0c') +ABSENT = b'\x00\x00\x00\x00\x00\x00\x00\x00' +ZERO = b'\x00\x00\x00\x00' +NC_BYTE = b'\x00\x00\x00\x01' +NC_CHAR = b'\x00\x00\x00\x02' +NC_SHORT = b'\x00\x00\x00\x03' +NC_INT = b'\x00\x00\x00\x04' +NC_FLOAT = b'\x00\x00\x00\x05' +NC_DOUBLE = b'\x00\x00\x00\x06' +NC_DIMENSION = b'\x00\x00\x00\n' +NC_VARIABLE = b'\x00\x00\x00\x0b' +NC_ATTRIBUTE = b'\x00\x00\x00\x0c' TYPEMAP = { NC_BYTE: ('b', 1), @@ -304,7 +309,7 @@ def flush(self): def _write(self): self.fp.seek(0) - self.fp.write(asbytes('CDF')) + self.fp.write(b'CDF') self.fp.write(array(self.version_byte, '>b').tostring()) # Write headers and data. @@ -415,7 +420,7 @@ def _write_var_data(self, name): if not var.isrec: self.fp.write(var.data.tostring()) count = var.data.size * var.data.itemsize - self.fp.write(asbytes('0') * (var._vsize - count)) + self.fp.write(b'0' * (var._vsize - count)) else: # record variable # Handle rec vars with shape[0] < nrecs. 
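The mmio and netcdf hunks in this stretch swap numpy.compat.asbytes('...') calls for plain b'...' literals, which Python 2.6+ and Python 3 both accept; the values being compared (magic numbers, comment markers) are bytes read from binary streams. A minimal sketch, not part of the patch:

    import io

    fp = io.BytesIO(b'CDF\x01...rest of header...')
    assert fp.read(3) == b'CDF'                # netcdf-style magic check
    assert b'% comment line'.startswith(b'%')  # mmio-style comment skip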
if self._recs > len(var.data): @@ -433,7 +438,7 @@ def _write_var_data(self, name): self.fp.write(rec.tostring()) # Padding count = rec.size * rec.itemsize - self.fp.write(asbytes('0') * (var._vsize - count)) + self.fp.write(b'0' * (var._vsize - count)) pos += self._recsize self.fp.seek(pos) self.fp.seek(pos0 + var._vsize) @@ -442,11 +447,10 @@ def _write_values(self, values): if hasattr(values, 'dtype'): nc_type = REVERSE[values.dtype.char, values.dtype.itemsize] else: - types = [ - (int, NC_INT), - (long, NC_INT), + types = [(t, NC_INT) for t in integer_types] + types += [ (float, NC_FLOAT), - (basestring, NC_CHAR), + (str, NC_CHAR), ] try: sample = values[0] @@ -473,12 +477,12 @@ def _write_values(self, values): values = values.byteswap() self.fp.write(values.tostring()) count = values.size * values.itemsize - self.fp.write(asbytes('0') * (-count % 4)) # pad + self.fp.write(b'0' * (-count % 4)) # pad def _read(self): # Check magic bytes and version magic = self.fp.read(3) - if not magic == asbytes('CDF'): + if not magic == b'CDF': raise TypeError("Error: %s is not a valid NetCDF 3 file" % self.filename) self.__dict__['version_byte'] = fromstring(self.fp.read(1), '>b')[0] @@ -643,7 +647,7 @@ def _read_values(self): values = fromstring(values, dtype='>%s' % typecode) if values.shape == (1,): values = values[0] else: - values = values.rstrip(asbytes('\x00')) + values = values.rstrip(b'\x00') return values def _pack_begin(self, begin): @@ -670,11 +674,11 @@ def _pack_string(self, s): count = len(s) self._pack_int(count) self.fp.write(asbytes(s)) - self.fp.write(asbytes('0') * (-count % 4)) # pad + self.fp.write(b'0' * (-count % 4)) # pad def _unpack_string(self): count = self._unpack_int() - s = self.fp.read(count).rstrip(asbytes('\x00')) + s = self.fp.read(count).rstrip(b'\x00') self.fp.read(-count % 4) # read padding return s diff --git a/scipy/io/setup.py b/scipy/io/setup.py index 32f167c38bd7..e466ebb7883c 100755 --- a/scipy/io/setup.py +++ b/scipy/io/setup.py @@ -1,4 +1,5 @@ #!/usr/bin/env python +from __future__ import division, print_function, absolute_import def configuration(parent_package='',top_path=None): from numpy.distutils.misc_util import Configuration diff --git a/scipy/io/setupscons.py b/scipy/io/setupscons.py index ee4eaccc61c4..c1a01f3a5485 100755 --- a/scipy/io/setupscons.py +++ b/scipy/io/setupscons.py @@ -1,4 +1,5 @@ #!/usr/bin/env python +from __future__ import division, print_function, absolute_import def configuration(parent_package='',top_path=None): from numpy.distutils.misc_util import Configuration diff --git a/scipy/io/tests/test_idl.py b/scipy/io/tests/test_idl.py index 74927642fb07..8655b7f1e65e 100644 --- a/scipy/io/tests/test_idl.py +++ b/scipy/io/tests/test_idl.py @@ -1,10 +1,11 @@ +from __future__ import division, print_function, absolute_import + from os import path import warnings DATA_PATH = path.join(path.dirname(__file__), 'data') import numpy as np -from numpy.compat import asbytes_nested, asbytes from numpy.testing import assert_equal, assert_array_equal, run_module_suite from numpy.testing.utils import WarningManager from nose.tools import assert_true @@ -132,7 +133,7 @@ def test_compressed(self): assert_identical(s.arrays.a[0], np.array([1, 2, 3], dtype=np.int16)) assert_identical(s.arrays.b[0], np.array([4., 5., 6., 7.], dtype=np.float32)) assert_identical(s.arrays.c[0], np.array([np.complex64(1+2j), np.complex64(7+8j)])) - assert_identical(s.arrays.d[0], np.array(asbytes_nested(["cheese", "bacon", "spam"]), dtype=np.object)) + 
assert_identical(s.arrays.d[0], np.array([b"cheese", b"bacon", b"spam"], dtype=np.object)) class TestArrayDimensions: @@ -180,7 +181,7 @@ def test_scalars(self): assert_identical(s.scalars.b, np.array(np.int32(2))) assert_identical(s.scalars.c, np.array(np.float32(3.))) assert_identical(s.scalars.d, np.array(np.float64(4.))) - assert_identical(s.scalars.e, np.array(asbytes_nested(["spam"]), dtype=np.object)) + assert_identical(s.scalars.e, np.array([b"spam"], dtype=np.object)) assert_identical(s.scalars.f, np.array(np.complex64(-1.+3j))) def test_scalars_replicated(self): @@ -189,7 +190,7 @@ def test_scalars_replicated(self): assert_identical(s.scalars_rep.b, np.repeat(np.int32(2), 5)) assert_identical(s.scalars_rep.c, np.repeat(np.float32(3.), 5)) assert_identical(s.scalars_rep.d, np.repeat(np.float64(4.), 5)) - assert_identical(s.scalars_rep.e, np.repeat(asbytes("spam"), 5).astype(np.object)) + assert_identical(s.scalars_rep.e, np.repeat(b"spam", 5).astype(np.object)) assert_identical(s.scalars_rep.f, np.repeat(np.complex64(-1.+3j), 5)) def test_scalars_replicated_3d(self): @@ -198,7 +199,7 @@ def test_scalars_replicated_3d(self): assert_identical(s.scalars_rep.b, np.repeat(np.int32(2), 24).reshape(4, 3, 2)) assert_identical(s.scalars_rep.c, np.repeat(np.float32(3.), 24).reshape(4, 3, 2)) assert_identical(s.scalars_rep.d, np.repeat(np.float64(4.), 24).reshape(4, 3, 2)) - assert_identical(s.scalars_rep.e, np.repeat(asbytes("spam"), 24).reshape(4, 3, 2).astype(np.object)) + assert_identical(s.scalars_rep.e, np.repeat(b"spam", 24).reshape(4, 3, 2).astype(np.object)) assert_identical(s.scalars_rep.f, np.repeat(np.complex64(-1.+3j), 24).reshape(4, 3, 2)) def test_arrays(self): @@ -206,7 +207,7 @@ def test_arrays(self): assert_array_identical(s.arrays.a[0], np.array([1, 2, 3], dtype=np.int16)) assert_array_identical(s.arrays.b[0], np.array([4., 5., 6., 7.], dtype=np.float32)) assert_array_identical(s.arrays.c[0], np.array([np.complex64(1+2j), np.complex64(7+8j)])) - assert_array_identical(s.arrays.d[0], np.array(asbytes_nested(["cheese", "bacon", "spam"]), dtype=np.object)) + assert_array_identical(s.arrays.d[0], np.array([b"cheese", b"bacon", b"spam"], dtype=np.object)) def test_arrays_replicated(self): @@ -229,7 +230,7 @@ def test_arrays_replicated(self): assert_array_identical(s.arrays_rep.a[i], np.array([1, 2, 3], dtype=np.int16)) assert_array_identical(s.arrays_rep.b[i], np.array([4., 5., 6., 7.], dtype=np.float32)) assert_array_identical(s.arrays_rep.c[i], np.array([np.complex64(1+2j), np.complex64(7+8j)])) - assert_array_identical(s.arrays_rep.d[i], np.array(asbytes_nested(["cheese", "bacon", "spam"]), dtype=np.object)) + assert_array_identical(s.arrays_rep.d[i], np.array([b"cheese", b"bacon", b"spam"], dtype=np.object)) def test_arrays_replicated_3d(self): @@ -254,7 +255,7 @@ def test_arrays_replicated_3d(self): assert_array_identical(s.arrays_rep.a[i, j, k], np.array([1, 2, 3], dtype=np.int16)) assert_array_identical(s.arrays_rep.b[i, j, k], np.array([4., 5., 6., 7.], dtype=np.float32)) assert_array_identical(s.arrays_rep.c[i, j, k], np.array([np.complex64(1+2j), np.complex64(7+8j)])) - assert_array_identical(s.arrays_rep.d[i, j, k], np.array(asbytes_nested(["cheese", "bacon", "spam"]), dtype=np.object)) + assert_array_identical(s.arrays_rep.d[i, j, k], np.array([b"cheese", b"bacon", b"spam"], dtype=np.object)) def test_inheritance(self): s = readsav(path.join(DATA_PATH, 'struct_inherit.sav'), verbose=False) diff --git a/scipy/io/tests/test_mmio.py b/scipy/io/tests/test_mmio.py index 
14b59872f633..28c4d97f4074 100644 --- a/scipy/io/tests/test_mmio.py +++ b/scipy/io/tests/test_mmio.py @@ -1,4 +1,5 @@ #!/usr/bin/env python +from __future__ import division, print_function, absolute_import from tempfile import mktemp from numpy import array,transpose diff --git a/scipy/io/tests/test_netcdf.py b/scipy/io/tests/test_netcdf.py index fa3aee21f3ec..2b02ec5bc4f4 100644 --- a/scipy/io/tests/test_netcdf.py +++ b/scipy/io/tests/test_netcdf.py @@ -1,4 +1,5 @@ ''' Tests for netcdf ''' +from __future__ import division, print_function, absolute_import import os from os.path import join as pjoin, dirname @@ -6,14 +7,10 @@ import tempfile import time import sys -if sys.version_info[0] >= 3: - from io import BytesIO -else: - from StringIO import StringIO as BytesIO +from io import BytesIO from glob import glob import numpy as np -from numpy.compat import asbytes from numpy.testing import dec, assert_ from scipy.io.netcdf import netcdf_file @@ -39,9 +36,9 @@ def make_simple(*args, **kwargs): def gen_for_simple(ncfileobj): ''' Generator for example fileobj tests ''' - yield assert_equal, ncfileobj.history, asbytes('Created for a test') + yield assert_equal, ncfileobj.history, b'Created for a test' time = ncfileobj.variables['time'] - yield assert_equal, time.units, asbytes('days since 2008-01-01') + yield assert_equal, time.units, b'days since 2008-01-01' yield assert_equal, time.shape, (N_EG_ELS,) yield assert_equal, time[-1], N_EG_ELS-1 diff --git a/scipy/io/tests/test_wavfile.py b/scipy/io/tests/test_wavfile.py index 901a018f4641..bd0a9ee14496 100644 --- a/scipy/io/tests/test_wavfile.py +++ b/scipy/io/tests/test_wavfile.py @@ -1,3 +1,5 @@ +from __future__ import division, print_function, absolute_import + import os import tempfile import warnings diff --git a/scipy/io/wavfile.py b/scipy/io/wavfile.py index 1724b6293bcc..c8f10ef3d746 100644 --- a/scipy/io/wavfile.py +++ b/scipy/io/wavfile.py @@ -8,8 +8,9 @@ `write`: Write a numpy array as a WAV file. """ +from __future__ import division, print_function, absolute_import + import numpy -from numpy.compat import asbytes import struct import warnings @@ -76,9 +77,9 @@ def _skip_unknown_chunk(fid): def _read_riff_chunk(fid): global _big_endian str1 = fid.read(4) - if str1 == asbytes('RIFX'): + if str1 == b'RIFX': _big_endian = True - elif str1 != asbytes('RIFF'): + elif str1 != b'RIFF': raise ValueError("Not a WAV file.") if _big_endian: fmt = '>I' @@ -86,9 +87,9 @@ def _read_riff_chunk(fid): fmt = '' or (data.dtype.byteorder == '=' and sys.byteorder == 'big'): diff --git a/scipy/lib/__init__.py b/scipy/lib/__init__.py index 88e651259cd6..b4175a378d6f 100644 --- a/scipy/lib/__init__.py +++ b/scipy/lib/__init__.py @@ -6,6 +6,7 @@ blas -- wrappers for BLAS/ATLAS libraries """ +from __future__ import division, print_function, absolute_import __all__ = ['lapack','blas'] diff --git a/scipy/lib/blas/__init__.py b/scipy/lib/blas/__init__.py index 82d03f74a735..ff6b54de8cc2 100644 --- a/scipy/lib/blas/__init__.py +++ b/scipy/lib/blas/__init__.py @@ -80,13 +80,14 @@ Prefixes: axpy: s,d,c,z """ +from __future__ import division, print_function, absolute_import from warnings import warn __all__ = ['fblas','cblas','get_blas_funcs'] -import fblas -import cblas +from . import fblas +from . 
import cblas from numpy import deprecate @@ -95,7 +96,7 @@ def _deprecated(): pass try: _deprecated() -except DeprecationWarning, e: +except DeprecationWarning as e: # don't fail import if DeprecationWarnings raise error -- works around # the situation with Numpy's test framework pass diff --git a/scipy/lib/blas/scons_support.py b/scipy/lib/blas/scons_support.py index f78d0474cf32..e8695b67dbe0 100644 --- a/scipy/lib/blas/scons_support.py +++ b/scipy/lib/blas/scons_support.py @@ -1,3 +1,5 @@ +from __future__ import division, print_function, absolute_import + from os.path import join as pjoin, splitext, basename as pbasename def generate_interface_emitter(target, source, env): diff --git a/scipy/lib/blas/setup.py b/scipy/lib/blas/setup.py index 18a69d62a357..2a678c9c1538 100755 --- a/scipy/lib/blas/setup.py +++ b/scipy/lib/blas/setup.py @@ -1,4 +1,5 @@ #!/usr/bin/env python +from __future__ import division, print_function, absolute_import import os import sys @@ -45,7 +46,7 @@ def configuration(parent_package='',top_path=None): atlas_version = ([v[3:-3] for k,v in blas_opt.get('define_macros',[]) \ if k=='ATLAS_INFO']+[None])[0] if atlas_version: - print ('ATLAS version: %s' % atlas_version) + print(('ATLAS version: %s' % atlas_version)) target_dir = '' diff --git a/scipy/lib/blas/setupscons.py b/scipy/lib/blas/setupscons.py index 2df548879738..87801cfe9200 100755 --- a/scipy/lib/blas/setupscons.py +++ b/scipy/lib/blas/setupscons.py @@ -1,4 +1,5 @@ #!/usr/bin/env python +from __future__ import division, print_function, absolute_import def configuration(parent_package='',top_path=None): from numpy.distutils.misc_util import Configuration diff --git a/scipy/lib/blas/tests/test_blas.py b/scipy/lib/blas/tests/test_blas.py index 943df7f8fa90..a32209d8571f 100644 --- a/scipy/lib/blas/tests/test_blas.py +++ b/scipy/lib/blas/tests/test_blas.py @@ -3,6 +3,8 @@ # Created by: Pearu Peterson, April 2002 # +from __future__ import division, print_function, absolute_import + __usage__ = """ Build linalg: python setup.py build diff --git a/scipy/lib/blas/tests/test_fblas.py b/scipy/lib/blas/tests/test_fblas.py index bf966bc29a52..14380dbf4957 100644 --- a/scipy/lib/blas/tests/test_fblas.py +++ b/scipy/lib/blas/tests/test_fblas.py @@ -6,10 +6,13 @@ # !! Complex calculations really aren't checked that carefully. # !! Only real valued complex numbers are used in tests. +from __future__ import division, print_function, absolute_import + from numpy import zeros, transpose, newaxis, shape, float32, float64, \ complex64, complex128, arange, array, common_type, conjugate from numpy.testing import assert_equal, assert_array_almost_equal, \ run_module_suite, TestCase +from scipy.lib.six.moves import xrange from scipy.lib.blas import fblas #decimal accuracy to require between Python and LAPACK/BLAS calculations diff --git a/scipy/lib/decorator.py b/scipy/lib/decorator.py index 2e8c123380e8..6d443cbbdb9a 100644 --- a/scipy/lib/decorator.py +++ b/scipy/lib/decorator.py @@ -28,12 +28,16 @@ for the documentation. 
""" +from __future__ import division, print_function, absolute_import + __version__ = '3.3.2' __all__ = ["decorator", "FunctionMaker", "partial"] import sys, re, inspect +from scipy.lib.six import exec_ + try: from functools import partial except ImportError: # for Python version < 2.5 @@ -126,7 +130,10 @@ def update(self, func, **kw): func.__name__ = self.name func.__doc__ = getattr(self, 'doc', None) func.__dict__ = getattr(self, 'dict', {}) - func.func_defaults = getattr(self, 'defaults', ()) + if sys.version_info[0] >= 3: + func.__defaults__ = getattr(self, 'defaults', ()) + else: + func.func_defaults = getattr(self, 'defaults', ()) func.__kwdefaults__ = getattr(self, 'kwonlydefaults', None) callermodule = sys._getframe(3).f_globals.get('__name__', '?') func.__module__ = getattr(self, 'module', callermodule) @@ -150,10 +157,10 @@ def make(self, src_templ, evaldict=None, addsource=False, **attrs): try: code = compile(src, '', 'single') # print >> sys.stderr, 'Compiling %s' % src - exec code in evaldict + exec_(code, evaldict) except: - print >> sys.stderr, 'Error in generated code:' - print >> sys.stderr, src + print('Error in generated code:', file=sys.stderr) + print(src, file=sys.stderr) raise func = evaldict[name] if addsource: @@ -189,7 +196,10 @@ def decorator(caller, func=None): decorator(caller, func) decorates a function using a caller. """ if func is not None: # returns a decorated function - evaldict = func.func_globals.copy() + if sys.version_info[0] >= 3: + evaldict = func.__globals__.copy() + else: + evaldict = func.func_globals.copy() evaldict['_call_'] = caller evaldict['_func_'] = func return FunctionMaker.create( @@ -200,7 +210,10 @@ def decorator(caller, func=None): return partial(decorator, caller) # otherwise assume caller is a function first = inspect.getargspec(caller)[0][0] # first arg - evaldict = caller.func_globals.copy() + if sys.version_info[0] >= 3: + evaldict = caller.__globals__.copy() + else: + evaldict = caller.func_globals.copy() evaldict['_call_'] = caller evaldict['decorator'] = decorator return FunctionMaker.create( diff --git a/scipy/lib/lapack/__init__.py b/scipy/lib/lapack/__init__.py index 3bc698cf7c08..ed380e7df5a5 100644 --- a/scipy/lib/lapack/__init__.py +++ b/scipy/lib/lapack/__init__.py @@ -136,12 +136,13 @@ """ +from __future__ import division, print_function, absolute_import __all__ = ['get_lapack_funcs','calc_lwork','flapack','clapack'] from numpy import deprecate -import calc_lwork +from . import calc_lwork # The following ensures that possibly missing flavor (C or Fortran) is # replaced with the available one. If none is available, exception @@ -152,13 +153,13 @@ def _deprecated(): pass try: _deprecated() -except DeprecationWarning, e: +except DeprecationWarning as e: # don't fail import if DeprecationWarnings raise error -- works around # the situation with Numpy's test framework pass -import flapack -import clapack +from . import flapack +from . 
import clapack _use_force_clapack = 1 if hasattr(clapack,'empty_module'): @@ -220,7 +221,7 @@ def get_lapack_funcs(names,arrays=(),debug=0,force_clapack=1): func2 = getattr(m2,func_name,None) if func2 is not None: import new - exec _colmajor_func_template % {'func_name':func_name} + exec(_colmajor_func_template % {'func_name':func_name}) func = new.function(func_code,{'clapack_func':func2},func_name) func.module_name = m2_name func.__doc__ = func2.__doc__ diff --git a/scipy/lib/lapack/scons_support.py b/scipy/lib/lapack/scons_support.py index f78d0474cf32..e8695b67dbe0 100644 --- a/scipy/lib/lapack/scons_support.py +++ b/scipy/lib/lapack/scons_support.py @@ -1,3 +1,5 @@ +from __future__ import division, print_function, absolute_import + from os.path import join as pjoin, splitext, basename as pbasename def generate_interface_emitter(target, source, env): diff --git a/scipy/lib/lapack/setup.py b/scipy/lib/lapack/setup.py index 22f760765ce2..d4d1df3ef1a8 100755 --- a/scipy/lib/lapack/setup.py +++ b/scipy/lib/lapack/setup.py @@ -1,4 +1,5 @@ #!/usr/bin/env python +from __future__ import division, print_function, absolute_import import os from glob import glob @@ -26,7 +27,7 @@ def configuration(parent_package='',top_path=None): atlas_version = ([v[3:-3] for k,v in lapack_opt.get('define_macros',[]) \ if k=='ATLAS_INFO']+[None])[0] if atlas_version: - print ('ATLAS version: %s' % atlas_version) + print(('ATLAS version: %s' % atlas_version)) target_dir = '' diff --git a/scipy/lib/lapack/setupscons.py b/scipy/lib/lapack/setupscons.py index f747424770a4..264a1eb6a66c 100755 --- a/scipy/lib/lapack/setupscons.py +++ b/scipy/lib/lapack/setupscons.py @@ -1,4 +1,5 @@ #!/usr/bin/env python +from __future__ import division, print_function, absolute_import import os from glob import glob diff --git a/scipy/lib/lapack/tests/common.py b/scipy/lib/lapack/tests/common.py index 5374af3e2398..3255895aba0c 100644 --- a/scipy/lib/lapack/tests/common.py +++ b/scipy/lib/lapack/tests/common.py @@ -1,3 +1,5 @@ +from __future__ import division, print_function, absolute_import + import numpy as np from scipy.lib.lapack import flapack, clapack diff --git a/scipy/lib/lapack/tests/test_esv.py b/scipy/lib/lapack/tests/test_esv.py index 120f64277b75..3d5dee2cc962 100644 --- a/scipy/lib/lapack/tests/test_esv.py +++ b/scipy/lib/lapack/tests/test_esv.py @@ -1,3 +1,5 @@ +from __future__ import division, print_function, absolute_import + import numpy as np from numpy.testing import TestCase, assert_array_almost_equal, dec, \ assert_equal, assert_ diff --git a/scipy/lib/lapack/tests/test_gesv.py b/scipy/lib/lapack/tests/test_gesv.py index 9b7f4bb4f034..80e9eb9da1db 100644 --- a/scipy/lib/lapack/tests/test_gesv.py +++ b/scipy/lib/lapack/tests/test_gesv.py @@ -1,3 +1,5 @@ +from __future__ import division, print_function, absolute_import + import numpy as np from numpy.testing import TestCase, assert_array_almost_equal, dec, \ assert_equal, assert_ diff --git a/scipy/lib/lapack/tests/test_lapack.py b/scipy/lib/lapack/tests/test_lapack.py index b413b395a70c..817029818126 100644 --- a/scipy/lib/lapack/tests/test_lapack.py +++ b/scipy/lib/lapack/tests/test_lapack.py @@ -2,6 +2,8 @@ # # Created by: Pearu Peterson, September 2002 # +from __future__ import division, print_function, absolute_import + import numpy as np from numpy.testing import * diff --git a/scipy/lib/setup.py b/scipy/lib/setup.py index e89cc7f07434..810afba65e23 100644 --- a/scipy/lib/setup.py +++ b/scipy/lib/setup.py @@ -1,4 +1,5 @@ #!/usr/bin/env python +from 
__future__ import division, print_function, absolute_import def configuration(parent_package='',top_path=None): from numpy.distutils.misc_util import Configuration diff --git a/scipy/lib/setupscons.py b/scipy/lib/setupscons.py index a0766d05f36e..c8f78e230e9c 100644 --- a/scipy/lib/setupscons.py +++ b/scipy/lib/setupscons.py @@ -1,4 +1,5 @@ #!/usr/bin/env python +from __future__ import division, print_function, absolute_import def configuration(parent_package='',top_path=None): from numpy.distutils.misc_util import Configuration diff --git a/scipy/lib/six.py b/scipy/lib/six.py new file mode 100644 index 000000000000..883a987780a4 --- /dev/null +++ b/scipy/lib/six.py @@ -0,0 +1,388 @@ +"""Utilities for writing code that runs on Python 2 and 3""" + +# Copyright (c) 2010-2012 Benjamin Peterson +# +# Permission is hereby granted, free of charge, to any person obtaining a copy of +# this software and associated documentation files (the "Software"), to deal in +# the Software without restriction, including without limitation the rights to +# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +# the Software, and to permit persons to whom the Software is furnished to do so, +# subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in all +# copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +# FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +# COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +# IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + +import operator +import sys +import types + +__author__ = "Benjamin Peterson " +__version__ = "1.2.0" + + +# True if we are running on Python 3. +PY3 = sys.version_info[0] == 3 + +if PY3: + string_types = str, + integer_types = int, + class_types = type, + text_type = str + binary_type = bytes + + MAXSIZE = sys.maxsize +else: + string_types = basestring, + integer_types = (int, long) + class_types = (type, types.ClassType) + text_type = unicode + binary_type = str + + if sys.platform.startswith("java"): + # Jython always uses 32 bits. + MAXSIZE = int((1 << 31) - 1) + else: + # It's possible to have sizeof(long) != sizeof(Py_ssize_t). + class X(object): + def __len__(self): + return 1 << 31 + try: + len(X()) + except OverflowError: + # 32-bit + MAXSIZE = int((1 << 31) - 1) + else: + # 64-bit + MAXSIZE = int((1 << 63) - 1) + del X + + +def _add_doc(func, doc): + """Add documentation to a function.""" + func.__doc__ = doc + + +def _import_module(name): + """Import module, returning the module after the last dot.""" + __import__(name) + return sys.modules[name] + + +class _LazyDescr(object): + + def __init__(self, name): + self.name = name + + def __get__(self, obj, tp): + result = self._resolve() + setattr(obj, self.name, result) + # This is a bit ugly, but it avoids running this again. 
+ delattr(tp, self.name) + return result + + +class MovedModule(_LazyDescr): + + def __init__(self, name, old, new=None): + super(MovedModule, self).__init__(name) + if PY3: + if new is None: + new = name + self.mod = new + else: + self.mod = old + + def _resolve(self): + return _import_module(self.mod) + + +class MovedAttribute(_LazyDescr): + + def __init__(self, name, old_mod, new_mod, old_attr=None, new_attr=None): + super(MovedAttribute, self).__init__(name) + if PY3: + if new_mod is None: + new_mod = name + self.mod = new_mod + if new_attr is None: + if old_attr is None: + new_attr = name + else: + new_attr = old_attr + self.attr = new_attr + else: + self.mod = old_mod + if old_attr is None: + old_attr = name + self.attr = old_attr + + def _resolve(self): + module = _import_module(self.mod) + return getattr(module, self.attr) + + + +class _MovedItems(types.ModuleType): + """Lazy loading of moved objects""" + + +_moved_attributes = [ + MovedAttribute("cStringIO", "cStringIO", "io", "StringIO"), + MovedAttribute("filter", "itertools", "builtins", "ifilter", "filter"), + MovedAttribute("input", "__builtin__", "builtins", "raw_input", "input"), + MovedAttribute("map", "itertools", "builtins", "imap", "map"), + MovedAttribute("reload_module", "__builtin__", "imp", "reload"), + MovedAttribute("reduce", "__builtin__", "functools"), + MovedAttribute("StringIO", "StringIO", "io"), + MovedAttribute("xrange", "__builtin__", "builtins", "xrange", "range"), + MovedAttribute("zip", "itertools", "builtins", "izip", "zip"), + + MovedModule("builtins", "__builtin__"), + MovedModule("configparser", "ConfigParser"), + MovedModule("copyreg", "copy_reg"), + MovedModule("http_cookiejar", "cookielib", "http.cookiejar"), + MovedModule("http_cookies", "Cookie", "http.cookies"), + MovedModule("html_entities", "htmlentitydefs", "html.entities"), + MovedModule("html_parser", "HTMLParser", "html.parser"), + MovedModule("http_client", "httplib", "http.client"), + MovedModule("email_mime_multipart", "email.MIMEMultipart", "email.mime.multipart"), + MovedModule("email_mime_text", "email.MIMEText", "email.mime.text"), + MovedModule("email_mime_base", "email.MIMEBase", "email.mime.base"), + MovedModule("BaseHTTPServer", "BaseHTTPServer", "http.server"), + MovedModule("CGIHTTPServer", "CGIHTTPServer", "http.server"), + MovedModule("SimpleHTTPServer", "SimpleHTTPServer", "http.server"), + MovedModule("cPickle", "cPickle", "pickle"), + MovedModule("queue", "Queue"), + MovedModule("reprlib", "repr"), + MovedModule("socketserver", "SocketServer"), + MovedModule("tkinter", "Tkinter"), + MovedModule("tkinter_dialog", "Dialog", "tkinter.dialog"), + MovedModule("tkinter_filedialog", "FileDialog", "tkinter.filedialog"), + MovedModule("tkinter_scrolledtext", "ScrolledText", "tkinter.scrolledtext"), + MovedModule("tkinter_simpledialog", "SimpleDialog", "tkinter.simpledialog"), + MovedModule("tkinter_tix", "Tix", "tkinter.tix"), + MovedModule("tkinter_constants", "Tkconstants", "tkinter.constants"), + MovedModule("tkinter_dnd", "Tkdnd", "tkinter.dnd"), + MovedModule("tkinter_colorchooser", "tkColorChooser", + "tkinter.colorchooser"), + MovedModule("tkinter_commondialog", "tkCommonDialog", + "tkinter.commondialog"), + MovedModule("tkinter_tkfiledialog", "tkFileDialog", "tkinter.filedialog"), + MovedModule("tkinter_font", "tkFont", "tkinter.font"), + MovedModule("tkinter_messagebox", "tkMessageBox", "tkinter.messagebox"), + MovedModule("tkinter_tksimpledialog", "tkSimpleDialog", + "tkinter.simpledialog"), + 
MovedModule("urllib_robotparser", "robotparser", "urllib.robotparser"), + MovedModule("winreg", "_winreg"), +] +for attr in _moved_attributes: + setattr(_MovedItems, attr.name, attr) +del attr + +moves = sys.modules[__name__ + ".moves"] = _MovedItems("moves") + + +def add_move(move): + """Add an item to six.moves.""" + setattr(_MovedItems, move.name, move) + + +def remove_move(name): + """Remove item from six.moves.""" + try: + delattr(_MovedItems, name) + except AttributeError: + try: + del moves.__dict__[name] + except KeyError: + raise AttributeError("no such move, %r" % (name,)) + + +if PY3: + _meth_func = "__func__" + _meth_self = "__self__" + + _func_code = "__code__" + _func_defaults = "__defaults__" + + _iterkeys = "keys" + _itervalues = "values" + _iteritems = "items" +else: + _meth_func = "im_func" + _meth_self = "im_self" + + _func_code = "func_code" + _func_defaults = "func_defaults" + + _iterkeys = "iterkeys" + _itervalues = "itervalues" + _iteritems = "iteritems" + + +try: + advance_iterator = next +except NameError: + def advance_iterator(it): + return it.next() +next = advance_iterator + + +if PY3: + def get_unbound_function(unbound): + return unbound + + Iterator = object + + def callable(obj): + return any("__call__" in klass.__dict__ for klass in type(obj).__mro__) +else: + def get_unbound_function(unbound): + return unbound.im_func + + class Iterator(object): + + def next(self): + return type(self).__next__(self) + + callable = callable +_add_doc(get_unbound_function, + """Get the function out of a possibly unbound function""") + + +get_method_function = operator.attrgetter(_meth_func) +get_method_self = operator.attrgetter(_meth_self) +get_function_code = operator.attrgetter(_func_code) +get_function_defaults = operator.attrgetter(_func_defaults) + + +def iterkeys(d): + """Return an iterator over the keys of a dictionary.""" + return iter(getattr(d, _iterkeys)()) + +def itervalues(d): + """Return an iterator over the values of a dictionary.""" + return iter(getattr(d, _itervalues)()) + +def iteritems(d): + """Return an iterator over the (key, value) pairs of a dictionary.""" + return iter(getattr(d, _iteritems)()) + + +if PY3: + def b(s): + return s.encode("latin-1") + def u(s): + return s + if sys.version_info[1] <= 1: + def int2byte(i): + return bytes((i,)) + else: + # This is about 2x faster than the implementation above on 3.2+ + int2byte = operator.methodcaller("to_bytes", 1, "big") + import io + StringIO = io.StringIO + BytesIO = io.BytesIO +else: + def b(s): + return s + def u(s): + return unicode(s, "unicode_escape") + int2byte = chr + import StringIO + StringIO = BytesIO = StringIO.StringIO +_add_doc(b, """Byte literal""") +_add_doc(u, """Text literal""") + + +if PY3: + import builtins + exec_ = getattr(builtins, "exec") + + + def reraise(tp, value, tb=None): + if value.__traceback__ is not tb: + raise value.with_traceback(tb) + raise value + + + print_ = getattr(builtins, "print") + del builtins + +else: + def exec_(code, globs=None, locs=None): + """Execute code in a namespace.""" + if globs is None: + frame = sys._getframe(1) + globs = frame.f_globals + if locs is None: + locs = frame.f_locals + del frame + elif locs is None: + locs = globs + exec("""exec code in globs, locs""") + + + exec_("""def reraise(tp, value, tb=None): + raise tp, value, tb +""") + + + def print_(*args, **kwargs): + """The new-style print function.""" + fp = kwargs.pop("file", sys.stdout) + if fp is None: + return + def write(data): + if not isinstance(data, basestring): + data = 
str(data) + fp.write(data) + want_unicode = False + sep = kwargs.pop("sep", None) + if sep is not None: + if isinstance(sep, unicode): + want_unicode = True + elif not isinstance(sep, str): + raise TypeError("sep must be None or a string") + end = kwargs.pop("end", None) + if end is not None: + if isinstance(end, unicode): + want_unicode = True + elif not isinstance(end, str): + raise TypeError("end must be None or a string") + if kwargs: + raise TypeError("invalid keyword arguments to print()") + if not want_unicode: + for arg in args: + if isinstance(arg, unicode): + want_unicode = True + break + if want_unicode: + newline = unicode("\n") + space = unicode(" ") + else: + newline = "\n" + space = " " + if sep is None: + sep = space + if end is None: + end = newline + for i, arg in enumerate(args): + if i: + write(sep) + write(arg) + write(end) + +_add_doc(reraise, """Reraise an exception.""") + + +def with_metaclass(meta, base=object): + """Create a base class with a metaclass.""" + return meta("NewBase", (base,), {}) diff --git a/scipy/linalg/__init__.py b/scipy/linalg/__init__.py index a7842a2eadc1..df83061a2605 100644 --- a/scipy/linalg/__init__.py +++ b/scipy/linalg/__init__.py @@ -140,24 +140,26 @@ """ -from linalg_version import linalg_version as __version__ - -from misc import * -from basic import * -from decomp import * -from decomp_lu import * -from decomp_cholesky import * -from decomp_qr import * -from _decomp_qz import * -from decomp_svd import * -from decomp_schur import * -from matfuncs import * -from blas import * -from lapack import * -from special_matrices import * -from _solvers import * - -__all__ = filter(lambda s: not s.startswith('_'), dir()) +from __future__ import division, print_function, absolute_import + +from .linalg_version import linalg_version as __version__ + +from .misc import * +from .basic import * +from .decomp import * +from .decomp_lu import * +from .decomp_cholesky import * +from .decomp_qr import * +from ._decomp_qz import * +from .decomp_svd import * +from .decomp_schur import * +from .matfuncs import * +from .blas import * +from .lapack import * +from .special_matrices import * +from ._solvers import * + +__all__ = [s for s in dir() if not s.startswith('_')] from numpy.dual import register_func for k in ['norm', 'inv', 'svd', 'solve', 'det', 'eig', 'eigh', 'eigvals', diff --git a/scipy/linalg/_decomp_qz.py b/scipy/linalg/_decomp_qz.py index 39562d381578..736b1d472288 100644 --- a/scipy/linalg/_decomp_qz.py +++ b/scipy/linalg/_decomp_qz.py @@ -1,10 +1,14 @@ +from __future__ import division, print_function, absolute_import + import warnings import numpy as np from numpy import asarray_chkfinite, single -from misc import LinAlgError, _datacopied -from lapack import get_lapack_funcs +from .misc import LinAlgError, _datacopied +from .lapack import get_lapack_funcs + +from scipy.lib.six import callable __all__ = ['qz'] diff --git a/scipy/linalg/_solvers.py b/scipy/linalg/_solvers.py index 578b9466cecb..8c25815bb851 100644 --- a/scipy/linalg/_solvers.py +++ b/scipy/linalg/_solvers.py @@ -3,13 +3,15 @@ # Author: Jeffrey Armstrong # February 24, 2012 +from __future__ import division, print_function, absolute_import + import numpy as np from numpy.linalg import inv, LinAlgError -from basic import solve -from lapack import get_lapack_funcs -from decomp_schur import schur -from special_matrices import kron +from .basic import solve +from .lapack import get_lapack_funcs +from .decomp_schur import schur +from .special_matrices import kron __all__ = 
['solve_sylvester', 'solve_lyapunov', 'solve_discrete_lyapunov', 'solve_continuous_are', 'solve_discrete_are'] diff --git a/scipy/linalg/_testutils.py b/scipy/linalg/_testutils.py index 26be37cfe5e4..2fbdb841d557 100644 --- a/scipy/linalg/_testutils.py +++ b/scipy/linalg/_testutils.py @@ -1,3 +1,5 @@ +from __future__ import division, print_function, absolute_import + import numpy as np class _FakeMatrix(object): diff --git a/scipy/linalg/basic.py b/scipy/linalg/basic.py index 7b90f1e32009..5dce2ed97307 100644 --- a/scipy/linalg/basic.py +++ b/scipy/linalg/basic.py @@ -4,17 +4,19 @@ # w/ additions by Travis Oliphant, March 2002 # and Jake Vanderplas, August 2012 +from __future__ import division, print_function, absolute_import + __all__ = ['solve', 'solve_triangular', 'solveh_banded', 'solve_banded', 'inv', 'det', 'lstsq', 'pinv', 'pinv2', 'pinvh'] import numpy as np -from flinalg import get_flinalg_funcs -from lapack import get_lapack_funcs -from misc import LinAlgError, _datacopied +from .flinalg import get_flinalg_funcs +from .lapack import get_lapack_funcs +from .misc import LinAlgError, _datacopied from scipy.linalg import calc_lwork -from decomp_schur import schur -import decomp, decomp_svd +from .decomp_schur import schur +from . import decomp, decomp_svd # Linear equations @@ -79,8 +81,8 @@ def solve(a, b, sym_pos=False, lower=False, overwrite_a=False, overwrite_b=False overwrite_a = overwrite_a or _datacopied(a1, a) overwrite_b = overwrite_b or _datacopied(b1, b) if debug: - print 'solve:overwrite_a=',overwrite_a - print 'solve:overwrite_b=',overwrite_b + print('solve:overwrite_a=',overwrite_a) + print('solve:overwrite_b=',overwrite_b) if sym_pos: posv, = get_lapack_funcs(('posv',), (a1,b1)) c, x, info = posv(a1, b1, lower=lower, @@ -157,7 +159,7 @@ def solve_triangular(a, b, trans=0, lower=False, unit_diagonal=False, raise ValueError('incompatible dimensions') overwrite_b = overwrite_b or _datacopied(b1, b) if debug: - print 'solve:overwrite_b=',overwrite_b + print('solve:overwrite_b=',overwrite_b) trans = {'N': 0, 'T': 1, 'C': 2}.get(trans, trans) trtrs, = get_lapack_funcs(('trtrs',), (a1,b1)) x, info = trtrs(a1, b1, overwrite_b=overwrite_b, lower=lower, @@ -169,7 +171,7 @@ def solve_triangular(a, b, trans=0, lower=False, unit_diagonal=False, raise LinAlgError("singular matrix: resolution failed at diagonal %s" % (info-1)) raise ValueError('illegal value in %d-th argument of internal trtrs') -def solve_banded((l, u), ab, b, overwrite_ab=False, overwrite_b=False, +def solve_banded(l_and_u, ab, b, overwrite_ab=False, overwrite_b=False, debug=False, check_finite=True): """ Solve the equation a x = b for x, assuming a is banded matrix. 
@@ -207,7 +209,7 @@ def solve_banded((l, u), ab, b, overwrite_ab=False, overwrite_b=False, The solution to the system a x = b """ - + (l, u) = l_and_u if check_finite: a1, b1 = map(np.asarray_chkfinite, (ab, b)) else: diff --git a/scipy/linalg/benchmarks/bench_basic.py b/scipy/linalg/benchmarks/bench_basic.py index 176148e29311..06885ceb20a5 100644 --- a/scipy/linalg/benchmarks/bench_basic.py +++ b/scipy/linalg/benchmarks/bench_basic.py @@ -1,3 +1,5 @@ +from __future__ import division, print_function, absolute_import + import sys from numpy.testing import * import numpy.linalg as linalg @@ -9,17 +11,17 @@ class TestSolve(TestCase): def bench_random(self): basic_solve = linalg.solve - print - print ' Solving system of linear equations' - print ' ==================================' + print() + print(' Solving system of linear equations') + print(' ==================================') - print ' | contiguous | non-contiguous ' - print '----------------------------------------------' - print ' size | scipy | basic | scipy | basic ' + print(' | contiguous | non-contiguous ') + print('----------------------------------------------') + print(' size | scipy | basic | scipy | basic ') for size,repeat in [(20,1000),(100,150),(500,2),(1000,1)][:-1]: repeat *= 2 - print '%5s' % size, + print('%5s' % size, end=' ') sys.stdout.flush() a = random([size,size]) @@ -27,95 +29,95 @@ def bench_random(self): for i in range(size): a[i,i] = 10*(.1+a[i,i]) b = random([size]) - print '| %6.2f ' % measure('solve(a,b)',repeat), + print('| %6.2f ' % measure('solve(a,b)',repeat), end=' ') sys.stdout.flush() - print '| %6.2f ' % measure('basic_solve(a,b)',repeat), + print('| %6.2f ' % measure('basic_solve(a,b)',repeat), end=' ') sys.stdout.flush() a = a[-1::-1,-1::-1] # turn into a non-contiguous array assert_(not a.flags['CONTIGUOUS']) - print '| %6.2f ' % measure('solve(a,b)',repeat), + print('| %6.2f ' % measure('solve(a,b)',repeat), end=' ') sys.stdout.flush() - print '| %6.2f ' % measure('basic_solve(a,b)',repeat), + print('| %6.2f ' % measure('basic_solve(a,b)',repeat), end=' ') sys.stdout.flush() - print ' (secs for %s calls)' % (repeat) + print(' (secs for %s calls)' % (repeat)) class TestInv(TestCase): def bench_random(self): basic_inv = linalg.inv - print - print ' Finding matrix inverse' - print ' ==================================' - print ' | contiguous | non-contiguous ' - print '----------------------------------------------' - print ' size | scipy | basic | scipy | basic' + print() + print(' Finding matrix inverse') + print(' ==================================') + print(' | contiguous | non-contiguous ') + print('----------------------------------------------') + print(' size | scipy | basic | scipy | basic') for size,repeat in [(20,1000),(100,150),(500,2),(1000,1)][:-1]: repeat *= 2 - print '%5s' % size, + print('%5s' % size, end=' ') sys.stdout.flush() a = random([size,size]) # large diagonal ensures non-singularity: for i in range(size): a[i,i] = 10*(.1+a[i,i]) - print '| %6.2f ' % measure('inv(a)',repeat), + print('| %6.2f ' % measure('inv(a)',repeat), end=' ') sys.stdout.flush() - print '| %6.2f ' % measure('basic_inv(a)',repeat), + print('| %6.2f ' % measure('basic_inv(a)',repeat), end=' ') sys.stdout.flush() a = a[-1::-1,-1::-1] # turn into a non-contiguous array assert_(not a.flags['CONTIGUOUS']) - print '| %6.2f ' % measure('inv(a)',repeat), + print('| %6.2f ' % measure('inv(a)',repeat), end=' ') sys.stdout.flush() - print '| %6.2f ' % measure('basic_inv(a)',repeat), + print('| %6.2f ' % 
measure('basic_inv(a)',repeat), end=' ') sys.stdout.flush() - print ' (secs for %s calls)' % (repeat) + print(' (secs for %s calls)' % (repeat)) class TestDet(TestCase): def bench_random(self): basic_det = linalg.det - print - print ' Finding matrix determinant' - print ' ==================================' - print ' | contiguous | non-contiguous ' - print '----------------------------------------------' - print ' size | scipy | basic | scipy | basic ' + print() + print(' Finding matrix determinant') + print(' ==================================') + print(' | contiguous | non-contiguous ') + print('----------------------------------------------') + print(' size | scipy | basic | scipy | basic ') for size,repeat in [(20,1000),(100,150),(500,2),(1000,1)][:-1]: repeat *= 2 - print '%5s' % size, + print('%5s' % size, end=' ') sys.stdout.flush() a = random([size,size]) - print '| %6.2f ' % measure('det(a)',repeat), + print('| %6.2f ' % measure('det(a)',repeat), end=' ') sys.stdout.flush() - print '| %6.2f ' % measure('basic_det(a)',repeat), + print('| %6.2f ' % measure('basic_det(a)',repeat), end=' ') sys.stdout.flush() a = a[-1::-1,-1::-1] # turn into a non-contiguous array assert_(not a.flags['CONTIGUOUS']) - print '| %6.2f ' % measure('det(a)',repeat), + print('| %6.2f ' % measure('det(a)',repeat), end=' ') sys.stdout.flush() - print '| %6.2f ' % measure('basic_det(a)',repeat), + print('| %6.2f ' % measure('basic_det(a)',repeat), end=' ') sys.stdout.flush() - print ' (secs for %s calls)' % (repeat) + print(' (secs for %s calls)' % (repeat)) if __name__ == "__main__": diff --git a/scipy/linalg/benchmarks/bench_decom.py b/scipy/linalg/benchmarks/bench_decom.py index bf570f0fc1d5..77c11648010b 100644 --- a/scipy/linalg/benchmarks/bench_decom.py +++ b/scipy/linalg/benchmarks/bench_decom.py @@ -1,6 +1,9 @@ """ Benchmark functions for linalg.decomp module """ + +from __future__ import division, print_function, absolute_import + import sys from numpy import linalg @@ -11,21 +14,21 @@ def random(size): def bench_random(): Numeric_eigvals = linalg.eigvals - print - print ' Finding matrix eigenvalues' - print ' ==================================' - print ' | contiguous '#'| non-contiguous ' - print '----------------------------------------------' - print ' size | scipy '#'| core | scipy | core ' + print() + print(' Finding matrix eigenvalues') + print(' ==================================') + print(' | contiguous ')#'| non-contiguous ' + print('----------------------------------------------') + print(' size | scipy ')#'| core | scipy | core ' for size,repeat in [(20,150),(100,7),(200,2)]: repeat *= 1 - print '%5s' % size, + print('%5s' % size, end=' ') sys.stdout.flush() a = random([size,size]) - print '| %6.2f ' % measure('eigvals(a)',repeat), + print('| %6.2f ' % measure('eigvals(a)',repeat), end=' ') sys.stdout.flush() - print ' (secs for %s calls)' % (repeat) + print(' (secs for %s calls)' % (repeat)) diff --git a/scipy/linalg/blas.py b/scipy/linalg/blas.py index 72209c8f616f..69b0fbf1576d 100644 --- a/scipy/linalg/blas.py +++ b/scipy/linalg/blas.py @@ -104,6 +104,8 @@ # refactoring by Fabian Pedregosa, March 2010 # +from __future__ import division, print_function, absolute_import + __all__ = ['get_blas_funcs', 'find_best_blas_type'] import numpy as _np diff --git a/scipy/linalg/cblas.py b/scipy/linalg/cblas.py index 7e8dec585f65..52566df8835e 100644 --- a/scipy/linalg/cblas.py +++ b/scipy/linalg/cblas.py @@ -1,6 +1,8 @@ """ This module is deprecated -- use scipy.linalg.blas instead """ +from 
__future__ import division, print_function, absolute_import + try: from _cblas import * except ImportError: diff --git a/scipy/linalg/clapack.py b/scipy/linalg/clapack.py index 0a3b99946e2a..8a76f5d862b1 100644 --- a/scipy/linalg/clapack.py +++ b/scipy/linalg/clapack.py @@ -1,6 +1,8 @@ """ This module is deprecated -- use scipy.linalg.lapack instead """ +from __future__ import division, print_function, absolute_import + try: from _clapack import * except ImportError: diff --git a/scipy/linalg/decomp.py b/scipy/linalg/decomp.py index 6ad2c11ce384..7e7123624630 100644 --- a/scipy/linalg/decomp.py +++ b/scipy/linalg/decomp.py @@ -12,18 +12,19 @@ # moved to their own files. Still in this file are functions for eigenstuff # and for the Hessenberg form. +from __future__ import division, print_function, absolute_import + __all__ = ['eig','eigh','eig_banded','eigvals','eigvalsh', 'eigvals_banded', 'hessenberg'] import numpy from numpy import array, asarray_chkfinite, asarray, diag, zeros, ones, \ isfinite, inexact, nonzero, iscomplexobj, cast, flatnonzero, conj - # Local imports from scipy.linalg import calc_lwork -from misc import LinAlgError, _datacopied -from lapack import get_lapack_funcs -from blas import get_blas_funcs +from .misc import LinAlgError, _datacopied +from .lapack import get_lapack_funcs +from .blas import get_blas_funcs _I = cast['F'](1j) diff --git a/scipy/linalg/decomp_cholesky.py b/scipy/linalg/decomp_cholesky.py index 513346c871b6..00d63fdc93b6 100644 --- a/scipy/linalg/decomp_cholesky.py +++ b/scipy/linalg/decomp_cholesky.py @@ -1,10 +1,12 @@ """Cholesky decomposition functions.""" +from __future__ import division, print_function, absolute_import + from numpy import asarray_chkfinite, asarray # Local imports -from misc import LinAlgError, _datacopied -from lapack import get_lapack_funcs +from .misc import LinAlgError, _datacopied +from .lapack import get_lapack_funcs __all__ = ['cholesky', 'cho_factor', 'cho_solve', 'cholesky_banded', 'cho_solve_banded'] @@ -128,7 +130,7 @@ def cho_factor(a, lower=False, overwrite_a=False, check_finite=True): return c, lower -def cho_solve((c, lower), b, overwrite_b=False, check_finite=True): +def cho_solve(c_and_lower, b, overwrite_b=False, check_finite=True): """Solve the linear equations A x = b, given the Cholesky factorization of A. Parameters @@ -152,7 +154,7 @@ def cho_solve((c, lower), b, overwrite_b=False, check_finite=True): cho_factor : Cholesky factorization of a matrix """ - + (c, lower) = c_and_lower if check_finite: b1 = asarray_chkfinite(b) c = asarray_chkfinite(c) @@ -228,7 +230,7 @@ def cholesky_banded(ab, overwrite_ab=False, lower=False, check_finite=True): return c -def cho_solve_banded((cb, lower), b, overwrite_b=False, check_finite=True): +def cho_solve_banded(cb_and_lower, b, overwrite_b=False, check_finite=True): """Solve the linear equations A x = b, given the Cholesky factorization of A. Parameters @@ -260,7 +262,7 @@ def cho_solve_banded((cb, lower), b, overwrite_b=False, check_finite=True): .. 
versionadded:: 0.8.0 """ - + (cb, lower) = cb_and_lower if check_finite: cb = asarray_chkfinite(cb) b = asarray_chkfinite(b) diff --git a/scipy/linalg/decomp_lu.py b/scipy/linalg/decomp_lu.py index d19b149a7978..8a1502bb4ef2 100644 --- a/scipy/linalg/decomp_lu.py +++ b/scipy/linalg/decomp_lu.py @@ -1,13 +1,15 @@ """LU decomposition functions.""" +from __future__ import division, print_function, absolute_import + from warnings import warn from numpy import asarray, asarray_chkfinite # Local imports -from misc import _datacopied -from lapack import get_lapack_funcs -from flinalg import get_flinalg_funcs +from .misc import _datacopied +from .lapack import get_lapack_funcs +from .flinalg import get_flinalg_funcs __all__ = ['lu', 'lu_solve', 'lu_factor'] @@ -69,7 +71,7 @@ def lu_factor(a, overwrite_a=False, check_finite=True): return lu, piv -def lu_solve((lu, piv), b, trans=0, overwrite_b=False, check_finite=True): +def lu_solve(lu_and_piv, b, trans=0, overwrite_b=False, check_finite=True): """Solve an equation system, a x = b, given the LU factorization of a Parameters @@ -103,6 +105,7 @@ def lu_solve((lu, piv), b, trans=0, overwrite_b=False, check_finite=True): lu_factor : LU factorize a matrix """ + (lu, piv) = lu_and_piv if check_finite: b1 = asarray_chkfinite(b) else: diff --git a/scipy/linalg/decomp_qr.py b/scipy/linalg/decomp_qr.py index c551ff7e8066..63efd29522ba 100644 --- a/scipy/linalg/decomp_qr.py +++ b/scipy/linalg/decomp_qr.py @@ -1,11 +1,12 @@ """QR decomposition functions.""" +from __future__ import division, print_function, absolute_import import numpy # Local imports -from blas import get_blas_funcs -from lapack import get_lapack_funcs -from misc import _datacopied +from .blas import get_blas_funcs +from .lapack import get_lapack_funcs +from .misc import _datacopied # XXX: what is qr_old, should it be kept? __all__ = ['qr', 'qr_multiply', 'rq', 'qr_old'] diff --git a/scipy/linalg/decomp_schur.py b/scipy/linalg/decomp_schur.py index 17332fcb67dc..731903fb22ee 100644 --- a/scipy/linalg/decomp_schur.py +++ b/scipy/linalg/decomp_schur.py @@ -1,14 +1,16 @@ """Schur decomposition functions.""" +from __future__ import division, print_function, absolute_import import numpy from numpy import asarray_chkfinite, single, asarray -# Local imports. -import misc -from misc import LinAlgError, _datacopied -from lapack import get_lapack_funcs -from decomp import eigvals +from scipy.lib.six import callable +# Local imports. +from . import misc +from .misc import LinAlgError, _datacopied +from .lapack import get_lapack_funcs +from .decomp import eigvals __all__ = ['schur', 'rsf2csf'] diff --git a/scipy/linalg/decomp_svd.py b/scipy/linalg/decomp_svd.py index 4e5f4ffa730e..058c39e67d7f 100644 --- a/scipy/linalg/decomp_svd.py +++ b/scipy/linalg/decomp_svd.py @@ -1,12 +1,13 @@ """SVD decomposition functions.""" +from __future__ import division, print_function, absolute_import import numpy from numpy import asarray_chkfinite, asarray, zeros, r_, diag from scipy.linalg import calc_lwork # Local imports. 
-from misc import LinAlgError, _datacopied -from lapack import get_lapack_funcs +from .misc import LinAlgError, _datacopied +from .lapack import get_lapack_funcs __all__ = ['svd', 'svdvals', 'diagsvd', 'orth'] diff --git a/scipy/linalg/fblas.py b/scipy/linalg/fblas.py index ce44d05eee64..b8b7cb3cdd27 100644 --- a/scipy/linalg/fblas.py +++ b/scipy/linalg/fblas.py @@ -1,6 +1,8 @@ """ This module is deprecated -- use scipy.linalg.blas instead """ +from __future__ import division, print_function, absolute_import + from _fblas import * import numpy as _np @_np.deprecate(old_name="scipy.linalg.fblas", new_name="scipy.linalg.blas") diff --git a/scipy/linalg/flapack.py b/scipy/linalg/flapack.py index a878aa28fa5c..caca889db1c6 100644 --- a/scipy/linalg/flapack.py +++ b/scipy/linalg/flapack.py @@ -1,6 +1,8 @@ """ This module is deprecated -- use scipy.linalg.lapack instead """ +from __future__ import division, print_function, absolute_import + from _flapack import * import numpy as _np @_np.deprecate(old_name="scipy.linalg.flapack", new_name="scipy.linalg.lapack") diff --git a/scipy/linalg/flinalg.py b/scipy/linalg/flinalg.py index 85fb7601f9ca..009758f680f6 100644 --- a/scipy/linalg/flinalg.py +++ b/scipy/linalg/flinalg.py @@ -2,13 +2,15 @@ # Author: Pearu Peterson, March 2002 # +from __future__ import division, print_function, absolute_import + __all__ = ['get_flinalg_funcs'] # The following ensures that possibly missing flavor (C or Fortran) is # replaced with the available one. If none is available, exception # is raised at the first attempt to use the resources. try: - import _flinalg + from . import _flinalg except ImportError: _flinalg = None # from numpy.distutils.misc_util import PostponedException diff --git a/scipy/linalg/lapack.py b/scipy/linalg/lapack.py index ddf53890f418..8a5b73243025 100644 --- a/scipy/linalg/lapack.py +++ b/scipy/linalg/lapack.py @@ -204,12 +204,14 @@ # Author: Pearu Peterson, March 2002 # +from __future__ import division, print_function, absolute_import + __all__ = ['get_lapack_funcs'] -from blas import _get_funcs +from .blas import _get_funcs # Backward compatibility: -from blas import find_best_blas_type as find_best_lapack_type +from .blas import find_best_blas_type as find_best_lapack_type from scipy.linalg import _flapack try: diff --git a/scipy/linalg/linalg_version.py b/scipy/linalg/linalg_version.py index 0eba228ca04d..4df80fd90fa1 100644 --- a/scipy/linalg/linalg_version.py +++ b/scipy/linalg/linalg_version.py @@ -1,3 +1,5 @@ +from __future__ import division, print_function, absolute_import + major = 0 minor = 4 micro = 9 diff --git a/scipy/linalg/matfuncs.py b/scipy/linalg/matfuncs.py index 0d1c979251ea..dc931666a895 100644 --- a/scipy/linalg/matfuncs.py +++ b/scipy/linalg/matfuncs.py @@ -2,6 +2,8 @@ # Author: Travis Oliphant, March 2002 # +from __future__ import division, print_function, absolute_import + __all__ = ['expm','expm2','expm3','cosm','sinm','tanm','coshm','sinhm', 'tanhm','logm','funm','signm','sqrtm'] @@ -12,13 +14,15 @@ from numpy import matrix as mat import numpy as np +from scipy.lib.six.moves import xrange + # Local imports -from misc import norm -from basic import solve, inv -from special_matrices import triu, all_mat -from decomp import eig -from decomp_svd import orth, svd -from decomp_schur import schur, rsf2csf +from .misc import norm +from .basic import solve, inv +from .special_matrices import triu, all_mat +from .decomp import eig +from .decomp_svd import orth, svd +from .decomp_schur import schur, rsf2csf import warnings eps = 
np.finfo(float).eps @@ -325,7 +329,7 @@ def funm(A, func, disp=True): err = Inf if disp: if err > 1000*tol: - print "Result may be inaccurate, approximate err =", err + print("Result may be inaccurate, approximate err =", err) return F else: return F, err @@ -376,7 +380,7 @@ def logm(A, disp=True): errest = norm(expm(F)-A,1) / norm(A,1) if disp: if not isfinite(errest) or errest >= errtol: - print "Result may be inaccurate, approximate err =", errest + print("Result may be inaccurate, approximate err =", errest) return F else: return F, errest @@ -453,7 +457,7 @@ def rounded_sign(x): prev_errest = errest if disp: if not isfinite(errest) or errest >= errtol: - print "Result may be inaccurate, approximate err =", errest + print("Result may be inaccurate, approximate err =", errest) return S0 else: return S0, errest @@ -505,7 +509,7 @@ def sqrtm(A, disp=True): if disp: nzeig = np.any(diag(T)==0) if nzeig: - print "Matrix is singular and may not have a square root." + print("Matrix is singular and may not have a square root.") return X.A else: arg2 = norm(X*X - A,'fro')**2 / norm(A,'fro') diff --git a/scipy/linalg/misc.py b/scipy/linalg/misc.py index d9b83a5ee7b5..237f8c98f057 100644 --- a/scipy/linalg/misc.py +++ b/scipy/linalg/misc.py @@ -1,6 +1,8 @@ +from __future__ import division, print_function, absolute_import + import numpy as np from numpy.linalg import LinAlgError -import blas +from . import blas __all__ = ['LinAlgError', 'norm'] diff --git a/scipy/linalg/setup.py b/scipy/linalg/setup.py index 4c3600198990..22de3dc0fb41 100755 --- a/scipy/linalg/setup.py +++ b/scipy/linalg/setup.py @@ -1,4 +1,5 @@ #!/usr/bin/env python +from __future__ import division, print_function, absolute_import import os from distutils.dep_util import newer_group, newer @@ -36,7 +37,7 @@ def configuration(parent_package='',top_path=None): atlas_version = ([v[3:-3] for k,v in lapack_opt.get('define_macros',[]) \ if k=='ATLAS_INFO']+[None])[0] if atlas_version: - print ('ATLAS version: %s' % atlas_version) + print(('ATLAS version: %s' % atlas_version)) target_dir = '' diff --git a/scipy/linalg/setupscons.py b/scipy/linalg/setupscons.py index 1ff36d30add8..758a2e6f7a0a 100755 --- a/scipy/linalg/setupscons.py +++ b/scipy/linalg/setupscons.py @@ -1,4 +1,5 @@ #!/usr/bin/env python +from __future__ import division, print_function, absolute_import def configuration(parent_package='',top_path=None): from numpy.distutils.misc_util import Configuration @@ -12,7 +13,7 @@ def configuration(parent_package='',top_path=None): if __name__ == '__main__': from numpy.distutils.core import setup - from linalg_version import linalg_version + from .linalg_version import linalg_version setup(version=linalg_version, **configuration(top_path='').todict()) diff --git a/scipy/linalg/special_matrices.py b/scipy/linalg/special_matrices.py index ab153e653d8b..0d2d453c17fa 100644 --- a/scipy/linalg/special_matrices.py +++ b/scipy/linalg/special_matrices.py @@ -1,7 +1,9 @@ +from __future__ import division, print_function, absolute_import import math import numpy as np from scipy.misc import comb +from scipy.lib.six.moves import xrange __all__ = ['tri', 'tril', 'triu', 'toeplitz', 'circulant', 'hankel', @@ -416,12 +418,12 @@ def leslie(f, s): n = f.size a = np.zeros((n, n), dtype=tmp.dtype) a[0] = f - a[range(1, n), range(0, n - 1)] = s + a[list(range(1, n)), list(range(0, n - 1))] = s return a def all_mat(*args): - return map(np.matrix, args) + return list(map(np.matrix, args)) def kron(a, b): @@ -592,7 +594,7 @@ def companion(a): n = a.size c 
= np.zeros((n - 1, n - 1), dtype=first_row.dtype) c[0] = first_row - c[range(1, n - 1), range(0, n - 2)] = 1 + c[list(range(1, n - 1)), list(range(0, n - 2))] = 1 return c @@ -767,7 +769,7 @@ def pascal(n, kind='symmetric', exact=True): if exact: if n > 35: L_n = np.empty((n, n), dtype=object) - L_n.fill(0L) + L_n.fill(0) else: L_n = np.zeros((n, n), dtype=np.uint64) for i in range(n): diff --git a/scipy/linalg/tests/test_basic.py b/scipy/linalg/tests/test_basic.py index c3e448a009f9..b5cfd63e689a 100644 --- a/scipy/linalg/tests/test_basic.py +++ b/scipy/linalg/tests/test_basic.py @@ -5,6 +5,8 @@ """ Test functions for linalg.basic module """ +from __future__ import division, print_function, absolute_import + """ Bugs: 1) solve.check_random_sym_complex fails if a is complex diff --git a/scipy/linalg/tests/test_blas.py b/scipy/linalg/tests/test_blas.py index 58942266e2d9..dbf0408d71dd 100644 --- a/scipy/linalg/tests/test_blas.py +++ b/scipy/linalg/tests/test_blas.py @@ -2,6 +2,8 @@ # # Created by: Pearu Peterson, April 2002 # +from __future__ import division, print_function, absolute_import + __usage__ = """ Build linalg: diff --git a/scipy/linalg/tests/test_build.py b/scipy/linalg/tests/test_build.py index c7e1d15ab9a2..8f9655e82075 100644 --- a/scipy/linalg/tests/test_build.py +++ b/scipy/linalg/tests/test_build.py @@ -1,3 +1,5 @@ +from __future__ import division, print_function, absolute_import + from subprocess import call, PIPE, Popen import sys import re diff --git a/scipy/linalg/tests/test_decomp.py b/scipy/linalg/tests/test_decomp.py index b890a99d1000..ba6372292dc3 100644 --- a/scipy/linalg/tests/test_decomp.py +++ b/scipy/linalg/tests/test_decomp.py @@ -1,6 +1,8 @@ """ Test functions for linalg.decomp module """ +from __future__ import division, print_function, absolute_import + __usage__ = """ Build linalg: python setup_linalg.py build @@ -14,6 +16,8 @@ from numpy.testing import TestCase, assert_equal, assert_array_almost_equal, \ assert_array_equal, assert_raises, assert_, run_module_suite, dec +from scipy.lib.six.moves import xrange + from scipy.linalg import eig, eigvals, lu, svd, svdvals, cholesky, qr, \ schur, rsf2csf, lu_solve, lu_factor, solve, diagsvd, hessenberg, rq, \ eig_banded, eigvals_banded, eigh, eigvalsh, qr_multiply, LinAlgError, \ diff --git a/scipy/linalg/tests/test_decomp_cholesky.py b/scipy/linalg/tests/test_decomp_cholesky.py index 803571d920dc..0682e15598e0 100644 --- a/scipy/linalg/tests/test_decomp_cholesky.py +++ b/scipy/linalg/tests/test_decomp_cholesky.py @@ -1,4 +1,4 @@ - +from __future__ import division, print_function, absolute_import from numpy.testing import TestCase, assert_array_almost_equal @@ -84,7 +84,7 @@ def test_check_finite(self): [4.0, 4.0, 4.0, 4.0]]) c = cholesky_banded(ab, lower=False, check_finite=False) ufac = zeros_like(a) - ufac[range(4),range(4)] = c[-1] + ufac[list(range(4)),list(range(4))] = c[-1] ufac[(0,1,2),(1,2,3)] = c[0,1:] assert_array_almost_equal(a, dot(ufac.T, ufac)) @@ -103,7 +103,7 @@ def test_upper_real(self): [4.0, 4.0, 4.0, 4.0]]) c = cholesky_banded(ab, lower=False) ufac = zeros_like(a) - ufac[range(4),range(4)] = c[-1] + ufac[list(range(4)),list(range(4))] = c[-1] ufac[(0,1,2),(1,2,3)] = c[0,1:] assert_array_almost_equal(a, dot(ufac.T, ufac)) @@ -122,7 +122,7 @@ def test_upper_complex(self): [4.0, 4.0, 4.0, 4.0]]) c = cholesky_banded(ab, lower=False) ufac = zeros_like(a) - ufac[range(4),range(4)] = c[-1] + ufac[list(range(4)),list(range(4))] = c[-1] ufac[(0,1,2),(1,2,3)] = c[0,1:] assert_array_almost_equal(a, 
dot(ufac.conj().T, ufac)) @@ -141,7 +141,7 @@ def test_lower_real(self): [1.0, 0.5, 0.2, -1.0]]) c = cholesky_banded(ab, lower=True) lfac = zeros_like(a) - lfac[range(4),range(4)] = c[0] + lfac[list(range(4)),list(range(4))] = c[0] lfac[(1,2,3),(0,1,2)] = c[1,:3] assert_array_almost_equal(a, dot(lfac, lfac.T)) @@ -160,7 +160,7 @@ def test_lower_complex(self): [1.0, 0.5, 0.2j, -1.0]]) c = cholesky_banded(ab, lower=True) lfac = zeros_like(a) - lfac[range(4),range(4)] = c[0] + lfac[list(range(4)),list(range(4))] = c[0] lfac[(1,2,3),(0,1,2)] = c[1,:3] assert_array_almost_equal(a, dot(lfac, lfac.conj().T)) diff --git a/scipy/linalg/tests/test_fblas.py b/scipy/linalg/tests/test_fblas.py index 658e31c43942..58031b1796bc 100644 --- a/scipy/linalg/tests/test_fblas.py +++ b/scipy/linalg/tests/test_fblas.py @@ -6,10 +6,14 @@ # !! Complex calculations really aren't checked that carefully. # !! Only real valued complex numbers are used in tests. +from __future__ import division, print_function, absolute_import + from numpy import float32, float64, complex64, complex128, arange, array, \ zeros, shape, transpose, newaxis, common_type, conjugate from scipy.linalg import _fblas as fblas +from scipy.lib.six.moves import xrange + from numpy.testing import TestCase, run_module_suite, assert_array_equal, \ assert_array_almost_equal, assert_ diff --git a/scipy/linalg/tests/test_lapack.py b/scipy/linalg/tests/test_lapack.py index 11e2503962c9..15d894aacc6f 100644 --- a/scipy/linalg/tests/test_lapack.py +++ b/scipy/linalg/tests/test_lapack.py @@ -3,6 +3,8 @@ # Created by: Pearu Peterson, September 2002 # +from __future__ import division, print_function, absolute_import + from numpy.testing import TestCase, run_module_suite, assert_equal, \ assert_array_almost_equal, assert_, assert_raises @@ -31,13 +33,13 @@ def test_gebal(self): f = getattr(flapack,p+'gebal',None) if f is None: continue ba,lo,hi,pivscale,info = f(a) - assert_(not info,`info`) + assert_(not info,repr(info)) assert_array_almost_equal(ba,a) assert_equal((lo,hi),(0,len(a[0])-1)) assert_array_almost_equal(pivscale, np.ones(len(a))) ba,lo,hi,pivscale,info = f(a1,permute=1,scale=1) - assert_(not info,`info`) + assert_(not info,repr(info)) #print a1 #print ba,lo,hi,pivscale @@ -49,7 +51,7 @@ def test_gehrd(self): f = getattr(flapack,p+'gehrd',None) if f is None: continue ht,tau,info = f(a) - assert_(not info,`info`) + assert_(not info,repr(info)) def test_trsyl(self): a = np.array([[1, 2], [0, 4]]) diff --git a/scipy/linalg/tests/test_matfuncs.py b/scipy/linalg/tests/test_matfuncs.py index 60645d307624..81ec7a45123d 100644 --- a/scipy/linalg/tests/test_matfuncs.py +++ b/scipy/linalg/tests/test_matfuncs.py @@ -6,6 +6,8 @@ """ +from __future__ import division, print_function, absolute_import + import numpy as np from numpy import array, identity, dot, sqrt, double, exp, random from numpy.testing import TestCase, run_module_suite, assert_array_almost_equal, \ diff --git a/scipy/linalg/tests/test_solvers.py b/scipy/linalg/tests/test_solvers.py index 8c57fc30ecd8..e5306d680c74 100644 --- a/scipy/linalg/tests/test_solvers.py +++ b/scipy/linalg/tests/test_solvers.py @@ -1,3 +1,5 @@ +from __future__ import division, print_function, absolute_import + import numpy as np from numpy.linalg import inv diff --git a/scipy/linalg/tests/test_special_matrices.py b/scipy/linalg/tests/test_special_matrices.py index fe370f7ffd12..a3e55cd5e6b2 100644 --- a/scipy/linalg/tests/test_special_matrices.py +++ b/scipy/linalg/tests/test_special_matrices.py @@ -1,10 +1,14 @@ """Tests 
for functions in special_matrices.py.""" +from __future__ import division, print_function, absolute_import + from numpy import arange, add, array, eye, copy from numpy.testing import TestCase, run_module_suite, assert_raises, \ assert_equal, assert_array_equal, assert_array_almost_equal, \ assert_allclose +from scipy.lib.six.moves import xrange + from scipy.misc import comb from scipy.linalg import toeplitz, hankel, circulant, hadamard, leslie, \ companion, tri, triu, tril, kron, block_diag, \ diff --git a/scipy/misc/__init__.py b/scipy/misc/__init__.py index cb2afff50cb1..bcc99fe9f350 100644 --- a/scipy/misc/__init__.py +++ b/scipy/misc/__init__.py @@ -37,10 +37,12 @@ who - Print the Numpy arrays in the given dictionary """ +from __future__ import division, print_function, absolute_import + __all__ = ['who', 'source', 'info', 'doccer'] -import doccer -from common import * +from . import doccer +from .common import * from numpy import who, source, info as _info import sys @@ -50,14 +52,14 @@ def info(object=None,maxwidth=76,output=sys.stdout,toplevel='scipy'): del sys try: - from pilutil import * - import pilutil + from .pilutil import * + from . import pilutil __all__ += pilutil.__all__ del pilutil except ImportError: pass -import common +from . import common __all__ += common.__all__ del common diff --git a/scipy/misc/common.py b/scipy/misc/common.py index 7aba547e322b..558ece68fc89 100644 --- a/scipy/misc/common.py +++ b/scipy/misc/common.py @@ -3,6 +3,10 @@ (special, linalg) """ +from __future__ import division, print_function, absolute_import + +from scipy.lib.six.moves import xrange + from numpy import exp, log, asarray, arange, newaxis, hstack, product, array, \ where, zeros, extract, place, pi, sqrt, eye, poly1d, dot, \ r_, rollaxis, sum, fromstring @@ -120,8 +124,8 @@ def factorial(n,exact=0): """ if exact: if n < 0: - return 0L - val = 1L + return 0 + val = 1 for k in xrange(1,n+1): val *= k return val @@ -170,10 +174,10 @@ def factorial2(n, exact=False): """ if exact: if n < -1: - return 0L + return 0 if n <= 0: - return 1L - val = 1L + return 1 + val = 1 for k in xrange(n,0,-2): val *= k return val @@ -226,10 +230,10 @@ def factorialk(n,k,exact=1): """ if exact: if n < 1-k: - return 0L + return 0 if n<=0: - return 1L - val = 1L + return 1 + val = 1 for j in xrange(n,0,-k): val = val*j return val @@ -274,8 +278,8 @@ def comb(N,k,exact=0): """ if exact: if (k > N) or (N < 0) or (k < 0): - return 0L - val = 1L + return 0 + val = 1 for j in xrange(min(k, N-k)): val = (val*(N-j))//(j+1) return val @@ -441,10 +445,10 @@ def lena(): >>> plt.show() """ - import cPickle, os + import pickle, os fname = os.path.join(os.path.dirname(__file__),'lena.dat') f = open(fname,'rb') - lena = array(cPickle.load(f)) + lena = array(pickle.load(f)) f.close() return lena @@ -479,10 +483,10 @@ def ascent(): >>> plt.show() """ - import cPickle, os + import pickle, os fname = os.path.join(os.path.dirname(__file__),'ascent.dat') f = open(fname,'rb') - ascent = array(cPickle.load(f)) + ascent = array(pickle.load(f)) f.close() return ascent diff --git a/scipy/misc/doccer.py b/scipy/misc/doccer.py index acf651285248..4aa9dd3efe76 100644 --- a/scipy/misc/doccer.py +++ b/scipy/misc/doccer.py @@ -1,6 +1,8 @@ ''' Utilities to allow inserting docstring fragments for common parameters into function and method docstrings''' +from __future__ import division, print_function, absolute_import + import sys __all__ = ['docformat', 'indentcount_lines', 'filldoc', @@ -81,12 +83,12 @@ def indentcount_lines(lines): >>> 
indentcount_lines([' ']) 0 ''' - indentno = sys.maxint + indentno = sys.maxsize for line in lines: stripped = line.lstrip() if stripped: indentno = min(indentno, len(line) - len(stripped)) - if indentno == sys.maxint: + if indentno == sys.maxsize: return 0 return indentno diff --git a/scipy/misc/pilutil.py b/scipy/misc/pilutil.py index c22cbf2dd5e8..df8812fb4a88 100644 --- a/scipy/misc/pilutil.py +++ b/scipy/misc/pilutil.py @@ -5,6 +5,8 @@ available on systems that don't have PIL installed. """ +from __future__ import division, print_function, absolute_import + # Functions which need the PIL import numpy diff --git a/scipy/misc/setup.py b/scipy/misc/setup.py index f100d485e11d..2f731aba59d8 100644 --- a/scipy/misc/setup.py +++ b/scipy/misc/setup.py @@ -1,3 +1,4 @@ +from __future__ import division, print_function, absolute_import def configuration(parent_package='',top_path=None): from numpy.distutils.misc_util import Configuration diff --git a/scipy/misc/setupscons.py b/scipy/misc/setupscons.py index 7ac655cd9a0c..a8d4eb78d6c9 100644 --- a/scipy/misc/setupscons.py +++ b/scipy/misc/setupscons.py @@ -1,3 +1,4 @@ +from __future__ import division, print_function, absolute_import def configuration(parent_package='',top_path=None): from numpy.distutils.misc_util import Configuration diff --git a/scipy/misc/tests/test_common.py b/scipy/misc/tests/test_common.py index 0ec44b1805d6..9c1a9f172e4a 100644 --- a/scipy/misc/tests/test_common.py +++ b/scipy/misc/tests/test_common.py @@ -1,3 +1,5 @@ +from __future__ import division, print_function, absolute_import + import numpy as np from numpy.testing import assert_array_equal, assert_almost_equal, \ assert_array_almost_equal diff --git a/scipy/misc/tests/test_doccer.py b/scipy/misc/tests/test_doccer.py index 6204a9b60306..74de8f68aa3d 100644 --- a/scipy/misc/tests/test_doccer.py +++ b/scipy/misc/tests/test_doccer.py @@ -1,5 +1,7 @@ ''' Some tests for the documenting decorator and support functions ''' +from __future__ import division, print_function, absolute_import + import numpy as np from numpy.testing import assert_equal, assert_raises diff --git a/scipy/misc/tests/test_pilutil.py b/scipy/misc/tests/test_pilutil.py index efaae6ef3b90..37fcc9e84059 100644 --- a/scipy/misc/tests/test_pilutil.py +++ b/scipy/misc/tests/test_pilutil.py @@ -1,3 +1,5 @@ +from __future__ import division, print_function, absolute_import + import os.path import numpy as np @@ -67,7 +69,7 @@ def test_fromimage(): data = {'icon.png':(0,255), 'icon_mono.png':(0,2), 'icon_mono_flat.png':(0,1)} - for fn, irange in data.iteritems(): + for fn, irange in data.items(): yield tst_fromimage, os.path.join(datapath,'data',fn), irange decorate_methods(TestPILUtil, _pilskip) diff --git a/scipy/ndimage/__init__.py b/scipy/ndimage/__init__.py index 2d92d2791d4c..7726ecad702f 100644 --- a/scipy/ndimage/__init__.py +++ b/scipy/ndimage/__init__.py @@ -166,15 +166,17 @@ # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
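Side note on the doccer hunk above: Python 3 drops `sys.maxint`, so `sys.maxsize` now serves as the "no indentation seen yet" sentinel. A minimal sketch of that idiom, using a hypothetical helper name rather than the scipy function itself:

import sys

def min_indent(lines):
    # Start from the largest sentinel value; any real indentation is smaller.
    indent = sys.maxsize
    for line in lines:
        stripped = line.lstrip()
        if stripped:                       # blank lines carry no indentation
            indent = min(indent, len(line) - len(stripped))
    return 0 if indent == sys.maxsize else indent

print(min_indent(["    a", "      b", ""]))    # prints 4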
-from filters import * -from fourier import * -from interpolation import * -from measurements import * -from morphology import * -from io import * +from __future__ import division, print_function, absolute_import + +from .filters import * +from .fourier import * +from .interpolation import * +from .measurements import * +from .morphology import * +from .io import * __version__ = '2.0' -__all__ = filter(lambda s: not s.startswith('_'), dir()) +__all__ = [s for s in dir() if not s.startswith('_')] from numpy.testing import Tester test = Tester().test diff --git a/scipy/ndimage/_ni_support.py b/scipy/ndimage/_ni_support.py index 385039127601..23dcf2078a77 100644 --- a/scipy/ndimage/_ni_support.py +++ b/scipy/ndimage/_ni_support.py @@ -28,9 +28,12 @@ # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -import types +from __future__ import division, print_function, absolute_import + import numpy +from scipy.lib.six import integer_types, string_types + def _extend_mode_to_code(mode): """Convert an extension mode to the corresponding integer code. """ @@ -52,8 +55,7 @@ def _normalize_sequence(input, rank, array_type=None): rank by duplicating the input. If input is a sequence, check if its length is equal to the length of array. """ - if (isinstance(input, (types.IntType, types.LongType, - types.FloatType))): + if isinstance(input, integer_types + (float,)): normalized = [input] * rank else: normalized = list(input) @@ -68,10 +70,10 @@ def _get_output(output, input, shape=None): if output is None: output = numpy.zeros(shape, dtype = input.dtype.name) return_value = output - elif type(output) in [type(types.TypeType), type(numpy.zeros((4,)).dtype)]: + elif type(output) in [type(type), type(numpy.zeros((4,)).dtype)]: output = numpy.zeros(shape, dtype = output) return_value = output - elif type(output) is types.StringType: + elif type(output) in string_types: output = numpy.typeDict[output] output = numpy.zeros(shape, dtype = output) return_value = output diff --git a/scipy/ndimage/filters.py b/scipy/ndimage/filters.py index 96c6d45395e0..9f4af0191240 100644 --- a/scipy/ndimage/filters.py +++ b/scipy/ndimage/filters.py @@ -28,10 +28,12 @@ # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +from __future__ import division, print_function, absolute_import + import math import numpy -import _ni_support -import _nd_image +from . import _ni_support +from . 
import _nd_image from scipy.misc import doccer __all__ = ['correlate1d', 'convolve1d', 'gaussian_filter1d', 'gaussian_filter', @@ -269,7 +271,7 @@ def gaussian_filter(input, sigma, order = 0, output = None, if not set(orders).issubset(set(range(4))): raise ValueError('Order outside 0..4 not implemented') sigmas = _ni_support._normalize_sequence(sigma, input.ndim) - axes = range(input.ndim) + axes = list(range(input.ndim)) axes = [(axes[ii], sigmas[ii], orders[ii]) for ii in range(len(axes)) if sigmas[ii] > 1e-15] if len(axes) > 0: @@ -354,7 +356,7 @@ def generic_laplace(input, derivative2, output = None, mode = "reflect", extra_keywords = {} input = numpy.asarray(input) output, return_value = _ni_support._get_output(output, input) - axes = range(input.ndim) + axes = list(range(input.ndim)) if len(axes) > 0: derivative2(input, axes[0], output, mode, cval, *extra_arguments, **extra_keywords) @@ -440,7 +442,7 @@ def generic_gradient_magnitude(input, derivative, output = None, extra_keywords = {} input = numpy.asarray(input) output, return_value = _ni_support._get_output(output, input) - axes = range(input.ndim) + axes = list(range(input.ndim)) if len(axes) > 0: derivative(input, axes[0], output, mode, cval, *extra_arguments, **extra_keywords) @@ -727,7 +729,7 @@ def uniform_filter(input, size = 3, output = None, mode = "reflect", output, return_value = _ni_support._get_output(output, input) sizes = _ni_support._normalize_sequence(size, input.ndim) origins = _ni_support._normalize_sequence(origin, input.ndim) - axes = range(input.ndim) + axes = list(range(input.ndim)) axes = [(axes[ii], sizes[ii], origins[ii]) for ii in range(len(axes)) if sizes[ii] > 1] if len(axes) > 0: @@ -839,7 +841,7 @@ def _min_or_max_filter(input, size, footprint, structure, output, mode, origins = _ni_support._normalize_sequence(origin, input.ndim) if separable: sizes = _ni_support._normalize_sequence(size, input.ndim) - axes = range(input.ndim) + axes = list(range(input.ndim)) axes = [(axes[ii], sizes[ii], origins[ii]) for ii in range(len(axes)) if sizes[ii] > 1] if minimum: diff --git a/scipy/ndimage/fourier.py b/scipy/ndimage/fourier.py index 05f08e2e4c50..501e82cec4e2 100644 --- a/scipy/ndimage/fourier.py +++ b/scipy/ndimage/fourier.py @@ -28,10 +28,11 @@ # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -import types +from __future__ import division, print_function, absolute_import + import numpy -import _ni_support -import _nd_image +from . import _ni_support +from . 
import _nd_image __all__ = ['fourier_gaussian', 'fourier_uniform', 'fourier_ellipsoid', 'fourier_shift'] @@ -45,7 +46,7 @@ def _get_output_fourier(output, input): else: output = numpy.zeros(input.shape, dtype = numpy.float64) return_value = output - elif type(output) is types.TypeType: + elif type(output) is type: if output not in [numpy.complex64, numpy.complex128, numpy.float32, numpy.float64]: raise RuntimeError("output type not supported") @@ -64,7 +65,7 @@ def _get_output_fourier_complex(output, input): else: output = numpy.zeros(input.shape, dtype = numpy.complex128) return_value = output - elif type(output) is types.TypeType: + elif type(output) is type: if output not in [numpy.complex64, numpy.complex128]: raise RuntimeError("output type not supported") output = numpy.zeros(input.shape, dtype = output) diff --git a/scipy/ndimage/interpolation.py b/scipy/ndimage/interpolation.py index 67e0ab1d0cc2..55f8b64f64ea 100644 --- a/scipy/ndimage/interpolation.py +++ b/scipy/ndimage/interpolation.py @@ -28,10 +28,12 @@ # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +from __future__ import division, print_function, absolute_import + import math import numpy -import _ni_support -import _nd_image +from . import _ni_support +from . import _nd_image __all__ = ['spline_filter1d', 'spline_filter', 'geometric_transform', 'map_coordinates', 'affine_transform', 'shift', 'zoom', 'rotate'] @@ -651,7 +653,7 @@ def rotate(input, angle, axes=(1, 0), reshape=True, coordinates.append(0) else: coordinates.append(slice(None, None, None)) - iter_axes = range(input.ndim) + iter_axes = list(range(input.ndim)) iter_axes.reverse() iter_axes.remove(axes[0]) iter_axes.remove(axes[1]) diff --git a/scipy/ndimage/io.py b/scipy/ndimage/io.py index 20ab6e4471d0..f278c9067ef4 100644 --- a/scipy/ndimage/io.py +++ b/scipy/ndimage/io.py @@ -1,3 +1,5 @@ +from __future__ import division, print_function, absolute_import + __all__ = ['imread'] from numpy import array diff --git a/scipy/ndimage/measurements.py b/scipy/ndimage/measurements.py index 6715bf900483..65c90f481ae3 100644 --- a/scipy/ndimage/measurements.py +++ b/scipy/ndimage/measurements.py @@ -28,11 +28,13 @@ # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +from __future__ import division, print_function, absolute_import + import numpy import numpy as np -import _ni_support -import _nd_image -import morphology +from . import _ni_support +from . import _nd_image +from . import morphology __all__ = ['label', 'find_objects', 'labeled_comprehension', 'sum', 'mean', 'variance', 'standard_deviation', 'minimum', 'maximum', 'median', diff --git a/scipy/ndimage/morphology.py b/scipy/ndimage/morphology.py index 57c2632a37c8..ea46f59727e3 100644 --- a/scipy/ndimage/morphology.py +++ b/scipy/ndimage/morphology.py @@ -28,10 +28,12 @@ # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +from __future__ import division, print_function, absolute_import + import numpy -import _ni_support -import _nd_image -import filters +from . import _ni_support +from . import _nd_image +from . 
import filters __all__ = ['iterate_structure', 'generate_binary_structure', 'binary_erosion', 'binary_dilation', 'binary_opening', 'binary_closing', diff --git a/scipy/ndimage/setup.py b/scipy/ndimage/setup.py index 115769fa1022..910663015699 100644 --- a/scipy/ndimage/setup.py +++ b/scipy/ndimage/setup.py @@ -1,3 +1,5 @@ +from __future__ import division, print_function, absolute_import + from numpy.distutils.core import setup from numpy.distutils.misc_util import Configuration from numpy import get_include diff --git a/scipy/ndimage/setupscons.py b/scipy/ndimage/setupscons.py index d870ce818e38..ef84be5de44f 100644 --- a/scipy/ndimage/setupscons.py +++ b/scipy/ndimage/setupscons.py @@ -1,3 +1,5 @@ +from __future__ import division, print_function, absolute_import + from numpy.distutils.core import setup from numpy.distutils.misc_util import Configuration from numpy import get_include diff --git a/scipy/ndimage/tests/test_datatypes.py b/scipy/ndimage/tests/test_datatypes.py index 85027e31d042..59885e4e5348 100644 --- a/scipy/ndimage/tests/test_datatypes.py +++ b/scipy/ndimage/tests/test_datatypes.py @@ -1,5 +1,6 @@ """ Testing data types for ndimage calls """ +from __future__ import division, print_function, absolute_import import sys diff --git a/scipy/ndimage/tests/test_filters.py b/scipy/ndimage/tests/test_filters.py index cc1b4ab28ec7..fd1c432bbbcc 100644 --- a/scipy/ndimage/tests/test_filters.py +++ b/scipy/ndimage/tests/test_filters.py @@ -1,4 +1,5 @@ ''' Some tests for filters ''' +from __future__ import division, print_function, absolute_import import numpy as np @@ -47,8 +48,8 @@ def test_valid_origins(): sndi.median_filter, sndi.minimum_filter1d]: # This should work, since for size == 3, the valid range for origin is # -1 to 1. - filter(data, 3, origin=-1) - filter(data, 3, origin=1) + list(filter(data, 3, origin=-1)) + list(filter(data, 3, origin=1)) # Just check this raises an error instead of silently accepting or # segfaulting. assert_raises(ValueError, filter, data, 3, origin=2) diff --git a/scipy/ndimage/tests/test_io.py b/scipy/ndimage/tests/test_io.py index 2ebcb335afa5..7ff31ce00e7e 100644 --- a/scipy/ndimage/tests/test_io.py +++ b/scipy/ndimage/tests/test_io.py @@ -1,3 +1,5 @@ +from __future__ import division, print_function, absolute_import + from numpy.testing import assert_array_equal, dec, run_module_suite import scipy.ndimage as ndi diff --git a/scipy/ndimage/tests/test_measurements.py b/scipy/ndimage/tests/test_measurements.py index 8033fc3ec49c..64a6ab54a9be 100644 --- a/scipy/ndimage/tests/test_measurements.py +++ b/scipy/ndimage/tests/test_measurements.py @@ -1,3 +1,5 @@ +from __future__ import division, print_function, absolute_import + from numpy.testing import assert_, assert_array_almost_equal, assert_equal, \ assert_almost_equal, assert_array_equal, \ assert_raises, run_module_suite, TestCase diff --git a/scipy/ndimage/tests/test_ndimage.py b/scipy/ndimage/tests/test_ndimage.py index 1fdca19c38b0..bdb06549a325 100644 --- a/scipy/ndimage/tests/test_ndimage.py +++ b/scipy/ndimage/tests/test_ndimage.py @@ -27,7 +27,8 @@ # WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
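Side note on the `list(range(...))` rewrites in the ndimage hunks above: in Python 3, `range()` returns a lazy sequence with no `reverse()` or `remove()` methods, so code that mutates the axis list must materialize it first. A small self-contained sketch of that pattern (hypothetical variable names, mirroring the filters/interpolation changes only loosely):

nd = 3
axes = list(range(nd))     # [0, 1, 2]; a bare range object cannot be mutated
axes.reverse()             # [2, 1, 0]
axes.remove(1)             # drop one axis, much as rotate() drops its plane axes
print(axes)                # prints [2, 0]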
-from __future__ import division + +from __future__ import division, print_function, absolute_import import math import numpy @@ -2107,7 +2108,7 @@ def test_zoom1(self): "zoom 1" for order in range(0,6): for z in [2,[2,2]]: - arr = numpy.array(range(25)).reshape((5,5)).astype(float) + arr = numpy.array(list(range(25))).reshape((5,5)).astype(float) arr = ndimage.zoom(arr, z, order=order) assert_equal(arr.shape,(10,10)) assert_(numpy.all(arr[-1,:] != 0)) diff --git a/scipy/ndimage/tests/test_regression.py b/scipy/ndimage/tests/test_regression.py index 28d7553d436e..00979172a1d2 100644 --- a/scipy/ndimage/tests/test_regression.py +++ b/scipy/ndimage/tests/test_regression.py @@ -1,3 +1,5 @@ +from __future__ import division, print_function, absolute_import + import numpy as np from numpy.testing import assert_array_almost_equal, run_module_suite diff --git a/scipy/odr/__init__.py b/scipy/odr/__init__.py index c90e1e2f0fee..cd0ca17c700b 100644 --- a/scipy/odr/__init__.py +++ b/scipy/odr/__init__.py @@ -79,10 +79,12 @@ # author: Robert Kern # date: 2006-09-21 -from odrpack import * -from models import * +from __future__ import division, print_function, absolute_import -__all__ = filter(lambda s: not s.startswith('_'), dir()) +from .odrpack import * +from .models import * + +__all__ = [s for s in dir() if not s.startswith('_')] from numpy.testing import Tester test = Tester().test diff --git a/scipy/odr/models.py b/scipy/odr/models.py index a7f228127ab3..2db2d92b0fe8 100644 --- a/scipy/odr/models.py +++ b/scipy/odr/models.py @@ -1,5 +1,6 @@ """ Collection of Model instances for use with the odrpack fitting package. """ +from __future__ import division, print_function, absolute_import import numpy as np from scipy.odr.odrpack import Model diff --git a/scipy/odr/odrpack.py b/scipy/odr/odrpack.py index 72f25b0333fe..a109f1888a06 100644 --- a/scipy/odr/odrpack.py +++ b/scipy/odr/odrpack.py @@ -109,6 +109,8 @@ def f(B, x): """ +from __future__ import division, print_function, absolute_import + import numpy from scipy.odr import __odrpack @@ -453,7 +455,7 @@ def __getattr__(self, attr): func, arg = lookup_tbl[(attr, self._ga_flags[attr])] if arg is not None: - return apply(func, (arg,)) + return func(*(arg,)) else: return None @@ -636,15 +638,15 @@ def pprint(self): """ Pretty-print important results. 
""" - print 'Beta:', self.beta - print 'Beta Std Error:', self.sd_beta - print 'Beta Covariance:', self.cov_beta + print('Beta:', self.beta) + print('Beta Std Error:', self.sd_beta) + print('Beta Covariance:', self.cov_beta) if hasattr(self, 'info'): - print 'Residual Variance:',self.res_var - print 'Inverse Condition #:', self.inv_condnum - print 'Reason(s) for Halting:' + print('Residual Variance:',self.res_var) + print('Inverse Condition #:', self.inv_condnum) + print('Reason(s) for Halting:') for r in self.stopreason: - print ' %s' % r + print(' %s' % r) class ODR(object): @@ -851,8 +853,8 @@ def _check(self): res = self.model.fcn(*arglist) if res.shape not in fcn_perms: - print res.shape - print fcn_perms + print(res.shape) + print(fcn_perms) raise odr_error("fcn does not output %s-shaped array" % y_s) if self.model.fjacd is not None: @@ -1116,7 +1118,7 @@ def run(self): if obj is not None: kwds[attr] = obj - self.output = Output(apply(odr, args, kwds)) + self.output = Output(odr(*args, **kwds)) return self.output diff --git a/scipy/odr/setup.py b/scipy/odr/setup.py index efc8ad1e6ebe..573611fb8b16 100644 --- a/scipy/odr/setup.py +++ b/scipy/odr/setup.py @@ -1,4 +1,5 @@ #!/usr/bin/env python +from __future__ import division, print_function, absolute_import from os.path import join diff --git a/scipy/odr/setupscons.py b/scipy/odr/setupscons.py index a9b5af4d6af2..6ada71b83c94 100644 --- a/scipy/odr/setupscons.py +++ b/scipy/odr/setupscons.py @@ -1,4 +1,5 @@ #!/usr/bin/env python +from __future__ import division, print_function, absolute_import from os.path import join diff --git a/scipy/odr/tests/test_odr.py b/scipy/odr/tests/test_odr.py index 4f3272f51457..90b3d54e7649 100644 --- a/scipy/odr/tests/test_odr.py +++ b/scipy/odr/tests/test_odr.py @@ -1,3 +1,5 @@ +from __future__ import division, print_function, absolute_import + # Scipy imports. import numpy as np from numpy import pi diff --git a/scipy/optimize/__init__.py b/scipy/optimize/__init__.py index 61658c993173..13a1ad6eb44c 100644 --- a/scipy/optimize/__init__.py +++ b/scipy/optimize/__init__.py @@ -141,20 +141,22 @@ """ -from optimize import * -from _minimize import * -from _root import * -from minpack import * -from zeros import * -from anneal import * -from lbfgsb import fmin_l_bfgs_b -from tnc import fmin_tnc -from cobyla import fmin_cobyla -from nonlin import * -from slsqp import fmin_slsqp -from nnls import nnls - -__all__ = filter(lambda s:not s.startswith('_'),dir()) +from __future__ import division, print_function, absolute_import + +from .optimize import * +from ._minimize import * +from ._root import * +from .minpack import * +from .zeros import * +from .anneal import * +from .lbfgsb import fmin_l_bfgs_b +from .tnc import fmin_tnc +from .cobyla import fmin_cobyla +from .nonlin import * +from .slsqp import fmin_slsqp +from .nnls import nnls + +__all__ = [s for s in dir() if not s.startswith('_')] from numpy.testing import Tester test = Tester().test bench = Tester().bench diff --git a/scipy/optimize/_minimize.py b/scipy/optimize/_minimize.py index b93b262cdd5f..549d02b75fa2 100644 --- a/scipy/optimize/_minimize.py +++ b/scipy/optimize/_minimize.py @@ -6,6 +6,7 @@ - minimize : minimization of a function of several variables. - minimize_scalar : minimization of a function of one variable. 
""" +from __future__ import division, print_function, absolute_import __all__ = ['minimize', 'minimize_scalar'] @@ -14,18 +15,21 @@ from warnings import warn from numpy import any + +from scipy.lib.six import callable + # unconstrained minimization -from optimize import (_minimize_neldermead, _minimize_powell, _minimize_cg, +from .optimize import (_minimize_neldermead, _minimize_powell, _minimize_cg, _minimize_bfgs, _minimize_newtoncg, _minimize_scalar_brent, _minimize_scalar_bounded, _minimize_scalar_golden, MemoizeJac) -from anneal import _minimize_anneal +from .anneal import _minimize_anneal # contrained minimization -from lbfgsb import _minimize_lbfgsb -from tnc import _minimize_tnc -from cobyla import _minimize_cobyla -from slsqp import _minimize_slsqp +from .lbfgsb import _minimize_lbfgsb +from .tnc import _minimize_tnc +from .cobyla import _minimize_cobyla +from .slsqp import _minimize_slsqp def minimize(fun, x0, args=(), method='BFGS', jac=None, hess=None, hessp=None, bounds=None, constraints=(), tol=None, diff --git a/scipy/optimize/_root.py b/scipy/optimize/_root.py index d9d8a315578f..9ec02be2865b 100644 --- a/scipy/optimize/_root.py +++ b/scipy/optimize/_root.py @@ -5,16 +5,19 @@ --------- - root : find a root of a vector function. """ +from __future__ import division, print_function, absolute_import __all__ = ['root'] import numpy as np +from scipy.lib.six import callable + from warnings import warn -from optimize import MemoizeJac, Result, _check_unknown_options -from minpack import _root_hybr, leastsq -import nonlin +from .optimize import MemoizeJac, Result, _check_unknown_options +from .minpack import _root_hybr, leastsq +from . import nonlin def root(fun, x0, args=(), method='hybr', jac=None, tol=None, callback=None, options=None): diff --git a/scipy/optimize/_tstutils.py b/scipy/optimize/_tstutils.py index 995720a2d6fd..dcdafa737505 100644 --- a/scipy/optimize/_tstutils.py +++ b/scipy/optimize/_tstutils.py @@ -1,4 +1,5 @@ ''' Parameters used in test and benchmark methods ''' +from __future__ import division, print_function, absolute_import from random import random diff --git a/scipy/optimize/anneal.py b/scipy/optimize/anneal.py index bbecc3bb32d3..a0f0309e37b4 100644 --- a/scipy/optimize/anneal.py +++ b/scipy/optimize/anneal.py @@ -1,11 +1,13 @@ # Original Author: Travis Oliphant 2002 # Bug-fixes in 2006 by Tim Leslie +from __future__ import division, print_function, absolute_import import numpy from numpy import asarray, tan, exp, ones, squeeze, sign, \ all, log, sqrt, pi, shape, array, minimum, where, random -from optimize import Result, _check_unknown_options +from .optimize import Result, _check_unknown_options +from scipy.lib.six.moves import xrange __all__ = ['anneal'] @@ -417,10 +419,10 @@ def _minimize_anneal(func, x0, args=(), if abs(af[-1]-best_state.cost) > feps*10: retval = 5 if disp: - print "Warning: Cooled to %f at %s but this is not" \ + print("Warning: Cooled to %f at %s but this is not" \ % (squeeze(last_state.cost), str(squeeze(last_state.x))) \ - + " the smallest point found." + + " the smallest point found.") break if (Tf is not None) and (schedule.T < Tf): retval = 1 @@ -430,7 +432,7 @@ def _minimize_anneal(func, x0, args=(), break if (iters > maxiter): if disp: - print "Warning: Maximum number of iterations exceeded." 
+ print("Warning: Maximum number of iterations exceeded.") retval = 3 break if (maxaccept is not None) and (schedule.accepted > maxaccept): @@ -456,12 +458,12 @@ def _minimize_anneal(func, x0, args=(), from numpy import cos # minimum expected at ~-0.195 func = lambda x: cos(14.5*x-0.3) + (x+0.2)*x - print anneal(func,1.0,full_output=1,upper=3.0,lower=-3.0,feps=1e-4,maxiter=2000,schedule='cauchy') - print anneal(func,1.0,full_output=1,upper=3.0,lower=-3.0,feps=1e-4,maxiter=2000,schedule='fast') - print anneal(func,1.0,full_output=1,upper=3.0,lower=-3.0,feps=1e-4,maxiter=2000,schedule='boltzmann') + print(anneal(func,1.0,full_output=1,upper=3.0,lower=-3.0,feps=1e-4,maxiter=2000,schedule='cauchy')) + print(anneal(func,1.0,full_output=1,upper=3.0,lower=-3.0,feps=1e-4,maxiter=2000,schedule='fast')) + print(anneal(func,1.0,full_output=1,upper=3.0,lower=-3.0,feps=1e-4,maxiter=2000,schedule='boltzmann')) # minimum expected at ~[-0.195, -0.1] func = lambda x: cos(14.5*x[0]-0.3) + (x[1]+0.2)*x[1] + (x[0]+0.2)*x[0] - print anneal(func,[1.0, 1.0],full_output=1,upper=[3.0, 3.0],lower=[-3.0, -3.0],feps=1e-4,maxiter=2000,schedule='cauchy') - print anneal(func,[1.0, 1.0],full_output=1,upper=[3.0, 3.0],lower=[-3.0, -3.0],feps=1e-4,maxiter=2000,schedule='fast') - print anneal(func,[1.0, 1.0],full_output=1,upper=[3.0, 3.0],lower=[-3.0, -3.0],feps=1e-4,maxiter=2000,schedule='boltzmann') + print(anneal(func,[1.0, 1.0],full_output=1,upper=[3.0, 3.0],lower=[-3.0, -3.0],feps=1e-4,maxiter=2000,schedule='cauchy')) + print(anneal(func,[1.0, 1.0],full_output=1,upper=[3.0, 3.0],lower=[-3.0, -3.0],feps=1e-4,maxiter=2000,schedule='fast')) + print(anneal(func,[1.0, 1.0],full_output=1,upper=[3.0, 3.0],lower=[-3.0, -3.0],feps=1e-4,maxiter=2000,schedule='boltzmann')) diff --git a/scipy/optimize/benchmarks/bench_zeros.py b/scipy/optimize/benchmarks/bench_zeros.py index 3168b9c6b152..43c69bcd551a 100644 --- a/scipy/optimize/benchmarks/bench_zeros.py +++ b/scipy/optimize/benchmarks/bench_zeros.py @@ -1,3 +1,5 @@ +from __future__ import division, print_function, absolute_import + from math import sqrt from numpy.testing import * @@ -14,22 +16,22 @@ def bench_run(self): b = sqrt(3) repeat = 2000 - print description + print(description) - print 'TESTING SPEED\n' - print 'times in seconds for %d iterations \n'%repeat + print('TESTING SPEED\n') + print('times in seconds for %d iterations \n'%repeat) for i in range(len(functions)) : - print 'function %s\n'%fstrings[i] + print('function %s\n'%fstrings[i]) func = functions[i] for j in range(len(methods)) : meth = methods[j] try: t = measure("meth(func,a,b)",repeat) except: - print '%s : failed'%mstrings[j] + print('%s : failed'%mstrings[j]) else: - print '%s : %5.3f'%(mstrings[j],t) - print '\n\n' + print('%s : %5.3f'%(mstrings[j],t)) + print('\n\n') if __name__ == '__main__' : run_module_suite() diff --git a/scipy/optimize/cobyla.py b/scipy/optimize/cobyla.py index 56c02e7609d1..1d286d2cac65 100644 --- a/scipy/optimize/cobyla.py +++ b/scipy/optimize/cobyla.py @@ -10,9 +10,12 @@ """ +from __future__ import division, print_function, absolute_import + import numpy as np +from scipy.lib.six import callable from scipy.optimize import _cobyla -from optimize import Result, _check_unknown_options +from .optimize import Result, _check_unknown_options from warnings import warn __all__ = ['fmin_cobyla'] @@ -259,4 +262,4 @@ def cons(x): x = fmin_cobyla(fun, [1., 1.], cons, iprint = 3, disp = 1) - print '\nTheoretical solution: %e, %e' % (1. / sqrt(2.), -1. 
/ sqrt(2.)) + print('\nTheoretical solution: %e, %e' % (1. / sqrt(2.), -1. / sqrt(2.))) diff --git a/scipy/optimize/lbfgsb.py b/scipy/optimize/lbfgsb.py index 6116f3eb0283..42a099e4d969 100755 --- a/scipy/optimize/lbfgsb.py +++ b/scipy/optimize/lbfgsb.py @@ -33,11 +33,12 @@ ## Modifications by Travis Oliphant and Enthought, Inc. for inclusion in SciPy +from __future__ import division, print_function, absolute_import + import numpy as np from numpy import array, asarray, float64, int32, zeros -import _lbfgsb -from optimize import approx_fprime, MemoizeJac, Result, _check_unknown_options -from numpy.compat import asbytes +from . import _lbfgsb +from .optimize import approx_fprime, MemoizeJac, Result, _check_unknown_options __all__ = ['fmin_l_bfgs_b'] @@ -300,7 +301,7 @@ def func_and_grad(x): pgtol, wa, iwa, task, iprint, csave, lsave, isave, dsave) task_str = task.tostring() - if task_str.startswith(asbytes('FG')): + if task_str.startswith(b'FG'): if n_function_evals > maxfun: task[:] = 'STOP: TOTAL NO. of f AND g EVALUATIONS EXCEEDS LIMIT' else: @@ -308,7 +309,7 @@ def func_and_grad(x): n_function_evals += 1 # Overwrite f and g: f, g = func_and_grad(x) - elif task_str.startswith(asbytes('NEW_X')): + elif task_str.startswith(b'NEW_X'): # new iteration if n_iterations > maxiter: task[:] = 'STOP: TOTAL NO. of ITERATIONS EXCEEDS LIMIT' @@ -319,8 +320,8 @@ def func_and_grad(x): else: break - task_str = task.tostring().strip(asbytes('\x00')).strip() - if task_str.startswith(asbytes('CONV')): + task_str = task.tostring().strip(b'\x00').strip() + if task_str.startswith(b'CONV'): warnflag = 0 elif n_function_evals > maxfun: warnflag = 1 @@ -375,22 +376,22 @@ def fun(self, x): x, f, d = fmin_l_bfgs_b(func, x0, fprime=grad, m=m, factr=factr, pgtol=pgtol) - print x - print f - print d + print(x) + print(f) + print(d) x, f, d = fmin_l_bfgs_b(func, x0, approx_grad=1, m=m, factr=factr, pgtol=pgtol) - print x - print f - print d + print(x) + print(f) + print(d) x, f, d = fmin_l_bfgs_b(func_and_grad, x0, approx_grad=0, m=m, factr=factr, pgtol=pgtol) - print x - print f - print d + print(x) + print(f) + print(d) p = Problem() x, f, d = fmin_l_bfgs_b(p.fun, x0, approx_grad=0, m=m, factr=factr, pgtol=pgtol) - print x - print f - print d + print(x) + print(f) + print(d) diff --git a/scipy/optimize/linesearch.py b/scipy/optimize/linesearch.py index 2ddf8d542d54..64812c6a64d6 100644 --- a/scipy/optimize/linesearch.py +++ b/scipy/optimize/linesearch.py @@ -11,9 +11,11 @@ scalar_search_wolfe2 """ +from __future__ import division, print_function, absolute_import + from scipy.optimize import minpack2 import numpy as np -from numpy.compat import asbytes +from scipy.lib.six.moves import xrange __all__ = ['line_search_wolfe1', 'line_search_wolfe2', 'scalar_search_wolfe1', 'scalar_search_wolfe2', @@ -152,14 +154,14 @@ def scalar_search_wolfe1(phi, derphi, phi0=None, old_phi0=None, derphi0=None, derphi1 = derphi0 isave = np.zeros((2,), np.intc) dsave = np.zeros((13,), float) - task = asbytes('START') + task = b'START' maxiter=30 for i in xrange(maxiter): stp, phi1, derphi1, task = minpack2.dcsrch(alpha1, phi1, derphi1, c1, c2, xtol, task, amin, amax, isave, dsave) - if task[:2] == asbytes('FG'): + if task[:2] == b'FG': alpha1 = stp phi1 = phi(stp) derphi1 = derphi(stp) @@ -169,7 +171,7 @@ def scalar_search_wolfe1(phi, derphi, phi0=None, old_phi0=None, derphi0=None, # maxiter reached, the line search did not converge stp=None - if task[:5] == asbytes('ERROR') or task[:4] == asbytes('WARN'): + if task[:5] == b'ERROR' or 
task[:4] == b'WARN': stp = None # failed return stp, phi1, phi0 diff --git a/scipy/optimize/minpack.py b/scipy/optimize/minpack.py index d1d1a0e66634..a16fcca46c05 100644 --- a/scipy/optimize/minpack.py +++ b/scipy/optimize/minpack.py @@ -1,10 +1,12 @@ +from __future__ import division, print_function, absolute_import + import warnings -import _minpack +from . import _minpack from numpy import atleast_1d, dot, take, triu, shape, eye, \ transpose, zeros, product, greater, array, \ all, where, isscalar, asarray, inf, abs -from optimize import Result, _check_unknown_options +from .optimize import Result, _check_unknown_options error = _minpack.error diff --git a/scipy/optimize/nnls.py b/scipy/optimize/nnls.py index 2c4aa2ee1fab..ff41149d62e2 100644 --- a/scipy/optimize/nnls.py +++ b/scipy/optimize/nnls.py @@ -1,4 +1,6 @@ -import _nnls +from __future__ import division, print_function, absolute_import + +from . import _nnls from numpy import asarray_chkfinite, zeros, double __all__ = ['nnls'] diff --git a/scipy/optimize/nonlin.py b/scipy/optimize/nonlin.py index f7b34b59722c..475a918339cc 100644 --- a/scipy/optimize/nonlin.py +++ b/scipy/optimize/nonlin.py @@ -111,15 +111,20 @@ def residual(P): # Copyright (C) 2009, Pauli Virtanen # Distributed under the same license as Scipy. +from __future__ import division, print_function, absolute_import + import sys import numpy as np +from scipy.lib.six import callable, exec_ +from scipy.lib.six.moves import xrange from scipy.linalg import norm, solve, inv, qr, svd, lstsq, LinAlgError from numpy import asarray, dot, vdot import scipy.sparse.linalg import scipy.sparse from scipy.linalg import get_blas_funcs import inspect -from linesearch import scalar_search_wolfe1, scalar_search_armijo +from .linesearch import scalar_search_wolfe1, scalar_search_armijo +import collections __all__ = [ 'broyden1', 'broyden2', 'anderson', 'linearmixing', @@ -1467,7 +1472,7 @@ def _nonlin_wrapper(name, jac): """ import inspect args, varargs, varkw, defaults = inspect.getargspec(jac.__init__) - kwargs = zip(args[-len(defaults):], defaults) + kwargs = list(zip(args[-len(defaults):], defaults)) kw_str = ", ".join(["%s=%r" % (k, v) for k, v in kwargs]) if kw_str: kw_str = ", " + kw_str @@ -1491,7 +1496,7 @@ def %(name)s(F, xin, iter=None %(kw)s, verbose=False, maxiter=None, kwkw=kwkw_str) ns = {} ns.update(globals()) - exec wrapper in ns + exec_(wrapper, ns) func = ns[name] func.__doc__ = jac.__doc__ _set_doc(func) diff --git a/scipy/optimize/optimize.py b/scipy/optimize/optimize.py index 3045cd477eba..7cb82a005088 100644 --- a/scipy/optimize/optimize.py +++ b/scipy/optimize/optimize.py @@ -13,6 +13,9 @@ # Finished line search satisfying strong Wolfe conditions (Mar. 2004) # Updated strong Wolfe conditions line search to use cubic-interpolation (Mar. 
2004) +from __future__ import division, print_function, absolute_import + + # Minimization routines __all__ = ['fmin', 'fmin_powell', 'fmin_bfgs', 'fmin_ncg', 'fmin_cg', @@ -25,9 +28,10 @@ import warnings import numpy +from scipy.lib.six import callable from numpy import atleast_1d, eye, mgrid, argmin, zeros, shape, \ squeeze, vectorize, asarray, absolute, sqrt, Inf, asfarray, isinf -from linesearch import \ +from .linesearch import \ line_search_BFGS, line_search_wolfe1, line_search_wolfe2, \ line_search_wolfe2 as line_search @@ -104,9 +108,9 @@ def __getattr__(self, name): def __repr__(self): if self.keys(): - m = max(map(len, self.keys())) + 1 + m = max(map(len, list(self.keys()))) + 1 return '\n'.join([k.rjust(m) + ': ' + repr(v) - for k, v in self.iteritems()]) + for k, v in self.items()]) else: return self.__class__.__name__ + "()" @@ -142,9 +146,10 @@ def is_array_scalar(x): return len(atleast_1d(x) == 1) abs = absolute -import __builtin__ -pymin = __builtin__.min -pymax = __builtin__.max + +from scipy.lib.six.moves import builtins +pymin = builtins.min +pymax = builtins.max __version__ = "0.7" _epsilon = sqrt(numpy.finfo(float).eps) @@ -411,7 +416,7 @@ def _minimize_neldermead(func, x0, args=(), callback=None, chi = 2 psi = 0.5 sigma = 0.5 - one2np1 = range(1, N + 1) + one2np1 = list(range(1, N + 1)) if rank == 0: sim = numpy.zeros((N + 1,), dtype=x0.dtype) @@ -510,19 +515,19 @@ def _minimize_neldermead(func, x0, args=(), callback=None, warnflag = 1 msg = _status_message['maxfev'] if disp: - print 'Warning: ' + msg + print('Warning: ' + msg) elif iterations >= maxiter: warnflag = 2 msg = _status_message['maxiter'] if disp: - print 'Warning: ' + msg + print('Warning: ' + msg) else: msg = _status_message['success'] if disp: - print msg - print " Current function value: %f" % fval - print " Iterations: %d" % iterations - print " Function evaluations: %d" % fcalls[0] + print(msg) + print(" Current function value: %f" % fval) + print(" Iterations: %d" % iterations) + print(" Function evaluations: %d" % fcalls[0]) result = Result(fun=fval, nit=iterations, nfev=fcalls[0], @@ -828,10 +833,10 @@ def _minimize_bfgs(fun, x0, args=(), jac=None, callback=None, rhok = 1.0 / (numpy.dot(yk, sk)) except ZeroDivisionError: rhok = 1000.0 - print "Divide-by-zero encountered: rhok assumed large" + print("Divide-by-zero encountered: rhok assumed large") if isinf(rhok): #this is patch for numpy rhok = 1000.0 - print "Divide-by-zero encountered: rhok assumed large" + print("Divide-by-zero encountered: rhok assumed large") A1 = I - sk[:, numpy.newaxis] * yk[numpy.newaxis, :] * rhok A2 = I - yk[:, numpy.newaxis] * sk[numpy.newaxis, :] * rhok Hk = numpy.dot(A1, numpy.dot(Hk, A2)) + rhok * sk[:, numpy.newaxis] \ @@ -841,29 +846,29 @@ def _minimize_bfgs(fun, x0, args=(), jac=None, callback=None, if warnflag == 2: msg = _status_message['pr_loss'] if disp: - print "Warning: " + msg - print " Current function value: %f" % fval - print " Iterations: %d" % k - print " Function evaluations: %d" % func_calls[0] - print " Gradient evaluations: %d" % grad_calls[0] + print("Warning: " + msg) + print(" Current function value: %f" % fval) + print(" Iterations: %d" % k) + print(" Function evaluations: %d" % func_calls[0]) + print(" Gradient evaluations: %d" % grad_calls[0]) elif k >= maxiter: warnflag = 1 msg = _status_message['maxiter'] if disp: - print "Warning: " + msg - print " Current function value: %f" % fval - print " Iterations: %d" % k - print " Function evaluations: %d" % func_calls[0] - print " Gradient 
evaluations: %d" % grad_calls[0] + print("Warning: " + msg) + print(" Current function value: %f" % fval) + print(" Iterations: %d" % k) + print(" Function evaluations: %d" % func_calls[0]) + print(" Gradient evaluations: %d" % grad_calls[0]) else: msg = _status_message['success'] if disp: - print msg - print " Current function value: %f" % fval - print " Iterations: %d" % k - print " Function evaluations: %d" % func_calls[0] - print " Gradient evaluations: %d" % grad_calls[0] + print(msg) + print(" Current function value: %f" % fval) + print(" Iterations: %d" % k) + print(" Function evaluations: %d" % func_calls[0]) + print(" Gradient evaluations: %d" % grad_calls[0]) result = Result(fun=fval, jac=gfk, hess=Hk, nfev=func_calls[0], njev=grad_calls[0], status=warnflag, @@ -1050,29 +1055,29 @@ def _minimize_cg(fun, x0, args=(), jac=None, callback=None, if warnflag == 2: msg = _status_message['pr_loss'] if disp: - print "Warning: " + msg - print " Current function value: %f" % fval - print " Iterations: %d" % k - print " Function evaluations: %d" % func_calls[0] - print " Gradient evaluations: %d" % grad_calls[0] + print("Warning: " + msg) + print(" Current function value: %f" % fval) + print(" Iterations: %d" % k) + print(" Function evaluations: %d" % func_calls[0]) + print(" Gradient evaluations: %d" % grad_calls[0]) elif k >= maxiter: warnflag = 1 msg = _status_message['maxiter'] if disp: - print "Warning: " + msg - print " Current function value: %f" % fval - print " Iterations: %d" % k - print " Function evaluations: %d" % func_calls[0] - print " Gradient evaluations: %d" % grad_calls[0] + print("Warning: " + msg) + print(" Current function value: %f" % fval) + print(" Iterations: %d" % k) + print(" Function evaluations: %d" % func_calls[0]) + print(" Gradient evaluations: %d" % grad_calls[0]) else: msg = _status_message['success'] if disp: - print msg - print " Current function value: %f" % fval - print " Iterations: %d" % k - print " Function evaluations: %d" % func_calls[0] - print " Gradient evaluations: %d" % grad_calls[0] + print(msg) + print(" Current function value: %f" % fval) + print(" Iterations: %d" % k) + print(" Function evaluations: %d" % func_calls[0]) + print(" Gradient evaluations: %d" % grad_calls[0]) result = Result(fun=fval, jac=gfk, nfev=func_calls[0], @@ -1308,22 +1313,22 @@ def _minimize_newtoncg(fun, x0, args=(), jac=None, hess=None, hessp=None, warnflag = 1 msg = _status_message['maxiter'] if disp: - print "Warning: " + msg - print " Current function value: %f" % fval - print " Iterations: %d" % k - print " Function evaluations: %d" % fcalls[0] - print " Gradient evaluations: %d" % gcalls[0] - print " Hessian evaluations: %d" % hcalls + print("Warning: " + msg) + print(" Current function value: %f" % fval) + print(" Iterations: %d" % k) + print(" Function evaluations: %d" % fcalls[0]) + print(" Gradient evaluations: %d" % gcalls[0]) + print(" Hessian evaluations: %d" % hcalls) else: warnflag = 0 msg = _status_message['success'] if disp: - print msg - print " Current function value: %f" % fval - print " Iterations: %d" % k - print " Function evaluations: %d" % fcalls[0] - print " Gradient evaluations: %d" % gcalls[0] - print " Hessian evaluations: %d" % hcalls + print(msg) + print(" Current function value: %f" % fval) + print(" Iterations: %d" % k) + print(" Function evaluations: %d" % fcalls[0]) + print(" Gradient evaluations: %d" % gcalls[0]) + print(" Hessian evaluations: %d" % hcalls) result = Result(fun=fval, jac=gfk, nfev=fcalls[0], njev=gcalls[0], 
nhev=hcalls, status=warnflag, success=(warnflag == 0), @@ -1433,7 +1438,7 @@ def _minimize_scalar_bounded(func, bounds, args=(), if disp > 2: print (" ") print (header) - print "%5.0f %12.6g %12.6g %s" % (fmin_data + (step,)) + print("%5.0f %12.6g %12.6g %s" % (fmin_data + (step,))) while (abs(xf - xm) > (tol2 - 0.5*(b - a))): @@ -1477,7 +1482,7 @@ def _minimize_scalar_bounded(func, bounds, args=(), num += 1 fmin_data = (num, x, fu) if disp > 2: - print "%5.0f %12.6g %12.6g %s" % (fmin_data + (step,)) + print("%5.0f %12.6g %12.6g %s" % (fmin_data + (step,))) if fu <= fx: if x >= xf: @@ -2093,7 +2098,7 @@ def _minimize_powell(func, x0, args=(), callback=None, fval = squeeze(func(x)) x1 = x.copy() iter = 0; - ilist = range(N) + ilist = list(range(N)) while True: fx = fval bigind = 0 @@ -2137,19 +2142,19 @@ def _minimize_powell(func, x0, args=(), callback=None, warnflag = 1 msg = _status_message['maxfev'] if disp: - print "Warning: " + msg + print("Warning: " + msg) elif iter >= maxiter: warnflag = 2 msg = _status_message['maxiter'] if disp: - print "Warning: " + msg + print("Warning: " + msg) else: msg = _status_message['success'] if disp: - print msg - print " Current function value: %f" % fval - print " Iterations: %d" % iter - print " Function evaluations: %d" % fcalls[0] + print(msg) + print(" Current function value: %f" % fval) + print(" Iterations: %d" % iter) + print(" Function evaluations: %d" % fcalls[0]) x = squeeze(x) @@ -2164,12 +2169,12 @@ def _minimize_powell(func, x0, args=(), callback=None, def _endprint(x, flag, fval, maxfun, xtol, disp): if flag == 0: if disp > 1: - print "\nOptimization terminated successfully;\n" \ + print("\nOptimization terminated successfully;\n" \ "The returned value satisfies the termination criteria\n" \ - "(using xtol = ", xtol, ")" + "(using xtol = ", xtol, ")") if flag == 1: - print "\nMaximum number of function evaluations exceeded --- " \ - "increase maxfun argument.\n" + print("\nMaximum number of function evaluations exceeded --- " \ + "increase maxfun argument.\n") return @@ -2257,7 +2262,7 @@ def _scalarfunc(*params): xmin = vals[0] Jmin = vals[1] if vals[-1] > 0: - print "Warning: Final optimization did not succeed" + print("Warning: Final optimization did not succeed") if full_output: return xmin, Jmin, grid, Jout else: @@ -2788,7 +2793,7 @@ def show_options(solver, method=None): doc = [s.strip() for s in doc] doc = [s for s in doc if s.lower().startswith(method.lower())] - print '\n'.join(doc) + print('\n'.join(doc)) return @@ -2798,76 +2803,76 @@ def main(): times = [] algor = [] x0 = [0.8, 1.2, 0.7] - print "Nelder-Mead Simplex" - print "===================" + print("Nelder-Mead Simplex") + print("===================") start = time.time() x = fmin(rosen, x0) - print x + print(x) times.append(time.time() - start) algor.append('Nelder-Mead Simplex\t') - print - print "Powell Direction Set Method" - print "===========================" + print() + print("Powell Direction Set Method") + print("===========================") start = time.time() x = fmin_powell(rosen, x0) - print x + print(x) times.append(time.time() - start) algor.append('Powell Direction Set Method.') - print - print "Nonlinear CG" - print "============" + print() + print("Nonlinear CG") + print("============") start = time.time() x = fmin_cg(rosen, x0, fprime=rosen_der, maxiter=200) - print x + print(x) times.append(time.time() - start) algor.append('Nonlinear CG \t') - print - print "BFGS Quasi-Newton" - print "=================" + print() + print("BFGS Quasi-Newton") + 
print("=================") start = time.time() x = fmin_bfgs(rosen, x0, fprime=rosen_der, maxiter=80) - print x + print(x) times.append(time.time() - start) algor.append('BFGS Quasi-Newton\t') - print - print "BFGS approximate gradient" - print "=========================" + print() + print("BFGS approximate gradient") + print("=========================") start = time.time() x = fmin_bfgs(rosen, x0, gtol=1e-4, maxiter=100) - print x + print(x) times.append(time.time() - start) algor.append('BFGS without gradient\t') - print - print "Newton-CG with Hessian product" - print "==============================" + print() + print("Newton-CG with Hessian product") + print("==============================") start = time.time() x = fmin_ncg(rosen, x0, rosen_der, fhess_p=rosen_hess_prod, maxiter=80) - print x + print(x) times.append(time.time() - start) algor.append('Newton-CG with hessian product') - print - print "Newton-CG with full Hessian" - print "===========================" + print() + print("Newton-CG with full Hessian") + print("===========================") start = time.time() x = fmin_ncg(rosen, x0, rosen_der, fhess=rosen_hess, maxiter=80) - print x + print(x) times.append(time.time() - start) algor.append('Newton-CG with full hessian') - print - print "\nMinimizing the Rosenbrock function of order 3\n" - print " Algorithm \t\t\t Seconds" - print "===========\t\t\t =========" + print() + print("\nMinimizing the Rosenbrock function of order 3\n") + print(" Algorithm \t\t\t Seconds") + print("===========\t\t\t =========") for k in range(len(algor)): - print algor[k], "\t -- ", times[k] + print(algor[k], "\t -- ", times[k]) if __name__ == "__main__": main() diff --git a/scipy/optimize/setup.py b/scipy/optimize/setup.py index 3beb5fb65178..0d13d5f8fdb2 100755 --- a/scipy/optimize/setup.py +++ b/scipy/optimize/setup.py @@ -1,4 +1,5 @@ #!/usr/bin/env python +from __future__ import division, print_function, absolute_import from os.path import join diff --git a/scipy/optimize/setupscons.py b/scipy/optimize/setupscons.py index 2fbc1d2f87fe..f39d7dc24b3f 100755 --- a/scipy/optimize/setupscons.py +++ b/scipy/optimize/setupscons.py @@ -1,4 +1,5 @@ #!/usr/bin/env python +from __future__ import division, print_function, absolute_import from os.path import join diff --git a/scipy/optimize/slsqp.py b/scipy/optimize/slsqp.py index 06eab2dc9588..f586607e3356 100644 --- a/scipy/optimize/slsqp.py +++ b/scipy/optimize/slsqp.py @@ -13,12 +13,14 @@ """ +from __future__ import division, print_function, absolute_import + __all__ = ['approx_jacobian','fmin_slsqp'] from scipy.optimize._slsqp import slsqp from numpy import zeros, array, linalg, append, asfarray, concatenate, finfo, \ sqrt, vstack, exp, inf, where, isinf, atleast_1d -from optimize import wrap_function, Result, _check_unknown_options +from .optimize import wrap_function, Result, _check_unknown_options __docformat__ = "restructuredtext en" @@ -342,7 +344,7 @@ def cjac(x, *args): # Print the header if iprint >= 2 if iprint >= 2: - print "%5s %5s %16s %16s" % ("NIT","FC","OBJFUN","GNORM") + print("%5s %5s %16s %16s" % ("NIT","FC","OBJFUN","GNORM")) while 1: @@ -397,8 +399,8 @@ def cjac(x, *args): # Print the status of the current iterate if iprint > 2 and the # major iteration has incremented if iprint >= 2 and majiter > majiter_prev: - print "%5i %5i % 16.6E % 16.6E" % (majiter,feval[0], - fx,linalg.norm(g)) + print("%5i %5i % 16.6E % 16.6E" % (majiter,feval[0], + fx,linalg.norm(g))) # If exit mode is not -1 or 1, slsqp has completed if abs(mode) != 1: @@ 
-408,11 +410,11 @@ def cjac(x, *args): # Optimization loop complete. Print status if requested if iprint >= 1: - print exit_modes[int(mode)] + " (Exit mode " + str(mode) + ')' - print " Current function value:", fx - print " Iterations:", majiter - print " Function evaluations:", feval[0] - print " Gradient evaluations:", geval[0] + print(exit_modes[int(mode)] + " (Exit mode " + str(mode) + ')') + print(" Current function value:", fx) + print(" Iterations:", majiter) + print(" Function evaluations:", feval[0]) + print(" Gradient evaluations:", geval[0]) return Result(x=x, fun=fx, jac=g, nit=int(majiter), nfev=feval[0], njev=geval[0], status=int(mode), @@ -453,21 +455,21 @@ def jieqcon(x, c=10): {'type': 'ineq', 'fun' : fieqcon, 'jac' : jieqcon, 'args': (10,)}) # Bounds constraint problem - print ' Bounds constraints '.center(72, '-') - print ' * fmin_slsqp' + print(' Bounds constraints '.center(72, '-')) + print(' * fmin_slsqp') x, f = fmin_slsqp(fun, array([-1, 1]), bounds=bnds, disp=1, full_output=True)[:2] - print ' * _minimize_slsqp' + print(' * _minimize_slsqp') res = _minimize_slsqp(fun, array([-1, 1]), bounds=bnds, **{'disp': True}) # Equality and inequality constraints problem - print ' Equality and inequality constraints '.center(72, '-') - print ' * fmin_slsqp' + print(' Equality and inequality constraints '.center(72, '-')) + print(' * fmin_slsqp') x, f = fmin_slsqp(fun, array([-1, 1]), f_eqcons=feqcon, fprime_eqcons=jeqcon, f_ieqcons=fieqcon, fprime_ieqcons=jieqcon, disp=1, full_output=True)[:2] - print ' * _minimize_slsqp' + print(' * _minimize_slsqp') res = _minimize_slsqp(fun, array([-1, 1]), constraints=cons, **{'disp': True}) diff --git a/scipy/optimize/tests/test__root.py b/scipy/optimize/tests/test__root.py index d055bebb73e1..022ce6347871 100644 --- a/scipy/optimize/tests/test__root.py +++ b/scipy/optimize/tests/test__root.py @@ -1,6 +1,7 @@ """ Unit tests for optimization routines from _root.py. """ +from __future__ import division, print_function, absolute_import from numpy.testing import assert_ import numpy as np diff --git a/scipy/optimize/tests/test_anneal.py b/scipy/optimize/tests/test_anneal.py index b92ff2ff8669..afe11752402a 100644 --- a/scipy/optimize/tests/test_anneal.py +++ b/scipy/optimize/tests/test_anneal.py @@ -1,6 +1,7 @@ """ Unit tests for the simulated annealing minimization algorithm. 
""" +from __future__ import division, print_function, absolute_import from numpy.testing import TestCase, run_module_suite, \ assert_almost_equal, assert_, dec diff --git a/scipy/optimize/tests/test_cobyla.py b/scipy/optimize/tests/test_cobyla.py index a7da3e529d7c..1e3921af376f 100644 --- a/scipy/optimize/tests/test_cobyla.py +++ b/scipy/optimize/tests/test_cobyla.py @@ -1,3 +1,5 @@ +from __future__ import division, print_function, absolute_import + import math from numpy.testing import assert_allclose, TestCase, run_module_suite, \ diff --git a/scipy/optimize/tests/test_linesearch.py b/scipy/optimize/tests/test_linesearch.py index e405bbfb24fb..c9b347950389 100644 --- a/scipy/optimize/tests/test_linesearch.py +++ b/scipy/optimize/tests/test_linesearch.py @@ -1,6 +1,7 @@ """ Tests for line search routines """ +from __future__ import division, print_function, absolute_import from numpy.testing import assert_, assert_equal, \ assert_array_almost_equal, assert_array_almost_equal_nulp @@ -41,7 +42,7 @@ def assert_fp_equal(x, y, err_msg="", nulp=50): """Assert two arrays are equal, up to some floating-point rounding error""" try: assert_array_almost_equal_nulp(x, y, nulp) - except AssertionError, e: + except AssertionError as e: raise AssertionError("%s\n%s" % (e, err_msg)) class TestLineSearch(object): diff --git a/scipy/optimize/tests/test_minpack.py b/scipy/optimize/tests/test_minpack.py index 77b9680d09a6..4f87a044d248 100644 --- a/scipy/optimize/tests/test_minpack.py +++ b/scipy/optimize/tests/test_minpack.py @@ -1,6 +1,7 @@ """ Unit tests for optimization routines from minpack.py. """ +from __future__ import division, print_function, absolute_import from numpy.testing import assert_, assert_almost_equal, assert_array_equal, \ assert_array_almost_equal, TestCase, run_module_suite, assert_raises diff --git a/scipy/optimize/tests/test_nnls.py b/scipy/optimize/tests/test_nnls.py index 6ff1fd7ebcfa..ff28197acfd2 100644 --- a/scipy/optimize/tests/test_nnls.py +++ b/scipy/optimize/tests/test_nnls.py @@ -2,6 +2,7 @@ Author: Uwe Schmitt Sep 2008 """ +from __future__ import division, print_function, absolute_import from numpy.testing import assert_, TestCase, run_module_suite diff --git a/scipy/optimize/tests/test_nonlin.py b/scipy/optimize/tests/test_nonlin.py index 66aeef679b65..07e657a213fb 100644 --- a/scipy/optimize/tests/test_nonlin.py +++ b/scipy/optimize/tests/test_nonlin.py @@ -2,9 +2,11 @@ Author: Ondrej Certik May 2007 """ +from __future__ import division, print_function, absolute_import from numpy.testing import assert_, dec, TestCase, run_module_suite +from scipy.lib.six.moves import xrange from scipy.optimize import nonlin, root from numpy import matrix, diag, dot from numpy.linalg import inv @@ -101,7 +103,7 @@ def _check_func_fail(self, *a, **kw): def test_problem_nonlin(self): """ Tests for nonlin functions """ for f in [F, F2, F3, F4_powell, F5, F6]: - for func in SOLVERS.itervalues(): + for func in SOLVERS.values(): if func in f.KNOWN_BAD.values(): if func in MUST_WORK.values(): yield self._check_func_fail, f, func @@ -111,7 +113,7 @@ def test_problem_nonlin(self): def test_problem_root(self): """ Tests for root """ for f in [F, F2, F3, F4_powell, F5, F6]: - for meth in SOLVERS.iterkeys(): + for meth in SOLVERS.keys(): if meth in f.KNOWN_BAD.keys(): if meth in MUST_WORK.keys(): yield self._check_func_fail, f, meth diff --git a/scipy/optimize/tests/test_optimize.py b/scipy/optimize/tests/test_optimize.py index 2d3c4680bcf3..e0c6308b5de9 100644 --- 
a/scipy/optimize/tests/test_optimize.py +++ b/scipy/optimize/tests/test_optimize.py @@ -9,6 +9,7 @@ nosetests test_optimize.py """ +from __future__ import division, print_function, absolute_import from numpy.testing import assert_raises, assert_allclose, \ assert_equal, assert_, TestCase, run_module_suite diff --git a/scipy/optimize/tests/test_regression.py b/scipy/optimize/tests/test_regression.py index aa078fb02815..93dd2b7e12e6 100644 --- a/scipy/optimize/tests/test_regression.py +++ b/scipy/optimize/tests/test_regression.py @@ -1,6 +1,7 @@ """Regression tests for optimize. """ +from __future__ import division, print_function, absolute_import from numpy.testing import TestCase, run_module_suite, assert_almost_equal import scipy.optimize diff --git a/scipy/optimize/tests/test_slsqp.py b/scipy/optimize/tests/test_slsqp.py index 92c4a44df231..c71f53b41bee 100644 --- a/scipy/optimize/tests/test_slsqp.py +++ b/scipy/optimize/tests/test_slsqp.py @@ -1,6 +1,8 @@ """ Unit test for SLSQP optimization. """ +from __future__ import division, print_function, absolute_import + from numpy.testing import assert_, assert_array_almost_equal, TestCase, \ assert_allclose, run_module_suite import numpy as np diff --git a/scipy/optimize/tests/test_zeros.py b/scipy/optimize/tests/test_zeros.py index 44061253b0bd..7492cbc559c1 100644 --- a/scipy/optimize/tests/test_zeros.py +++ b/scipy/optimize/tests/test_zeros.py @@ -1,4 +1,5 @@ #!/usr/bin/env python +from __future__ import division, print_function, absolute_import from math import sqrt, exp, sin, cos diff --git a/scipy/optimize/tnc.py b/scipy/optimize/tnc.py index 894ab8ed679d..6e7995f4a566 100644 --- a/scipy/optimize/tnc.py +++ b/scipy/optimize/tnc.py @@ -31,8 +31,11 @@ value of the function, and whose second argument is the gradient of the function (as a list of values); or None, to abort the minimization. 
""" + +from __future__ import division, print_function, absolute_import + from scipy.optimize import moduleTNC, approx_fprime -from optimize import MemoizeJac, Result, _check_unknown_options +from .optimize import MemoizeJac, Result, _check_unknown_options from numpy import asarray, inf, array __all__ = ['fmin_tnc'] @@ -404,7 +407,7 @@ def func_and_grad(x): # Examples for TNC def example(): - print "Example" + print("Example") # A function to minimize def function(x): f = pow(x[0],2.0)+pow(abs(x[1]),3.0) @@ -418,9 +421,9 @@ def function(x): # Optimizer call x, nf, rc = fmin_tnc(function, [-7, 3], bounds=([-10, 1], [10, 10])) - print "After", nf, "function evaluations, TNC returned:", RCSTRINGS[rc] - print "x =", x - print "exact value = [0, 1]" - print + print("After", nf, "function evaluations, TNC returned:", RCSTRINGS[rc]) + print("x =", x) + print("exact value = [0, 1]") + print() example() diff --git a/scipy/optimize/tnc/example.py b/scipy/optimize/tnc/example.py index f51b318fa68b..653e8e4fc98a 100644 --- a/scipy/optimize/tnc/example.py +++ b/scipy/optimize/tnc/example.py @@ -2,6 +2,8 @@ # Python TNC example # @(#) $Jeannot: example.py,v 1.4 2004/04/02 18:51:04 js Exp $ +from __future__ import division, print_function, absolute_import + import tnc # A function to minimize @@ -19,6 +21,6 @@ def function(x): # Optimizer call rc, nf, x = tnc.minimize(function, [-7, 3], [-10, 1], [10, 10]) -print "After", nf, "function evaluations, TNC returned:", tnc.RCSTRINGS[rc] -print "x =", x -print "exact value = [0, 1]" +print("After", nf, "function evaluations, TNC returned:", tnc.RCSTRINGS[rc]) +print("x =", x) +print("exact value = [0, 1]") diff --git a/scipy/optimize/zeros.py b/scipy/optimize/zeros.py index 431a4c117fd6..a0a4828f36ba 100644 --- a/scipy/optimize/zeros.py +++ b/scipy/optimize/zeros.py @@ -1,7 +1,8 @@ +from __future__ import division, print_function, absolute_import import warnings -import _zeros +from . import _zeros from numpy import finfo, sign, sqrt _iter = 100 diff --git a/scipy/setup.py b/scipy/setup.py index e1b6266fb8cc..330d1250cc93 100644 --- a/scipy/setup.py +++ b/scipy/setup.py @@ -1,3 +1,4 @@ +from __future__ import division, print_function, absolute_import def configuration(parent_package='',top_path=None): from numpy.distutils.misc_util import Configuration diff --git a/scipy/setupscons.py b/scipy/setupscons.py index 250c60bf373f..9b677cb82673 100644 --- a/scipy/setupscons.py +++ b/scipy/setupscons.py @@ -1,3 +1,5 @@ +from __future__ import division, print_function, absolute_import + from os.path import join as pjoin def configuration(parent_package='', top_path=None, setup_name='setupscons.py'): diff --git a/scipy/signal/__init__.py b/scipy/signal/__init__.py index a5c4ff693c31..85b213e5591a 100644 --- a/scipy/signal/__init__.py +++ b/scipy/signal/__init__.py @@ -220,26 +220,27 @@ lombscargle -- Computes the Lomb-Scargle periodogram """ +from __future__ import division, print_function, absolute_import -import sigtools -from waveforms import * +from . 
import sigtools +from .waveforms import * # The spline module (a C extension) provides: # cspline2d, qspline2d, sepfir2d, symiirord1, symiirord2 -from spline import * - -from bsplines import * -from cont2discrete import * -from dltisys import * -from filter_design import * -from fir_filter_design import * -from ltisys import * -from windows import * -from signaltools import * -from spectral import * -from wavelets import * -from _peak_finding import * - -__all__ = filter(lambda s: not s.startswith('_'), dir()) +from .spline import * + +from .bsplines import * +from .cont2discrete import * +from .dltisys import * +from .filter_design import * +from .fir_filter_design import * +from .ltisys import * +from .windows import * +from .signaltools import * +from .spectral import * +from .wavelets import * +from ._peak_finding import * + +__all__ = [s for s in dir() if not s.startswith('_')] from numpy.testing import Tester test = Tester().test diff --git a/scipy/signal/_arraytools.py b/scipy/signal/_arraytools.py index a9a34a2b12f4..429e9df986e1 100644 --- a/scipy/signal/_arraytools.py +++ b/scipy/signal/_arraytools.py @@ -1,6 +1,7 @@ """ Functions for acting on a axis of an array. """ +from __future__ import division, print_function, absolute_import import numpy as np diff --git a/scipy/signal/_peak_finding.py b/scipy/signal/_peak_finding.py index d3d946f06c3a..2f70d7070d67 100644 --- a/scipy/signal/_peak_finding.py +++ b/scipy/signal/_peak_finding.py @@ -1,9 +1,11 @@ """ Functions for identifying peaks in signals. """ +from __future__ import division, print_function, absolute_import import numpy as np +from scipy.lib.six.moves import xrange from scipy.signal.wavelets import cwt, ricker from scipy.stats import scoreatpercentile @@ -363,7 +365,7 @@ def filt_func(line): return False return True - return filter(filt_func, ridge_lines) + return list(filter(filt_func, ridge_lines)) def find_peaks_cwt(vector, widths, wavelet=None, max_distances=None, gap_thresh=None, @@ -450,5 +452,5 @@ def find_peaks_cwt(vector, widths, wavelet=None, max_distances=None, gap_thresh= ridge_lines = _identify_ridge_lines(cwt_dat, max_distances, gap_thresh) filtered = _filter_ridge_lines(cwt_dat, ridge_lines, min_length=min_length, min_snr=min_snr, noise_perc=noise_perc) - max_locs = map(lambda x: x[1][0], filtered) + max_locs = [x[1][0] for x in filtered] return sorted(max_locs) diff --git a/scipy/signal/bsplines.py b/scipy/signal/bsplines.py index 512a28dd0cc0..d435ec0ee039 100644 --- a/scipy/signal/bsplines.py +++ b/scipy/signal/bsplines.py @@ -1,4 +1,6 @@ +from __future__ import division, print_function, absolute_import +from scipy.lib.six.moves import xrange import scipy.special from numpy import logical_and, asarray, pi, zeros_like, \ piecewise, array, arctan2, tan, zeros, arange, floor @@ -6,7 +8,7 @@ less_equal, greater_equal # From splinemodule.c -from spline import cspline2d, sepfir2d +from .spline import cspline2d, sepfir2d from scipy.misc import comb diff --git a/scipy/signal/cont2discrete.py b/scipy/signal/cont2discrete.py index 67a2721e6b75..2e7a4e705e82 100644 --- a/scipy/signal/cont2discrete.py +++ b/scipy/signal/cont2discrete.py @@ -1,6 +1,7 @@ """ Continuous to discrete transformations for state-space and transfer function. 
""" +from __future__ import division, print_function, absolute_import # Author: Jeffrey Armstrong # March 29, 2011 @@ -8,7 +9,7 @@ import numpy as np from scipy import linalg -from ltisys import tf2ss, ss2tf, zpk2ss, ss2zpk +from .ltisys import tf2ss, ss2tf, zpk2ss, ss2zpk __all__ = ['cont2discrete'] diff --git a/scipy/signal/dltisys.py b/scipy/signal/dltisys.py index 1ee8b3fa60d7..69c97a7669a4 100644 --- a/scipy/signal/dltisys.py +++ b/scipy/signal/dltisys.py @@ -4,10 +4,11 @@ # Author: Jeffrey Armstrong # April 4, 2011 +from __future__ import division, print_function, absolute_import import numpy as np from scipy.interpolate import interp1d -from ltisys import tf2ss, zpk2ss +from .ltisys import tf2ss, zpk2ss __all__ = ['dlsim', 'dstep', 'dimpulse'] diff --git a/scipy/signal/filter_design.py b/scipy/signal/filter_design.py index 6c2408891045..dbd82a3e5275 100644 --- a/scipy/signal/filter_design.py +++ b/scipy/signal/filter_design.py @@ -1,5 +1,6 @@ """Filter design. """ +from __future__ import division, print_function, absolute_import import types import warnings @@ -109,7 +110,7 @@ def freqs(b, a, worN=None, plot=None): """ if worN is None: w = findfreqs(b, a, 200) - elif isinstance(worN, types.IntType): + elif isinstance(worN, int): N = worN w = findfreqs(b, a, N) else: @@ -198,7 +199,7 @@ def freqz(b, a=1, worN=None, whole=0, plot=None): if worN is None: N = 512 w = numpy.linspace(0, lastpoint, N, endpoint=False) - elif isinstance(worN, types.IntType): + elif isinstance(worN, int): N = worN w = numpy.linspace(0, lastpoint, N, endpoint=False) else: @@ -1023,7 +1024,7 @@ def buttord(wp, ws, gpass, gstop, analog=False): W0 = nat / ((10 ** (0.1 * abs(gstop)) - 1) ** (1.0 / (2.0 * ord))) except ZeroDivisionError: W0 = nat - print "Warning, order is zero...check input parametegstop." + print("Warning, order is zero...check input parametegstop.") # now convert this frequency back from lowpass prototype # to the original analog filter diff --git a/scipy/signal/fir_filter_design.py b/scipy/signal/fir_filter_design.py index 775a4bf42102..dcf32d81d0fb 100644 --- a/scipy/signal/fir_filter_design.py +++ b/scipy/signal/fir_filter_design.py @@ -1,10 +1,11 @@ """Functions for FIR filter design.""" +from __future__ import division, print_function, absolute_import from math import ceil, log import numpy as np from numpy.fft import irfft from scipy.special import sinc -import sigtools +from . import sigtools __all__ = ['kaiser_beta', 'kaiser_atten', 'kaiserord', 'firwin', 'firwin2', 'remez'] @@ -281,7 +282,7 @@ def firwin(numtaps, cutoff, width=None, window='hamming', pass_zero=True, h -= left * sinc(left * m) # Get and apply the window function. - from signaltools import get_window + from .signaltools import get_window win = get_window(window, numtaps, fftbins=False) h *= win @@ -466,7 +467,7 @@ def firwin2(numtaps, freq, gain, nfreqs=None, window='hamming', nyq=1.0, antisym if window is not None: # Create the window to apply to the filter coefficients. - from signaltools import get_window + from .signaltools import get_window wind = get_window(window, numtaps, fftbins=False) else: wind = 1 diff --git a/scipy/signal/ltisys.py b/scipy/signal/ltisys.py index 5110a91208c0..e4cca958c14f 100644 --- a/scipy/signal/ltisys.py +++ b/scipy/signal/ltisys.py @@ -2,6 +2,7 @@ ltisys -- a collection of classes and functions for modeling linear time invariant systems. 
""" +from __future__ import division, print_function, absolute_import # # Author: Travis Oliphant 2001 @@ -10,13 +11,14 @@ # Rewrote lsim2 and added impulse2. # -from filter_design import tf2zpk, zpk2tf, normalize +from .filter_design import tf2zpk, zpk2tf, normalize import numpy from numpy import product, zeros, array, dot, transpose, ones, \ nan_to_num, zeros_like, linspace import scipy.interpolate as interpolate import scipy.integrate as integrate import scipy.linalg as linalg +from scipy.lib.six.moves import xrange from numpy import r_, eye, real, atleast_1d, atleast_2d, poly, \ squeeze, diag, asarray diff --git a/scipy/signal/setup.py b/scipy/signal/setup.py index 9b57cc2aa089..da1af77a5bfe 100755 --- a/scipy/signal/setup.py +++ b/scipy/signal/setup.py @@ -1,4 +1,5 @@ #!/usr/bin/env python +from __future__ import division, print_function, absolute_import def configuration(parent_package='', top_path=None): diff --git a/scipy/signal/setupscons.py b/scipy/signal/setupscons.py index 0a3009c2f526..48224898f013 100755 --- a/scipy/signal/setupscons.py +++ b/scipy/signal/setupscons.py @@ -1,5 +1,5 @@ #!/usr/bin/env python - +from __future__ import division, print_function, absolute_import def configuration(parent_package='', top_path=None): from numpy.distutils.misc_util import Configuration diff --git a/scipy/signal/signaltools.py b/scipy/signal/signaltools.py index 5b8baa0ec917..00dbf1bc9ef5 100644 --- a/scipy/signal/signaltools.py +++ b/scipy/signal/signaltools.py @@ -1,8 +1,10 @@ # Author: Travis Oliphant # 1999 -- 2002 +from __future__ import division, print_function, absolute_import -import sigtools +from . import sigtools +from scipy.lib.six import callable from scipy import linalg from scipy.fftpack import fft, ifft, ifftshift, fft2, ifft2, fftn, \ ifftn, fftfreq @@ -14,8 +16,8 @@ transpose, dot, mean, ndarray, atleast_2d import numpy as np from scipy.misc import factorial -from windows import get_window -from _arraytools import axis_slice, axis_reverse, odd_ext, even_ext, const_ext +from .windows import get_window +from ._arraytools import axis_slice, axis_reverse, odd_ext, even_ext, const_ext __all__ = ['correlate', 'fftconvolve', 'convolve', 'convolve2d', 'correlate2d', 'order_filter', 'medfilt', 'medfilt2d', 'wiener', 'lfilter', @@ -1288,7 +1290,7 @@ def detrend(data, axis=-1, type='linear', bp=0): # Put data back in original shape. tdshape = take(dshape, newdims, 0) ret = reshape(newdata, tuple(tdshape)) - vals = range(1, rnk) + vals = list(range(1, rnk)) olddims = vals[:axis] + [0] + vals[axis:] ret = transpose(ret, tuple(olddims)) return ret diff --git a/scipy/signal/spectral.py b/scipy/signal/spectral.py index 3378da40f5e5..fd6f691ca677 100644 --- a/scipy/signal/spectral.py +++ b/scipy/signal/spectral.py @@ -1,13 +1,17 @@ """Tools for spectral analysis. """ +from __future__ import division, print_function, absolute_import + import numpy as np from scipy import fftpack -import signaltools -from windows import get_window -from _spectral import lombscargle +from . 
import signaltools +from .windows import get_window +from ._spectral import lombscargle import warnings +from scipy.lib.six import string_types + __all__ = ['periodogram', 'welch', 'lombscargle'] @@ -274,7 +278,7 @@ def welch(x, fs=1.0, window='hanning', nperseg=256, noverlap=None, nfft=None, % (nperseg, axis, x.shape[axis], axis)) nperseg = x.shape[-1] - if isinstance(window, basestring) or type(window) is tuple: + if isinstance(window, string_types) or type(window) is tuple: win = get_window(window, nperseg) else: win = np.asarray(window) diff --git a/scipy/signal/tests/test_array_tools.py b/scipy/signal/tests/test_array_tools.py index 93c45db8377c..ff7e9ba9c3b2 100644 --- a/scipy/signal/tests/test_array_tools.py +++ b/scipy/signal/tests/test_array_tools.py @@ -1,3 +1,4 @@ +from __future__ import division, print_function, absolute_import import numpy as np diff --git a/scipy/signal/tests/test_cont2discrete.py b/scipy/signal/tests/test_cont2discrete.py index 13429077d061..ad1aa9ffca3d 100644 --- a/scipy/signal/tests/test_cont2discrete.py +++ b/scipy/signal/tests/test_cont2discrete.py @@ -1,3 +1,5 @@ +from __future__ import division, print_function, absolute_import + import numpy as np from numpy.testing import TestCase, run_module_suite, \ assert_array_almost_equal, assert_almost_equal, \ diff --git a/scipy/signal/tests/test_dltisys.py b/scipy/signal/tests/test_dltisys.py index 22c601b5c4fc..9bfca23ca23b 100644 --- a/scipy/signal/tests/test_dltisys.py +++ b/scipy/signal/tests/test_dltisys.py @@ -1,7 +1,8 @@ - # Author: Jeffrey Armstrong # April 4, 2011 +from __future__ import division, print_function, absolute_import + import numpy as np from numpy.testing import TestCase, run_module_suite, assert_equal, \ assert_array_almost_equal, assert_array_equal, \ diff --git a/scipy/signal/tests/test_filter_design.py b/scipy/signal/tests/test_filter_design.py index 0cfdad546ae4..f7d120d36221 100644 --- a/scipy/signal/tests/test_filter_design.py +++ b/scipy/signal/tests/test_filter_design.py @@ -1,3 +1,5 @@ +from __future__ import division, print_function, absolute_import + import warnings import numpy as np diff --git a/scipy/signal/tests/test_fir_filter_design.py b/scipy/signal/tests/test_fir_filter_design.py index 7cab1fa0c16c..6a1d48552c51 100644 --- a/scipy/signal/tests/test_fir_filter_design.py +++ b/scipy/signal/tests/test_fir_filter_design.py @@ -1,3 +1,4 @@ +from __future__ import division, print_function, absolute_import import numpy as np from numpy.testing import TestCase, run_module_suite, assert_raises, \ diff --git a/scipy/signal/tests/test_ltisys.py b/scipy/signal/tests/test_ltisys.py index d9b2a2af86e0..9098eb5a9d7d 100644 --- a/scipy/signal/tests/test_ltisys.py +++ b/scipy/signal/tests/test_ltisys.py @@ -1,3 +1,4 @@ +from __future__ import division, print_function, absolute_import import warnings diff --git a/scipy/signal/tests/test_peak_finding.py b/scipy/signal/tests/test_peak_finding.py index 7fa0c671c322..a89d2c251a74 100644 --- a/scipy/signal/tests/test_peak_finding.py +++ b/scipy/signal/tests/test_peak_finding.py @@ -1,3 +1,5 @@ +from __future__ import division, print_function, absolute_import + import copy import numpy as np @@ -5,6 +7,7 @@ assert_almost_equal, assert_array_equal, assert_array_almost_equal, \ assert_raises, assert_ from scipy.signal._peak_finding import argrelmax, find_peaks_cwt, _identify_ridge_lines +from scipy.lib.six.moves import xrange def _gen_gaussians(center_locs, sigmas, total_length): @@ -241,4 +244,4 @@ def test_find_peaks_nopeak(self): 
np.testing.assert_equal(len(found_locs), 0) if __name__ == "__main__": - run_module_suite() \ No newline at end of file + run_module_suite() diff --git a/scipy/signal/tests/test_signaltools.py b/scipy/signal/tests/test_signaltools.py index da3aec2ee3ce..99e9b0cd3b6d 100644 --- a/scipy/signal/tests/test_signaltools.py +++ b/scipy/signal/tests/test_signaltools.py @@ -1,3 +1,4 @@ +from __future__ import division, print_function, absolute_import from decimal import Decimal diff --git a/scipy/signal/tests/test_spectral.py b/scipy/signal/tests/test_spectral.py index db4bd4337903..828f0c9582fe 100644 --- a/scipy/signal/tests/test_spectral.py +++ b/scipy/signal/tests/test_spectral.py @@ -1,3 +1,5 @@ +from __future__ import division, print_function, absolute_import + import warnings import numpy as np from numpy.testing import assert_raises, assert_approx_equal, \ diff --git a/scipy/signal/tests/test_waveforms.py b/scipy/signal/tests/test_waveforms.py index 7a00e5ad6ae0..51346d6e4897 100644 --- a/scipy/signal/tests/test_waveforms.py +++ b/scipy/signal/tests/test_waveforms.py @@ -1,3 +1,4 @@ +from __future__ import division, print_function, absolute_import import numpy as np from numpy.testing import TestCase, assert_almost_equal, assert_equal, assert_, \ diff --git a/scipy/signal/tests/test_wavelets.py b/scipy/signal/tests/test_wavelets.py index 4167b65a89df..e9d6148d9ec0 100644 --- a/scipy/signal/tests/test_wavelets.py +++ b/scipy/signal/tests/test_wavelets.py @@ -1,8 +1,11 @@ -from __future__ import division +from __future__ import division, print_function, absolute_import + + import numpy as np from numpy.testing import TestCase, run_module_suite, assert_equal, \ assert_array_equal, assert_array_almost_equal, assert_array_less, assert_ +from scipy.lib.six.moves import xrange from scipy.signal import wavelets diff --git a/scipy/signal/tests/test_windows.py b/scipy/signal/tests/test_windows.py index 4dfd76173d17..0d6a184489c1 100644 --- a/scipy/signal/tests/test_windows.py +++ b/scipy/signal/tests/test_windows.py @@ -1,3 +1,5 @@ +from __future__ import division, print_function, absolute_import + from numpy import array, ones_like from numpy.testing import assert_array_almost_equal, assert_array_equal diff --git a/scipy/signal/waveforms.py b/scipy/signal/waveforms.py index 8edef8b34db9..61a45d8f9af5 100644 --- a/scipy/signal/waveforms.py +++ b/scipy/signal/waveforms.py @@ -4,6 +4,8 @@ # Feb. 
2010: Updated by Warren Weckesser: # Rewrote much of chirp() # Added sweep_poly() +from __future__ import division, print_function, absolute_import + from numpy import asarray, zeros, place, nan, mod, pi, extract, log, sqrt, \ exp, cos, sin, polyval, polyint diff --git a/scipy/signal/wavelets.py b/scipy/signal/wavelets.py index f37b2bc3b325..4e76dc39937c 100644 --- a/scipy/signal/wavelets.py +++ b/scipy/signal/wavelets.py @@ -1,3 +1,5 @@ +from __future__ import division, print_function, absolute_import + import numpy as np from numpy.dual import eig from scipy.misc import comb diff --git a/scipy/signal/windows.py b/scipy/signal/windows.py index 867994b6d3fb..4f2692fab397 100644 --- a/scipy/signal/windows.py +++ b/scipy/signal/windows.py @@ -1,4 +1,5 @@ """The suite of window functions.""" +from __future__ import division, print_function, absolute_import import numpy as np from scipy import special, linalg diff --git a/scipy/sparse/__init__.py b/scipy/sparse/__init__.py index 54b78159cf43..85ec06317c4e 100644 --- a/scipy/sparse/__init__.py +++ b/scipy/sparse/__init__.py @@ -172,27 +172,29 @@ """ +from __future__ import division, print_function, absolute_import + # Original code by Travis Oliphant. # Modified and extended by Ed Schofield, Robert Cimrman, # Nathan Bell, and Jake Vanderplas. -from base import * -from csr import * -from csc import * -from lil import * -from dok import * -from coo import * -from dia import * -from bsr import * -from construct import * -from extract import * +from .base import * +from .csr import * +from .csc import * +from .lil import * +from .dok import * +from .coo import * +from .dia import * +from .bsr import * +from .construct import * +from .extract import * # for backward compatibility with v0.10. This function is marked as deprecated -from csgraph import cs_graph_components +from .csgraph import cs_graph_components #from spfuncs import * -__all__ = filter(lambda s:not s.startswith('_'),dir()) +__all__ = [s for s in dir() if not s.startswith('_')] from numpy.testing import Tester test = Tester().test bench = Tester().bench diff --git a/scipy/sparse/base.py b/scipy/sparse/base.py index d5f919fca361..7c5a6ebdf9a6 100644 --- a/scipy/sparse/base.py +++ b/scipy/sparse/base.py @@ -1,13 +1,16 @@ """Base class for sparse matrices""" +from __future__ import division, print_function, absolute_import __all__ = ['spmatrix', 'isspmatrix', 'issparse', 'SparseWarning','SparseEfficiencyWarning'] +import sys from warnings import warn import numpy as np -from sputils import isdense, isscalarlike, isintlike +from scipy.lib.six.moves import xrange +from .sputils import isdense, isscalarlike, isintlike class SparseWarning(Warning): pass @@ -150,7 +153,7 @@ def __str__(self): # helper function, outputs "(i,j) v" def tostr(row,col,data): - triples = zip(zip(row,col),data) + triples = zip(list(zip(row,col)),data) return '\n'.join( [ (' %s\t%s' % t) for t in triples] ) if nnz > maxprint: @@ -164,8 +167,12 @@ def tostr(row,col,data): return out - def __nonzero__(self): # Simple -- other ideas? - return self.getnnz() > 0 + if sys.version_info[0] >= 3: + def __bool__(self): # Simple -- other ideas? + return self.getnnz() > 0 + else: + def __nonzero__(self): # Simple -- other ideas? + return self.getnnz() > 0 # What should len(sparse) return? For consistency with dense matrices, # perhaps it should be the number of rows? 
But for some uses the number of @@ -361,7 +368,7 @@ def __pow__(self, other): raise ValueError('exponent must be >= 0') if other == 0: - from construct import eye + from .construct import eye return eye( self.shape[0], dtype=self.dtype ) elif other == 1: return self.copy() @@ -440,7 +447,7 @@ def getcol(self, j): # Spmatrix subclasses should override this method for efficiency. # Post-multiply by a (n x 1) column vector 'a' containing all zeros # except for a_j = 1 - from csc import csc_matrix + from .csc import csc_matrix n = self.shape[1] if j < 0: j += n @@ -456,7 +463,7 @@ def getrow(self, i): # Spmatrix subclasses should override this method for efficiency. # Pre-multiply by a (1 x m) row vector 'a' containing all zeros # except for a_i = 1 - from csr import csr_matrix + from .csr import csr_matrix m = self.shape[0] if i < 0: i += m diff --git a/scipy/sparse/benchmarks/bench_sparse.py b/scipy/sparse/benchmarks/bench_sparse.py index 59c09640c9ac..e709a02fe99e 100644 --- a/scipy/sparse/benchmarks/bench_sparse.py +++ b/scipy/sparse/benchmarks/bench_sparse.py @@ -1,4 +1,5 @@ """general tests and simple benchmarks for the sparse module""" +from __future__ import division, print_function, absolute_import import time @@ -8,6 +9,7 @@ from numpy.testing import * from scipy import sparse +from scipy.lib.six.moves import xrange from scipy.sparse import csr_matrix, coo_matrix, dia_matrix, lil_matrix, \ dok_matrix @@ -51,25 +53,25 @@ def bench_arithmetic(self): matrices.append( ('A','Poisson5pt', poisson2d(250,format='csr')) ) matrices.append( ('B','Poisson5pt^2', poisson2d(250,format='csr')**2) ) - print - print ' Sparse Matrix Arithmetic' - print '====================================================================' - print ' var | name | shape | dtype | nnz ' - print '--------------------------------------------------------------------' + print() + print(' Sparse Matrix Arithmetic') + print('====================================================================') + print(' var | name | shape | dtype | nnz ') + print('--------------------------------------------------------------------') fmt = ' %1s | %14s | %20s | %9s | %8d ' for var,name,mat in matrices: name = name.center(14) shape = ("%s" % (mat.shape,)).center(20) dtype = mat.dtype.name.center(9) - print fmt % (var,name,shape,dtype,mat.nnz) + print(fmt % (var,name,shape,dtype,mat.nnz)) space = ' ' * 10 - print - print space+' Timings' - print space+'==========================================' - print space+' format | operation | time (msec) ' - print space+'------------------------------------------' + print() + print(space+' Timings') + print(space+'==========================================') + print(space+' format | operation | time (msec) ') + print(space+'------------------------------------------') fmt = space+' %3s | %17s | %7.1f ' for format in ['csr']: @@ -89,7 +91,7 @@ def bench_arithmetic(self): msec_per_it = 1000*(end - start)/float(iter) operation = (X + '.' 
+ op + '(' + Y + ')').center(17) - print fmt % (format,operation,msec_per_it) + print(fmt % (format,operation,msec_per_it)) def bench_sort(self): @@ -101,11 +103,11 @@ def bench_sort(self): matrices.append( ('Rand100', 1e4, 100) ) matrices.append( ('Rand200', 1e4, 200) ) - print - print ' Sparse Matrix Index Sorting' - print '=====================================================================' - print ' type | name | shape | nnz | time (msec) ' - print '---------------------------------------------------------------------' + print() + print(' Sparse Matrix Index Sorting') + print('=====================================================================') + print(' type | name | shape | nnz | time (msec) ') + print('---------------------------------------------------------------------') fmt = ' %3s | %12s | %20s | %8d | %6.2f ' for name,N,K in matrices: @@ -124,7 +126,7 @@ def bench_sort(self): name = name.center(12) shape = ("%s" % (A.shape,)).center(20) - print fmt % (A.format,name,shape,A.nnz,1e3*(end-start)/float(iter) ) + print(fmt % (A.format,name,shape,A.nnz,1e3*(end-start)/float(iter) )) def bench_matvec(self): matrices = [] @@ -146,11 +148,11 @@ def bench_matvec(self): matrices.append( ('Block3x3', A.tocsr()) ) matrices.append( ('Block3x3', A) ) - print - print ' Sparse Matrix Vector Product' - print '==================================================================' - print ' type | name | shape | nnz | MFLOPs ' - print '------------------------------------------------------------------' + print() + print(' Sparse Matrix Vector Product') + print('==================================================================') + print(' type | name | shape | nnz | MFLOPs ') + print('------------------------------------------------------------------') fmt = ' %3s | %12s | %20s | %8d | %6.1f ' for name,A in matrices: @@ -171,7 +173,7 @@ def bench_matvec(self): shape = ("%s" % (A.shape,)).center(20) MFLOPs = (2*A.nnz*iter/(end-start))/float(1e6) - print fmt % (A.format,name,shape,A.nnz,MFLOPs) + print(fmt % (A.format,name,shape,A.nnz,MFLOPs)) def bench_matvecs(self): matrices = [] @@ -184,12 +186,12 @@ def bench_matvecs(self): n_vecs = 10 - print - print ' Sparse Matrix (Block) Vector Product' - print ' Blocksize = %d' % (n_vecs,) - print '==================================================================' - print ' type | name | shape | nnz | MFLOPs ' - print '------------------------------------------------------------------' + print() + print(' Sparse Matrix (Block) Vector Product') + print(' Blocksize = %d' % (n_vecs,)) + print('==================================================================') + print(' type | name | shape | nnz | MFLOPs ') + print('------------------------------------------------------------------') fmt = ' %3s | %12s | %20s | %8d | %6.1f ' for name,A in matrices: @@ -210,7 +212,7 @@ def bench_matvecs(self): shape = ("%s" % (A.shape,)).center(20) MFLOPs = (2*n_vecs*A.nnz*iter/(end-start))/float(1e6) - print fmt % (A.format,name,shape,A.nnz,MFLOPs) + print(fmt % (A.format,name,shape,A.nnz,MFLOPs)) def bench_construction(self): @@ -220,11 +222,11 @@ def bench_construction(self): matrices.append( ('Identity',sparse.eye(10000)) ) matrices.append( ('Poisson5pt', poisson2d(100)) ) - print - print ' Sparse Matrix Construction' - print '====================================================================' - print ' type | name | shape | nnz | time (sec) ' - print '--------------------------------------------------------------------' + print() + print(' Sparse Matrix 
Construction') + print('====================================================================') + print(' type | name | shape | nnz | time (sec) ') + print('--------------------------------------------------------------------') fmt = ' %3s | %12s | %20s | %8d | %6.4f ' for name,A in matrices: @@ -246,18 +248,18 @@ def bench_construction(self): name = name.center(12) shape = ("%s" % (A.shape,)).center(20) - print fmt % (format,name,shape,A.nnz,(end-start)/float(iter)) + print(fmt % (format,name,shape,A.nnz,(end-start)/float(iter))) def bench_conversion(self): A = poisson2d(100) formats = ['csr','csc','coo','dia','lil','dok'] - print - print ' Sparse Matrix Conversion' - print '====================================================================' - print ' format | tocsr() | tocsc() | tocoo() | todia() | tolil() | todok() ' - print '--------------------------------------------------------------------' + print() + print(' Sparse Matrix Conversion') + print('====================================================================') + print(' format | tocsr() | tocsc() | tocoo() | todia() | tolil() | todok() ') + print('--------------------------------------------------------------------') for fromfmt in formats: base = getattr(A,'to' + fromfmt)() @@ -286,7 +288,7 @@ def bench_conversion(self): output += '| n/a ' else: output += '| %5.1fms ' % (1000*t) - print output + print(output) #class TestLarge(TestCase): @@ -296,7 +298,7 @@ def bench_conversion(self): # #TODO move this out of Common since it doesn't use spmatrix # random.seed(0) # A = dok_matrix((100,100)) -# for k in range(100): +# for k in xrange(100): # i = random.randrange(100) # j = random.randrange(100) # A[i,j] = 1. diff --git a/scipy/sparse/bsr.py b/scipy/sparse/bsr.py index 55ad6fd10320..6529b40efb14 100644 --- a/scipy/sparse/bsr.py +++ b/scipy/sparse/bsr.py @@ -1,4 +1,6 @@ """Compressed Block Sparse Row matrix format""" +from __future__ import division, print_function, absolute_import + __docformat__ = "restructuredtext en" @@ -8,12 +10,12 @@ import numpy as np -from data import _data_matrix -from compressed import _cs_matrix -from base import isspmatrix, _formats -from sputils import isshape, getdtype, to_native, upcast -import sparsetools -from sparsetools import bsr_matvec, bsr_matvecs, csr_matmat_pass1, \ +from .data import _data_matrix +from .compressed import _cs_matrix +from .base import isspmatrix, _formats +from .sputils import isshape, getdtype, to_native, upcast +from . 
import sparsetools +from .sparsetools import bsr_matvec, bsr_matvecs, csr_matmat_pass1, \ bsr_matmat_pass2, bsr_transpose, bsr_sort_indices class bsr_matrix(_cs_matrix): @@ -146,7 +148,7 @@ def __init__(self, arg1, shape=None, dtype=None, copy=False, blocksize=None): elif len(arg1) == 2: # (data,(row,col)) format - from coo import coo_matrix + from .coo import coo_matrix self._set_self( coo_matrix(arg1, dtype=dtype).tobsr(blocksize=blocksize) ) elif len(arg1) == 3: @@ -164,7 +166,7 @@ def __init__(self, arg1, shape=None, dtype=None, copy=False, blocksize=None): except: raise ValueError("unrecognized form for" \ " %s_matrix constructor" % self.format) - from coo import coo_matrix + from .coo import coo_matrix arg1 = coo_matrix(arg1, dtype=dtype).tobsr(blocksize=blocksize) self._set_self( arg1 ) @@ -245,8 +247,7 @@ def check_format(self, full_check=True): #check format validity (more expensive) if self.nnz > 0: if self.indices.max() >= N//C: - print "max index",self.indices.max() - raise ValueError("column index values must be < %d" % (N//C)) + raise ValueError("column index values must be < %d (now max %d)" % (N//C, self.indices.max())) if self.indices.min() < 0: raise ValueError("column index values must be >= 0") if np.diff(self.indptr).min() < 0: @@ -347,7 +348,7 @@ def _mul_sparse_matrix(self, other): else: C = 1 - from csr import isspmatrix_csr + from .csr import isspmatrix_csr if isspmatrix_csr(other) and n == 1: other = other.tobsr(blocksize=(n,C), copy=False) #lightweight conversion @@ -420,7 +421,7 @@ def tocoo(self,copy=True): if copy: data = data.copy() - from coo import coo_matrix + from .coo import coo_matrix return coo_matrix((data,(row,col)), shape=self.shape) @@ -461,7 +462,7 @@ def eliminate_zeros(self): self.data[:len(nonzero_blocks)] = self.data[nonzero_blocks] - from csr import csr_matrix + from .csr import csr_matrix # modifies self.indptr and self.indices *in place* proxy = csr_matrix((mask,self.indices,self.indptr),shape=(M//R,N//C)) diff --git a/scipy/sparse/compressed.py b/scipy/sparse/compressed.py index 33136e09ef5d..d750d79cf6d5 100644 --- a/scipy/sparse/compressed.py +++ b/scipy/sparse/compressed.py @@ -1,16 +1,18 @@ """Base class for sparse matrix formats using compressed storage """ +from __future__ import division, print_function, absolute_import __all__ = [] from warnings import warn import numpy as np +from scipy.lib.six.moves import xrange -from base import spmatrix, isspmatrix, SparseEfficiencyWarning -from data import _data_matrix -import sparsetools -from sputils import upcast, upcast_char, to_native, isdense, isshape, \ +from .base import spmatrix, isspmatrix, SparseEfficiencyWarning +from .data import _data_matrix +from . 
import sparsetools +from .sputils import upcast, upcast_char, to_native, isdense, isshape, \ getdtype, isscalarlike, isintlike @@ -40,7 +42,7 @@ def __init__(self, arg1, shape=None, dtype=None, copy=False): else: if len(arg1) == 2: # (data, ij) format - from coo import coo_matrix + from .coo import coo_matrix other = self.__class__( coo_matrix(arg1, shape=shape) ) self._set_self( other ) elif len(arg1) == 3: @@ -60,7 +62,7 @@ def __init__(self, arg1, shape=None, dtype=None, copy=False): except: raise ValueError("unrecognized %s_matrix constructor usage" % self.format) - from coo import coo_matrix + from .coo import coo_matrix self._set_self( self.__class__(coo_matrix(arg1, dtype=dtype)) ) # Read matrix dimensions given, if any @@ -551,7 +553,7 @@ def tocoo(self,copy=True): row,col = self._swap((major_indices,minor_indices)) - from coo import coo_matrix + from .coo import coo_matrix return coo_matrix((data,(row,col)), self.shape) def toarray(self, order=None, out=None): diff --git a/scipy/sparse/construct.py b/scipy/sparse/construct.py index 087815b336d5..cbef3c868303 100644 --- a/scipy/sparse/construct.py +++ b/scipy/sparse/construct.py @@ -1,5 +1,6 @@ """Functions to construct sparse matrices """ +from __future__ import division, print_function, absolute_import __docformat__ = "restructuredtext en" @@ -11,16 +12,16 @@ import numpy as np -from sputils import upcast +from .sputils import upcast -from csr import csr_matrix -from csc import csc_matrix -from bsr import bsr_matrix -from coo import coo_matrix -from lil import lil_matrix -from dia import dia_matrix +from .csr import csr_matrix +from .csc import csc_matrix +from .bsr import bsr_matrix +from .coo import coo_matrix +from .lil import lil_matrix +from .dia import dia_matrix -from base import issparse +from .base import issparse def spdiags(data, diags, m, n, format=None): """ @@ -141,7 +142,7 @@ def diags(diagonals, offsets, shape=None, format=None, dtype=None): else: raise ValueError("Different number of diagonals and offsets.") else: - diagonals = map(np.atleast_1d, diagonals) + diagonals = list(map(np.atleast_1d, diagonals)) offsets = np.atleast_1d(offsets) # Basic check @@ -640,7 +641,7 @@ def rand(m, n, density=0.01, format="coo", dtype=None): raise ValueError(msg % np.iinfo(tp).max) # Number of non zero values - k = long(density * m * n) + k = int(density * m * n) # Generate a few more values than k so that we can get unique values # afterwards. 
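The construct.py hunk above shows the three substitutions this patch applies throughout scipy.sparse: implicit relative imports become explicit (from .coo import coo_matrix), map() and filter() results are materialised with list() or a comprehension because they are lazy iterators on Python 3, and long is replaced by int since the two types are merged. A minimal sketch of the iterator and integer patterns follows, for illustration only; the values and names below are made up and are not part of the patch.

    # Illustration only: toy values, not taken from the SciPy sources.
    import numpy as np

    # map() is lazy on Python 3; wrapping it in list() keeps len() and
    # indexing working, as done for the diagonals in construct.diags().
    diagonals = [1, [2, 3], np.array([4, 5, 6])]
    diagonals = list(map(np.atleast_1d, diagonals))
    assert len(diagonals) == 3

    # The Python 2 long type no longer exists; plain int is unbounded,
    # so long(density * m * n) becomes int(density * m * n).
    density, m, n = 0.25, 40, 10
    k = int(density * m * n)
    assert k == 100

    # filter() is also lazy, so module-level __all__ is rebuilt with a
    # list comprehension instead of filter(lambda s: ..., dir()).
    names = ['_private', 'csr_matrix', 'issparse']
    __all__ = [s for s in names if not s.startswith('_')]
    assert __all__ == ['csr_matrix', 'issparse']

The coo, csc, csr and dok hunks that follow apply the same substitutions, together with scipy.lib.six shims (xrange, iteritems, zip) for names whose Python 2 and Python 3 spellings differ.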
diff --git a/scipy/sparse/coo.py b/scipy/sparse/coo.py index 73eed22b1c27..e1afdeee400e 100644 --- a/scipy/sparse/coo.py +++ b/scipy/sparse/coo.py @@ -1,4 +1,5 @@ """ A sparse matrix in COOrdinate or 'triplet' format""" +from __future__ import division, print_function, absolute_import __docformat__ = "restructuredtext en" @@ -8,10 +9,12 @@ import numpy as np -from sparsetools import coo_tocsr, coo_todense, coo_matvec -from base import isspmatrix -from data import _data_matrix -from sputils import upcast, upcast_char, to_native, isshape, getdtype, isintlike +from scipy.lib.six.moves import zip as izip + +from .sparsetools import coo_tocsr, coo_todense, coo_matvec +from .base import isspmatrix +from .data import _data_matrix +from .sputils import upcast, upcast_char, to_native, isshape, getdtype, isintlike class coo_matrix(_data_matrix): """ @@ -261,7 +264,7 @@ def tocsc(self): [0, 0, 0, 1]]) """ - from csc import csc_matrix + from .csc import csc_matrix if self.nnz == 0: return csc_matrix(self.shape, dtype=self.dtype) else: @@ -299,7 +302,7 @@ def tocsr(self): [0, 0, 0, 1]]) """ - from csr import csr_matrix + from .csr import csr_matrix if self.nnz == 0: return csr_matrix(self.shape, dtype=self.dtype) else: @@ -324,7 +327,7 @@ def tocoo(self, copy=False): return self def todia(self): - from dia import dia_matrix + from .dia import dia_matrix ks = self.col - self.row #the diagonal for each nonzero diags = np.unique(ks) @@ -341,8 +344,7 @@ def todia(self): return dia_matrix((data,diags), shape=self.shape) def todok(self): - from itertools import izip - from dok import dok_matrix + from .dok import dok_matrix dok = dok_matrix((self.shape), dtype=self.dtype) diff --git a/scipy/sparse/csc.py b/scipy/sparse/csc.py index 2583c3f22c8c..a38003121b25 100644 --- a/scipy/sparse/csc.py +++ b/scipy/sparse/csc.py @@ -1,4 +1,5 @@ """Compressed Sparse Column matrix format""" +from __future__ import division, print_function, absolute_import __docformat__ = "restructuredtext en" @@ -7,11 +8,12 @@ from warnings import warn import numpy as np +from scipy.lib.six.moves import xrange -from sparsetools import csc_tocsr -from sputils import upcast, isintlike +from .sparsetools import csc_tocsr +from .sputils import upcast, isintlike -from compressed import _cs_matrix +from .compressed import _cs_matrix class csc_matrix(_cs_matrix): @@ -106,7 +108,7 @@ class csc_matrix(_cs_matrix): """ def transpose(self, copy=False): - from csr import csr_matrix + from .csr import csr_matrix M,N = self.shape return csr_matrix((self.data,self.indices,self.indptr),(N,M),copy=copy) @@ -131,7 +133,7 @@ def tocsr(self): self.indptr, self.indices, self.data, \ indptr, indices, data) - from csr import csr_matrix + from .csr import csr_matrix A = csr_matrix((data, indices, indptr), shape=self.shape) A.has_sorted_indices = True return A diff --git a/scipy/sparse/csgraph/__init__.py b/scipy/sparse/csgraph/__init__.py index 9afc183e763b..0df93976eb14 100644 --- a/scipy/sparse/csgraph/__init__.py +++ b/scipy/sparse/csgraph/__init__.py @@ -118,6 +118,8 @@ with non-edges indicated by zeros, infinities, and NaN entries. 
""" +from __future__ import division, print_function, absolute_import + __docformat__ = "restructuredtext en" __all__ = ['cs_graph_components', @@ -141,14 +143,14 @@ 'csgraph_to_masked', 'NegativeCycleError'] -from _components import cs_graph_components -from _laplacian import laplacian -from _shortest_path import shortest_path, floyd_warshall, dijkstra,\ +from ._components import cs_graph_components +from ._laplacian import laplacian +from ._shortest_path import shortest_path, floyd_warshall, dijkstra,\ bellman_ford, johnson, NegativeCycleError -from _traversal import breadth_first_order, depth_first_order, \ +from ._traversal import breadth_first_order, depth_first_order, \ breadth_first_tree, depth_first_tree, connected_components -from _min_spanning_tree import minimum_spanning_tree -from _tools import construct_dist_matrix, reconstruct_path,\ +from ._min_spanning_tree import minimum_spanning_tree +from ._tools import construct_dist_matrix, reconstruct_path,\ csgraph_from_dense, csgraph_to_dense, csgraph_masked_from_dense,\ csgraph_from_masked diff --git a/scipy/sparse/csgraph/_components.py b/scipy/sparse/csgraph/_components.py index 52065f36d100..2934385b83e2 100644 --- a/scipy/sparse/csgraph/_components.py +++ b/scipy/sparse/csgraph/_components.py @@ -1,3 +1,5 @@ +from __future__ import division, print_function, absolute_import + import numpy as np from scipy.sparse.sparsetools import cs_graph_components as _cs_graph_components diff --git a/scipy/sparse/csgraph/_laplacian.py b/scipy/sparse/csgraph/_laplacian.py index 9120d4c7c589..7915eba9e80c 100644 --- a/scipy/sparse/csgraph/_laplacian.py +++ b/scipy/sparse/csgraph/_laplacian.py @@ -7,6 +7,8 @@ # Jake Vanderplas # License: BSD +from __future__ import division, print_function, absolute_import + import numpy as np from scipy.sparse import isspmatrix, coo_matrix diff --git a/scipy/sparse/csgraph/_validation.py b/scipy/sparse/csgraph/_validation.py index a89959c34bb3..514ffbb03ad4 100644 --- a/scipy/sparse/csgraph/_validation.py +++ b/scipy/sparse/csgraph/_validation.py @@ -1,6 +1,8 @@ +from __future__ import division, print_function, absolute_import + import numpy as np from scipy.sparse import csr_matrix, isspmatrix, isspmatrix_csc, isspmatrix_csr -from _tools import csgraph_to_dense, csgraph_from_dense,\ +from ._tools import csgraph_to_dense, csgraph_from_dense,\ csgraph_masked_from_dense, csgraph_from_masked DTYPE = np.float64 diff --git a/scipy/sparse/csgraph/setup.py b/scipy/sparse/csgraph/setup.py index 177285d3fbdc..adfe1c329af3 100644 --- a/scipy/sparse/csgraph/setup.py +++ b/scipy/sparse/csgraph/setup.py @@ -1,3 +1,5 @@ +from __future__ import division, print_function, absolute_import + from os.path import join from numpy.distutils.system_info import get_info, get_standard_file, \ BlasNotFoundError diff --git a/scipy/sparse/csgraph/setupscons.py b/scipy/sparse/csgraph/setupscons.py index 92d966576f64..f58b6fed9063 100644 --- a/scipy/sparse/csgraph/setupscons.py +++ b/scipy/sparse/csgraph/setupscons.py @@ -1,4 +1,5 @@ #!/usr/bin/env python +from __future__ import division, print_function, absolute_import from os.path import join diff --git a/scipy/sparse/csgraph/tests/test_connected_components.py b/scipy/sparse/csgraph/tests/test_connected_components.py index 87b11dc2d462..188d045dbe30 100644 --- a/scipy/sparse/csgraph/tests/test_connected_components.py +++ b/scipy/sparse/csgraph/tests/test_connected_components.py @@ -1,3 +1,5 @@ +from __future__ import division, print_function, absolute_import + import numpy as np from 
numpy.testing import assert_, assert_array_almost_equal from scipy.sparse import csgraph diff --git a/scipy/sparse/csgraph/tests/test_conversions.py b/scipy/sparse/csgraph/tests/test_conversions.py index 07b57bace5a1..f4d4254ad587 100644 --- a/scipy/sparse/csgraph/tests/test_conversions.py +++ b/scipy/sparse/csgraph/tests/test_conversions.py @@ -1,3 +1,5 @@ +from __future__ import division, print_function, absolute_import + import numpy as np from numpy.testing import assert_array_almost_equal, dec from scipy.sparse import csr_matrix diff --git a/scipy/sparse/csgraph/tests/test_graph_components.py b/scipy/sparse/csgraph/tests/test_graph_components.py index 1a2e24d9aa3b..d85a6c500de4 100644 --- a/scipy/sparse/csgraph/tests/test_graph_components.py +++ b/scipy/sparse/csgraph/tests/test_graph_components.py @@ -1,3 +1,5 @@ +from __future__ import division, print_function, absolute_import + import warnings import numpy as np diff --git a/scipy/sparse/csgraph/tests/test_graph_laplacian.py b/scipy/sparse/csgraph/tests/test_graph_laplacian.py index 3e5a80cc7d7a..d323cf10c546 100644 --- a/scipy/sparse/csgraph/tests/test_graph_laplacian.py +++ b/scipy/sparse/csgraph/tests/test_graph_laplacian.py @@ -1,6 +1,7 @@ # Author: Gael Varoquaux # Jake Vanderplas # License: BSD +from __future__ import division, print_function, absolute_import import numpy as np from scipy import sparse diff --git a/scipy/sparse/csgraph/tests/test_shortest_path.py b/scipy/sparse/csgraph/tests/test_shortest_path.py index 5f54fbfe970f..d6d0d4af55d6 100644 --- a/scipy/sparse/csgraph/tests/test_shortest_path.py +++ b/scipy/sparse/csgraph/tests/test_shortest_path.py @@ -1,3 +1,5 @@ +from __future__ import division, print_function, absolute_import + import numpy as np from numpy.testing import \ assert_array_almost_equal, assert_raises, TestCase, dec diff --git a/scipy/sparse/csgraph/tests/test_spanning_tree.py b/scipy/sparse/csgraph/tests/test_spanning_tree.py index b804a71e1943..bcd9068e109d 100644 --- a/scipy/sparse/csgraph/tests/test_spanning_tree.py +++ b/scipy/sparse/csgraph/tests/test_spanning_tree.py @@ -1,4 +1,6 @@ """Test the minimum spanning tree function""" +from __future__ import division, print_function, absolute_import + import numpy as np from numpy.testing import assert_ import numpy.testing as npt @@ -61,4 +63,4 @@ def test_minimum_spanning_tree(): expected[idx, idx+1] = 1 npt.assert_array_equal(mintree.todense(), expected, - 'Incorrect spanning tree found.') \ No newline at end of file + 'Incorrect spanning tree found.') diff --git a/scipy/sparse/csgraph/tests/test_traversal.py b/scipy/sparse/csgraph/tests/test_traversal.py index d0866bbd830a..05348f74e3d9 100644 --- a/scipy/sparse/csgraph/tests/test_traversal.py +++ b/scipy/sparse/csgraph/tests/test_traversal.py @@ -1,3 +1,5 @@ +from __future__ import division, print_function, absolute_import + import numpy as np from numpy.testing import assert_array_almost_equal from scipy.sparse.csgraph import breadth_first_tree, depth_first_tree,\ diff --git a/scipy/sparse/csr.py b/scipy/sparse/csr.py index 7351b03ffc65..c536d0b87377 100644 --- a/scipy/sparse/csr.py +++ b/scipy/sparse/csr.py @@ -1,5 +1,7 @@ """Compressed Sparse Row matrix format""" +from __future__ import division, print_function, absolute_import + __docformat__ = "restructuredtext en" __all__ = ['csr_matrix', 'isspmatrix_csr'] @@ -8,13 +10,14 @@ from warnings import warn import numpy as np +from scipy.lib.six.moves import xrange -from sparsetools import csr_tocsc, csr_tobsr, csr_count_blocks, \ +from 
.sparsetools import csr_tocsc, csr_tobsr, csr_count_blocks, \ get_csr_submatrix, csr_sample_values -from sputils import upcast, isintlike +from .sputils import upcast, isintlike -from compressed import _cs_matrix +from .compressed import _cs_matrix class csr_matrix(_cs_matrix): """ @@ -105,12 +108,12 @@ class csr_matrix(_cs_matrix): """ def transpose(self, copy=False): - from csc import csc_matrix + from .csc import csc_matrix M,N = self.shape return csc_matrix((self.data,self.indices,self.indptr), shape=(N,M), copy=copy) def tolil(self): - from lil import lil_matrix + from .lil import lil_matrix lil = lil_matrix(self.shape,dtype=self.dtype) self.sort_indices() #lil_matrix needs sorted column indices @@ -141,16 +144,16 @@ def tocsc(self): self.indptr, self.indices, self.data, \ indptr, indices, data) - from csc import csc_matrix + from .csc import csc_matrix A = csc_matrix((data, indices, indptr), shape=self.shape) A.has_sorted_indices = True return A def tobsr(self, blocksize=None, copy=True): - from bsr import bsr_matrix + from .bsr import bsr_matrix if blocksize is None: - from spfuncs import estimate_blocksize + from .spfuncs import estimate_blocksize return self.tobsr(blocksize=estimate_blocksize(self)) elif blocksize == (1,1): diff --git a/scipy/sparse/data.py b/scipy/sparse/data.py index 57cf8d262bbf..5fa637103b9e 100644 --- a/scipy/sparse/data.py +++ b/scipy/sparse/data.py @@ -6,12 +6,14 @@ """ +from __future__ import division, print_function, absolute_import + __all__ = [] import numpy as np -from base import spmatrix -from sputils import isscalarlike +from .base import spmatrix +from .sputils import isscalarlike #TODO implement all relevant operations diff --git a/scipy/sparse/dia.py b/scipy/sparse/dia.py index ed6a193545e7..95898f5d9b80 100644 --- a/scipy/sparse/dia.py +++ b/scipy/sparse/dia.py @@ -1,15 +1,17 @@ """Sparse DIAgonal format""" +from __future__ import division, print_function, absolute_import + __docformat__ = "restructuredtext en" __all__ = ['dia_matrix', 'isspmatrix_dia'] import numpy as np -from base import isspmatrix, _formats -from data import _data_matrix -from sputils import isshape, upcast, upcast_char, getdtype -from sparsetools import dia_matvec +from .base import isspmatrix, _formats +from .data import _data_matrix +from .sputils import isshape, upcast, upcast_char, getdtype +from .sparsetools import dia_matvec class dia_matrix(_data_matrix): """Sparse matrix with DIAgonal storage @@ -113,7 +115,7 @@ def __init__(self, arg1, shape=None, dtype=None, copy=False): except: raise ValueError("unrecognized form for" \ " %s_matrix constructor" % self.format) - from coo import coo_matrix + from .coo import coo_matrix A = coo_matrix(arg1, dtype=dtype).todia() self.data = A.data self.offsets = A.offsets @@ -211,7 +213,7 @@ def tocoo(self): mask &= data != 0 row,col,data = row[mask],col[mask],data[mask] - from coo import coo_matrix + from .coo import coo_matrix return coo_matrix((data,(row,col)), shape=self.shape) # needed by _data_matrix diff --git a/scipy/sparse/dok.py b/scipy/sparse/dok.py index 291586d40be6..913bb582760c 100644 --- a/scipy/sparse/dok.py +++ b/scipy/sparse/dok.py @@ -1,15 +1,20 @@ """Dictionary Of Keys based matrix""" +from __future__ import division, print_function, absolute_import + __docformat__ = "restructuredtext en" __all__ = ['dok_matrix', 'isspmatrix_dok'] -from itertools import izip + import numpy as np -from base import spmatrix, isspmatrix -from sputils import isdense, getdtype, isshape, isintlike, isscalarlike, upcast +from 
scipy.lib.six.moves import zip as izip, xrange +from scipy.lib.six import iteritems + +from .base import spmatrix, isspmatrix +from .sputils import isdense, getdtype, isshape, isintlike, isscalarlike, upcast try: from operator import isSequenceType as _is_sequence @@ -97,7 +102,7 @@ def __init__(self, arg1, shape=None, dtype=None, copy=False): if len(arg1.shape)!=2: raise TypeError('expected rank <=2 dense array or matrix') - from coo import coo_matrix + from .coo import coo_matrix self.update( coo_matrix(arg1, dtype=dtype).todok() ) self.shape = arg1.shape self.dtype = arg1.dtype @@ -239,7 +244,7 @@ def __setitem__(self, key, value): if np.isscalar(value): if value == 0: - if self.has_key((i,j)): + if (i,j) in self: del self[(i,j)] else: dict.__setitem__(self, (i,j), self.dtype.type(value)) @@ -391,14 +396,14 @@ def __neg__(self): def _mul_scalar(self, other): # Multiply this scalar by every element. new = dok_matrix(self.shape, dtype=self.dtype) - for (key, val) in self.iteritems(): + for (key, val) in iteritems(self): new[key] = val * other return new def _mul_vector(self, other): #matrix * vector result = np.zeros( self.shape[0], dtype=upcast(self.dtype,other.dtype) ) - for (i,j),v in self.iteritems(): + for (i,j),v in iteritems(self): result[i] += v * other[j] return result @@ -407,14 +412,14 @@ def _mul_multivector(self, other): M,N = self.shape n_vecs = other.shape[1] #number of column vectors result = np.zeros( (M,n_vecs), dtype=upcast(self.dtype,other.dtype) ) - for (i,j),v in self.iteritems(): + for (i,j),v in iteritems(self): result[i,:] += v * other[j,:] return result def __imul__(self, other): if isscalarlike(other): # Multiply this scalar by every element. - for (key, val) in self.iteritems(): + for (key, val) in iteritems(self): self[key] = val * other #new.dtype.char = self.dtype.char return self @@ -426,7 +431,7 @@ def __truediv__(self, other): if isscalarlike(other): new = dok_matrix(self.shape, dtype=self.dtype) # Multiply this scalar by every element. - for (key, val) in self.iteritems(): + for (key, val) in iteritems(self): new[key] = val / other #new.dtype.char = self.dtype.char return new @@ -437,7 +442,7 @@ def __truediv__(self, other): def __itruediv__(self, other): if isscalarlike(other): # Multiply this scalar by every element. 
- for (key, val) in self.iteritems(): + for (key, val) in iteritems(self): self[key] = val / other return self else: @@ -452,7 +457,7 @@ def transpose(self): """ M, N = self.shape new = dok_matrix((N, M), dtype=self.dtype) - for key, value in self.iteritems(): + for key, value in iteritems(self): new[key[1], key[0]] = value return new @@ -461,7 +466,7 @@ def conjtransp(self): """ M, N = self.shape new = dok_matrix((N, M), dtype=self.dtype) - for key, value in self.iteritems(): + for key, value in iteritems(self): new[key[1], key[0]] = np.conj(value) return new @@ -518,12 +523,12 @@ def split(self, cols_or_rows, columns=1): def tocoo(self): """ Return a copy of this matrix in COOrdinate format""" - from coo import coo_matrix + from .coo import coo_matrix if self.nnz == 0: return coo_matrix(self.shape, dtype=self.dtype) else: - data = np.asarray(self.values(), dtype=self.dtype) - indices = np.asarray(self.keys(), dtype=np.intc).T + data = np.asarray(list(self.values()), dtype=self.dtype) + indices = np.asarray(list(self.keys()), dtype=np.intc).T return coo_matrix((data,indices), shape=self.shape, dtype=self.dtype) def todok(self,copy=False): @@ -556,7 +561,7 @@ def resize(self, shape): M, N = self.shape if newM < M or newN < N: # Remove all elements outside new dimensions - for (i, j) in self.keys(): + for (i, j) in list(self.keys()): if i >= newM or j >= newN: del self[i, j] self._shape = shape diff --git a/scipy/sparse/extract.py b/scipy/sparse/extract.py index 2f24b76059a6..9cb3af431080 100644 --- a/scipy/sparse/extract.py +++ b/scipy/sparse/extract.py @@ -1,12 +1,14 @@ """Functions to extract parts of sparse matrices """ +from __future__ import division, print_function, absolute_import + __docformat__ = "restructuredtext en" __all__ = ['find', 'tril', 'triu'] -from coo import coo_matrix +from .coo import coo_matrix def find(A): """Return the indices and values of the nonzero elements of a matrix diff --git a/scipy/sparse/lil.py b/scipy/sparse/lil.py index 83c48d55ac60..79378349cc17 100644 --- a/scipy/sparse/lil.py +++ b/scipy/sparse/lil.py @@ -1,6 +1,8 @@ """LInked List sparse matrix class """ +from __future__ import division, print_function, absolute_import + __docformat__ = "restructuredtext en" __all__ = ['lil_matrix','isspmatrix_lil'] @@ -8,12 +10,13 @@ from bisect import bisect_left import numpy as np +from scipy.lib.six.moves import xrange -from base import spmatrix, isspmatrix -from sputils import getdtype, isshape, issequence, isscalarlike +from .base import spmatrix, isspmatrix +from .sputils import getdtype, isshape, issequence, isscalarlike from warnings import warn -from base import SparseEfficiencyWarning +from .base import SparseEfficiencyWarning class lil_matrix(spmatrix): @@ -116,7 +119,7 @@ def __init__(self, arg1, shape=None, dtype=None, copy=False): except TypeError: raise TypeError('unsupported matrix type') else: - from csr import csr_matrix + from .csr import csr_matrix A = csr_matrix(A, dtype=dtype).tolil() self.shape = A.shape @@ -210,7 +213,7 @@ def _slicetoseq(self, j, shape): stop = shape else: stop = j.stop - j = range(start, stop, j.step or 1) + j = list(range(start, stop, j.step or 1)) return j @@ -467,7 +470,7 @@ def tocsr(self): data.extend(x) data = np.asarray(data, dtype=self.dtype) - from csr import csr_matrix + from .csr import csr_matrix return csr_matrix((data, indices, indptr), shape=self.shape) def tocsc(self): diff --git a/scipy/sparse/linalg/__init__.py b/scipy/sparse/linalg/__init__.py index ee35193adf38..a3a41a85c23f 100644 --- 
a/scipy/sparse/linalg/__init__.py +++ b/scipy/sparse/linalg/__init__.py @@ -95,14 +95,15 @@ """ +from __future__ import division, print_function, absolute_import -from isolve import * -from dsolve import * -from interface import * -from eigen import * -from matfuncs import * +from .isolve import * +from .dsolve import * +from .interface import * +from .eigen import * +from .matfuncs import * -__all__ = filter(lambda s:not s.startswith('_'),dir()) +__all__ = [s for s in dir() if not s.startswith('_')] from numpy.testing import Tester test = Tester().test bench = Tester().bench diff --git a/scipy/sparse/linalg/dsolve/__init__.py b/scipy/sparse/linalg/dsolve/__init__.py index 2a26cbb55a6b..e5566e658d42 100644 --- a/scipy/sparse/linalg/dsolve/__init__.py +++ b/scipy/sparse/linalg/dsolve/__init__.py @@ -51,12 +51,14 @@ """ +from __future__ import division, print_function, absolute_import + #import umfpack #__doc__ = '\n\n'.join( (__doc__, umfpack.__doc__) ) #del umfpack -from linsolve import * +from .linsolve import * -__all__ = filter(lambda s:not s.startswith('_'),dir()) +__all__ = [s for s in dir() if not s.startswith('_')] from numpy.testing import Tester test = Tester().test diff --git a/scipy/sparse/linalg/dsolve/linsolve.py b/scipy/sparse/linalg/dsolve/linsolve.py index af6467ed769a..2ff90c0c0a3f 100644 --- a/scipy/sparse/linalg/dsolve/linsolve.py +++ b/scipy/sparse/linalg/dsolve/linsolve.py @@ -1,16 +1,18 @@ +from __future__ import division, print_function, absolute_import + from warnings import warn from numpy import asarray, empty, where, squeeze, prod from scipy.sparse import isspmatrix_csc, isspmatrix_csr, isspmatrix, \ SparseEfficiencyWarning, csc_matrix -import _superlu +from . import _superlu noScikit = False try: import scikits.umfpack as umfpack except ImportError: - import umfpack + from . 
import umfpack noScikit = True isUmfpack = hasattr( umfpack, 'UMFPACK_OK' ) diff --git a/scipy/sparse/linalg/dsolve/setup.py b/scipy/sparse/linalg/dsolve/setup.py index 0263932e80d3..0c5fa279153b 100755 --- a/scipy/sparse/linalg/dsolve/setup.py +++ b/scipy/sparse/linalg/dsolve/setup.py @@ -1,4 +1,6 @@ #!/usr/bin/env python +from __future__ import division, print_function, absolute_import + from os.path import join, dirname import sys import os diff --git a/scipy/sparse/linalg/dsolve/setupscons.py b/scipy/sparse/linalg/dsolve/setupscons.py index 330d0c981eff..04df12575916 100755 --- a/scipy/sparse/linalg/dsolve/setupscons.py +++ b/scipy/sparse/linalg/dsolve/setupscons.py @@ -1,4 +1,6 @@ #!/usr/bin/env python +from __future__ import division, print_function, absolute_import + from os.path import join import sys diff --git a/scipy/sparse/linalg/dsolve/tests/test_linsolve.py b/scipy/sparse/linalg/dsolve/tests/test_linsolve.py index bb1a1ec6011d..812550c4d85b 100644 --- a/scipy/sparse/linalg/dsolve/tests/test_linsolve.py +++ b/scipy/sparse/linalg/dsolve/tests/test_linsolve.py @@ -1,3 +1,5 @@ +from __future__ import division, print_function, absolute_import + import warnings from numpy import array, finfo, arange, eye, all, unique, ones, dot, matrix diff --git a/scipy/sparse/linalg/dsolve/umfpack/__init__.py b/scipy/sparse/linalg/dsolve/umfpack/__init__.py index dd8764b41b70..c87ed3f7fdd0 100644 --- a/scipy/sparse/linalg/dsolve/umfpack/__init__.py +++ b/scipy/sparse/linalg/dsolve/umfpack/__init__.py @@ -166,11 +166,10 @@ :Other contributors: Nathan Bell (lu() method wrappers) """ +from __future__ import division, print_function, absolute_import +from .umfpack import * -from umfpack import * - - -__all__ = filter(lambda s:not s.startswith('_'),dir()) +__all__ = [s for s in dir() if not s.startswith('_')] from numpy.testing import Tester test = Tester().test diff --git a/scipy/sparse/linalg/dsolve/umfpack/setup.py b/scipy/sparse/linalg/dsolve/umfpack/setup.py index a7ac4891c65c..8fb10640b2f2 100644 --- a/scipy/sparse/linalg/dsolve/umfpack/setup.py +++ b/scipy/sparse/linalg/dsolve/umfpack/setup.py @@ -1,6 +1,8 @@ #!/usr/bin/env python # 05.12.2005, c # last change: 27.03.2006 +from __future__ import division, print_function, absolute_import + def configuration(parent_package='',top_path=None): import numpy from numpy.distutils.misc_util import Configuration diff --git a/scipy/sparse/linalg/dsolve/umfpack/setupscons.py b/scipy/sparse/linalg/dsolve/umfpack/setupscons.py index d44fe1487714..b2a67da58094 100644 --- a/scipy/sparse/linalg/dsolve/umfpack/setupscons.py +++ b/scipy/sparse/linalg/dsolve/umfpack/setupscons.py @@ -1,6 +1,8 @@ #!/usr/bin/env python # 05.12.2005, c # last change: 27.03.2006 +from __future__ import division, print_function, absolute_import + def configuration(parent_package='',top_path=None): from numpy.distutils.misc_util import Configuration diff --git a/scipy/sparse/linalg/dsolve/umfpack/tests/test_umfpack.py b/scipy/sparse/linalg/dsolve/umfpack/tests/test_umfpack.py index 6bf4b963e102..f28ed17f3ce7 100644 --- a/scipy/sparse/linalg/dsolve/umfpack/tests/test_umfpack.py +++ b/scipy/sparse/linalg/dsolve/umfpack/tests/test_umfpack.py @@ -5,6 +5,8 @@ """ +from __future__ import division, print_function, absolute_import + import warnings import random diff --git a/scipy/sparse/linalg/dsolve/umfpack/tests/try_umfpack.py b/scipy/sparse/linalg/dsolve/umfpack/tests/try_umfpack.py index 6f7cd7acdb42..4b5a4803c985 100644 --- a/scipy/sparse/linalg/dsolve/umfpack/tests/try_umfpack.py +++ 
b/scipy/sparse/linalg/dsolve/umfpack/tests/try_umfpack.py @@ -3,13 +3,17 @@ """Benchamrks for umfpack module""" +from __future__ import division, print_function, absolute_import + from optparse import OptionParser import time -import urllib +import urllib.request, urllib.parse, urllib.error import gzip import numpy as np +import sys + import scipy.sparse as sp import scipy.sparse.linalg.dsolve.umfpack as um import scipy.linalg as nla @@ -70,24 +74,27 @@ def readMatrix( matrixName, options ): if options.default_url: matrixName = defaultURL + matrixName - print 'url:', matrixName + print('url:', matrixName) if matrixName[:7] == 'http://': - fileName, status = urllib.urlretrieve( matrixName ) + if sys.version_info[0] >= 3: + fileName, status = urllib.request.urlretrieve( matrixName ) + else: + fileName, status = urllib.urlretrieve( matrixName ) ## print status else: fileName = matrixName - print 'file:', fileName + print('file:', fileName) try: readMatrix = formatMap[options.format] except: raise ValueError('unsupported format: %s' % options.format) - print 'format:', options.format + print('format:', options.format) - print 'reading...' + print('reading...') if fileName.endswith('.gz'): fd = gzip.open( fileName ) else: @@ -97,7 +104,7 @@ def readMatrix( matrixName, options ): fd.close() - print 'ok' + print('ok') return mtx @@ -132,7 +139,7 @@ def main(): legends = ['umfpack', 'sparse.solve'] for ii, matrixName in enumerate( matrixNames ): - print '*' * 50 + print('*' * 50) mtx = readMatrix( matrixName, options ) sizes.append( mtx.shape ) @@ -142,7 +149,7 @@ def main(): err = np.zeros( (2,2), dtype = np.double ) errors.append( err ) - print 'size : %s (%d nnz)' % (mtx.shape, mtx.nnz) + print('size : %s (%d nnz)' % (mtx.shape, mtx.nnz)) sol0 = np.ones( (mtx.shape[0],), dtype = np.double ) rhs = mtx * sol0 @@ -152,29 +159,29 @@ def main(): tt = time.clock() sol = umfpack( um.UMFPACK_A, mtx, rhs, autoTranspose = True ) tts[0] = time.clock() - tt - print "umfpack : %.2f s" % tts[0] + print("umfpack : %.2f s" % tts[0]) error = mtx * sol - rhs err[0,0] = nla.norm( error ) - print '||Ax-b|| :', err[0,0] + print('||Ax-b|| :', err[0,0]) error = sol0 - sol err[0,1] = nla.norm( error ) - print '||x - x_{exact}|| :', err[0,1] + print('||x - x_{exact}|| :', err[0,1]) if options.compare: tt = time.clock() sol = sp.solve( mtx, rhs ) tts[1] = time.clock() - tt - print "sparse.solve : %.2f s" % tts[1] + print("sparse.solve : %.2f s" % tts[1]) error = mtx * sol - rhs err[1,0] = nla.norm( error ) - print '||Ax-b|| :', err[1,0] + print('||Ax-b|| :', err[1,0]) error = sol0 - sol err[1,1] = nla.norm( error ) - print '||x - x_{exact}|| :', err[1,1] + print('||x - x_{exact}|| :', err[1,1]) if options.plot: try: @@ -182,18 +189,18 @@ def main(): except ImportError: raise ImportError("could not import pylab") times = np.array( times ) - print times + print(times) pylab.plot( times[:,0], 'b-o' ) if options.compare: pylab.plot( times[:,1], 'r-s' ) else: del legends[1] - print legends + print(legends) ax = pylab.axis() y2 = 0.5 * (ax[3] - ax[2]) - xrng = range( len( nnzs ) ) + xrng = list(range( len( nnzs ))) for ii in xrng: yy = y2 + 0.4 * (ax[3] - ax[2])\ * np.sin( ii * 2 * np.pi / (len( xrng ) - 1) ) diff --git a/scipy/sparse/linalg/dsolve/umfpack/umfpack.py b/scipy/sparse/linalg/dsolve/umfpack/umfpack.py index 2f03a2e14497..45b8e98f8380 100644 --- a/scipy/sparse/linalg/dsolve/umfpack/umfpack.py +++ b/scipy/sparse/linalg/dsolve/umfpack/umfpack.py @@ -5,13 +5,17 @@ Author: Robert Cimrman """ +from __future__ import 
division, print_function, absolute_import + import re import warnings +from scipy.lib.six import iteritems + import numpy as np import scipy.sparse as sp try: # Silence import error. - import _umfpack as _um + from . import _umfpack as _um except: _um = None @@ -229,7 +233,7 @@ def updateDictWithVars( adict, module, pattern, group = None ): UMFPACK_Aat : UMFPACK_A} ] -umfFamilyTypes = {'di' : int, 'dl' : long, 'zi' : int, 'zl' : long} +umfFamilyTypes = {'di' : int, 'dl' : int, 'zi' : int, 'zl' : int} umfRealTypes = ('di', 'dl') umfComplexTypes = ('zi', 'zl') @@ -245,7 +249,7 @@ def __init__( self, **kwargs ): # 08.03.2005 def __str__( self ): ss = "%s\n" % self.__class__ - for key, val in self.__dict__.iteritems(): + for key, val in iteritems(self.__dict__): if (issubclass( self.__dict__[key].__class__, Struct )): ss += " %s:\n %s\n" % (key, self.__dict__[key].__class__) else: diff --git a/scipy/sparse/linalg/eigen/__init__.py b/scipy/sparse/linalg/eigen/__init__.py index 685c750c95d3..e5f887d43042 100644 --- a/scipy/sparse/linalg/eigen/__init__.py +++ b/scipy/sparse/linalg/eigen/__init__.py @@ -6,11 +6,12 @@ 1. lobpcg: Locally Optimal Block Preconditioned Conjugate Gradient Method """ +from __future__ import division, print_function, absolute_import -from arpack import * -from lobpcg import * +from .arpack import * +from .lobpcg import * -__all__ = filter(lambda s:not s.startswith('_'),dir()) +__all__ = [s for s in dir() if not s.startswith('_')] from numpy.testing import Tester test = Tester().test bench = Tester().bench diff --git a/scipy/sparse/linalg/eigen/arpack/__init__.py b/scipy/sparse/linalg/eigen/arpack/__init__.py index 85190cd1a9b7..420bdc45dafa 100644 --- a/scipy/sparse/linalg/eigen/arpack/__init__.py +++ b/scipy/sparse/linalg/eigen/arpack/__init__.py @@ -17,6 +17,6 @@ Arnoldi Methods. SIAM, Philadelphia, PA, 1998. """ +from __future__ import division, print_function, absolute_import - -from arpack import * +from .arpack import * diff --git a/scipy/sparse/linalg/eigen/arpack/arpack.py b/scipy/sparse/linalg/eigen/arpack/arpack.py index f2d4aac4aa99..cc97539636b5 100644 --- a/scipy/sparse/linalg/eigen/arpack/arpack.py +++ b/scipy/sparse/linalg/eigen/arpack/arpack.py @@ -1,4 +1,3 @@ - """ Find a few eigenvectors and eigenvalues of a matrix. @@ -37,6 +36,8 @@ # ARPACK and handle shifted and shift-inverse computations # for eigenvalues by providing a shift (sigma) and a solver. +from __future__ import division, print_function, absolute_import + __docformat__ = "restructuredtext en" __all__ = ['eigs', 'eigsh', 'svds', 'ArpackError', 'ArpackNoConvergence'] @@ -44,7 +45,7 @@ import sys import warnings -import _arpack +from . 
import _arpack import numpy as np from scipy.sparse.linalg.interface import aslinearoperator, LinearOperator from scipy.sparse import eye, csc_matrix, csr_matrix, \ @@ -375,7 +376,7 @@ def _raise_no_convergence(self): num_iter = self.iparam[2] try: ev, vec = self.extract(True) - except ArpackError, err: + except ArpackError as err: msg = "%s [%s]" % (msg, err) ev = np.zeros((0,)) vec = np.zeros((self.n, 0)) diff --git a/scipy/sparse/linalg/eigen/arpack/setup.py b/scipy/sparse/linalg/eigen/arpack/setup.py index 4e63641feb30..9a00a80c62c5 100755 --- a/scipy/sparse/linalg/eigen/arpack/setup.py +++ b/scipy/sparse/linalg/eigen/arpack/setup.py @@ -1,4 +1,6 @@ #!/usr/bin/env python +from __future__ import division, print_function, absolute_import + import re from os.path import join diff --git a/scipy/sparse/linalg/eigen/arpack/setupscons.py b/scipy/sparse/linalg/eigen/arpack/setupscons.py index 6257abbe6d46..b82dc8ede7f7 100755 --- a/scipy/sparse/linalg/eigen/arpack/setupscons.py +++ b/scipy/sparse/linalg/eigen/arpack/setupscons.py @@ -1,4 +1,5 @@ #!/usr/bin/env python +from __future__ import division, print_function, absolute_import from os.path import join diff --git a/scipy/sparse/linalg/eigen/arpack/tests/test_arpack.py b/scipy/sparse/linalg/eigen/arpack/tests/test_arpack.py index 8e9d80856274..d7ecbf9a4ccd 100644 --- a/scipy/sparse/linalg/eigen/arpack/tests/test_arpack.py +++ b/scipy/sparse/linalg/eigen/arpack/tests/test_arpack.py @@ -1,3 +1,5 @@ +from __future__ import division, print_function, absolute_import + __usage__ = """ To run tests locally: python tests/test_arpack.py [-l] [-v] @@ -362,7 +364,7 @@ def test_symmetric_modes(): for typ in 'fd': for which in params.which: for mattype in params.mattypes: - for (sigma, modes) in params.sigmas_modes.iteritems(): + for (sigma, modes) in params.sigmas_modes.items(): for mode in modes: yield (eval_evec, symmetric, D, typ, k, which, None, sigma, mattype, None, mode) @@ -399,7 +401,7 @@ def test_symmetric_no_convergence(): try: w, v = eigsh(m, 4, which='LM', v0=m[:, 0], maxiter=5, tol=tol) raise AssertionError("Spurious no-error exit") - except ArpackNoConvergence, err: + except ArpackNoConvergence as err: k = len(err.eigenvalues) if k <= 0: raise AssertionError("Spurious no-eigenvalues-found case") @@ -415,7 +417,7 @@ def test_real_nonsymmetric_modes(): for typ in 'fd': for which in params.which: for mattype in params.mattypes: - for sigma, OPparts in params.sigmas_OPparts.iteritems(): + for sigma, OPparts in params.sigmas_OPparts.items(): for OPpart in OPparts: yield (eval_evec, symmetric, D, typ, k, which, None, sigma, mattype, OPpart) @@ -467,7 +469,7 @@ def test_standard_nonsymmetric_no_convergence(): try: w, v = eigs(m, 4, which='LM', v0=m[:, 0], maxiter=5, tol=tol) raise AssertionError("Spurious no-error exit") - except ArpackNoConvergence, err: + except ArpackNoConvergence as err: k = len(err.eigenvalues) if k <= 0: raise AssertionError("Spurious no-eigenvalues-found case") diff --git a/scipy/sparse/linalg/eigen/lobpcg/__init__.py b/scipy/sparse/linalg/eigen/lobpcg/__init__.py index 510f6e1c9b51..5bd15e0cca2c 100644 --- a/scipy/sparse/linalg/eigen/lobpcg/__init__.py +++ b/scipy/sparse/linalg/eigen/lobpcg/__init__.py @@ -99,9 +99,9 @@ http://www-math.cudenver.edu/~aknyazev/software/BLOPEX/ """ +from __future__ import division, print_function, absolute_import - -from lobpcg import * +from .lobpcg import * from numpy.testing import Tester test = Tester().test diff --git a/scipy/sparse/linalg/eigen/lobpcg/lobpcg.py 
b/scipy/sparse/linalg/eigen/lobpcg/lobpcg.py index d462c306b20a..9e02504d2198 100644 --- a/scipy/sparse/linalg/eigen/lobpcg/lobpcg.py +++ b/scipy/sparse/linalg/eigen/lobpcg/lobpcg.py @@ -10,10 +10,14 @@ Examples in tests directory contributed by Nils Wagner. """ +from __future__ import division, print_function, absolute_import + import sys import numpy as np import scipy as sp +from scipy.lib.six.moves import xrange + from scipy.sparse.linalg import aslinearoperator, LinearOperator __all__ = ['lobpcg'] @@ -62,7 +66,7 @@ def symeig( mtxA, mtxB = None, eigenvectors = True, select = None ): return out[:-1] def pause(): - raw_input() + input() def save( ar, fileName ): from numpy import savetxt @@ -281,7 +285,7 @@ def lobpcg( A, X, aux += "%d constraints\n\n" % sizeY else: aux += "%d constraint\n\n" % sizeY - print aux + print(aux) ## # Apply constraints to X. @@ -340,7 +344,7 @@ def lobpcg( A, X, # Main iteration loop. for iterationNumber in xrange( maxIterations ): if verbosityLevel > 0: - print 'iteration %d' % iterationNumber + print('iteration %d' % iterationNumber) aux = blockVectorBX * _lambda[np.newaxis,:] blockVectorR = blockVectorAX - aux @@ -353,7 +357,7 @@ def lobpcg( A, X, ii = np.where( residualNorms > residualTolerance, True, False ) activeMask = activeMask & ii if verbosityLevel > 2: - print activeMask + print(activeMask) currentBlockSize = activeMask.sum() if currentBlockSize != previousBlockSize: @@ -365,11 +369,11 @@ def lobpcg( A, X, break if verbosityLevel > 0: - print 'current block size:', currentBlockSize - print 'eigenvalue:', _lambda - print 'residual norms:', residualNorms + print('current block size:', currentBlockSize) + print('eigenvalue:', _lambda) + print('residual norms:', residualNorms) if verbosityLevel > 10: - print eigBlockVector + print(eigBlockVector) activeBlockVectorR = as2d( blockVectorR[:,activeMask] ) @@ -433,13 +437,13 @@ def lobpcg( A, X, try: assert np.allclose( gramA.T, gramA ) except: - print gramA.T - gramA + print(gramA.T - gramA) raise try: assert np.allclose( gramB.T, gramB ) except: - print gramB.T - gramB + print(gramB.T - gramB) raise if verbosityLevel > 10: @@ -454,7 +458,7 @@ def lobpcg( A, X, if largest: ii = ii[::-1] if verbosityLevel > 10: - print ii + print(ii) _lambda = _lambda[ii].astype( np.float64 ) eigBlockVector = np.asarray( eigBlockVector[:,ii].astype( np.float64 ) ) @@ -462,7 +466,7 @@ def lobpcg( A, X, lambdaHistory.append( _lambda ) if verbosityLevel > 10: - print 'lambda:', _lambda + print('lambda:', _lambda) ## # Normalize eigenvectors! 
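
The lobpcg.py hunk above renames the debugging pause() helper's raw_input() call to input(), which is the Python 3 spelling. On Python 2 the builtin input() evaluates the typed text instead of returning it, so code that must prompt identically under both interpreters usually guards the call; a minimal sketch of that guard (ask is a hypothetical name, not part of the patch):

import sys

def ask(prompt=''):
    """Read one line of text from stdin on Python 2 or Python 3."""
    if sys.version_info[0] >= 3:
        return input(prompt)
    # Python 2: raw_input returns the raw string; input() would eval() it.
    return raw_input(prompt)  # noqa: F821 -- builtin only on Python 2
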
## aux = np.sum( eigBlockVector.conjugate() * eigBlockVector, 0 ) ## eigVecNorms = np.sqrt( aux ) @@ -470,7 +474,7 @@ def lobpcg( A, X, # eigBlockVector, aux = b_orthonormalize( B, eigBlockVector ) if verbosityLevel > 10: - print eigBlockVector + print(eigBlockVector) pause() ## @@ -497,9 +501,9 @@ def lobpcg( A, X, bpp = sp.dot( activeBlockVectorBR, eigBlockVectorR ) if verbosityLevel > 10: - print pp - print app - print bpp + print(pp) + print(app) + print(bpp) pause() blockVectorX = sp.dot( blockVectorX, eigBlockVectorX ) + pp @@ -516,8 +520,8 @@ def lobpcg( A, X, if verbosityLevel > 0: - print 'final eigenvalue:', _lambda - print 'final residual norms:', residualNorms + print('final eigenvalue:', _lambda) + print('final residual norms:', residualNorms) if retLambdaHistory: if retResidualNormsHistory: @@ -569,7 +573,7 @@ def precond( x ): M = precond, residualTolerance = 1e-4, maxIterations = 40, largest = False, verbosityLevel = 1 ) - print 'solution time:', time.clock() - tt + print('solution time:', time.clock() - tt) - print vecs - print eigs + print(vecs) + print(eigs) diff --git a/scipy/sparse/linalg/eigen/lobpcg/setup.py b/scipy/sparse/linalg/eigen/lobpcg/setup.py index 97778f539463..e38a876e9fd8 100755 --- a/scipy/sparse/linalg/eigen/lobpcg/setup.py +++ b/scipy/sparse/linalg/eigen/lobpcg/setup.py @@ -1,4 +1,5 @@ #!/usr/bin/env python +from __future__ import division, print_function, absolute_import def configuration(parent_package='',top_path=None): from numpy.distutils.misc_util import Configuration diff --git a/scipy/sparse/linalg/eigen/lobpcg/setupscons.py b/scipy/sparse/linalg/eigen/lobpcg/setupscons.py index 97778f539463..e38a876e9fd8 100755 --- a/scipy/sparse/linalg/eigen/lobpcg/setupscons.py +++ b/scipy/sparse/linalg/eigen/lobpcg/setupscons.py @@ -1,4 +1,5 @@ #!/usr/bin/env python +from __future__ import division, print_function, absolute_import def configuration(parent_package='',top_path=None): from numpy.distutils.misc_util import Configuration diff --git a/scipy/sparse/linalg/eigen/lobpcg/tests/benchmark.py b/scipy/sparse/linalg/eigen/lobpcg/tests/benchmark.py index a47fa4373a59..b271ba27e192 100644 --- a/scipy/sparse/linalg/eigen/lobpcg/tests/benchmark.py +++ b/scipy/sparse/linalg/eigen/lobpcg/tests/benchmark.py @@ -1,3 +1,5 @@ +from __future__ import division, print_function, absolute_import + from scipy import * from scipy.sparse.linalg import lobpcg from symeig import symeig @@ -32,7 +34,7 @@ def precond(x): data2=[] for n in N: - print '******', n + print('******', n) A,B = test(n) # Mikota pair X = rand(n,m) X = linalg.orth(X) @@ -43,18 +45,18 @@ def precond(x): residualTolerance = 1e-4, maxIterations = 40) data1.append(time.clock()-tt) eigs = sort(eigs) - print - print 'Results by LOBPCG' - print - print n,eigs + print() + print('Results by LOBPCG') + print() + print(n,eigs) tt = time.clock() w,v=symeig(A,B,range=(1,m)) data2.append(time.clock()-tt) - print - print 'Results by symeig' - print - print n, w + print() + print('Results by symeig') + print() + print(n, w) xlabel(r'Size $n$') ylabel(r'Elapsed time $t$') diff --git a/scipy/sparse/linalg/eigen/lobpcg/tests/large_scale.py b/scipy/sparse/linalg/eigen/lobpcg/tests/large_scale.py index 6392b16b87b9..d1d434bf5c5e 100644 --- a/scipy/sparse/linalg/eigen/lobpcg/tests/large_scale.py +++ b/scipy/sparse/linalg/eigen/lobpcg/tests/large_scale.py @@ -1,3 +1,5 @@ +from __future__ import division, print_function, absolute_import + from scipy import array, arange, ones, sort, cos, pi, rand, \ set_printoptions, r_ from 
scipy.sparse.linalg import lobpcg @@ -35,15 +37,15 @@ def sakurai(n): tt = time.clock() eigs,vecs, resnh = lobpcg(X,A,B, residualTolerance = 1e-6, maxIterations =500, retResidualNormsHistory=1) data.append(time.clock()-tt) -print 'Results by LOBPCG for n='+str(n) -print -print eigs -print -print 'Exact eigenvalues' -print -print w_ex[:m] -print -print 'Elapsed time',data[0] +print('Results by LOBPCG for n='+str(n)) +print() +print(eigs) +print() +print('Exact eigenvalues') +print() +print(w_ex[:m]) +print() +print('Elapsed time',data[0]) loglog(arange(1,n+1),w_ex,'b.') xlabel(r'Number $i$') ylabel(r'$\lambda_i$') diff --git a/scipy/sparse/linalg/eigen/lobpcg/tests/test_lobpcg.py b/scipy/sparse/linalg/eigen/lobpcg/tests/test_lobpcg.py index 40c68f04a38c..adcf74f007a6 100644 --- a/scipy/sparse/linalg/eigen/lobpcg/tests/test_lobpcg.py +++ b/scipy/sparse/linalg/eigen/lobpcg/tests/test_lobpcg.py @@ -1,6 +1,7 @@ #!/usr/bin/env python """ Test functions for the sparse.linalg.eigen.lobpcg module """ +from __future__ import division, print_function, absolute_import import numpy from numpy.testing import assert_almost_equal, run_module_suite diff --git a/scipy/sparse/linalg/eigen/setup.py b/scipy/sparse/linalg/eigen/setup.py index f5938d14ecad..77dc2f1161e0 100755 --- a/scipy/sparse/linalg/eigen/setup.py +++ b/scipy/sparse/linalg/eigen/setup.py @@ -1,4 +1,5 @@ #!/usr/bin/env python +from __future__ import division, print_function, absolute_import def configuration(parent_package='',top_path=None): from numpy.distutils.misc_util import Configuration diff --git a/scipy/sparse/linalg/eigen/setupscons.py b/scipy/sparse/linalg/eigen/setupscons.py index bd10842a5fcb..fd35e47e0a7c 100755 --- a/scipy/sparse/linalg/eigen/setupscons.py +++ b/scipy/sparse/linalg/eigen/setupscons.py @@ -1,4 +1,5 @@ #!/usr/bin/env python +from __future__ import division, print_function, absolute_import def configuration(parent_package='',top_path=None): from numpy.distutils.misc_util import Configuration diff --git a/scipy/sparse/linalg/interface.py b/scipy/sparse/linalg/interface.py index 687136e5aec4..0dfee4ad0302 100644 --- a/scipy/sparse/linalg/interface.py +++ b/scipy/sparse/linalg/interface.py @@ -1,3 +1,5 @@ +from __future__ import division, print_function, absolute_import + import numpy as np from scipy.sparse.sputils import isshape from scipy.sparse import isspmatrix diff --git a/scipy/sparse/linalg/isolve/__init__.py b/scipy/sparse/linalg/isolve/__init__.py index a7bc40913a67..1ad1685dbfc6 100644 --- a/scipy/sparse/linalg/isolve/__init__.py +++ b/scipy/sparse/linalg/isolve/__init__.py @@ -1,13 +1,15 @@ "Iterative Solvers for Sparse Linear Systems" +from __future__ import division, print_function, absolute_import + #from info import __doc__ -from iterative import * -from minres import minres -from lgmres import lgmres -from lsqr import lsqr -from lsmr import lsmr +from .iterative import * +from .minres import minres +from .lgmres import lgmres +from .lsqr import lsqr +from .lsmr import lsmr -__all__ = filter(lambda s:not s.startswith('_'),dir()) +__all__ = [s for s in dir() if not s.startswith('_')] from numpy.testing import Tester test = Tester().test bench = Tester().bench diff --git a/scipy/sparse/linalg/isolve/iterative.py b/scipy/sparse/linalg/isolve/iterative.py index 59c127658a8e..9ba5c74adb1b 100644 --- a/scipy/sparse/linalg/isolve/iterative.py +++ b/scipy/sparse/linalg/isolve/iterative.py @@ -1,13 +1,15 @@ """Iterative methods for solving linear systems""" +from __future__ import division, print_function, 
absolute_import + __all__ = ['bicg','bicgstab','cg','cgs','gmres','qmr'] -import _iterative +from . import _iterative import numpy as np from scipy.sparse.linalg.interface import LinearOperator from scipy.lib.decorator import decorator -from utils import make_system +from .utils import make_system _type_conv = {'f':'s', 'd':'d', 'F':'c', 'D':'z'} diff --git a/scipy/sparse/linalg/isolve/iterative/test.py b/scipy/sparse/linalg/isolve/iterative/test.py index b2160d7dfb95..7f572758ab7d 100644 --- a/scipy/sparse/linalg/isolve/iterative/test.py +++ b/scipy/sparse/linalg/isolve/iterative/test.py @@ -1,3 +1,5 @@ +from __future__ import division, print_function, absolute_import + from scipy import * from iterative import * @@ -108,15 +110,15 @@ def test_psolveq_t(x,b,which,n): #zx,ziter,zresid,zinfor = zqmr(zb,zx,ziter,zresid,test_fun,test_fun_t,test_psolveq,test_psolveq_t,(zA,n),(zA,n),(n,),(n,)) -print -print '**************** double *****************' -print 'iter:',diter, 'resid:', dresid, 'info:',dinfor -print 'x=',dx -print '*****************************************' -print -print -print '**************** complex ****************' -print 'iter:',ziter, 'resid:',zresid, 'info:',zinfor -print 'x=',zx -print '*****************************************' -print +print() +print('**************** double *****************') +print('iter:',diter, 'resid:', dresid, 'info:',dinfor) +print('x=',dx) +print('*****************************************') +print() +print() +print('**************** complex ****************') +print('iter:',ziter, 'resid:',zresid, 'info:',zinfor) +print('x=',zx) +print('*****************************************') +print() diff --git a/scipy/sparse/linalg/isolve/lgmres.py b/scipy/sparse/linalg/isolve/lgmres.py index 8ad96315bc84..c914271de051 100644 --- a/scipy/sparse/linalg/isolve/lgmres.py +++ b/scipy/sparse/linalg/isolve/lgmres.py @@ -1,9 +1,12 @@ # Copyright (C) 2009, Pauli Virtanen # Distributed under the same license as Scipy. 
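
The isolve modules above switch implicit relative imports (import _iterative, from utils import make_system) to the explicit dotted form, which is required once absolute_import is in effect and on Python 3 unconditionally. A runnable sketch of the difference, using a throwaway package demopkg built in a temporary directory (all names here are hypothetical, not from the patch):

import os
import sys
import tempfile

root = tempfile.mkdtemp()
pkg = os.path.join(root, 'demopkg')
os.mkdir(pkg)
open(os.path.join(pkg, '__init__.py'), 'w').close()
with open(os.path.join(pkg, 'utils.py'), 'w') as f:
    f.write('def make_system():\n    return "system"\n')
with open(os.path.join(pkg, 'solver.py'), 'w') as f:
    f.write('from __future__ import absolute_import\n'
            '# "from utils import make_system" (implicit relative) fails here\n'
            'from .utils import make_system   # explicit: works on 2.x and 3.x\n')

sys.path.insert(0, root)
import demopkg.solver
assert demopkg.solver.make_system() == 'system'

The commented-out implicit form raises ImportError on Python 3, and on Python 2 as soon as absolute_import is active, which is why the patch adds the leading dot everywhere.
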
+from __future__ import division, print_function, absolute_import + import numpy as np +from scipy.lib.six.moves import xrange from scipy.linalg import get_blas_funcs -from utils import make_system +from .utils import make_system __all__ = ['lgmres'] diff --git a/scipy/sparse/linalg/isolve/lsmr.py b/scipy/sparse/linalg/isolve/lsmr.py index 03ced68440fe..f7119fd36878 100644 --- a/scipy/sparse/linalg/isolve/lsmr.py +++ b/scipy/sparse/linalg/isolve/lsmr.py @@ -16,6 +16,8 @@ """ +from __future__ import division, print_function, absolute_import + __all__ = ['lsmr'] from numpy import zeros, infty @@ -23,7 +25,7 @@ from math import sqrt from scipy.sparse.linalg.interface import aslinearoperator -from lsqr import _sym_ortho +from .lsqr import _sym_ortho def lsmr(A, b, damp=0.0, atol=1e-6, btol=1e-6, conlim=1e8, maxiter=None, show=False): @@ -155,12 +157,12 @@ def lsmr(A, b, damp=0.0, atol=1e-6, btol=1e-6, conlim=1e8, maxiter = minDim if show: - print ' ' - print 'LSMR Least-squares solution of Ax = b\n' - print 'The matrix A has %8g rows and %8g cols' % (m, n) - print 'damp = %20.14e\n' % (damp) - print 'atol = %8.2e conlim = %8.2e\n' % (atol, conlim) - print 'btol = %8.2e maxiter = %8g\n' % (btol, maxiter) + print(' ') + print('LSMR Least-squares solution of Ax = b\n') + print('The matrix A has %8g rows and %8g cols' % (m, n)) + print('damp = %20.14e\n' % (damp)) + print('atol = %8.2e conlim = %8.2e\n' % (atol, conlim)) + print('btol = %8.2e maxiter = %8g\n' % (btol, maxiter)) u = b beta = norm(u) @@ -223,18 +225,18 @@ def lsmr(A, b, damp=0.0, atol=1e-6, btol=1e-6, conlim=1e8, normar = alpha * beta if normar == 0: if show: - print msg[0] + print(msg[0]) return x, istop, itn, normr, normar, normA, condA, normx if show: - print ' ' - print hdg1, hdg2 + print(' ') + print(hdg1, hdg2) test1 = 1 test2 = alpha / beta str1 = '%6g %12.5e' % (itn, x[0]) str2 = ' %10.3e %10.3e' % (normr, normar) str3 = ' %8.1e %8.1e' % (test1, test2) - print ''.join([str1, str2, str3]) + print(''.join([str1, str2, str3])) # Main iteration loop. while itn < maxiter: @@ -374,14 +376,14 @@ def lsmr(A, b, damp=0.0, atol=1e-6, btol=1e-6, conlim=1e8, if pcount >= pfreq: pcount = 0 - print ' ' - print hdg1, hdg2 + print(' ') + print(hdg1, hdg2) pcount = pcount + 1 str1 = '%6g %12.5e' % (itn, x[0]) str2 = ' %10.3e %10.3e' % (normr, normar) str3 = ' %8.1e %8.1e' % (test1, test2) str4 = ' %8.1e %8.1e' % (normA, condA) - print ''.join([str1, str2, str3, str4]) + print(''.join([str1, str2, str3, str4])) if istop > 0: break @@ -389,14 +391,14 @@ def lsmr(A, b, damp=0.0, atol=1e-6, btol=1e-6, conlim=1e8, # Print the stopping condition. 
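
lgmres.py above now imports xrange from the vendored scipy.lib.six.moves, since Python 3 dropped the xrange builtin while range itself became lazy. A stand-alone sketch of the same idea (not the vendored implementation):

try:
    xrange                      # Python 2: the lazy builtin already exists
except NameError:
    xrange = range              # Python 3: range is itself a lazy sequence

total = sum(i * i for i in xrange(10))    # iterates without building a list
assert total == 285
assert list(range(5)) == [0, 1, 2, 3, 4]  # wrap in list() when a real list is needed
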
if show: - print ' ' - print 'LSMR finished' - print msg[istop] - print 'istop =%8g normr =%8.1e' % (istop, normr) - print ' normA =%8.1e normAr =%8.1e' % (normA, normar) - print 'itn =%8g condA =%8.1e' % (itn, condA) - print ' normx =%8.1e' % (normx) - print str1, str2 - print str3, str4 + print(' ') + print('LSMR finished') + print(msg[istop]) + print('istop =%8g normr =%8.1e' % (istop, normr)) + print(' normA =%8.1e normAr =%8.1e' % (normA, normar)) + print('itn =%8g condA =%8.1e' % (itn, condA)) + print(' normx =%8.1e' % (normx)) + print(str1, str2) + print(str3, str4) return x, istop, itn, normr, normar, normA, condA, normx diff --git a/scipy/sparse/linalg/isolve/lsqr.py b/scipy/sparse/linalg/isolve/lsqr.py index 5450cef32489..9febb3892010 100644 --- a/scipy/sparse/linalg/isolve/lsqr.py +++ b/scipy/sparse/linalg/isolve/lsqr.py @@ -49,6 +49,8 @@ """ +from __future__ import division, print_function, absolute_import + __all__ = ['lsqr'] import numpy as np @@ -259,16 +261,16 @@ def lsqr(A, b, damp=0.0, atol=1e-8, btol=1e-8, conlim=1e8, 'The iteration limit has been reached '); if show: - print ' ' - print 'LSQR Least-squares solution of Ax = b' + print(' ') + print('LSQR Least-squares solution of Ax = b') str1 = 'The matrix A has %8g rows and %8g cols' % (m, n) str2 = 'damp = %20.14e calc_var = %8g' % (damp, calc_var) str3 = 'atol = %8.2e conlim = %8.2e'%( atol, conlim) str4 = 'btol = %8.2e iter_lim = %8g' %( btol, iter_lim) - print str1 - print str2 - print str3 - print str4 + print(str1) + print(str2) + print(str3) + print(str4) itn = 0 istop = 0 @@ -319,20 +321,20 @@ def lsqr(A, b, damp=0.0, atol=1e-8, btol=1e-8, conlim=1e8, # there was an error on return when arnorm==0 arnorm = alfa * beta if arnorm == 0: - print msg[0]; + print(msg[0]); return x, istop, itn, r1norm, r2norm, anorm, acond, arnorm, xnorm, var head1 = ' Itn x[0] r1norm r2norm '; head2 = ' Compatible LS Norm A Cond A'; if show: - print ' ' - print head1, head2 + print(' ') + print(head1, head2) test1 = 1; test2 = alfa / beta; str1 = '%6g %12.5e' %( itn, x[0] ); str2 = ' %10.3e %10.3e'%( r1norm, r2norm ); str3 = ' %8.1e %8.1e' %( test1, test2 ); - print str1, str2, str3 + print(str1, str2, str3) # Main iteration loop. while itn < iter_lim: @@ -460,23 +462,23 @@ def lsqr(A, b, damp=0.0, atol=1e-8, btol=1e-8, conlim=1e8, str2 = ' %10.3e %10.3e' % (r1norm, r2norm) str3 = ' %8.1e %8.1e' % (test1, test2) str4 = ' %8.1e %8.1e' % (anorm, acond) - print str1, str2, str3, str4 + print(str1, str2, str3, str4) if istop != 0: break # End of iteration loop. # Print the stopping condition. 
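
The LSMR and LSQR logging above moves from the print statement to the print() function enabled by the print_function future import; its multiple arguments plus the sep and end keywords reproduce the statement's formatting behaviour. A quick illustration:

from __future__ import print_function

istop, itn = 1, 42
str1 = 'istop  =%8g' % istop
str2 = 'itn    =%8g' % itn

print(str1, str2)             # space-separated, like the old `print str1, str2`
print(str1 + ' ' + str2)      # explicit concatenation, as the LSQR summary does
print(str1, end=' ')          # suppress the newline, like a trailing comma
print(str2)
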
if show: - print ' ' - print 'LSQR finished' - print msg[istop] - print ' ' + print(' ') + print('LSQR finished') + print(msg[istop]) + print(' ') str1 = 'istop =%8g r1norm =%8.1e' % (istop, r1norm) str2 = 'anorm =%8.1e arnorm =%8.1e' % (anorm, arnorm) str3 = 'itn =%8g r2norm =%8.1e' % (itn, r2norm) str4 = 'acond =%8.1e xnorm =%8.1e' % (acond, xnorm) - print str1+ ' ' + str2 - print str3+ ' ' + str4 - print ' ' + print(str1+ ' ' + str2) + print(str3+ ' ' + str4) + print(' ') return x, istop, itn, r1norm, r2norm, anorm, acond, arnorm, xnorm, var diff --git a/scipy/sparse/linalg/isolve/minres.py b/scipy/sparse/linalg/isolve/minres.py index 2fc440989386..ac930a0f16e5 100644 --- a/scipy/sparse/linalg/isolve/minres.py +++ b/scipy/sparse/linalg/isolve/minres.py @@ -1,8 +1,10 @@ +from __future__ import division, print_function, absolute_import + from numpy import sqrt, inner, finfo, zeros from numpy.linalg import norm -from utils import make_system -from iterative import set_docstring +from .utils import make_system +from .iterative import set_docstring __all__ = ['minres'] @@ -68,10 +70,10 @@ def minres(A, b, x0=None, shift=0.0, tol=1e-5, maxiter=None, xtype=None, if show: - print first + 'Solution of symmetric Ax = b' - print first + 'n = %3g shift = %23.14e' % (n,shift) - print first + 'itnlim = %3g rtol = %11.2e' % (maxiter,tol) - print + print(first + 'Solution of symmetric Ax = b') + print(first + 'n = %3g shift = %23.14e' % (n,shift)) + print(first + 'itnlim = %3g rtol = %11.2e' % (maxiter,tol)) + print() istop = 0; itn = 0; Anorm = 0; Acond = 0; rnorm = 0; ynorm = 0; @@ -133,9 +135,9 @@ def minres(A, b, x0=None, shift=0.0, tol=1e-5, maxiter=None, xtype=None, r2 = r1 if show: - print - print - print ' Itn x(1) Compatible LS norm(A) cond(A) gbar/|A|' + print() + print() + print(' Itn x(1) Compatible LS norm(A) cond(A) gbar/|A|') while itn < maxiter: itn += 1 @@ -264,9 +266,9 @@ def minres(A, b, x0=None, shift=0.0, tol=1e-5, maxiter=None, xtype=None, str2 = ' %10.3e' % (test2,) str3 = ' %8.1e %8.1e %8.1e' % (Anorm, Acond, gbar/Anorm) - print str1 + str2 + str3 + print(str1 + str2 + str3) - if itn % 10 == 0: print + if itn % 10 == 0: print() if callback is not None: callback(x) @@ -275,12 +277,12 @@ def minres(A, b, x0=None, shift=0.0, tol=1e-5, maxiter=None, xtype=None, if show: - print - print last + ' istop = %3g itn =%5g' % (istop,itn) - print last + ' Anorm = %12.4e Acond = %12.4e' % (Anorm,Acond) - print last + ' rnorm = %12.4e ynorm = %12.4e' % (rnorm,ynorm) - print last + ' Arnorm = %12.4e' % (Arnorm,) - print last + msg[istop+1] + print() + print(last + ' istop = %3g itn =%5g' % (istop,itn)) + print(last + ' Anorm = %12.4e Acond = %12.4e' % (Anorm,Acond)) + print(last + ' rnorm = %12.4e ynorm = %12.4e' % (rnorm,ynorm)) + print(last + ' Arnorm = %12.4e' % (Arnorm,)) + print(last + msg[istop+1]) if istop == 6: info = maxiter diff --git a/scipy/sparse/linalg/isolve/setup.py b/scipy/sparse/linalg/isolve/setup.py index 29a94b2b058f..6a7a3c615c8a 100755 --- a/scipy/sparse/linalg/isolve/setup.py +++ b/scipy/sparse/linalg/isolve/setup.py @@ -1,4 +1,5 @@ #!/usr/bin/env python +from __future__ import division, print_function, absolute_import import os import sys diff --git a/scipy/sparse/linalg/isolve/setupscons.py b/scipy/sparse/linalg/isolve/setupscons.py index 360d30240294..3a411aa902d2 100755 --- a/scipy/sparse/linalg/isolve/setupscons.py +++ b/scipy/sparse/linalg/isolve/setupscons.py @@ -1,4 +1,5 @@ #!/usr/bin/env python +from __future__ import division, print_function, absolute_import 
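
Nearly every file touched here gains `from __future__ import division, print_function, absolute_import`. The division import is the one that silently changes arithmetic: `/` on two integers becomes true division, while `//` keeps the old floor behaviour. A quick check:

from __future__ import division

assert 1 / 2 == 0.5    # without the import, Python 2 gives 0
assert 7 // 2 == 3     # floor division is unaffected
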
def configuration(parent_package='',top_path=None): from numpy.distutils.misc_util import Configuration diff --git a/scipy/sparse/linalg/isolve/tests/demo_lgmres.py b/scipy/sparse/linalg/isolve/tests/demo_lgmres.py index 8d9bd8ab8c3a..3a84d0158cc0 100644 --- a/scipy/sparse/linalg/isolve/tests/demo_lgmres.py +++ b/scipy/sparse/linalg/isolve/tests/demo_lgmres.py @@ -1,3 +1,5 @@ +from __future__ import division, print_function, absolute_import + import scipy.sparse.linalg as la import scipy.sparse as sp import scipy.io as io @@ -27,31 +29,31 @@ def matvec(v): M = 100 -print "MatrixMarket problem %s" % problem -print "Invert %d x %d matrix; nnz = %d" % (Am.shape[0], Am.shape[1], Am.nnz) +print("MatrixMarket problem %s" % problem) +print("Invert %d x %d matrix; nnz = %d" % (Am.shape[0], Am.shape[1], Am.nnz)) count[0] = 0 x0, info = la.gmres(A, b, restrt=M, tol=1e-14) count_0 = count[0] err0 = np.linalg.norm(Am*x0 - b) / np.linalg.norm(b) -print "GMRES(%d):" % M, count_0, "matvecs, residual", err0 +print("GMRES(%d):" % M, count_0, "matvecs, residual", err0) if info != 0: - print "Didn't converge" + print("Didn't converge") count[0] = 0 x1, info = la.lgmres(A, b, inner_m=M-6*2, outer_k=6, tol=1e-14) count_1 = count[0] err1 = np.linalg.norm(Am*x1 - b) / np.linalg.norm(b) -print "LGMRES(%d,6) [same memory req.]:" % (M-2*6), count_1, \ - "matvecs, residual:", err1 +print("LGMRES(%d,6) [same memory req.]:" % (M-2*6), count_1, \ + "matvecs, residual:", err1) if info != 0: - print "Didn't converge" + print("Didn't converge") count[0] = 0 x2, info = la.lgmres(A, b, inner_m=M-6, outer_k=6, tol=1e-14) count_2 = count[0] err2 = np.linalg.norm(Am*x2 - b) / np.linalg.norm(b) -print "LGMRES(%d,6) [same subspace size]:" % (M-6), count_2, \ - "matvecs, residual:", err2 +print("LGMRES(%d,6) [same subspace size]:" % (M-6), count_2, \ + "matvecs, residual:", err2) if info != 0: - print "Didn't converge" + print("Didn't converge") diff --git a/scipy/sparse/linalg/isolve/tests/test_iterative.py b/scipy/sparse/linalg/isolve/tests/test_iterative.py index 5c51fbc663bf..03a9825e1e57 100644 --- a/scipy/sparse/linalg/isolve/tests/test_iterative.py +++ b/scipy/sparse/linalg/isolve/tests/test_iterative.py @@ -2,6 +2,8 @@ """ Test functions for the sparse.linalg.isolve module """ +from __future__ import division, print_function, absolute_import + import numpy as np from numpy.testing import TestCase, assert_equal, assert_array_equal, \ diff --git a/scipy/sparse/linalg/isolve/tests/test_lgmres.py b/scipy/sparse/linalg/isolve/tests/test_lgmres.py index 89f6a29b476c..fe1fa475c63c 100644 --- a/scipy/sparse/linalg/isolve/tests/test_lgmres.py +++ b/scipy/sparse/linalg/isolve/tests/test_lgmres.py @@ -2,6 +2,8 @@ """Tests for the linalg.isolve.lgmres module """ +from __future__ import division, print_function, absolute_import + from numpy.testing import TestCase, assert_ from numpy import zeros, array, allclose diff --git a/scipy/sparse/linalg/isolve/tests/test_lsmr.py b/scipy/sparse/linalg/isolve/tests/test_lsmr.py index 7fb4cf596ddf..e184943e9613 100644 --- a/scipy/sparse/linalg/isolve/tests/test_lsmr.py +++ b/scipy/sparse/linalg/isolve/tests/test_lsmr.py @@ -15,6 +15,9 @@ Dept of MS&E, Stanford University. 
""" + +from __future__ import division, print_function, absolute_import + from numpy import arange, concatenate, eye, zeros, ones, sqrt, \ transpose, hstack from numpy.linalg import norm @@ -125,23 +128,23 @@ def lsmrtest(m, n, damp): = lsmr(A, b, damp, atol, btol, conlim, itnlim, show ) j1 = min(n,5); j2 = max(n-4,1); - print ' ' - print 'First elements of x:' + print(' ') + print('First elements of x:') str = [ '%10.4f' %(xi) for xi in x[0:j1] ] - print ''.join(str) - print ' ' - print 'Last elements of x:' + print(''.join(str)) + print(' ') + print('Last elements of x:') str = [ '%10.4f' %(xi) for xi in x[j2-1:] ] - print ''.join(str) + print(''.join(str)) r = b - Afun.matvec(x); r2 = sqrt(norm(r)**2 + (damp*norm(x))**2) - print ' ' + print(' ') str = 'normr (est.) %17.10e' %(normr ) str2 = 'normr (true) %17.10e' %(r2 ) - print str - print str2 - print ' ' + print(str) + print(str2) + print(' ') if __name__ == "__main__": # Comment out the next line to run unit tests only diff --git a/scipy/sparse/linalg/isolve/tests/test_lsqr.py b/scipy/sparse/linalg/isolve/tests/test_lsqr.py index c054e608e13a..13a6e9cef131 100644 --- a/scipy/sparse/linalg/isolve/tests/test_lsqr.py +++ b/scipy/sparse/linalg/isolve/tests/test_lsqr.py @@ -1,5 +1,8 @@ +from __future__ import division, print_function, absolute_import + import numpy as np from numpy.testing import assert_ +from scipy.lib.six.moves import xrange from scipy.sparse.linalg import lsqr from time import time @@ -10,7 +13,7 @@ normal = np.random.normal norm = np.linalg.norm -for jj in range(5): +for jj in xrange(5): gg = normal(size=n) hh = gg * gg.T G += (hh + hh.T) * 0.5 @@ -44,17 +47,17 @@ def test_basic(): else: sym='Yes' - print 'LSQR' - print "Is linear operator symmetric? " + sym - print "n: %3g iterations: %3g" % (n, k) - print "Norms computed in %.2fs by LSQR" % (time() - tic) - print " ||x|| %9.4e ||r|| %9.4e ||Ar|| %9.4e " %( chio, phio, psio) - print "Residual norms computed directly:" - print " ||x|| %9.4e ||r|| %9.4e ||Ar|| %9.4e" % (norm(xo), + print('LSQR') + print("Is linear operator symmetric? 
" + sym) + print("n: %3g iterations: %3g" % (n, k)) + print("Norms computed in %.2fs by LSQR" % (time() - tic)) + print(" ||x|| %9.4e ||r|| %9.4e ||Ar|| %9.4e " %( chio, phio, psio)) + print("Residual norms computed directly:") + print(" ||x|| %9.4e ||r|| %9.4e ||Ar|| %9.4e" % (norm(xo), norm(G*xo - b), - norm(G.T*(G*xo-b))) - print "Direct solution norms:" - print " ||x|| %9.4e ||r|| %9.4e " % (norm(svx), norm(G*svx -b)) - print "" - print " || x_{direct} - x_{LSQR}|| %9.4e " % norm(svx-xo) - print "" + norm(G.T*(G*xo-b)))) + print("Direct solution norms:") + print(" ||x|| %9.4e ||r|| %9.4e " % (norm(svx), norm(G*svx -b))) + print("") + print(" || x_{direct} - x_{LSQR}|| %9.4e " % norm(svx-xo)) + print("") diff --git a/scipy/sparse/linalg/isolve/tests/test_utils.py b/scipy/sparse/linalg/isolve/tests/test_utils.py index 0920964680b2..0ccc9f74e059 100644 --- a/scipy/sparse/linalg/isolve/tests/test_utils.py +++ b/scipy/sparse/linalg/isolve/tests/test_utils.py @@ -1,3 +1,4 @@ +from __future__ import division, print_function, absolute_import import numpy as np from numpy.testing import assert_raises diff --git a/scipy/sparse/linalg/isolve/utils.py b/scipy/sparse/linalg/isolve/utils.py index de0d47c0ea6f..cd9c76025691 100644 --- a/scipy/sparse/linalg/isolve/utils.py +++ b/scipy/sparse/linalg/isolve/utils.py @@ -1,3 +1,5 @@ +from __future__ import division, print_function, absolute_import + __docformat__ = "restructuredtext en" __all__ = [] diff --git a/scipy/sparse/linalg/matfuncs.py b/scipy/sparse/linalg/matfuncs.py index 3dc9fbd9b49e..d44af53eb2bc 100644 --- a/scipy/sparse/linalg/matfuncs.py +++ b/scipy/sparse/linalg/matfuncs.py @@ -8,6 +8,8 @@ # Jake Vanderplas, August 2012 (Sparse Updates) # +from __future__ import division, print_function, absolute_import + __all__ = ['expm', 'inv'] from numpy import asarray, dot, eye, ceil, log2 diff --git a/scipy/sparse/linalg/setup.py b/scipy/sparse/linalg/setup.py index 29a04aa199ce..411e8b31166c 100755 --- a/scipy/sparse/linalg/setup.py +++ b/scipy/sparse/linalg/setup.py @@ -1,4 +1,5 @@ #!/usr/bin/env python +from __future__ import division, print_function, absolute_import def configuration(parent_package='',top_path=None): from numpy.distutils.misc_util import Configuration diff --git a/scipy/sparse/linalg/setupscons.py b/scipy/sparse/linalg/setupscons.py index 402ea0119f01..f95a658b8c6f 100755 --- a/scipy/sparse/linalg/setupscons.py +++ b/scipy/sparse/linalg/setupscons.py @@ -1,4 +1,5 @@ #!/usr/bin/env python +from __future__ import division, print_function, absolute_import def configuration(parent_package='',top_path=None): from numpy.distutils.misc_util import Configuration diff --git a/scipy/sparse/linalg/tests/test_interface.py b/scipy/sparse/linalg/tests/test_interface.py index d70b9fc62144..95db87873a49 100644 --- a/scipy/sparse/linalg/tests/test_interface.py +++ b/scipy/sparse/linalg/tests/test_interface.py @@ -1,6 +1,8 @@ """Test functions for the sparse.linalg.interface module """ +from __future__ import division, print_function, absolute_import + from numpy.testing import TestCase, assert_, assert_equal, \ assert_raises diff --git a/scipy/sparse/linalg/tests/test_matfuncs.py b/scipy/sparse/linalg/tests/test_matfuncs.py index 79891afa1ee8..010270b476a5 100644 --- a/scipy/sparse/linalg/tests/test_matfuncs.py +++ b/scipy/sparse/linalg/tests/test_matfuncs.py @@ -5,6 +5,8 @@ """ Test functions for scipy.linalg.matfuncs module """ +from __future__ import division, print_function, absolute_import + import numpy as np from numpy import array, eye, 
dot, sqrt, double, exp, random from numpy.testing import TestCase, run_module_suite, assert_array_almost_equal, \ diff --git a/scipy/sparse/setup.py b/scipy/sparse/setup.py index 8074ec89881f..c3373e74508d 100755 --- a/scipy/sparse/setup.py +++ b/scipy/sparse/setup.py @@ -1,4 +1,5 @@ #!/usr/bin/env python +from __future__ import division, print_function, absolute_import def configuration(parent_package='',top_path=None): import numpy diff --git a/scipy/sparse/setupscons.py b/scipy/sparse/setupscons.py index 7121d8193804..c29dced76335 100755 --- a/scipy/sparse/setupscons.py +++ b/scipy/sparse/setupscons.py @@ -1,4 +1,5 @@ #!/usr/bin/env python +from __future__ import division, print_function, absolute_import from os.path import join import sys diff --git a/scipy/sparse/sparsetools/__init__.py b/scipy/sparse/sparsetools/__init__.py index d5e39e37eb09..6989b9247879 100644 --- a/scipy/sparse/sparsetools/__init__.py +++ b/scipy/sparse/sparsetools/__init__.py @@ -1,9 +1,10 @@ """sparsetools - a collection of routines for sparse matrix operations """ +from __future__ import division, print_function, absolute_import -from csr import * -from csc import * -from coo import * -from dia import * -from bsr import * -from csgraph import * +from .csr import * +from .csc import * +from .coo import * +from .dia import * +from .bsr import * +from .csgraph import * diff --git a/scipy/sparse/sparsetools/setup.py b/scipy/sparse/sparsetools/setup.py index 5c4d86b94847..0c754a1e3bf2 100755 --- a/scipy/sparse/sparsetools/setup.py +++ b/scipy/sparse/sparsetools/setup.py @@ -1,4 +1,5 @@ #!/usr/bin/env python +from __future__ import division, print_function, absolute_import def configuration(parent_package='',top_path=None): import numpy diff --git a/scipy/sparse/sparsetools/setupscons.py b/scipy/sparse/sparsetools/setupscons.py index 32544b7c8423..edff57fc4ea4 100755 --- a/scipy/sparse/sparsetools/setupscons.py +++ b/scipy/sparse/sparsetools/setupscons.py @@ -1,5 +1,7 @@ #!/usr/bin/env python +from __future__ import division, print_function, absolute_import + from os.path import join import sys diff --git a/scipy/sparse/spfuncs.py b/scipy/sparse/spfuncs.py index a3868d702878..1e4262c7f064 100644 --- a/scipy/sparse/spfuncs.py +++ b/scipy/sparse/spfuncs.py @@ -1,11 +1,13 @@ """ Functions that operate on sparse matrices """ +from __future__ import division, print_function, absolute_import + __all__ = ['count_blocks','estimate_blocksize'] -from csr import isspmatrix_csr, csr_matrix -from csc import isspmatrix_csc -from sparsetools import csr_count_blocks +from .csr import isspmatrix_csr, csr_matrix +from .csc import isspmatrix_csc +from .sparsetools import csr_count_blocks def extract_diagonal(A): raise NotImplementedError('use .diagonal() instead') diff --git a/scipy/sparse/sputils.py b/scipy/sparse/sputils.py index 6d29895e2766..7898fb8d6f23 100644 --- a/scipy/sparse/sputils.py +++ b/scipy/sparse/sputils.py @@ -1,6 +1,8 @@ """ Utility functions for sparse matrix module """ +from __future__ import division, print_function, absolute_import + __all__ = ['upcast','getdtype','isscalarlike','isintlike', 'isshape','issequence','isdense'] diff --git a/scipy/sparse/tests/test_base.py b/scipy/sparse/tests/test_base.py index 63945ca98bd8..b627e6a845fc 100644 --- a/scipy/sparse/tests/test_base.py +++ b/scipy/sparse/tests/test_base.py @@ -4,6 +4,9 @@ """ Test functions for sparse matrices """ + +from __future__ import division, print_function, absolute_import + __usage__ = """ Build sparse: python setup.py build @@ -17,6 +20,7 @@ 
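
The test changes that follow wrap range(), zip() and dict views in list() wherever list behaviour (indexing, len, use as an assignment value) is required, because those builtins return lazy objects on Python 3; the same reasoning turns the `filter(lambda ...)` idiom for `__all__` into a list comprehension elsewhere in this patch. A small illustration:

pairs = list(zip([1, 2, 3], ['a', 'b', 'c']))    # materialize the iterator
assert pairs[0] == (1, 'a') and len(pairs) == 3

row = list(range(100))                           # a concrete list, not a lazy range object
assert row[99] == 99

names = ['_private', 'solve', 'norm']
# old: __all__ = filter(lambda s: not s.startswith('_'), names)
__all__ = [s for s in names if not s.startswith('_')]
assert __all__ == ['solve', 'norm']
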
import warnings import numpy as np +from scipy.lib.six.moves import xrange from numpy import arange, zeros, array, dot, matrix, asmatrix, asarray, \ vstack, ndarray, transpose, diag, kron, inf, conjugate, \ int8, ComplexWarning @@ -1368,7 +1372,7 @@ def test_set_slice(self): caught = 0 # The next 6 commands should raise exceptions try: - A[0,0] = range(100) + A[0,0] = list(range(100)) except ValueError: caught += 1 try: @@ -1376,11 +1380,11 @@ def test_set_slice(self): except ValueError: caught += 1 try: - A[0,:] = range(100) + A[0,:] = list(range(100)) except ValueError: caught += 1 try: - A[:,1] = range(100) + A[:,1] = list(range(100)) except ValueError: caught += 1 try: @@ -1394,7 +1398,7 @@ def test_ctor(self): # Empty ctor try: A = dok_matrix() - except TypeError, e: + except TypeError as e: caught+=1 assert_equal(caught, 1) @@ -1603,7 +1607,7 @@ def test_inplace_ops(self): 'sub': (B,A - B), 'mul': (3,A * 3)} - for op,(other,expected) in data.iteritems(): + for op,(other,expected) in data.items(): result = A.copy() getattr(result, '__i%s__' % op)(other) diff --git a/scipy/sparse/tests/test_construct.py b/scipy/sparse/tests/test_construct.py index 502948852dad..1111a9a0d4d5 100644 --- a/scipy/sparse/tests/test_construct.py +++ b/scipy/sparse/tests/test_construct.py @@ -1,11 +1,12 @@ """test sparse matrix construction functions""" +from __future__ import division, print_function, absolute_import + import numpy as np from numpy import array, matrix from numpy.testing import TestCase, run_module_suite, assert_equal, \ assert_array_equal, assert_raises, assert_array_almost_equal_nulp - from scipy.sparse import csr_matrix, coo_matrix from scipy.sparse import construct @@ -115,7 +116,7 @@ def test_diags(self): # should be able to find the shape automatically assert_equal(construct.diags(d, o).todense(), result) except: - print "%r %r %r" % (d, o, shape) + print("%r %r %r" % (d, o, shape)) raise def test_diags_bad(self): @@ -139,7 +140,7 @@ def test_diags_bad(self): try: assert_raises(ValueError, construct.diags, d, o, shape) except: - print "%r %r %r" % (d, o, shape) + print("%r %r %r" % (d, o, shape)) raise assert_raises(TypeError, construct.diags, [[None]], [0]) @@ -177,7 +178,7 @@ def test_diags_dtype(self): assert_equal(x.todense(), [[2, 0], [0, 2]]) def test_diags_one_diagonal(self): - d = range(5) + d = list(range(5)) for k in range(-5, 6): assert_equal(construct.diags(d, k).toarray(), construct.diags([d], [k]).toarray()) diff --git a/scipy/sparse/tests/test_csc.py b/scipy/sparse/tests/test_csc.py index 6d06f02758ac..b4c38653f2db 100644 --- a/scipy/sparse/tests/test_csc.py +++ b/scipy/sparse/tests/test_csc.py @@ -1,8 +1,9 @@ +from __future__ import division, print_function, absolute_import + import numpy as np from numpy.testing import assert_array_almost_equal, run_module_suite, assert_ from scipy.sparse import csr_matrix, csc_matrix - def test_csc_getrow(): N = 10 np.random.seed(0) diff --git a/scipy/sparse/tests/test_csr.py b/scipy/sparse/tests/test_csr.py index fdb4f7974901..d536d23b3a94 100644 --- a/scipy/sparse/tests/test_csr.py +++ b/scipy/sparse/tests/test_csr.py @@ -1,3 +1,5 @@ +from __future__ import division, print_function, absolute_import + import numpy as np from numpy.testing import assert_array_almost_equal, run_module_suite, assert_ from scipy.sparse import csr_matrix diff --git a/scipy/sparse/tests/test_extract.py b/scipy/sparse/tests/test_extract.py index 92f94360a835..b2266ec40c28 100644 --- a/scipy/sparse/tests/test_extract.py +++ 
b/scipy/sparse/tests/test_extract.py @@ -1,5 +1,7 @@ """test sparse matrix construction functions""" +from __future__ import division, print_function, absolute_import + from numpy.testing import TestCase, assert_equal from scipy.sparse import csr_matrix diff --git a/scipy/sparse/tests/test_spfuncs.py b/scipy/sparse/tests/test_spfuncs.py index 70c0cd1de691..b892ca99fbf4 100644 --- a/scipy/sparse/tests/test_spfuncs.py +++ b/scipy/sparse/tests/test_spfuncs.py @@ -1,3 +1,5 @@ +from __future__ import division, print_function, absolute_import + from numpy import array, kron, matrix, diag from numpy.testing import TestCase, run_module_suite, assert_, assert_equal diff --git a/scipy/sparse/tests/test_sputils.py b/scipy/sparse/tests/test_sputils.py index 287a2372cf32..af6759f3340f 100644 --- a/scipy/sparse/tests/test_sputils.py +++ b/scipy/sparse/tests/test_sputils.py @@ -1,5 +1,7 @@ """unit tests for sparse utility functions""" +from __future__ import division, print_function, absolute_import + import numpy as np from numpy.testing import TestCase, run_module_suite, assert_equal from scipy.sparse import sputils diff --git a/scipy/spatial/__init__.py b/scipy/spatial/__init__.py index c7ecd0dbad26..759be003f693 100644 --- a/scipy/spatial/__init__.py +++ b/scipy/spatial/__init__.py @@ -70,15 +70,17 @@ """ -from kdtree import * -from ckdtree import * -from qhull import * -from _plotutils import * +from __future__ import division, print_function, absolute_import -__all__ = filter(lambda s:not s.startswith('_'),dir()) +from .kdtree import * +from .ckdtree import * +from .qhull import * +from ._plotutils import * + +__all__ = [s for s in dir() if not s.startswith('_')] __all__ += ['distance'] -import distance +from . import distance from numpy.testing import Tester test = Tester().test bench = Tester().bench diff --git a/scipy/spatial/_plotutils.py b/scipy/spatial/_plotutils.py index cd89c96690b3..d3e3e9c2d029 100644 --- a/scipy/spatial/_plotutils.py +++ b/scipy/spatial/_plotutils.py @@ -1,3 +1,5 @@ +from __future__ import division, print_function, absolute_import + import numpy as np from scipy.lib.decorator import decorator as _decorator diff --git a/scipy/spatial/benchmarks/bench_ckdtree.py b/scipy/spatial/benchmarks/bench_ckdtree.py index 5ebc255f6048..328d96a79ea4 100644 --- a/scipy/spatial/benchmarks/bench_ckdtree.py +++ b/scipy/spatial/benchmarks/bench_ckdtree.py @@ -1,3 +1,5 @@ +from __future__ import division, print_function, absolute_import + import sys from numpy.testing import * from scipy.spatial import cKDTree, KDTree @@ -6,36 +8,36 @@ class TestBuild(TestCase): def bench_build(self): - print - print ' Constructing kd-tree' - print '=====================================' - print ' dim | # points | KDTree | cKDTree ' + print() + print(' Constructing kd-tree') + print('=====================================') + print(' dim | # points | KDTree | cKDTree ') for (m, n, repeat) in [(3,10000,3), (8,10000,3), (16,10000,3)]: - print '%4s | %7s ' % (m, n), + print('%4s | %7s ' % (m, n), end=' ') sys.stdout.flush() data = np.concatenate((np.random.randn(n//2,m), np.random.randn(n-n//2,m)+np.ones(m))) - print '| %6.3fs ' % (measure('T1 = KDTree(data)', repeat) / repeat), + print('| %6.3fs ' % (measure('T1 = KDTree(data)', repeat) / repeat), end=' ') sys.stdout.flush() - print '| %6.3fs' % (measure('T2 = cKDTree(data)', repeat) / repeat), + print('| %6.3fs' % (measure('T2 = cKDTree(data)', repeat) / repeat), end=' ') sys.stdout.flush() - print '' + print('') class TestQuery(TestCase): def 
bench_query(self): - print - print ' Querying kd-tree' - print '===============================================================' - print ' dim | # points | # queries | KDTree | cKDTree | flat cKDTree' + print() + print(' Querying kd-tree') + print('===============================================================') + print(' dim | # points | # queries | KDTree | cKDTree | flat cKDTree') for (m, n, r, repeat) in [(3,10000,1000,3), (8,10000,1000,3), (16,10000,1000,3)]: - print '%4s | %8s | %8s ' % (m, n, r), + print('%4s | %8s | %8s ' % (m, n, r), end=' ') sys.stdout.flush() data = np.concatenate((np.random.randn(n//2,m), @@ -46,26 +48,26 @@ def bench_query(self): T1 = KDTree(data) T2 = cKDTree(data) T3 = cKDTree(data,leafsize=n) - print '| %6.3fs ' % (measure('T1.query(queries)', 1) / 1), + print('| %6.3fs ' % (measure('T1.query(queries)', 1) / 1), end=' ') sys.stdout.flush() - print '| %6.3fs' % (measure('T2.query(queries)', repeat) / repeat), + print('| %6.3fs' % (measure('T2.query(queries)', repeat) / repeat), end=' ') sys.stdout.flush() - print '| %6.3fs' % (measure('T3.query(queries)', repeat) / repeat), + print('| %6.3fs' % (measure('T3.query(queries)', repeat) / repeat), end=' ') sys.stdout.flush() - print '' + print('') class TestQueryBallPoint(TestCase): def bench_query_ball_point(self): - print - print ' Query ball point kd-tree' - print '===============================================================' - print ' dim | # points | # queries | probe radius | KDTree | cKDTree | flat cKDTree' + print() + print(' Query ball point kd-tree') + print('===============================================================') + print(' dim | # points | # queries | probe radius | KDTree | cKDTree | flat cKDTree') for (m, n, r, repeat) in [(3,10000,1000,3)]:#, # (8,10000,1000,3), # (16,10000,1000,3)]: for probe_radius in (0.2, 0.5): - print '%4s | %8s | %9s | %11.1f ' % (m, n, r, probe_radius), + print('%4s | %8s | %9s | %11.1f ' % (m, n, r, probe_radius), end=' ') sys.stdout.flush() data = np.concatenate((np.random.randn(n//2,m), @@ -76,27 +78,27 @@ def bench_query_ball_point(self): T1 = KDTree(data) T2 = cKDTree(data) T3 = cKDTree(data,leafsize=n) - print '| %6.3fs ' % (measure('T1.query_ball_point(queries, probe_radius)', 1) / 1), + print('| %6.3fs ' % (measure('T1.query_ball_point(queries, probe_radius)', 1) / 1), end=' ') sys.stdout.flush() - print '| %6.3fs' % (measure('T2.query_ball_point(queries, probe_radius)', repeat) / repeat), + print('| %6.3fs' % (measure('T2.query_ball_point(queries, probe_radius)', repeat) / repeat), end=' ') sys.stdout.flush() - print '| %6.3fs' % (measure('T3.query_ball_point(queries, probe_radius)', repeat) / repeat), + print('| %6.3fs' % (measure('T3.query_ball_point(queries, probe_radius)', repeat) / repeat), end=' ') sys.stdout.flush() - print '' + print('') class TestQueryPairs(TestCase): def bench_query_pairs(self): - print - print ' Query pairs kd-tree' - print '==================================================================' - print ' dim | # points | probe radius | KDTree | cKDTree | flat cKDTree' + print() + print(' Query pairs kd-tree') + print('==================================================================') + print(' dim | # points | probe radius | KDTree | cKDTree | flat cKDTree') for (m, n, repeat) in [(3,1000,30), (8,1000,30), (16,1000,30)]: for probe_radius in (0.2, 0.5): - print '%4s | %8s | %11.1f ' % (m, n, probe_radius), + print('%4s | %8s | %11.1f ' % (m, n, probe_radius), end=' ') sys.stdout.flush() data = 
np.concatenate((np.random.randn(n//2,m), @@ -105,21 +107,21 @@ def bench_query_pairs(self): T1 = KDTree(data) T2 = cKDTree(data) T3 = cKDTree(data,leafsize=n) - print '| %6.3fs ' % (measure('T1.query_pairs(probe_radius)', 1) / 1), + print('| %6.3fs ' % (measure('T1.query_pairs(probe_radius)', 1) / 1), end=' ') sys.stdout.flush() - print '| %6.3fs' % (measure('T2.query_pairs(probe_radius)', repeat) / repeat), + print('| %6.3fs' % (measure('T2.query_pairs(probe_radius)', repeat) / repeat), end=' ') sys.stdout.flush() - print '| %6.3fs' % (measure('T3.query_pairs(probe_radius)', repeat) / repeat), + print('| %6.3fs' % (measure('T3.query_pairs(probe_radius)', repeat) / repeat), end=' ') sys.stdout.flush() - print '' + print('') class TestSparseDistanceMatrix(TestCase): def bench_sparse_distance_matrix(self): - print - print ' Sparse distance matrix kd-tree' - print '====================================================================' - print ' dim | # points T1 | # points T2 | probe radius | KDTree | cKDTree' + print() + print(' Sparse distance matrix kd-tree') + print('====================================================================') + print(' dim | # points T1 | # points T2 | probe radius | KDTree | cKDTree') for (m, n1, n2, repeat) in [(3,1000,1000,30), (8,1000,1000,30), @@ -136,22 +138,22 @@ def bench_sparse_distance_matrix(self): cT2 = cKDTree(data2) for probe_radius in (0.2, 0.5): - print '%4s | %11s | %11s | %11.1f ' % (m, n1, n2, probe_radius), + print('%4s | %11s | %11s | %11.1f ' % (m, n1, n2, probe_radius), end=' ') sys.stdout.flush() - print '| %6.3fs ' % (measure('T1.sparse_distance_matrix(T2, probe_radius)', 1) / 1), + print('| %6.3fs ' % (measure('T1.sparse_distance_matrix(T2, probe_radius)', 1) / 1), end=' ') sys.stdout.flush() - print '| %6.3fs ' % (measure('cT1.sparse_distance_matrix(cT2, probe_radius)', repeat) / repeat), + print('| %6.3fs ' % (measure('cT1.sparse_distance_matrix(cT2, probe_radius)', repeat) / repeat), end=' ') sys.stdout.flush() - print '' + print('') class TestCountNeighbors(TestCase): def bench_count_neighbors(self): - print - print ' Count neighbors kd-tree' - print '====================================================================' - print ' dim | # points T1 | # points T2 | probe radius | KDTree | cKDTree' + print() + print(' Count neighbors kd-tree') + print('====================================================================') + print(' dim | # points T1 | # points T2 | probe radius | KDTree | cKDTree') for (m, n1, n2, repeat) in [(3,1000,1000,30), (8,1000,1000,30), @@ -168,14 +170,14 @@ def bench_count_neighbors(self): cT2 = cKDTree(data2) for probe_radius in (0.2, 0.5): - print '%4s | %11s | %11s | %11.1f ' % (m, n1, n2, probe_radius), + print('%4s | %11s | %11s | %11.1f ' % (m, n1, n2, probe_radius), end=' ') sys.stdout.flush() - print '| %6.3fs ' % (measure('T1.count_neighbors(T2, probe_radius)', 1) / 1), + print('| %6.3fs ' % (measure('T1.count_neighbors(T2, probe_radius)', 1) / 1), end=' ') sys.stdout.flush() - print '| %6.3fs ' % (measure('cT1.count_neighbors(cT2, probe_radius)', repeat) / repeat), + print('| %6.3fs ' % (measure('cT1.count_neighbors(cT2, probe_radius)', repeat) / repeat), end=' ') sys.stdout.flush() - print '' + print('') if __name__ == "__main__": run_module_suite() diff --git a/scipy/spatial/distance.py b/scipy/spatial/distance.py index fa038c22372e..95af6c8a77db 100644 --- a/scipy/spatial/distance.py +++ b/scipy/spatial/distance.py @@ -63,12 +63,17 @@ # Copyright (C) Damian Eads, 2007-2008. New BSD License. 
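
The distance.py hunks just below also replace the old `except Exception, e:` clauses with `except Exception as e:`, the only spelling Python 3 accepts (and valid since Python 2.6). A minimal example of the pattern, using a hypothetical parse_int helper:

def parse_int(text):
    try:
        return int(text)
    except ValueError as e:    # old form `except ValueError, e:` is a SyntaxError on Python 3
        raise ValueError('not an integer: %r (%s)' % (text, e))

assert parse_int('42') == 42
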
+from __future__ import division, print_function, absolute_import import warnings import numpy as np from numpy.linalg import norm -import _distance_wrap +from scipy.lib.six import callable, string_types +from scipy.lib.six.moves import xrange + +from . import _distance_wrap +import collections def _copy_array_if_base_present(a): @@ -1144,7 +1149,7 @@ def dfun(u, v): dm[k] = dfun(X[i], X[j]) k = k + 1 - elif isinstance(metric, basestring): + elif isinstance(metric, string_types): mstr = metric.lower() #if X.dtype != np.double and \ @@ -1521,7 +1526,7 @@ def is_valid_dm(D, tol=0.0, throw=False, name="D", warning=False): raise ValueError(('Distance matrix \'%s\' diagonal must be' ' close to zero within tolerance %5.5f.') % tol) - except Exception, e: + except Exception as e: if throw: raise if warning: @@ -1591,7 +1596,7 @@ def is_valid_y(y, warning=False, throw=False, name=None): raise ValueError('Length n of condensed distance matrix must ' 'be a binomial coefficient, i.e. there must ' 'be a k such that (k \choose 2)=n)!') - except Exception, e: + except Exception as e: if throw: raise if warning: @@ -1934,7 +1939,7 @@ def cdist(XA, XB, metric='euclidean', p=2, V=None, VI=None, w=None): for i in xrange(0, mA): for j in xrange(0, mB): dm[i, j] = metric(XA[i, :], XB[j, :]) - elif isinstance(metric, basestring): + elif isinstance(metric, string_types): mstr = metric.lower() #if XA.dtype != np.double and \ diff --git a/scipy/spatial/kdtree.py b/scipy/spatial/kdtree.py index e2ec388d314b..eeac6f0a146a 100644 --- a/scipy/spatial/kdtree.py +++ b/scipy/spatial/kdtree.py @@ -1,5 +1,7 @@ # Copyright Anne M. Archibald 2008 # Released under the scipy license +from __future__ import division, print_function, absolute_import + import sys import numpy as np from heapq import heappush, heappop @@ -80,7 +82,7 @@ def __init__(self, maxes, mins): self.m, = self.maxes.shape def __repr__(self): - return "" % zip(self.mins, self.maxes) + return "" % list(zip(self.mins, self.maxes)) def volume(self): """Total volume.""" diff --git a/scipy/spatial/setup.py b/scipy/spatial/setup.py index 3abbfd4fef17..48d57f11ab58 100755 --- a/scipy/spatial/setup.py +++ b/scipy/spatial/setup.py @@ -1,5 +1,7 @@ #!/usr/bin/env python +from __future__ import division, print_function, absolute_import + from os.path import join def configuration(parent_package = '', top_path = None): diff --git a/scipy/spatial/setupscons.py b/scipy/spatial/setupscons.py index a236c2c019dd..c044bc3315c0 100755 --- a/scipy/spatial/setupscons.py +++ b/scipy/spatial/setupscons.py @@ -1,5 +1,7 @@ #!/usr/bin/env python +from __future__ import division, print_function, absolute_import + from os.path import join def configuration(parent_package = '', top_path = None): diff --git a/scipy/spatial/tests/test__plotutils.py b/scipy/spatial/tests/test__plotutils.py index 8c30b77ad731..090f7a6bd494 100644 --- a/scipy/spatial/tests/test__plotutils.py +++ b/scipy/spatial/tests/test__plotutils.py @@ -1,3 +1,5 @@ +from __future__ import division, print_function, absolute_import + from numpy.testing import dec, assert_, assert_array_equal try: diff --git a/scipy/spatial/tests/test_distance.py b/scipy/spatial/tests/test_distance.py index 4457d066803b..f32f21ecda62 100644 --- a/scipy/spatial/tests/test_distance.py +++ b/scipy/spatial/tests/test_distance.py @@ -33,14 +33,18 @@ # NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
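
distance.py above now tests metric names with isinstance(metric, string_types) instead of basestring, pulling callable and string_types from the vendored scipy.lib.six (basestring is gone in Python 3, and the callable builtin was missing in the earliest 3.x releases). A stand-alone sketch of the same check, with a hypothetical describe_metric helper and a locally defined string_types tuple:

import sys

if sys.version_info[0] >= 3:
    string_types = (str,)
else:
    string_types = (str, unicode)    # noqa: F821 -- unicode exists on Python 2

def describe_metric(metric):
    """Accept a metric name or a callable, as pdist/cdist do."""
    if isinstance(metric, string_types):
        return 'named metric: ' + metric.lower()
    if callable(metric):
        return 'user-supplied metric function'
    raise TypeError('metric must be a string or a callable')

assert describe_metric('Euclidean') == 'named metric: euclidean'
assert describe_metric(lambda u, v: 0.0) == 'user-supplied metric function'
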
+from __future__ import division, print_function, absolute_import import os.path +from scipy.lib.six.moves import xrange import numpy as np from numpy.linalg import norm from numpy.testing import verbose, TestCase, run_module_suite, \ assert_raises, assert_array_equal, assert_equal, assert_almost_equal +from scipy.lib.six import u + from scipy.spatial.distance import squareform, pdist, cdist, matching, \ jaccard, dice, sokalsneath, rogerstanimoto, russellrao, yule, \ num_obs_y, num_obs_dm, is_valid_dm, is_valid_y, minkowski, wminkowski, \ @@ -115,7 +119,7 @@ def test_cdist_euclidean_random(self): Y1 = cdist(X1, X2, 'euclidean') Y2 = cdist(X1, X2, 'test_euclidean') if verbose > 2: - print (Y1-Y2).max() + print((Y1-Y2).max()) self.assertTrue(within_tol(Y1, Y2, eps)) def test_cdist_euclidean_random_unicode(self): @@ -124,10 +128,10 @@ def test_cdist_euclidean_random_unicode(self): # Get the data: the input matrix and the right output. X1 = eo['cdist-X1'] X2 = eo['cdist-X2'] - Y1 = cdist(X1, X2, u'euclidean') - Y2 = cdist(X1, X2, u'test_euclidean') + Y1 = cdist(X1, X2, u('euclidean')) + Y2 = cdist(X1, X2, u('test_euclidean')) if verbose > 2: - print (Y1-Y2).max() + print((Y1-Y2).max()) self.assertTrue(within_tol(Y1, Y2, eps)) def test_cdist_sqeuclidean_random(self): @@ -139,7 +143,7 @@ def test_cdist_sqeuclidean_random(self): Y1 = cdist(X1, X2, 'sqeuclidean') Y2 = cdist(X1, X2, 'test_sqeuclidean') if verbose > 2: - print (Y1-Y2).max() + print((Y1-Y2).max()) self.assertTrue(within_tol(Y1, Y2, eps)) def test_cdist_cityblock_random(self): @@ -151,7 +155,7 @@ def test_cdist_cityblock_random(self): Y1 = cdist(X1, X2, 'cityblock') Y2 = cdist(X1, X2, 'test_cityblock') if verbose > 2: - print (Y1-Y2).max() + print((Y1-Y2).max()) self.assertTrue(within_tol(Y1, Y2, eps)) def test_cdist_hamming_double_random(self): @@ -163,7 +167,7 @@ def test_cdist_hamming_double_random(self): Y1 = cdist(X1, X2, 'hamming') Y2 = cdist(X1, X2, 'test_hamming') if verbose > 2: - print (Y1-Y2).max() + print((Y1-Y2).max()) self.assertTrue(within_tol(Y1, Y2, eps)) def test_cdist_hamming_bool_random(self): @@ -175,7 +179,7 @@ def test_cdist_hamming_bool_random(self): Y1 = cdist(X1, X2, 'hamming') Y2 = cdist(X1, X2, 'test_hamming') if verbose > 2: - print (Y1-Y2).max() + print((Y1-Y2).max()) self.assertTrue(within_tol(Y1, Y2, eps)) def test_cdist_jaccard_double_random(self): @@ -187,7 +191,7 @@ def test_cdist_jaccard_double_random(self): Y1 = cdist(X1, X2, 'jaccard') Y2 = cdist(X1, X2, 'test_jaccard') if verbose > 2: - print (Y1-Y2).max() + print((Y1-Y2).max()) self.assertTrue(within_tol(Y1, Y2, eps)) def test_cdist_jaccard_bool_random(self): @@ -199,7 +203,7 @@ def test_cdist_jaccard_bool_random(self): Y1 = cdist(X1, X2, 'jaccard') Y2 = cdist(X1, X2, 'test_jaccard') if verbose > 2: - print (Y1-Y2).max() + print((Y1-Y2).max()) self.assertTrue(within_tol(Y1, Y2, eps)) def test_cdist_chebychev_random(self): @@ -211,7 +215,7 @@ def test_cdist_chebychev_random(self): Y1 = cdist(X1, X2, 'chebychev') Y2 = cdist(X1, X2, 'test_chebychev') if verbose > 2: - print (Y1-Y2).max() + print((Y1-Y2).max()) self.assertTrue(within_tol(Y1, Y2, eps)) def test_cdist_minkowski_random_p3d8(self): @@ -223,7 +227,7 @@ def test_cdist_minkowski_random_p3d8(self): Y1 = cdist(X1, X2, 'minkowski', p=3.8) Y2 = cdist(X1, X2, 'test_minkowski', p=3.8) if verbose > 2: - print (Y1-Y2).max() + print((Y1-Y2).max()) self.assertTrue(within_tol(Y1, Y2, eps)) def test_cdist_minkowski_random_p4d6(self): @@ -235,7 +239,7 @@ def test_cdist_minkowski_random_p4d6(self): Y1 = 
cdist(X1, X2, 'minkowski', p=4.6) Y2 = cdist(X1, X2, 'test_minkowski', p=4.6) if verbose > 2: - print (Y1-Y2).max() + print((Y1-Y2).max()) self.assertTrue(within_tol(Y1, Y2, eps)) def test_cdist_minkowski_random_p1d23(self): @@ -247,7 +251,7 @@ def test_cdist_minkowski_random_p1d23(self): Y1 = cdist(X1, X2, 'minkowski', p=1.23) Y2 = cdist(X1, X2, 'test_minkowski', p=1.23) if verbose > 2: - print (Y1-Y2).max() + print((Y1-Y2).max()) self.assertTrue(within_tol(Y1, Y2, eps)) @@ -261,7 +265,7 @@ def test_cdist_wminkowski_random_p3d8(self): Y1 = cdist(X1, X2, 'wminkowski', p=3.8, w=w) Y2 = cdist(X1, X2, 'test_wminkowski', p=3.8, w=w) if verbose > 2: - print (Y1-Y2).max() + print((Y1-Y2).max()) self.assertTrue(within_tol(Y1, Y2, eps)) def test_cdist_wminkowski_random_p4d6(self): @@ -274,7 +278,7 @@ def test_cdist_wminkowski_random_p4d6(self): Y1 = cdist(X1, X2, 'wminkowski', p=4.6, w=w) Y2 = cdist(X1, X2, 'test_wminkowski', p=4.6, w=w) if verbose > 2: - print (Y1-Y2).max() + print((Y1-Y2).max()) self.assertTrue(within_tol(Y1, Y2, eps)) def test_cdist_wminkowski_random_p1d23(self): @@ -287,7 +291,7 @@ def test_cdist_wminkowski_random_p1d23(self): Y1 = cdist(X1, X2, 'wminkowski', p=1.23, w=w) Y2 = cdist(X1, X2, 'test_wminkowski', p=1.23, w=w) if verbose > 2: - print (Y1-Y2).max() + print((Y1-Y2).max()) self.assertTrue(within_tol(Y1, Y2, eps)) @@ -300,7 +304,7 @@ def test_cdist_seuclidean_random(self): Y1 = cdist(X1, X2, 'seuclidean') Y2 = cdist(X1, X2, 'test_seuclidean') if verbose > 2: - print (Y1-Y2).max() + print((Y1-Y2).max()) self.assertTrue(within_tol(Y1, Y2, eps)) @@ -313,7 +317,7 @@ def test_cdist_cosine_random(self): Y1 = cdist(X1, X2, 'cosine') Y2 = cdist(X1, X2, 'test_cosine') if verbose > 2: - print (Y1-Y2).max() + print((Y1-Y2).max()) self.assertTrue(within_tol(Y1, Y2, eps)) def test_cdist_correlation_random(self): @@ -325,7 +329,7 @@ def test_cdist_correlation_random(self): Y1 = cdist(X1, X2, 'correlation') Y2 = cdist(X1, X2, 'test_correlation') if verbose > 2: - print (Y1-Y2).max() + print((Y1-Y2).max()) self.assertTrue(within_tol(Y1, Y2, eps)) def test_cdist_mahalanobis_random(self): @@ -337,7 +341,7 @@ def test_cdist_mahalanobis_random(self): Y1 = cdist(X1, X2, 'mahalanobis') Y2 = cdist(X1, X2, 'test_mahalanobis') if verbose > 2: - print (Y1-Y2).max() + print((Y1-Y2).max()) self.assertTrue(within_tol(Y1, Y2, eps)) def test_cdist_canberra_random(self): @@ -349,7 +353,7 @@ def test_cdist_canberra_random(self): Y1 = cdist(X1, X2, 'canberra') Y2 = cdist(X1, X2, 'test_canberra') if verbose > 2: - print (Y1-Y2).max() + print((Y1-Y2).max()) self.assertTrue(within_tol(Y1, Y2, eps)) def test_cdist_braycurtis_random(self): @@ -361,8 +365,8 @@ def test_cdist_braycurtis_random(self): Y1 = cdist(X1, X2, 'braycurtis') Y2 = cdist(X1, X2, 'test_braycurtis') if verbose > 2: - print Y1, Y2 - print (Y1-Y2).max() + print(Y1, Y2) + print((Y1-Y2).max()) self.assertTrue(within_tol(Y1, Y2, eps)) def test_cdist_yule_random(self): @@ -374,7 +378,7 @@ def test_cdist_yule_random(self): Y1 = cdist(X1, X2, 'yule') Y2 = cdist(X1, X2, 'test_yule') if verbose > 2: - print (Y1-Y2).max() + print((Y1-Y2).max()) self.assertTrue(within_tol(Y1, Y2, eps)) def test_cdist_matching_random(self): @@ -386,7 +390,7 @@ def test_cdist_matching_random(self): Y1 = cdist(X1, X2, 'matching') Y2 = cdist(X1, X2, 'test_matching') if verbose > 2: - print (Y1-Y2).max() + print((Y1-Y2).max()) self.assertTrue(within_tol(Y1, Y2, eps)) def test_cdist_kulsinski_random(self): @@ -398,7 +402,7 @@ def test_cdist_kulsinski_random(self): Y1 = 
cdist(X1, X2, 'kulsinski') Y2 = cdist(X1, X2, 'test_kulsinski') if verbose > 2: - print (Y1-Y2).max() + print((Y1-Y2).max()) self.assertTrue(within_tol(Y1, Y2, eps)) def test_cdist_dice_random(self): @@ -410,7 +414,7 @@ def test_cdist_dice_random(self): Y1 = cdist(X1, X2, 'dice') Y2 = cdist(X1, X2, 'test_dice') if verbose > 2: - print (Y1-Y2).max() + print((Y1-Y2).max()) self.assertTrue(within_tol(Y1, Y2, eps)) def test_cdist_rogerstanimoto_random(self): @@ -422,7 +426,7 @@ def test_cdist_rogerstanimoto_random(self): Y1 = cdist(X1, X2, 'rogerstanimoto') Y2 = cdist(X1, X2, 'test_rogerstanimoto') if verbose > 2: - print (Y1-Y2).max() + print((Y1-Y2).max()) self.assertTrue(within_tol(Y1, Y2, eps)) def test_cdist_russellrao_random(self): @@ -434,7 +438,7 @@ def test_cdist_russellrao_random(self): Y1 = cdist(X1, X2, 'russellrao') Y2 = cdist(X1, X2, 'test_russellrao') if verbose > 2: - print (Y1-Y2).max() + print((Y1-Y2).max()) self.assertTrue(within_tol(Y1, Y2, eps)) def test_cdist_sokalmichener_random(self): @@ -446,7 +450,7 @@ def test_cdist_sokalmichener_random(self): Y1 = cdist(X1, X2, 'sokalmichener') Y2 = cdist(X1, X2, 'test_sokalmichener') if verbose > 2: - print (Y1-Y2).max() + print((Y1-Y2).max()) self.assertTrue(within_tol(Y1, Y2, eps)) def test_cdist_sokalsneath_random(self): @@ -458,7 +462,7 @@ def test_cdist_sokalsneath_random(self): Y1 = cdist(X1, X2, 'sokalsneath') Y2 = cdist(X1, X2, 'test_sokalsneath') if verbose > 2: - print (Y1-Y2).max() + print((Y1-Y2).max()) self.assertTrue(within_tol(Y1, Y2, eps)) @@ -485,7 +489,7 @@ def test_pdist_euclidean_random_u(self): X = eo['pdist-double-inp'] Y_right = eo['pdist-euclidean'] - Y_test1 = pdist(X, u'euclidean') + Y_test1 = pdist(X, u('euclidean')) self.assertTrue(within_tol(Y_test1, Y_right, eps)) def test_pdist_euclidean_random_float32(self): @@ -526,7 +530,7 @@ def test_pdist_euclidean_iris_float32(self): Y_test1 = pdist(X, 'euclidean') if verbose > 2: - print np.abs(Y_right - Y_test1).max() + print(np.abs(Y_right - Y_test1).max()) self.assertTrue(within_tol(Y_test1, Y_right, eps)) def test_pdist_euclidean_iris_nonC(self): @@ -646,7 +650,7 @@ def test_pdist_cosine_iris_float32(self): Y_test1 = pdist(X, 'cosine') if verbose > 2: - print np.abs(Y_test1 - Y_right).max() + print(np.abs(Y_test1 - Y_right).max()) self.assertTrue(within_tol(Y_test1, Y_right, eps)) #print "cosine-iris", np.abs(Y_test1 - Y_right).max() @@ -709,7 +713,7 @@ def test_pdist_cityblock_iris_float32(self): Y_test1 = pdist(X, 'cityblock') if verbose > 2: - print "cityblock-iris-float32", np.abs(Y_test1 - Y_right).max() + print("cityblock-iris-float32", np.abs(Y_test1 - Y_right).max()) self.assertTrue(within_tol(Y_test1, Y_right, eps)) def test_pdist_cityblock_iris_nonC(self): @@ -773,7 +777,7 @@ def test_pdist_correlation_iris_float32(self): Y_test1 = pdist(X, 'correlation') if verbose > 2: - print "correlation-iris", np.abs(Y_test1 - Y_right).max() + print("correlation-iris", np.abs(Y_test1 - Y_right).max()) self.assertTrue(within_tol(Y_test1, Y_right, eps)) def test_pdist_correlation_iris_nonC(self): @@ -867,7 +871,7 @@ def test_pdist_minkowski_5_8_iris_float32(self): Y_test1 = pdist(X, 'minkowski', 5.8) if verbose > 2: - print "minkowski-iris-5.8", np.abs(Y_test1 - Y_right).max() + print("minkowski-iris-5.8", np.abs(Y_test1 - Y_right).max()) self.assertTrue(within_tol(Y_test1, Y_right, eps)) def test_pdist_minkowski_5_8_iris_nonC(self): @@ -1056,7 +1060,7 @@ def test_pdist_chebychev_random_float32(self): Y_test1 = pdist(X, 'chebychev') if verbose > 2: - print 
"chebychev", np.abs(Y_test1 - Y_right).max() + print("chebychev", np.abs(Y_test1 - Y_right).max()) self.assertTrue(within_tol(Y_test1, Y_right, eps)) def test_pdist_chebychev_random_nonC(self): @@ -1087,7 +1091,7 @@ def test_pdist_chebychev_iris_float32(self): Y_right = eo['pdist-chebychev-iris'] Y_test1 = pdist(X, 'chebychev') if verbose > 2: - print "chebychev-iris", np.abs(Y_test1 - Y_right).max() + print("chebychev-iris", np.abs(Y_test1 - Y_right).max()) self.assertTrue(within_tol(Y_test1, Y_right, eps)) def test_pdist_chebychev_iris_nonC(self): @@ -1123,14 +1127,14 @@ def test_pdist_matching_match(self): D = eo['random-bool-data'] B = np.bool_(D) if verbose > 2: - print B.shape, B.dtype + print(B.shape, B.dtype) eps = 1e-10 y1 = pdist(B, "matching") y2 = pdist(B, "test_matching") y3 = pdist(D, "test_matching") if verbose > 2: - print np.abs(y1-y2).max() - print np.abs(y1-y3).max() + print(np.abs(y1-y2).max()) + print(np.abs(y1-y3).max()) self.assertTrue(within_tol(y1, y2, eps)) self.assertTrue(within_tol(y2, y3, eps)) @@ -1156,14 +1160,14 @@ def test_pdist_jaccard_match(self): "Tests pdist(X, 'jaccard') to see if the two implementations match on random double input data." D = eo['random-bool-data'] if verbose > 2: - print D.shape, D.dtype + print(D.shape, D.dtype) eps = 1e-10 y1 = pdist(D, "jaccard") y2 = pdist(D, "test_jaccard") y3 = pdist(np.bool_(D), "test_jaccard") if verbose > 2: - print np.abs(y1-y2).max() - print np.abs(y2-y3).max() + print(np.abs(y1-y2).max()) + print(np.abs(y2-y3).max()) self.assertTrue(within_tol(y1, y2, eps)) self.assertTrue(within_tol(y2, y3, eps)) @@ -1174,7 +1178,7 @@ def test_pdist_yule_mtica1(self): m2 = yule(np.array([1, 0, 1, 1, 0], dtype=np.bool), np.array([1, 1, 0, 1, 1], dtype=np.bool)) if verbose > 2: - print m + print(m) self.assertTrue(np.abs(m - 2.0) <= 1e-10) self.assertTrue(np.abs(m2 - 2.0) <= 1e-10) @@ -1185,7 +1189,7 @@ def test_pdist_yule_mtica2(self): m2 = yule(np.array([1, 0, 1], dtype=np.bool), np.array([1, 1, 0], dtype=np.bool)) if verbose > 2: - print m + print(m) self.assertTrue(np.abs(m - 2.0) <= 1e-10) self.assertTrue(np.abs(m2 - 2.0) <= 1e-10) @@ -1193,14 +1197,14 @@ def test_pdist_yule_match(self): "Tests pdist(X, 'yule') to see if the two implementations match on random double input data." D = eo['random-bool-data'] if verbose > 2: - print D.shape, D.dtype + print(D.shape, D.dtype) eps = 1e-10 y1 = pdist(D, "yule") y2 = pdist(D, "test_yule") y3 = pdist(np.bool_(D), "test_yule") if verbose > 2: - print np.abs(y1-y2).max() - print np.abs(y2-y3).max() + print(np.abs(y1-y2).max()) + print(np.abs(y2-y3).max()) self.assertTrue(within_tol(y1, y2, eps)) self.assertTrue(within_tol(y2, y3, eps)) @@ -1211,7 +1215,7 @@ def test_pdist_dice_mtica1(self): m2 = dice(np.array([1, 0, 1, 1, 0], dtype=np.bool), np.array([1, 1, 0, 1, 1], dtype=np.bool)) if verbose > 2: - print m + print(m) self.assertTrue(np.abs(m - (3.0/7.0)) <= 1e-10) self.assertTrue(np.abs(m2 - (3.0/7.0)) <= 1e-10) @@ -1222,7 +1226,7 @@ def test_pdist_dice_mtica2(self): m2 = dice(np.array([1, 0, 1], dtype=np.bool), np.array([1, 1, 0], dtype=np.bool)) if verbose > 2: - print m + print(m) self.assertTrue(np.abs(m - 0.5) <= 1e-10) self.assertTrue(np.abs(m2 - 0.5) <= 1e-10) @@ -1230,14 +1234,14 @@ def test_pdist_dice_match(self): "Tests pdist(X, 'dice') to see if the two implementations match on random double input data." 
D = eo['random-bool-data'] if verbose > 2: - print D.shape, D.dtype + print(D.shape, D.dtype) eps = 1e-10 y1 = pdist(D, "dice") y2 = pdist(D, "test_dice") y3 = pdist(D, "test_dice") if verbose > 2: - print np.abs(y1-y2).max() - print np.abs(y2-y3).max() + print(np.abs(y1-y2).max()) + print(np.abs(y2-y3).max()) self.assertTrue(within_tol(y1, y2, eps)) self.assertTrue(within_tol(y2, y3, eps)) @@ -1248,7 +1252,7 @@ def test_pdist_sokalsneath_mtica1(self): m2 = sokalsneath(np.array([1, 0, 1, 1, 0], dtype=np.bool), np.array([1, 1, 0, 1, 1], dtype=np.bool)) if verbose > 2: - print m + print(m) self.assertTrue(np.abs(m - (3.0/4.0)) <= 1e-10) self.assertTrue(np.abs(m2 - (3.0/4.0)) <= 1e-10) @@ -1259,7 +1263,7 @@ def test_pdist_sokalsneath_mtica2(self): m2 = sokalsneath(np.array([1, 0, 1], dtype=np.bool), np.array([1, 1, 0], dtype=np.bool)) if verbose > 2: - print m + print(m) self.assertTrue(np.abs(m - (4.0/5.0)) <= 1e-10) self.assertTrue(np.abs(m2 - (4.0/5.0)) <= 1e-10) @@ -1267,14 +1271,14 @@ def test_pdist_sokalsneath_match(self): "Tests pdist(X, 'sokalsneath') to see if the two implementations match on random double input data." D = eo['random-bool-data'] if verbose > 2: - print D.shape, D.dtype + print(D.shape, D.dtype) eps = 1e-10 y1 = pdist(D, "sokalsneath") y2 = pdist(D, "test_sokalsneath") y3 = pdist(np.bool_(D), "test_sokalsneath") if verbose > 2: - print np.abs(y1-y2).max() - print np.abs(y2-y3).max() + print(np.abs(y1-y2).max()) + print(np.abs(y2-y3).max()) self.assertTrue(within_tol(y1, y2, eps)) self.assertTrue(within_tol(y2, y3, eps)) @@ -1285,7 +1289,7 @@ def test_pdist_rogerstanimoto_mtica1(self): m2 = rogerstanimoto(np.array([1, 0, 1, 1, 0], dtype=np.bool), np.array([1, 1, 0, 1, 1], dtype=np.bool)) if verbose > 2: - print m + print(m) self.assertTrue(np.abs(m - (3.0/4.0)) <= 1e-10) self.assertTrue(np.abs(m2 - (3.0/4.0)) <= 1e-10) @@ -1296,7 +1300,7 @@ def test_pdist_rogerstanimoto_mtica2(self): m2 = rogerstanimoto(np.array([1, 0, 1], dtype=np.bool), np.array([1, 1, 0], dtype=np.bool)) if verbose > 2: - print m + print(m) self.assertTrue(np.abs(m - (4.0/5.0)) <= 1e-10) self.assertTrue(np.abs(m2 - (4.0/5.0)) <= 1e-10) @@ -1304,14 +1308,14 @@ def test_pdist_rogerstanimoto_match(self): "Tests pdist(X, 'rogerstanimoto') to see if the two implementations match on random double input data." D = eo['random-bool-data'] if verbose > 2: - print D.shape, D.dtype + print(D.shape, D.dtype) eps = 1e-10 y1 = pdist(D, "rogerstanimoto") y2 = pdist(D, "test_rogerstanimoto") y3 = pdist(np.bool_(D), "test_rogerstanimoto") if verbose > 2: - print np.abs(y1-y2).max() - print np.abs(y2-y3).max() + print(np.abs(y1-y2).max()) + print(np.abs(y2-y3).max()) self.assertTrue(within_tol(y1, y2, eps)) self.assertTrue(within_tol(y2, y3, eps)) @@ -1322,7 +1326,7 @@ def test_pdist_russellrao_mtica1(self): m2 = russellrao(np.array([1, 0, 1, 1, 0], dtype=np.bool), np.array([1, 1, 0, 1, 1], dtype=np.bool)) if verbose > 2: - print m + print(m) self.assertTrue(np.abs(m - (3.0/5.0)) <= 1e-10) self.assertTrue(np.abs(m2 - (3.0/5.0)) <= 1e-10) @@ -1333,7 +1337,7 @@ def test_pdist_russellrao_mtica2(self): m2 = russellrao(np.array([1, 0, 1], dtype=np.bool), np.array([1, 1, 0], dtype=np.bool)) if verbose > 2: - print m + print(m) self.assertTrue(np.abs(m - (2.0/3.0)) <= 1e-10) self.assertTrue(np.abs(m2 - (2.0/3.0)) <= 1e-10) @@ -1341,14 +1345,14 @@ def test_pdist_russellrao_match(self): "Tests pdist(X, 'russellrao') to see if the two implementations match on random double input data." 
D = eo['random-bool-data'] if verbose > 2: - print D.shape, D.dtype + print(D.shape, D.dtype) eps = 1e-10 y1 = pdist(D, "russellrao") y2 = pdist(D, "test_russellrao") y3 = pdist(np.bool_(D), "test_russellrao") if verbose > 2: - print np.abs(y1-y2).max() - print np.abs(y2-y3).max() + print(np.abs(y1-y2).max()) + print(np.abs(y2-y3).max()) self.assertTrue(within_tol(y1, y2, eps)) self.assertTrue(within_tol(y2, y3, eps)) @@ -1356,14 +1360,14 @@ def test_pdist_sokalmichener_match(self): "Tests pdist(X, 'sokalmichener') to see if the two implementations match on random double input data." D = eo['random-bool-data'] if verbose > 2: - print D.shape, D.dtype + print(D.shape, D.dtype) eps = 1e-10 y1 = pdist(D, "sokalmichener") y2 = pdist(D, "test_sokalmichener") y3 = pdist(np.bool_(D), "test_sokalmichener") if verbose > 2: - print np.abs(y1-y2).max() - print np.abs(y2-y3).max() + print(np.abs(y1-y2).max()) + print(np.abs(y2-y3).max()) self.assertTrue(within_tol(y1, y2, eps)) self.assertTrue(within_tol(y2, y3, eps)) @@ -1371,25 +1375,25 @@ def test_pdist_kulsinski_match(self): "Tests pdist(X, 'kulsinski') to see if the two implementations match on random double input data." D = eo['random-bool-data'] if verbose > 2: - print D.shape, D.dtype + print(D.shape, D.dtype) eps = 1e-10 y1 = pdist(D, "kulsinski") y2 = pdist(D, "test_kulsinski") y3 = pdist(np.bool_(D), "test_kulsinski") if verbose > 2: - print np.abs(y1-y2).max() + print(np.abs(y1-y2).max()) self.assertTrue(within_tol(y1, y2, eps)) def test_pdist_canberra_match(self): "Tests pdist(X, 'canberra') to see if the two implementations match on the Iris data set." D = eo['iris'] if verbose > 2: - print D.shape, D.dtype + print(D.shape, D.dtype) eps = 1e-10 y1 = pdist(D, "canberra") y2 = pdist(D, "test_canberra") if verbose > 2: - print np.abs(y1-y2).max() + print(np.abs(y1-y2).max()) self.assertTrue(within_tol(y1, y2, eps)) def test_pdist_canberra_ticket_711(self): @@ -1398,7 +1402,7 @@ def test_pdist_canberra_ticket_711(self): pdist_y = pdist(([3.3], [3.4]), "canberra") right_y = 0.01492537 if verbose > 2: - print np.abs(pdist_y-right_y).max() + print(np.abs(pdist_y-right_y).max()) self.assertTrue(within_tol(pdist_y, right_y, eps)) @@ -1531,7 +1535,7 @@ def check_squareform_multi_matrix(self, n): s = A.shape k = 0 if verbose >= 3: - print A.shape, Y.shape, Yr.shape + print(A.shape, Y.shape, Yr.shape) self.assertTrue(len(s) == 2) self.assertTrue(len(Yr.shape) == 1) self.assertTrue(s[0] == s[1]) @@ -1609,7 +1613,7 @@ def test_num_obs_dm_multi_matrix(self): Y = pdist(X) A = squareform(Y) if verbose >= 3: - print A.shape, Y.shape + print(A.shape, Y.shape) self.assertTrue(num_obs_dm(A) == n) def test_num_obs_dm_0(self): diff --git a/scipy/spatial/tests/test_kdtree.py b/scipy/spatial/tests/test_kdtree.py index a365770b2d59..d92170489f5f 100644 --- a/scipy/spatial/tests/test_kdtree.py +++ b/scipy/spatial/tests/test_kdtree.py @@ -1,6 +1,8 @@ # Copyright Anne M. 
Archibald 2008 # Released under the scipy license +from __future__ import division, print_function, absolute_import + from numpy.testing import assert_equal, assert_array_equal, assert_almost_equal, \ assert_, run_module_suite @@ -613,7 +615,7 @@ def test_query_pairs_single_node_compiled(): def test_ball_point_ints(): """Regression test for #1373.""" x, y = np.mgrid[0:4, 0:4] - points = zip(x.ravel(), y.ravel()) + points = list(zip(x.ravel(), y.ravel())) tree = KDTree(points) assert_equal(sorted([4, 8, 9, 12]), sorted(tree.query_ball_point((2, 0), 1))) diff --git a/scipy/spatial/tests/test_qhull.py b/scipy/spatial/tests/test_qhull.py index 1b4b5b7038d1..72c647b9603e 100644 --- a/scipy/spatial/tests/test_qhull.py +++ b/scipy/spatial/tests/test_qhull.py @@ -1,10 +1,12 @@ +from __future__ import division, print_function, absolute_import + import os import sys import numpy as np from numpy.testing import assert_equal, assert_almost_equal, run_module_suite,\ assert_, dec, assert_allclose, assert_array_equal, assert_raises -from numpy.compat import asbytes +from scipy.lib.six.moves import xrange import copy import scipy.spatial.qhull as qhull @@ -21,9 +23,9 @@ def assert_unordered_tuple_list_equal(a, b, tpl=tuple): a = a.tolist() if isinstance(b, np.ndarray): b = b.tolist() - a = map(tpl, a) + a = list(map(tpl, a)) a.sort() - b = map(tpl, b) + b = list(map(tpl, b)) b.sort() assert_equal(a, b) @@ -77,14 +79,14 @@ class Test_Qhull(object): def test_swapping(self): # Check that Qhull state swapping works - x = qhull._Qhull(asbytes('v'), + x = qhull._Qhull(b'v', np.array([[0,0],[0,1],[1,0],[1,1.],[0.5,0.5]]), - asbytes('Qz')) + b'Qz') xd = copy.deepcopy(x.get_voronoi_diagram()) - y = qhull._Qhull(asbytes('v'), + y = qhull._Qhull(b'v', np.array([[0,0],[0,1],[1,0],[1,2.]]), - asbytes('Qz')) + b'Qz') yd = copy.deepcopy(y.get_voronoi_diagram()) xd2 = copy.deepcopy(x.get_voronoi_diagram()) @@ -259,7 +261,7 @@ def test_more_barycentric_transforms(self): for ndim in xrange(2, 6): # Generate an uniform grid in n-d unit cube x = np.linspace(0, 1, npoints[ndim]) - grid = np.c_[map(np.ravel, np.broadcast_arrays(*np.ix_(*([x]*ndim))))].T + grid = np.c_[list(map(np.ravel, np.broadcast_arrays(*np.ix_(*([x]*ndim)))))].T err_msg = "ndim=%d" % ndim @@ -655,10 +657,9 @@ def _compare_qvoronoi(self, points, output, **kw): """Compare to output from 'qvoronoi o Fv < data' to Voronoi()""" # Parse output - output = map(lambda x: map(float, x.split()), - output.strip().splitlines()) + output = [list(map(float, x.split())) for x in output.strip().splitlines()] nvertex = int(output[1][0]) - vertices = map(tuple, output[3:2+nvertex]) # exclude inf + vertices = list(map(tuple, output[3:2+nvertex])) # exclude inf nregion = int(output[1][1]) regions = [[int(y)-1 for y in x[1:]] for x in output[2+nvertex:2+nvertex+nregion]] @@ -678,9 +679,9 @@ def sorttuple(x): assert_equal(set(map(tuple, vor.regions)), set(map(tuple, regions))) - p1 = zip(map(sorttuple, ridge_points), map(sorttuple, ridge_vertices)) - p2 = zip(map(sorttuple, vor.ridge_points.tolist()), - map(sorttuple, vor.ridge_vertices)) + p1 = list(zip(list(map(sorttuple, ridge_points)), list(map(sorttuple, ridge_vertices)))) + p2 = list(zip(list(map(sorttuple, vor.ridge_points.tolist())), + list(map(sorttuple, vor.ridge_vertices)))) p1.sort() p2.sort() diff --git a/scipy/special/__init__.py b/scipy/special/__init__.py index ebdb2977452d..f22554dec424 100644 --- a/scipy/special/__init__.py +++ b/scipy/special/__init__.py @@ -524,17 +524,19 @@ class also has an attribute 
``weights`` which returns the roots, weights, """ -from _ufuncs import * -from _ufuncs_cxx import * +from __future__ import division, print_function, absolute_import -from basic import * -import specfun -import orthogonal -from orthogonal import * -from spfun_stats import multigammaln -from lambertw import lambertw +from ._ufuncs import * +from ._ufuncs_cxx import * -__all__ = filter(lambda s:not s.startswith('_'),dir()) +from .basic import * +from . import specfun +from . import orthogonal +from .orthogonal import * +from .spfun_stats import multigammaln +from .lambertw import lambertw + +__all__ = [s for s in dir() if not s.startswith('_')] from numpy.dual import register_func register_func('i0',i0) diff --git a/scipy/special/_testutils.py b/scipy/special/_testutils.py index bf12211be92d..64ebe2a60094 100644 --- a/scipy/special/_testutils.py +++ b/scipy/special/_testutils.py @@ -1,3 +1,5 @@ +from __future__ import division, print_function, absolute_import + import os import warnings @@ -78,7 +80,7 @@ def assert_func_equal(func, results, points, rtol=None, atol=None, npoints = points.shape[1] data = np.c_[points, results] - fdata = FuncData(func, data, range(npoints), range(npoints, data.shape[1]), + fdata = FuncData(func, data, list(range(npoints)), list(range(npoints, data.shape[1])), rtol=rtol, atol=atol, param_filter=param_filter, knownfailure=knownfailure) fdata.check() @@ -161,7 +163,7 @@ def check(self, data=None, dtype=None): param_mask = np.ones((data.shape[0],), np.bool_) for j, filter in zip(self.param_columns, self.param_filter): if filter: - param_mask &= filter(data[:,j]) + param_mask &= list(filter(data[:,j])) data = data[param_mask] # Pick parameters and results from the correct columns @@ -229,7 +231,7 @@ def check(self, data=None, dtype=None): def __repr__(self): """Pretty-printing, esp. for Nose output""" - if np.any(map(np.iscomplexobj, self.param_columns)): + if np.any(list(map(np.iscomplexobj, self.param_columns))): is_complex = " (complex)" else: is_complex = "" diff --git a/scipy/special/add_newdocs.py b/scipy/special/add_newdocs.py index 1e56618423eb..26964f5e3b27 100755 --- a/scipy/special/add_newdocs.py +++ b/scipy/special/add_newdocs.py @@ -7,6 +7,8 @@ # scipy.special at the C level when the ufuncs are created at compile # time. 
+from __future__ import division, print_function, absolute_import + docdict = {} def get(name): diff --git a/scipy/special/amos/setup.py b/scipy/special/amos/setup.py deleted file mode 100644 index bb3eb7330d2b..000000000000 --- a/scipy/special/amos/setup.py +++ /dev/null @@ -1,55 +0,0 @@ -import distutils -from distutils.core import setup, Extension -import distutils.dep_util -import os - -def fortran_extension(module_name, c_files, fortran_files, library_dirs, - libraries): - fcompiler = f90_compiler() - library_name = module_name + '_fortran' - fcompiler.build_library(library_name,fortran_files) - libraries.append(library_name) - ext = Extension(module_name, c_files, - library_dirs=library_dirs_list, - libraries = libraries_list) - return ext - -class f90_compiler: - def __init__(self): - self.compiler_name = 'g77' - def to_object(self,dirty_files): - files = " ".join(dirty_files) - cmd = self.compiler_name + ' -c ' + files - print cmd - failure = os.system(cmd) - if failure: - raise ValueError('failure during compile') - def object_to_library(self,library_name,object_files): - objects = " ".join(object_files) - cmd = 'ar -cr lib%s.a %s' % (library_name,objects) - print cmd - os.system(cmd) - cmd = 'ranlib lib%s.a' % library_name - print cmd - os.system(cmd) - def build_library(self,library_name,source_list): - - object_list = map(lambda x: x[:-1] +'o',source_list) - file_pairs = zip(source_list,object_list) - dirty_files = [] - for source,object in file_pairs: - if distutils.dep_util.newer(source,object): - dirty_files.append(source) - if dirty_files != []: - self.to_object(dirty_files) - self.object_to_library(library_name,object_list) - -if __name__ == "__main__": - import setup # this file - d,f = os.path.split(setup.__file__) - print d,f - files = os.listdir(os.path.abspath(d)) - source_files = filter(lambda x: x[-1:] == 'f',files) - source_files = map(lambda x: os.path.abspath(x),source_files) - compiler = f90_compiler() - compiler.build_library('common',source_files) diff --git a/scipy/special/basic.py b/scipy/special/basic.py index 05e51eca0297..b0dac8e1a364 100644 --- a/scipy/special/basic.py +++ b/scipy/special/basic.py @@ -2,17 +2,20 @@ # Author: Travis Oliphant, 2002 # +from __future__ import division, print_function, absolute_import + import numpy as np +from scipy.lib.six.moves import xrange from numpy import pi, asarray, floor, isscalar, iscomplex, real, imag, sqrt, \ where, mgrid, cos, sin, exp, place, seterr, issubdtype, extract, \ less, vectorize, inexact, nan, zeros, sometrue, atleast_1d -from _ufuncs import ellipkm1, mathieu_a, mathieu_b, iv, jv, gamma, psi, zeta, \ +from ._ufuncs import ellipkm1, mathieu_a, mathieu_b, iv, jv, gamma, psi, zeta, \ hankel1, hankel2, yv, kv, gammaln, ndtri -import _ufuncs -import _ufuncs_cxx +from . import _ufuncs +from . import _ufuncs_cxx import types -import specfun -import orthogonal +from . import specfun +from . import orthogonal import warnings __all__ = ['agm', 'ai_zeros', 'assoc_laguerre', 'bei_zeros', 'beip_zeros', @@ -69,7 +72,7 @@ def diric(x,n): ytype = float y = zeros(x.shape,ytype) - mask1 = (n <= 0) | (n <> floor(n)) + mask1 = (n <= 0) | (n != floor(n)) place(y,mask1,nan) z = asarray(x / 2.0 / pi) @@ -189,7 +192,7 @@ def bessel_diff_formula(v, z, n, L, phase): def jvp(v,z,n=1): """Return the nth derivative of Jv(z) with respect to z. 
""" - if not isinstance(n,types.IntType) or (n<0): + if not isinstance(n,int) or (n<0): raise ValueError("n must be a non-negative integer.") if n == 0: return jv(v,z) @@ -200,7 +203,7 @@ def jvp(v,z,n=1): def yvp(v,z,n=1): """Return the nth derivative of Yv(z) with respect to z. """ - if not isinstance(n,types.IntType) or (n<0): + if not isinstance(n,int) or (n<0): raise ValueError("n must be a non-negative integer.") if n == 0: return yv(v,z) @@ -211,7 +214,7 @@ def yvp(v,z,n=1): def kvp(v,z,n=1): """Return the nth derivative of Kv(z) with respect to z. """ - if not isinstance(n,types.IntType) or (n<0): + if not isinstance(n,int) or (n<0): raise ValueError("n must be a non-negative integer.") if n == 0: return kv(v,z) @@ -221,7 +224,7 @@ def kvp(v,z,n=1): def ivp(v,z,n=1): """Return the nth derivative of Iv(z) with respect to z. """ - if not isinstance(n,types.IntType) or (n<0): + if not isinstance(n,int) or (n<0): raise ValueError("n must be a non-negative integer.") if n == 0: return iv(v,z) @@ -231,7 +234,7 @@ def ivp(v,z,n=1): def h1vp(v,z,n=1): """Return the nth derivative of H1v(z) with respect to z. """ - if not isinstance(n,types.IntType) or (n<0): + if not isinstance(n,int) or (n<0): raise ValueError("n must be a non-negative integer.") if n == 0: return hankel1(v,z) @@ -242,7 +245,7 @@ def h1vp(v,z,n=1): def h2vp(v,z,n=1): """Return the nth derivative of H2v(z) with respect to z. """ - if not isinstance(n,types.IntType) or (n<0): + if not isinstance(n,int) or (n<0): raise ValueError("n must be a non-negative integer.") if n == 0: return hankel2(v,z) @@ -537,7 +540,7 @@ def mathieu_even_coef(m,q): qm=17.0+3.1*sqrt(q)-.126*q+.0037*sqrt(q)*q km = int(qm+0.5*m) if km > 251: - print "Warning, too many predicted coefficients." + print("Warning, too many predicted coefficients.") kd = 1 m = int(floor(m)) if m % 2: @@ -564,7 +567,7 @@ def mathieu_odd_coef(m,q): qm=17.0+3.1*sqrt(q)-.126*q+.0037*sqrt(q)*q km = int(qm+0.5*m) if km > 251: - print "Warning, too many predicted coefficients." + print("Warning, too many predicted coefficients.") kd = 4 m = int(floor(m)) if m % 2: diff --git a/scipy/special/generate_ufuncs.py b/scipy/special/generate_ufuncs.py index 2764370b7fd2..3cc191f11fb3 100755 --- a/scipy/special/generate_ufuncs.py +++ b/scipy/special/generate_ufuncs.py @@ -55,6 +55,8 @@ """ +from __future__ import division, print_function, absolute_import + #--------------------------------------------------------------------------------- # Ufunc listing #--------------------------------------------------------------------------------- @@ -342,7 +344,7 @@ def _errprint(inflag=None): import optparse import re import textwrap -import add_newdocs +from . import add_newdocs CY_TYPES = { 'f': 'float', @@ -380,7 +382,7 @@ def _errprint(inflag=None): } def cast_order(c): - return map(lambda x: 'ilfdgFDG'.index(x), c) + return ['ilfdgFDG'.index(x) for x in c] # These downcasts will cause the function to return NaNs, unless the # values happen to coincide exactly. 
@@ -608,13 +610,13 @@ def _parse_signatures(self, sigs): def _parse_signature(self, sig): m = re.match("\s*(.*):\s*([fdgFDGil]*)\s*\\*\s*([fdgFDGil]*)\s*->\s*([*fdgFDGil]*)\s*$", sig) if m: - func, inarg, outarg, ret = map(lambda x: x.strip(), m.groups()) + func, inarg, outarg, ret = [x.strip() for x in m.groups()] if ret.count('*') > 1: raise ValueError("%s: Invalid signature: %r" % (self.name, sig)) return (func, inarg, outarg, ret) m = re.match("\s*(.*):\s*([fdgFDGil]*)\s*->\s*([fdgFDGil]?)\s*$", sig) if m: - func, inarg, ret = map(lambda x: x.strip(), m.groups()) + func, inarg, ret = [x.strip() for x in m.groups()] return (func, inarg, "", ret) raise ValueError("%s: Invalid signature: %r" % (self.name, sig)) @@ -800,7 +802,7 @@ def generate(filename, ufunc_str, extra_code): defs_h.append("#include \"%s\"" % header) defs_h.append("%s;" % (c_proto.replace('(*)', c_name))) - toplevel = "\n".join(all_loops.values() + [defs, toplevel]) + toplevel = "\n".join(list(all_loops.values()) + [defs, toplevel]) f = open(filename, 'wb') f.write("""\ diff --git a/scipy/special/lambertw.py b/scipy/special/lambertw.py index 360212b0037b..52d1d61fbe2f 100644 --- a/scipy/special/lambertw.py +++ b/scipy/special/lambertw.py @@ -1,4 +1,6 @@ -from _ufuncs import _lambertw +from __future__ import division, print_function, absolute_import + +from ._ufuncs import _lambertw def lambertw(z, k=0, tol=1e-8): r""" diff --git a/scipy/special/orthogonal.py b/scipy/special/orthogonal.py index fa20f1af5ca4..37e80f852ca6 100755 --- a/scipy/special/orthogonal.py +++ b/scipy/special/orthogonal.py @@ -81,13 +81,15 @@ # Author: Travis Oliphant 2000 # Updated Sep. 2003 (fixed bugs --- tested to be accurate) +from __future__ import division, print_function, absolute_import + # Scipy imports. import numpy as np from numpy import all, any, exp, inf, pi, sqrt from numpy.dual import eig # Local imports. -import _ufuncs as cephes +from . 
import _ufuncs as cephes _gam = cephes.gamma __all__ = ['legendre', 'chebyt', 'chebyu', 'chebyc', 'chebys', @@ -110,7 +112,7 @@ class orthopoly1d(np.poly1d): def __init__(self, roots, weights=None, hn=1.0, kn=1.0, wfunc=None, limits=None, monic=0,eval_func=None): np.poly1d.__init__(self, roots, r=1) equiv_weights = [weights[k] / wfunc(roots[k]) for k in range(len(roots))] - self.__dict__['weights'] = np.array(zip(roots,weights,equiv_weights)) + self.__dict__['weights'] = np.array(list(zip(roots,weights,equiv_weights))) self.__dict__['weight_func'] = wfunc self.__dict__['limits'] = limits mu = sqrt(hn) @@ -695,7 +697,7 @@ def sh_legendre(n, monic=0): #------------------------------------------------------------------------------ # Vectorized functions for evaluation #------------------------------------------------------------------------------ -from _ufuncs import \ +from ._ufuncs import \ binom, eval_jacobi, eval_sh_jacobi, eval_gegenbauer, eval_chebyt, \ eval_chebyu, eval_chebys, eval_chebyc, eval_sh_chebyt, eval_sh_chebyu, \ eval_legendre, eval_sh_legendre, eval_genlaguerre, eval_laguerre, \ diff --git a/scipy/special/setup.py b/scipy/special/setup.py index 493f9d6a94e5..9962570b3138 100755 --- a/scipy/special/setup.py +++ b/scipy/special/setup.py @@ -1,5 +1,7 @@ #!/usr/bin/env python +from __future__ import division, print_function, absolute_import + import os import sys from os.path import join diff --git a/scipy/special/setupscons.py b/scipy/special/setupscons.py index 7114f22a395b..029b65a4a84e 100755 --- a/scipy/special/setupscons.py +++ b/scipy/special/setupscons.py @@ -1,5 +1,7 @@ #!/usr/bin/env python +from __future__ import division, print_function, absolute_import + import os import sys from os.path import join diff --git a/scipy/special/spfun_stats.py b/scipy/special/spfun_stats.py index eed0fe9414b0..c2dc661e1661 100644 --- a/scipy/special/spfun_stats.py +++ b/scipy/special/spfun_stats.py @@ -33,6 +33,8 @@ """Some more special functions which may be useful for multivariate statistical analysis.""" +from __future__ import division, print_function, absolute_import + import numpy as np from scipy.special import gammaln as loggam diff --git a/scipy/special/tests/test_basic.py b/scipy/special/tests/test_basic.py index 925839c3f027..67853cfa8001 100644 --- a/scipy/special/tests/test_basic.py +++ b/scipy/special/tests/test_basic.py @@ -20,6 +20,8 @@ # test_sph_jn # test_sph_kn +from __future__ import division, print_function, absolute_import + import warnings import numpy as np @@ -86,10 +88,10 @@ def test_binom_2(self): def test_binom_exact(self): @np.vectorize def binom_int(n, k): - n = long(n) - k = long(k) - num = long(1) - den = long(1) + n = int(n) + k = int(k) + num = int(1) + den = int(1) for i in range(1, k+1): num *= i + n - k den *= i @@ -1086,9 +1088,9 @@ def test_euler(self): assert_almost_equal(eu2[2],-1,8) eu24 = special.euler(24) mathworld = [1,1,5,61,1385,50521,2702765,199360981, - 19391512145l,2404879675441l, - 370371188237525l,69348874393137901l, - 15514534163557086905l] + 19391512145,2404879675441, + 370371188237525,69348874393137901, + 15514534163557086905] correct = zeros((25,),'d') for k in range(0,13): if (k % 2): @@ -2294,7 +2296,7 @@ def test_riccati_yn(self): class TestRound(TestCase): def test_round(self): - rnd = map(int,(special.round(10.1),special.round(10.4),special.round(10.5),special.round(10.6))) + rnd = list(map(int,(special.round(10.1),special.round(10.4),special.round(10.5),special.round(10.6)))) # Note: According to the documentation, 
scipy.special.round is # supposed to round to the nearest even number if the fractional diff --git a/scipy/special/tests/test_data.py b/scipy/special/tests/test_data.py index 6e6792a3ceb2..bf927ff2db93 100644 --- a/scipy/special/tests/test_data.py +++ b/scipy/special/tests/test_data.py @@ -1,3 +1,5 @@ +from __future__ import division, print_function, absolute_import + import os import numpy as np diff --git a/scipy/special/tests/test_lambertw.py b/scipy/special/tests/test_lambertw.py index 96ff81ca30dd..3629af7b7943 100644 --- a/scipy/special/tests/test_lambertw.py +++ b/scipy/special/tests/test_lambertw.py @@ -6,6 +6,8 @@ # [1] mpmath source code, Subversion revision 992 # http://code.google.com/p/mpmath/source/browse/trunk/mpmath/tests/test_functions2.py?spec=svn994&r=992 +from __future__ import division, print_function, absolute_import + import numpy as np from numpy.testing import assert_, assert_equal, assert_array_almost_equal from scipy.special import lambertw diff --git a/scipy/special/tests/test_logit.py b/scipy/special/tests/test_logit.py index b296d7f9c2a3..cfa1610be6dd 100644 --- a/scipy/special/tests/test_logit.py +++ b/scipy/special/tests/test_logit.py @@ -1,3 +1,5 @@ +from __future__ import division, print_function, absolute_import + import numpy as np from numpy.testing import TestCase, assert_equal, assert_almost_equal from scipy.special import logit, expit diff --git a/scipy/special/tests/test_mpmath.py b/scipy/special/tests/test_mpmath.py index 981016237f2f..da4f76662ca1 100644 --- a/scipy/special/tests/test_mpmath.py +++ b/scipy/special/tests/test_mpmath.py @@ -2,6 +2,8 @@ Test Scipy functions versus mpmath, if available. """ +from __future__ import division, print_function, absolute_import + import re import numpy as np from numpy.testing import dec @@ -26,7 +28,7 @@ def try_int(v): except ValueError: return v def get_version(v): - return map(try_int, re.split('[^0-9]', v)) + return list(map(try_int, re.split('[^0-9]', v))) return dec.skipif(get_version(min_ver) > get_version(mpmath.__version__), "mpmath %s required" % min_ver) diff --git a/scipy/special/tests/test_orthogonal.py b/scipy/special/tests/test_orthogonal.py index 3a6eeddf60bd..c24cf41ac97e 100644 --- a/scipy/special/tests/test_orthogonal.py +++ b/scipy/special/tests/test_orthogonal.py @@ -1,5 +1,8 @@ +from __future__ import division, print_function, absolute_import + from numpy.testing import assert_array_almost_equal, assert_almost_equal, \ rand, TestCase +from scipy.lib.six.moves import xrange import numpy as np from numpy import array, sqrt import scipy.special.orthogonal as orth diff --git a/scipy/special/tests/test_orthogonal_eval.py b/scipy/special/tests/test_orthogonal_eval.py index 6ef0385e5a79..9d17d1a44136 100644 --- a/scipy/special/tests/test_orthogonal_eval.py +++ b/scipy/special/tests/test_orthogonal_eval.py @@ -1,3 +1,5 @@ +from __future__ import division, print_function, absolute_import + import numpy as np from numpy.testing import assert_ import scipy.special.orthogonal as orth @@ -61,7 +63,7 @@ def polyfunc(*p): olderr = np.seterr(all='raise') try: - ds = FuncData(polyfunc, dataset, range(len(param_ranges)+2), -1, + ds = FuncData(polyfunc, dataset, list(range(len(param_ranges)+2)), -1, rtol=rtol) ds.check() finally: @@ -175,7 +177,7 @@ def polyfunc(*p): olderr = np.seterr(all='raise') try: - ds = FuncData(polyfunc, dataset, range(len(param_ranges)+2), -1, + ds = FuncData(polyfunc, dataset, list(range(len(param_ranges)+2)), -1, rtol=rtol) ds.check() finally: diff --git 
a/scipy/special/tests/test_spfun_stats.py b/scipy/special/tests/test_spfun_stats.py index 57404802950b..5cc1b2cbbb0f 100644 --- a/scipy/special/tests/test_spfun_stats.py +++ b/scipy/special/tests/test_spfun_stats.py @@ -1,3 +1,5 @@ +from __future__ import division, print_function, absolute_import + import numpy as np from numpy.testing import assert_array_equal, TestCase, run_module_suite, \ assert_array_almost_equal_nulp diff --git a/scipy/special/utils/convert.py b/scipy/special/utils/convert.py index 7a72eb556fe7..572858678a08 100644 --- a/scipy/special/utils/convert.py +++ b/scipy/special/utils/convert.py @@ -1,5 +1,7 @@ # This script is used to parse BOOST special function test data into something # we can easily import in numpy. It is ugly as hell, but it works. +from __future__ import division, print_function, absolute_import + import re import os @@ -185,5 +187,5 @@ def dump_datasets(filename): if __name__ == '__main__': for filename in DATA_FILES: filename = os.path.join(BOOST_SRC, filename) - print "================= %s ===============" % filename + print("================= %s ===============" % filename) dump_datasets(filename) diff --git a/scipy/special/utils/datafunc.py b/scipy/special/utils/datafunc.py index ee4e161b7180..1c039553f563 100644 --- a/scipy/special/utils/datafunc.py +++ b/scipy/special/utils/datafunc.py @@ -1,3 +1,5 @@ +from __future__ import division, print_function, absolute_import + import csv import numpy as np @@ -8,7 +10,7 @@ def parse_txt_data(filename): reader = csv.reader(f, delimiter=',') data = [] for row in reader: - data.append(map(float, row)) + data.append(list(map(float, row))) nc = len(data[0]) for i in data: if not nc == len(i): @@ -60,5 +62,5 @@ def run_test(filename, funcs, args=[0]): for root, dirs, files in os.walk(DATA_DIR): for f in files: name = os.path.join(root, f) - print name + print(name) data.append(parse_txt_data(name)) diff --git a/scipy/special/utils/makenpz.py b/scipy/special/utils/makenpz.py index bba52c5afdff..0ec6c184c10b 100644 --- a/scipy/special/utils/makenpz.py +++ b/scipy/special/utils/makenpz.py @@ -5,6 +5,9 @@ Build a npz containing all data files in the directory. """ + +from __future__ import division, print_function, absolute_import + import os import numpy as np from optparse import OptionParser @@ -33,7 +36,7 @@ def main(): try: data[key] = np.loadtxt(fn) except ValueError: - print "Failed to load", fn + print("Failed to load", fn) savez_compress(outp, **data) @@ -44,7 +47,7 @@ def savez_compress(file, *args, **kwds): # Import deferred for startup time improvement import tempfile - if isinstance(file, basestring): + if isinstance(file, str): if not file.endswith('.npz'): file = file + '.npz' @@ -61,7 +64,7 @@ def savez_compress(file, *args, **kwds): fd, tmpfile = tempfile.mkstemp(suffix='-numpy.npy') os.close(fd) try: - for key, val in namedict.iteritems(): + for key, val in namedict.items(): fname = key + '.npy' fid = open(tmpfile, 'wb') try: diff --git a/scipy/stats/__init__.py b/scipy/stats/__init__.py index 1310b27f20b2..77c5404ab4a2 100644 --- a/scipy/stats/__init__.py +++ b/scipy/stats/__init__.py @@ -324,18 +324,19 @@ interface package rpy. 
""" +from __future__ import division, print_function, absolute_import -from stats import * -from distributions import * -from rv import * -from morestats import * -from _binned_statistic import * -from kde import gaussian_kde -import mstats -from contingency import chi2_contingency +from .stats import * +from .distributions import * +from .rv import * +from .morestats import * +from ._binned_statistic import * +from .kde import gaussian_kde +from . import mstats +from .contingency import chi2_contingency #remove vonmises_cython from __all__, I don't know why it is included -__all__ = filter(lambda s:not (s.startswith('_') or s.endswith('cython')),dir()) +__all__ = [s for s in dir() if not (s.startswith('_') or s.endswith('cython'))] from numpy.testing import Tester test = Tester().test diff --git a/scipy/stats/_binned_statistic.py b/scipy/stats/_binned_statistic.py index ec85cb380ec9..6e2480f39d57 100644 --- a/scipy/stats/_binned_statistic.py +++ b/scipy/stats/_binned_statistic.py @@ -1,5 +1,7 @@ -import numpy as np +from __future__ import division, print_function, absolute_import +import numpy as np +from scipy.lib.six import callable def binned_statistic(x, values, statistic='mean', bins=10, range=None): diff --git a/scipy/stats/_support.py b/scipy/stats/_support.py index 122cd9051046..49145c6bdd50 100644 --- a/scipy/stats/_support.py +++ b/scipy/stats/_support.py @@ -1,3 +1,5 @@ +from __future__ import division, print_function, absolute_import + from numpy import asarray import numpy as np import copy @@ -63,7 +65,7 @@ def unique(inarray): for item in inarray[1:]: newflag = 1 for unq in uniques: # NOTE: cmp --> 0=same, -1=<, 1=> - test = np.sum(abs(np.array(map(cmp,item,unq))),axis=0) + test = np.sum(abs(np.array(list(map(cmp,item,unq)))),axis=0) if test == 0: # if item identical to any 1 row in uniques newflag = 0 # then not a novel item to add break @@ -175,7 +177,7 @@ def collapse(a, keepcols, collapsecols, stderr=0, ns=0, cfcn=None): """ if cfcn is None: - cfcn = lambda(x): np.mean(x, axis=0) + cfcn = lambda x: np.mean(x, axis=0) a = asarray(a) if keepcols == []: avgcol = colex(a,collapsecols) diff --git a/scipy/stats/_tukeylambda_stats.py b/scipy/stats/_tukeylambda_stats.py index 312a1085fdf5..5916fff6c54b 100644 --- a/scipy/stats/_tukeylambda_stats.py +++ b/scipy/stats/_tukeylambda_stats.py @@ -1,4 +1,4 @@ - +from __future__ import division, print_function, absolute_import import numpy as np from numpy import array, poly1d diff --git a/scipy/stats/contingency.py b/scipy/stats/contingency.py index db82c00961b3..664e43eac68d 100644 --- a/scipy/stats/contingency.py +++ b/scipy/stats/contingency.py @@ -3,8 +3,12 @@ # Author: Warren Weckesser, Enthought, Inc. 
+from __future__ import division, print_function, absolute_import + +from scipy.lib.six.moves import xrange import numpy as np from scipy import special +from functools import reduce __all__ = ['margins', 'expected_freq', 'chi2_contingency'] @@ -52,7 +56,7 @@ def margins(a): array([[[60, 66, 72, 78]]]) """ margsums = [] - ranged = range(a.ndim) + ranged = list(range(a.ndim)) for k in ranged: marg = np.apply_over_axes(np.sum, a, [j for j in ranged if j != k]) margsums.append(marg) diff --git a/scipy/stats/distributions.py b/scipy/stats/distributions.py index ce5a2720473d..135cf6462719 100644 --- a/scipy/stats/distributions.py +++ b/scipy/stats/distributions.py @@ -4,10 +4,12 @@ # Author: Travis Oliphant 2002-2011 with contributions from # SciPy Developers 2004-2011 # +from __future__ import division, print_function, absolute_import import math import warnings -from copy import copy + +from scipy.lib.six import callable, string_types, text_type, get_method_function from scipy.misc import comb, derivative from scipy import special @@ -27,8 +29,8 @@ import numpy as np import numpy.random as mtrand from numpy import flatnonzero as nonzero -import vonmises_cython -from _tukeylambda_stats import tukeylambda_variance as _tlvar, \ +from . import vonmises_cython +from ._tukeylambda_stats import tukeylambda_variance as _tlvar, \ tukeylambda_kurtosis as _tlkurt __all__ = [ @@ -983,9 +985,9 @@ def __init__(self, momtype=1, a=None, b=None, xa=None, xb=None, if not hasattr(self,'numargs'): #allows more general subclassing with *args - cdf_signature = inspect.getargspec(self._cdf.im_func) + cdf_signature = inspect.getargspec(get_method_function(self._cdf)) numargs1 = len(cdf_signature[0]) - 2 - pdf_signature = inspect.getargspec(self._pdf.im_func) + pdf_signature = inspect.getargspec(get_method_function(self._pdf)) numargs2 = len(pdf_signature[0]) - 2 self.numargs = max(numargs1, numargs2) #nin correction @@ -1051,7 +1053,7 @@ def _construct_doc(self): self.__doc__ = doccer.docformat(self.__doc__, tempdict) def _ppf_to_solve(self, x, q,*args): - return apply(self.cdf, (x, )+args)-q + return self.cdf(*(x, )+args)-q def _ppf_single_call(self, q, *args): left = right = None @@ -1529,7 +1531,7 @@ def stats(self,*args,**kwds): args = tuple(map(asarray,args)) cond = self._argcheck(*args) & (scale > 0) & (loc==loc) - signature = inspect.getargspec(self._stats.im_func) + signature = inspect.getargspec(get_method_function(self._stats)) if (signature[2] is not None) or ('moments' in signature[0]): mu, mu2, g1, g2 = self._stats(*args,**{'moments':moments}) else: @@ -1631,7 +1633,7 @@ def moment(self, n, *args, **kwds): if (n < 0): raise ValueError("Moment must be positive.") mu, mu2, g1, g2 = None, None, None, None if (n > 0) and (n < 5): - signature = inspect.getargspec(self._stats.im_func) + signature = inspect.getargspec(get_method_function(self._stats)) if (signature[2] is not None) or ('moments' in signature[0]): mdict = {'moments':{1:'m',2:'v',3:'vs',4:'vk'}[n]} else: @@ -1687,11 +1689,11 @@ def _reduce_func(self, args, kwds): args = list(args) Nargs = len(args) fixedn = [] - index = range(Nargs) + index = list(range(Nargs)) names = ['f%d' % n for n in range(Nargs - 2)] + ['floc', 'fscale'] x0 = [] for n, key in zip(index, names): - if kwds.has_key(key): + if key in kwds: fixedn.append(n) args[n] = kwds[key] else: @@ -1771,8 +1773,8 @@ def fit(self, data, *args, **kwds): if Narg > self.numargs: raise ValueError("Too many input arguments.") start = [None]*2 - if (Narg < self.numargs) or not (kwds.has_key('loc') 
and - kwds.has_key('scale')): + if (Narg < self.numargs) or not ('loc' in kwds and + 'scale' in kwds): start = self._fitstart(data) # get distribution specific starting locations args += start[Narg:-2] loc = kwds.get('loc', start[-2]) @@ -1782,7 +1784,7 @@ def fit(self, data, *args, **kwds): optimizer = kwds.get('optimizer', optimize.fmin) # convert string to function in scipy.optimize - if not callable(optimizer) and isinstance(optimizer, (str, unicode)): + if not callable(optimizer) and isinstance(optimizer, (text_type,) + string_types): if not optimizer.startswith('fmin_'): optimizer = "fmin_"+optimizer if optimizer == 'fmin_': @@ -5408,7 +5410,7 @@ def _drv2_ppfsingle(self, q, *args): # Use basic bisection algorithm def reverse_dict(dict): newdict = {} - sorted_keys = copy(dict.keys()) + sorted_keys = list(dict.keys()) sorted_keys.sort() for key in sorted_keys[::-1]: newdict[dict[key]] = key @@ -5635,9 +5637,9 @@ def __init__(self, a=0, b=inf, name=None, badvalue=None, self, rv_discrete) self.numargs=0 else: - cdf_signature = inspect.getargspec(self._cdf.im_func) + cdf_signature = inspect.getargspec(get_method_function(self._cdf)) numargs1 = len(cdf_signature[0]) - 2 - pmf_signature = inspect.getargspec(self._pmf.im_func) + pmf_signature = inspect.getargspec(get_method_function(self._pmf)) numargs2 = len(pmf_signature[0]) - 2 self.numargs = max(numargs1, numargs2) @@ -6144,7 +6146,7 @@ def stats(self, *args, **kwds): args = tuple(map(asarray,args)) cond = self._argcheck(*args) & (loc==loc) - signature = inspect.getargspec(self._stats.im_func) + signature = inspect.getargspec(get_method_function(self._stats)) if (signature[2] is not None) or ('moments' in signature[0]): mu, mu2, g1, g2 = self._stats(*args,**{'moments':moments}) else: @@ -6239,7 +6241,7 @@ def moment(self, n, *args, **kwds): # Non-central moments in standard form. if (n < 0): raise ValueError("Moment must be positive.") mu, mu2, g1, g2 = None, None, None, None if (n > 0) and (n < 5): - signature = inspect.getargspec(self._stats.im_func) + signature = inspect.getargspec(get_method_function(self._stats)) if (signature[2] is not None) or ('moments' in signature[0]): dict = {'moments':{1:'m',2:'v',3:'vs',4:'vk'}[n]} else: @@ -6288,7 +6290,7 @@ def entropy(self, *args, **kwds): loc= kwds.get('loc') args, loc = self._fix_loc(args, loc) loc = asarray(loc) - args = map(asarray,args) + args = list(map(asarray,args)) cond0 = self._argcheck(*args) & (loc==loc) output = zeros(shape(cond0),'d') place(output,(1-cond0),self.badvalue) @@ -6406,7 +6408,7 @@ def fun(x): count += 1 if count > maxcount: # fixme: replace with proper warning - print 'sum did not converge' + print('sum did not converge') return tot/invfac diff --git a/scipy/stats/kde.py b/scipy/stats/kde.py index 47cecfa75239..2af96a8d2420 100644 --- a/scipy/stats/kde.py +++ b/scipy/stats/kde.py @@ -17,10 +17,13 @@ # #------------------------------------------------------------------------------- +from __future__ import division, print_function, absolute_import + # Standard library imports. import warnings # Scipy imports. +from scipy.lib.six import callable, string_types from scipy import linalg, special from numpy import atleast_2d, reshape, zeros, newaxis, dot, exp, pi, sqrt, \ ravel, power, atleast_1d, squeeze, sum, transpose @@ -28,8 +31,9 @@ from numpy.random import randint, multivariate_normal # Local imports. -import stats -import mvn +from . import stats +from . 
import mvn +import collections __all__ = ['gaussian_kde'] @@ -475,7 +479,7 @@ def set_bandwidth(self, bw_method=None): self.covariance_factor = self.scotts_factor elif bw_method == 'silverman': self.covariance_factor = self.silverman_factor - elif np.isscalar(bw_method) and not isinstance(bw_method, basestring): + elif np.isscalar(bw_method) and not isinstance(bw_method, string_types): self._bw_method = 'use constant' self.covariance_factor = lambda: bw_method elif callable(bw_method): diff --git a/scipy/stats/morestats.py b/scipy/stats/morestats.py index a676e8a5fe91..bf9d883e396d 100644 --- a/scipy/stats/morestats.py +++ b/scipy/stats/morestats.py @@ -2,15 +2,16 @@ # # Further updates and enhancements by many SciPy developers. # +from __future__ import division, print_function, absolute_import import math import types import warnings -import statlib -import stats -from stats import find_repeats -import distributions +from . import statlib +from . import stats +from .stats import find_repeats +from . import distributions from numpy import isscalar, r_, log, sum, around, unique, asarray from numpy import zeros, arange, sort, amin, amax, any, where, \ atleast_1d, sqrt, ceil, floor, array, poly1d, compress, not_equal, \ @@ -332,7 +333,7 @@ def probplot(x, sparams=(), dist='norm', fit=True, plot=None): sparams = () if isscalar(sparams): sparams = (sparams,) - if not isinstance(sparams, types.TupleType): + if not isinstance(sparams, tuple): sparams = tuple(sparams) """ res = inspect.getargspec(ppf_func) @@ -1265,7 +1266,7 @@ def wilcoxon(x,y=None): d = x else: x, y = map(asarray, (x, y)) - if len(x) <> len(y): + if len(x) != len(y): raise ValueError('Unequal N in wilcoxon. Aborting.') d = x-y d = compress(not_equal(d,0),d,axis=-1) # Keep all non-zero differences diff --git a/scipy/stats/mstats.py b/scipy/stats/mstats.py index 7d8c58ffa3d5..64028cfa93a6 100644 --- a/scipy/stats/mstats.py +++ b/scipy/stats/mstats.py @@ -76,6 +76,7 @@ zscore """ +from __future__ import division, print_function, absolute_import -from mstats_basic import * -from mstats_extras import * +from .mstats_basic import * +from .mstats_extras import * diff --git a/scipy/stats/mstats_basic.py b/scipy/stats/mstats_basic.py index 7fc663e9794d..d47eb6ae4ab2 100644 --- a/scipy/stats/mstats_basic.py +++ b/scipy/stats/mstats_basic.py @@ -8,6 +8,8 @@ #TODO : ttest_reel looks botched: what are x1,x2,v1,v2 for ? #TODO : reimplement ksonesamp +from __future__ import division, print_function, absolute_import + __author__ = "Pierre GF Gerard-Marchant" __docformat__ = "restructuredtext en" @@ -42,16 +44,18 @@ import numpy.ma as ma from numpy.ma import MaskedArray, masked, nomask +from scipy.lib.six import iteritems + import itertools import warnings #import scipy.stats as stats -import stats +from . import stats import scipy.special as special import scipy.misc as misc #import scipy.stats.futil as futil -import futil +from . import futil genmissingvaldoc = """ Notes @@ -419,8 +423,8 @@ def spearmanr(x, y, use_ties=True): if use_ties: xties = count_tied_groups(x) yties = count_tied_groups(y) - corr_x = np.sum(v*k*(k**2-1) for (k,v) in xties.iteritems())/12. - corr_y = np.sum(v*k*(k**2-1) for (k,v) in yties.iteritems())/12. + corr_x = np.sum(v*k*(k**2-1) for (k,v) in iteritems(xties))/12. + corr_y = np.sum(v*k*(k**2-1) for (k,v) in iteritems(yties))/12. else: corr_x = corr_y = 0 denom = n*(n**2 - 1)/6. 
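An illustrative aside, not part of the patch: the xties.iteritems() -> iteritems(xties) rewrites in these mstats_basic hunks are needed because dict.iteritems() no longer exists in Python 3. A minimal sketch of a compatibility helper in the spirit of scipy.lib.six (an assumption for illustration, not the actual scipy.lib.six source), applied to hypothetical tie-group counts:

import sys

def iteritems(d):
    # Return an iterator over (key, value) pairs on both Python 2 and Python 3.
    if sys.version_info[0] >= 3:
        return iter(d.items())
    return d.iteritems()

ties = {2: 3, 5: 1}   # hypothetical: group size -> number of tied groups
correction = sum(v * k * (k - 1) for (k, v) in iteritems(ties))
assert correction == 26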
@@ -482,8 +486,8 @@ def kendalltau(x, y, use_ties=True, use_missing=False): if use_ties: xties = count_tied_groups(x) yties = count_tied_groups(y) - corr_x = np.sum([v*k*(k-1) for (k,v) in xties.iteritems()], dtype=float) - corr_y = np.sum([v*k*(k-1) for (k,v) in yties.iteritems()], dtype=float) + corr_x = np.sum([v*k*(k-1) for (k,v) in iteritems(xties)], dtype=float) + corr_y = np.sum([v*k*(k-1) for (k,v) in iteritems(yties)], dtype=float) denom = ma.sqrt((n*(n-1)-corr_x)/2. * (n*(n-1)-corr_y)/2.) else: denom = n*(n-1)/2. @@ -491,15 +495,15 @@ def kendalltau(x, y, use_ties=True, use_missing=False): # var_s = n*(n-1)*(2*n+5) if use_ties: - var_s -= np.sum(v*k*(k-1)*(2*k+5)*1. for (k,v) in xties.iteritems()) - var_s -= np.sum(v*k*(k-1)*(2*k+5)*1. for (k,v) in yties.iteritems()) - v1 = np.sum([v*k*(k-1) for (k, v) in xties.iteritems()], dtype=float) *\ - np.sum([v*k*(k-1) for (k, v) in yties.iteritems()], dtype=float) + var_s -= np.sum(v*k*(k-1)*(2*k+5)*1. for (k,v) in iteritems(xties)) + var_s -= np.sum(v*k*(k-1)*(2*k+5)*1. for (k,v) in iteritems(yties)) + v1 = np.sum([v*k*(k-1) for (k, v) in iteritems(xties)], dtype=float) *\ + np.sum([v*k*(k-1) for (k, v) in iteritems(yties)], dtype=float) v1 /= 2.*n*(n-1) if n > 2: - v2 = np.sum([v*k*(k-1)*(k-2) for (k,v) in xties.iteritems()], + v2 = np.sum([v*k*(k-1)*(k-2) for (k,v) in iteritems(xties)], dtype=float) * \ - np.sum([v*k*(k-1)*(k-2) for (k,v) in yties.iteritems()], + np.sum([v*k*(k-1)*(k-2) for (k,v) in iteritems(yties)], dtype=float) v2 /= 9.*n*(n-1)*(n-2) else: @@ -531,7 +535,7 @@ def kendalltau_seasonal(x): # n_tot = x.count() ties = count_tied_groups(x.compressed()) - corr_ties = np.sum(v*k*(k-1) for (k,v) in ties.iteritems()) + corr_ties = np.sum(v*k*(k-1) for (k,v) in iteritems(ties)) denom_tot = ma.sqrt(1.*n_tot*(n_tot-1)*(n_tot*(n_tot-1)-corr_ties))/2. # R = rankdata(x, axis=0, use_missing=True) @@ -541,7 +545,7 @@ def kendalltau_seasonal(x): denom_szn = ma.empty(m, dtype=float) for j in range(m): ties_j = count_tied_groups(x[:,j].compressed()) - corr_j = np.sum(v*k*(k-1) for (k,v) in ties_j.iteritems()) + corr_j = np.sum(v*k*(k-1) for (k,v) in iteritems(ties_j)) cmb = n_p[j]*(n_p[j]-1) for k in range(j,m,1): K[j,k] = np.sum(msign((x[i:,j]-x[i,j])*(x[i:,k]-x[i,k])).sum() @@ -698,8 +702,8 @@ def theilslopes(y, x=None, alpha=0.05): (xties, yties) = (count_tied_groups(x), count_tied_groups(y)) nt = ny*(ny-1)/2. sigsq = (ny*(ny-1)*(2*ny+5)/18.) - sigsq -= np.sum(v*k*(k-1)*(2*k+5) for (k,v) in xties.iteritems()) - sigsq -= np.sum(v*k*(k-1)*(2*k+5) for (k,v) in yties.iteritems()) + sigsq -= np.sum(v*k*(k-1)*(2*k+5) for (k,v) in iteritems(xties)) + sigsq -= np.sum(v*k*(k-1)*(2*k+5) for (k,v) in iteritems(yties)) sigma = np.sqrt(sigsq) Ru = min(np.round((nt - z*sigma)/2. + 1), len(slopes)-1) @@ -811,7 +815,7 @@ def mannwhitneyu(x,y, use_continuity=True): mu = (nx*ny)/2. sigsq = (nt**3 - nt)/12. ties = count_tied_groups(ranks) - sigsq -= np.sum(v*(k**3-k) for (k,v) in ties.iteritems())/12. + sigsq -= np.sum(v*(k**3-k) for (k,v) in iteritems(ties))/12. sigsq *= nx*ny/float(nt*(nt-1)) # if use_continuity: @@ -833,7 +837,7 @@ def kruskalwallis(*args): H = 12./(ntot*(ntot+1)) * (sumrk**2/ngrp).sum() - 3*(ntot+1) # Tie correction ties = count_tied_groups(ranks) - T = 1. - np.sum(v*(k**3-k) for (k,v) in ties.iteritems())/float(ntot**3-ntot) + T = 1. 
- np.sum(v*(k**3-k) for (k,v) in iteritems(ties))/float(ntot**3-ntot) if T == 0: raise ValueError('All numbers are identical in kruskal') H /= T diff --git a/scipy/stats/mstats_extras.py b/scipy/stats/mstats_extras.py index 3c8d259221e7..47daa1b6ebe1 100644 --- a/scipy/stats/mstats_extras.py +++ b/scipy/stats/mstats_extras.py @@ -6,6 +6,8 @@ :date: $Date: 2007-10-29 17:18:13 +0200 (Mon, 29 Oct 2007) $ :version: $Id: morestats.py 3473 2007-10-29 15:18:13Z jarrod.millman $ """ +from __future__ import division, print_function, absolute_import + __author__ = "Pierre GF Gerard-Marchant" __docformat__ = "restructuredtext en" @@ -23,7 +25,7 @@ import numpy.ma as ma from numpy.ma import MaskedArray -import mstats_basic as mstats +from . import mstats_basic as mstats from scipy.stats.distributions import norm, beta, t, binom @@ -150,8 +152,8 @@ def _hdsd_1D(data,prob): for (i,p) in enumerate(prob): _w = betacdf(vv, (n+1)*p, (n+1)*(1-p)) w = _w[1:] - _w[:-1] - mx_ = np.fromiter([np.dot(w,xsorted[np.r_[range(0,k), - range(k+1,n)].astype(int_)]) + mx_ = np.fromiter([np.dot(w,xsorted[np.r_[list(range(0,k)), + list(range(k+1,n))].astype(int_)]) for k in range(n)], dtype=float_) mx_var = np.array(mx_.var(), copy=False, ndmin=1) * n / float(n-1) hdsd[i] = float(n-1) * np.sqrt(np.diag(mx_var).diagonal() / float(n)) diff --git a/scipy/stats/rv.py b/scipy/stats/rv.py index 98c8a6842adc..592991ef38e3 100644 --- a/scipy/stats/rv.py +++ b/scipy/stats/rv.py @@ -1,3 +1,4 @@ +from __future__ import division, print_function, absolute_import from numpy import vectorize from numpy.random import random_sample @@ -22,7 +23,7 @@ def randwppf(ppf, args=(), size=None): """ U = random_sample(size=size) - return apply(ppf, (U,)+args) + return ppf(*(U,)+args) def randwcdf(cdf, mean=1.0, args=(), size=None): """returns an array of randomly distributed integers of a distribution @@ -43,4 +44,4 @@ def _ppf(q, *nargs): _vppf = vectorize(_ppf) U = random_sample(size=size) - return apply(_vppf,(U,)+args) + return _vppf(*(U,)+args) diff --git a/scipy/stats/setup.py b/scipy/stats/setup.py index 1ada8bec7b0f..45b090837033 100755 --- a/scipy/stats/setup.py +++ b/scipy/stats/setup.py @@ -1,4 +1,5 @@ #!/usr/bin/env python +from __future__ import division, print_function, absolute_import from os.path import join diff --git a/scipy/stats/setupscons.py b/scipy/stats/setupscons.py index 91ca83c11e60..00194175b533 100755 --- a/scipy/stats/setupscons.py +++ b/scipy/stats/setupscons.py @@ -1,4 +1,5 @@ #!/usr/bin/env python +from __future__ import division, print_function, absolute_import from os.path import join diff --git a/scipy/stats/stats.py b/scipy/stats/stats.py index 045f6b721f10..48b5415cab8a 100644 --- a/scipy/stats/stats.py +++ b/scipy/stats/stats.py @@ -181,26 +181,31 @@ ## changed name of skewness and askewness to skew and askew ## fixed (a)histogram (which sometimes counted points n: + if len(args[i]) != n: raise ValueError('Unequal N in friedmanchisquare. 
Aborting.') # Rank data - data = apply(_support.abut,args) + data = _support.abut(*args) data = data.astype(float) for i in range(len(data)): data[i] = rankdata(data[i]) diff --git a/scipy/stats/tests/test_binned_statistic.py b/scipy/stats/tests/test_binned_statistic.py index 7040a4e0e89f..bbb796c3b1f4 100644 --- a/scipy/stats/tests/test_binned_statistic.py +++ b/scipy/stats/tests/test_binned_statistic.py @@ -1,3 +1,5 @@ +from __future__ import division, print_function, absolute_import + import numpy as np from numpy.testing import assert_array_almost_equal from scipy.stats import \ diff --git a/scipy/stats/tests/test_contingency.py b/scipy/stats/tests/test_contingency.py index 94942d0645ab..8f13592b9991 100644 --- a/scipy/stats/tests/test_contingency.py +++ b/scipy/stats/tests/test_contingency.py @@ -1,3 +1,4 @@ +from __future__ import division, print_function, absolute_import import numpy as np from numpy.testing import run_module_suite, assert_equal, assert_array_equal, \ diff --git a/scipy/stats/tests/test_continuous_basic.py b/scipy/stats/tests/test_continuous_basic.py index c2bf5b899c21..d1c0b7c965c9 100644 --- a/scipy/stats/tests/test_continuous_basic.py +++ b/scipy/stats/tests/test_continuous_basic.py @@ -1,3 +1,5 @@ +from __future__ import division, print_function, absolute_import + import warnings import numpy.testing as npt diff --git a/scipy/stats/tests/test_continuous_extra.py b/scipy/stats/tests/test_continuous_extra.py index 391f7653c56f..4049c8bec4bb 100644 --- a/scipy/stats/tests/test_continuous_extra.py +++ b/scipy/stats/tests/test_continuous_extra.py @@ -4,6 +4,7 @@ # run it, # 6 distributions return nan for entropy # truncnorm fails by design for private method _ppf test +from __future__ import division, print_function, absolute_import import numpy.testing as npt diff --git a/scipy/stats/tests/test_discrete_basic.py b/scipy/stats/tests/test_discrete_basic.py index 66b4b2b3d043..d47d93ae4a9c 100644 --- a/scipy/stats/tests/test_discrete_basic.py +++ b/scipy/stats/tests/test_discrete_basic.py @@ -1,6 +1,9 @@ +from __future__ import division, print_function, absolute_import + import numpy.testing as npt import numpy as np import nose +from scipy.lib.six.moves import xrange from scipy import stats @@ -134,7 +137,7 @@ def check_ppf_ppf(distfn, arg): def check_pmf_cdf(distfn, arg, msg): startind = np.int(distfn._ppf(0.01,*arg)-1) - index = range(startind,startind+10) + index = list(range(startind,startind+10)) cdfs = distfn.cdf(index,*arg) npt.assert_almost_equal(cdfs, distfn.pmf(index, *arg).cumsum() + \ cdfs[0] - distfn.pmf(index[0],*arg), diff --git a/scipy/stats/tests/test_distributions.py b/scipy/stats/tests/test_distributions.py index f8cb0dfeb30b..a4c44667d4a4 100644 --- a/scipy/stats/tests/test_distributions.py +++ b/scipy/stats/tests/test_distributions.py @@ -1,6 +1,7 @@ """ Test functions for stats module """ +from __future__ import division, print_function, absolute_import from numpy.testing import TestCase, run_module_suite, assert_equal, \ assert_array_equal, assert_almost_equal, assert_array_almost_equal, \ @@ -106,7 +107,7 @@ def test_rvs(self): assert_(vals.dtype.char in typecodes['AllInteger']) val = stats.randint.rvs(15,46) assert_((val >= 15) & (val < 46)) - assert_(isinstance(val, numpy.ScalarType), msg=`type(val)`) + assert_(isinstance(val, numpy.ScalarType), msg=repr(type(val))) val = stats.randint(15,46).rvs(3) assert_(val.dtype.char in typecodes['AllInteger']) @@ -921,7 +922,7 @@ def test_norm_logcdf(): This precision was enhanced in ticket 1614. 
""" - x = -np.asarray(range(0, 120, 4)) + x = -np.asarray(list(range(0, 120, 4))) # Values from R expected = [-0.69314718, -10.36010149, -35.01343716, -75.41067300, -131.69539607, -203.91715537, -292.09872100, -396.25241451, diff --git a/scipy/stats/tests/test_fit.py b/scipy/stats/tests/test_fit.py index 2f7e8acb6da4..13c15bfa324d 100644 --- a/scipy/stats/tests/test_fit.py +++ b/scipy/stats/tests/test_fit.py @@ -5,7 +5,7 @@ # with current parameters: relatively small sample size, default starting values # Ran 84 tests in 401.797s # FAILED (failures=15) - +from __future__ import division, print_function, absolute_import import numpy.testing as npt import numpy as np diff --git a/scipy/stats/tests/test_kdeoth.py b/scipy/stats/tests/test_kdeoth.py index 13fbf58c9b84..eba92c9835db 100644 --- a/scipy/stats/tests/test_kdeoth.py +++ b/scipy/stats/tests/test_kdeoth.py @@ -1,3 +1,5 @@ +from __future__ import division, print_function, absolute_import + from scipy import stats import numpy as np from numpy.testing import assert_almost_equal, assert_, assert_raises, \ diff --git a/scipy/stats/tests/test_morestats.py b/scipy/stats/tests/test_morestats.py index 62ba93dd6f6a..be04997adf7a 100644 --- a/scipy/stats/tests/test_morestats.py +++ b/scipy/stats/tests/test_morestats.py @@ -2,6 +2,7 @@ # # Further enhancements and tests added by numerous SciPy developers. # +from __future__ import division, print_function, absolute_import import warnings diff --git a/scipy/stats/tests/test_mstats_basic.py b/scipy/stats/tests/test_mstats_basic.py index eb6432287707..cb24c8f35b46 100644 --- a/scipy/stats/tests/test_mstats_basic.py +++ b/scipy/stats/tests/test_mstats_basic.py @@ -1,6 +1,7 @@ """ Tests for the stats.mstats module (support for maskd arrays) """ +from __future__ import division, print_function, absolute_import import numpy as np diff --git a/scipy/stats/tests/test_mstats_extras.py b/scipy/stats/tests/test_mstats_extras.py index 2fe5d7235864..8aed03c0cb6a 100644 --- a/scipy/stats/tests/test_mstats_extras.py +++ b/scipy/stats/tests/test_mstats_extras.py @@ -4,6 +4,8 @@ :author: Pierre Gerard-Marchant :contact: pierregm_at_uga_dot_edu """ +from __future__ import division, print_function, absolute_import + __author__ = "Pierre GF Gerard-Marchant ($Author: backtopop $)" import numpy as np diff --git a/scipy/stats/tests/test_rank.py b/scipy/stats/tests/test_rank.py index 8b0b65b493cc..2e8362dc8123 100644 --- a/scipy/stats/tests/test_rank.py +++ b/scipy/stats/tests/test_rank.py @@ -1,3 +1,4 @@ +from __future__ import division, print_function, absolute_import import numpy as np from numpy.testing import TestCase, run_module_suite, assert_equal, \ diff --git a/scipy/stats/tests/test_stats.py b/scipy/stats/tests/test_stats.py index b63617698f00..6adf29e2641e 100644 --- a/scipy/stats/tests/test_stats.py +++ b/scipy/stats/tests/test_stats.py @@ -6,6 +6,7 @@ Additional tests by a host of SciPy developers. 
""" +from __future__ import division, print_function, absolute_import import sys @@ -1015,19 +1016,19 @@ def test_fraction(self): scoreatperc = stats.scoreatpercentile # Test defaults - assert_equal(scoreatperc(range(10), 50), 4.5) - assert_equal(scoreatperc(range(10), 50, (2,7)), 4.5) - assert_equal(scoreatperc(range(100), 50, limit=(1, 8)), 4.5) + assert_equal(scoreatperc(list(range(10)), 50), 4.5) + assert_equal(scoreatperc(list(range(10)), 50, (2,7)), 4.5) + assert_equal(scoreatperc(list(range(100)), 50, limit=(1, 8)), 4.5) assert_equal(scoreatperc(np.array([1, 10 ,100]), 50, (10,100)), 55) assert_equal(scoreatperc(np.array([1, 10 ,100]), 50, (1,10)), 5.5) # explicitly specify interpolation_method 'fraction' (the default) - assert_equal(scoreatperc(range(10), 50, interpolation_method='fraction'), + assert_equal(scoreatperc(list(range(10)), 50, interpolation_method='fraction'), 4.5) - assert_equal(scoreatperc(range(10), 50, limit=(2, 7), + assert_equal(scoreatperc(list(range(10)), 50, limit=(2, 7), interpolation_method='fraction'), 4.5) - assert_equal(scoreatperc(range(100), 50, limit=(1, 8), + assert_equal(scoreatperc(list(range(100)), 50, limit=(1, 8), interpolation_method='fraction'), 4.5) assert_equal(scoreatperc(np.array([1, 10 ,100]), 50, (10, 100), @@ -1041,17 +1042,17 @@ def test_lower_higher(self): scoreatperc = stats.scoreatpercentile # interpolation_method 'lower'/'higher' - assert_equal(scoreatperc(range(10), 50, + assert_equal(scoreatperc(list(range(10)), 50, interpolation_method='lower'), 4) - assert_equal(scoreatperc(range(10), 50, + assert_equal(scoreatperc(list(range(10)), 50, interpolation_method='higher'), 5) - assert_equal(scoreatperc(range(10), 50, (2,7), + assert_equal(scoreatperc(list(range(10)), 50, (2,7), interpolation_method='lower'), 4) - assert_equal(scoreatperc(range(10), 50, limit=(2,7), + assert_equal(scoreatperc(list(range(10)), 50, limit=(2,7), interpolation_method='higher'), 5) - assert_equal(scoreatperc(range(100), 50, (1,8), + assert_equal(scoreatperc(list(range(100)), 50, (1,8), interpolation_method='lower'), 4) - assert_equal(scoreatperc(range(100), 50, (1,8), + assert_equal(scoreatperc(list(range(100)), 50, (1,8), interpolation_method='higher'), 5) assert_equal(scoreatperc(np.array([1, 10 ,100]), 50, (10,100), interpolation_method='lower'), 10) diff --git a/scipy/stats/tests/test_tukeylambda_stats.py b/scipy/stats/tests/test_tukeylambda_stats.py index 282f336dfe48..a76dbd0e46d5 100644 --- a/scipy/stats/tests/test_tukeylambda_stats.py +++ b/scipy/stats/tests/test_tukeylambda_stats.py @@ -1,3 +1,4 @@ +from __future__ import division, print_function, absolute_import import numpy as np from numpy.testing import assert_allclose, assert_equal diff --git a/scipy/stats/vonmises.py b/scipy/stats/vonmises.py index 555062bff41c..1e344825c7a0 100644 --- a/scipy/stats/vonmises.py +++ b/scipy/stats/vonmises.py @@ -1,3 +1,5 @@ +from __future__ import division, print_function, absolute_import + import numpy as np import scipy.stats from scipy.special import i0 diff --git a/setup.py b/setup.py index 829b3726b77e..bdb733b9a5de 100755 --- a/setup.py +++ b/setup.py @@ -144,64 +144,21 @@ def configuration(parent_package='',top_path=None): def setup_package(): from numpy.distutils.core import setup - old_path = os.getcwd() - local_path = os.path.dirname(os.path.abspath(sys.argv[0])) - src_path = local_path - if sys.version_info[0] == 3: - src_path = os.path.join(local_path, 'build', 'py3k') - sys.path.insert(0, os.path.join(local_path, 'tools')) - import py3tool - 
print("Converting to Python3 via 2to3...") - py3tool.sync_2to3('scipy', os.path.join(src_path, 'scipy')) - - site_cfg = os.path.join(local_path, 'site.cfg') - if os.path.isfile(site_cfg): - shutil.copy(site_cfg, src_path) - - # Ugly hack to make pip work with Python 3 - # Explanation: pip messes with __file__ which interacts badly with the - # change in directory due to the 2to3 conversion. Therefore we restore - # __file__ to what it would have been otherwise. - - global __file__ - __file__ = os.path.join(os.curdir, os.path.basename(__file__)) - if '--egg-base' in sys.argv: - # Change pip-egg-info entry to absolute path, so pip can find it - # after changing directory. - idx = sys.argv.index('--egg-base') - if sys.argv[idx + 1] == 'pip-egg-info': - sys.argv[idx + 1] = os.path.join(local_path, 'pip-egg-info') - - os.chdir(local_path) - sys.path.insert(0, local_path) - sys.path.insert(0, os.path.join(local_path, 'scipy')) # to retrieve version - - # Run build - old_path = os.getcwd() - os.chdir(src_path) - sys.path.insert(0, src_path) - # Rewrite the version file everytime write_version_py() - try: - setup( - name = 'scipy', - maintainer = "SciPy Developers", - maintainer_email = "scipy-dev@scipy.org", - description = DOCLINES[0], - long_description = "\n".join(DOCLINES[2:]), - url = "http://www.scipy.org", - download_url = "http://sourceforge.net/project/showfiles.php?group_id=27747&package_id=19531", - license = 'BSD', - classifiers=[_f for _f in CLASSIFIERS.split('\n') if _f], - platforms = ["Windows", "Linux", "Solaris", "Mac OS-X", "Unix"], - configuration=configuration ) - finally: - del sys.path[0] - os.chdir(old_path) - - return + setup( + name = 'scipy', + maintainer = "SciPy Developers", + maintainer_email = "scipy-dev@scipy.org", + description = DOCLINES[0], + long_description = "\n".join(DOCLINES[2:]), + url = "http://www.scipy.org", + download_url = "http://sourceforge.net/project/showfiles.php?group_id=27747&package_id=19531", + license = 'BSD', + classifiers=[_f for _f in CLASSIFIERS.split('\n') if _f], + platforms = ["Windows", "Linux", "Solaris", "Mac OS-X", "Unix"], + configuration=configuration) if __name__ == '__main__': diff --git a/setupegg.py b/setupegg.py index 31e8b3aed593..92dc50006310 100755 --- a/setupegg.py +++ b/setupegg.py @@ -4,4 +4,4 @@ """ from setuptools import setup -execfile('setup.py') +exec(compile(open('setup.py').read(), 'setup.py', 'exec')) diff --git a/setupeggscons.py b/setupeggscons.py index 2baae18674c9..8e9908952abe 100755 --- a/setupeggscons.py +++ b/setupeggscons.py @@ -4,4 +4,4 @@ """ from setuptools import setup -execfile('setupscons.py') +exec(compile(open('setupscons.py').read(), 'setupscons.py', 'exec')) diff --git a/setupscons.py b/setupscons.py index 35c230abf14a..eae0e70c781f 100755 --- a/setupscons.py +++ b/setupscons.py @@ -97,7 +97,7 @@ def setup_package(): url = "http://www.scipy.org", download_url = "http://sourceforge.net/project/showfiles.php?group_id=27747&package_id=19531", license = 'BSD', - classifiers=filter(None, CLASSIFIERS.split('\n')), + classifiers=[_f for _f in CLASSIFIERS.split('\n') if _f], platforms = ["Windows", "Linux", "Solaris", "Mac OS-X", "Unix"], configuration=configuration ) finally: diff --git a/setupsconsegg.py b/setupsconsegg.py index 2baae18674c9..8e9908952abe 100755 --- a/setupsconsegg.py +++ b/setupsconsegg.py @@ -4,4 +4,4 @@ """ from setuptools import setup -execfile('setupscons.py') +exec(compile(open('setupscons.py').read(), 'setupscons.py', 'exec')) diff --git a/tools/authors.py 
b/tools/authors.py index bc07b5517ac2..7e65a8fef942 100644 --- a/tools/authors.py +++ b/tools/authors.py @@ -8,47 +8,61 @@ """ # Author: Pauli Virtanen . This script is in the public domain. +from __future__ import division, print_function, absolute_import + from subprocess import Popen, PIPE, call import tempfile import optparse import re +import sys import os import subprocess +try: + from scipy.lib.six import u, PY3 +except ImportError: + sys.path.insert(0, os.path.join(os.path.dirname(__file__), + os.pardir, 'scipy', 'lib')) + from six import u, PY3 +if PY3: + stdout_b = sys.stdout.buffer +else: + stdout_b = sys.stdout + NAME_MAP = { - u'87': u'Han Genuit', - u'aarchiba': u'Anne Archibald', - u'ArmstrongJ': u'Jeff Armstrong', - u'cgholke': u'Christoph Gohlke', - u'cgohlke': u'Christoph Gohlke', - u'chris.burns': u'Chris Burns', - u'Christolph Gohlke': u'Christoph Gohlke', - u'ckuster': u'Christopher Kuster', - u'Collin Stocks': u'Collin RM Stocks', - u'Derek Homeir': u'Derek Homeier', - u'Derek Homier': u'Derek Homeier', - u'dhuard': u'David Huard', - u'dsimcha': u'David Simcha', - u'edschofield': u'Ed Schofield', - u'Gael varoquaux': u'Gaël Varoquaux', - u'gotgenes': u'Chris Lasher', - u'Han': u'Han Genuit', - u'Jake Vanderplas': u'Jacob Vanderplas', - u'josef': u'Josef Perktold', - u'josef-pktd': u'Josef Perktold', - u'Mark': u'Mark Wiebe', - u'mdroe': u'Michael Droettboom', - u'pierregm': u'Pierre GM', - u'rgommers': u'Ralf Gommers', - u'sebhaase': u'Sebastian Haase', - u'Travis E. Oliphant': u'Travis Oliphant', - u'warren.weckesser': u'Warren Weckesser', - u'weathergod': u'Benjamin Root', - u'Andreas H': u'Andreas Hilboll', - u'honnorat': u'Marc Honnorat', - u'lmwang': u'Liming Wang', - u'wa03': u'Josh Lawrence', - u'loluengo': u'Lorenzo Luengo', + u('87'): u('Han Genuit'), + u('aarchiba'): u('Anne Archibald'), + u('ArmstrongJ'): u('Jeff Armstrong'), + u('cgholke'): u('Christoph Gohlke'), + u('cgohlke'): u('Christoph Gohlke'), + u('chris.burns'): u('Chris Burns'), + u('Christolph Gohlke'): u('Christoph Gohlke'), + u('ckuster'): u('Christopher Kuster'), + u('Collin Stocks'): u('Collin RM Stocks'), + u('Derek Homeir'): u('Derek Homeier'), + u('Derek Homier'): u('Derek Homeier'), + u('dhuard'): u('David Huard'), + u('dsimcha'): u('David Simcha'), + u('edschofield'): u('Ed Schofield'), + u('Gael varoquaux'): u('Gaël Varoquaux'), + u('gotgenes'): u('Chris Lasher'), + u('Han'): u('Han Genuit'), + u('Jake Vanderplas'): u('Jacob Vanderplas'), + u('josef'): u('Josef Perktold'), + u('josef-pktd'): u('Josef Perktold'), + u('Mark'): u('Mark Wiebe'), + u('mdroe'): u('Michael Droettboom'), + u('pierregm'): u('Pierre GM'), + u('rgommers'): u('Ralf Gommers'), + u('sebhaase'): u('Sebastian Haase'), + u('Travis E. 
Oliphant'): u('Travis Oliphant'), + u('warren.weckesser'): u('Warren Weckesser'), + u('weathergod'): u('Benjamin Root'), + u('Andreas H'): u('Andreas Hilboll'), + u('honnorat'): u('Marc Honnorat'), + u('lmwang'): u('Liming Wang'), + u('wa03'): u('Josh Lawrence'), + u('loluengo'): u('Lorenzo Luengo'), } def main(): @@ -73,29 +87,29 @@ def analyze_line(line, names, disp=False): line = line.strip().decode('utf-8') # Check the commit author name - m = re.match('^@@@([^@]*)@@@', line) + m = re.match(u('^@@@([^@]*)@@@'), line) if m: name = m.group(1) line = line[m.end():] name = NAME_MAP.get(name, name) if disp: if name not in names: - print " - Author:", name + stdout_b.write((" - Author: %s\n" % name).encode('utf-8')) names.add(name) # Look for "thanks to" messages in the commit log - m = re.search(ur'([Tt]hanks to|[Cc]ourtesy of) ([A-Z][A-Za-z]*? [A-Z][A-Za-z]*? [A-Z][A-Za-z]*|[A-Z][A-Za-z]*? [A-Z]\. [A-Z][A-Za-z]*|[A-Z][A-Za-z ]*? [A-Z][A-Za-z]*|[a-z0-9]+)($|\.| )', line) + m = re.search(u(r'([Tt]hanks to|[Cc]ourtesy of) ([A-Z][A-Za-z]*? [A-Z][A-Za-z]*? [A-Z][A-Za-z]*|[A-Z][A-Za-z]*? [A-Z]\. [A-Z][A-Za-z]*|[A-Z][A-Za-z ]*? [A-Z][A-Za-z]*|[a-z0-9]+)($|\.| )'), line) if m: name = m.group(2) - if name not in ('this',): + if name not in (u('this'),): if disp: - print " - Log :", line.strip() + stdout_b.write(" - Log : %s\n" % line.strip().encode('utf-8')) name = NAME_MAP.get(name, name) names.add(name) line = line[m.end():].strip() - line = re.sub(ur'^(and|, and|, ) ', u'Thanks to ', line) - analyze_line(line, names) + line = re.sub(u(r'^(and|, and|, ) '), u('Thanks to '), line) + analyze_line(line.encode('utf-8'), names) # Find all authors before the named range for line in git.pipe('log', '--pretty=@@@%an@@@%n@@@%cn@@@%n%b', @@ -109,18 +123,18 @@ def analyze_line(line, names, disp=False): # Sort def name_key(fullname): - m = re.search(u' [a-z ]*[A-Za-z-]+$', fullname) + m = re.search(u(' [a-z ]*[A-Za-z-]+$'), fullname) if m: forename = fullname[:m.start()].strip() surname = fullname[m.start():].strip() else: - forename = u"" + forename = "" surname = fullname.strip() - if surname.startswith('van der '): + if surname.startswith(u('van der ')): surname = surname[8:] - if surname.startswith('de '): + if surname.startswith(u('de ')): surname = surname[3:] - if surname.startswith('von '): + if surname.startswith(u('von ')): surname = surname[4:] return (surname.lower(), forename.lower()) @@ -128,27 +142,29 @@ def name_key(fullname): authors.sort(key=name_key) # Print - print """ + stdout_b.write(b""" Authors ======= This release contains work by the following people (contributed at least one patch to this release, names in alphabetical order): -""" + +""") for author in authors: if author in all_authors: - print (u"* %s" % author).encode('utf-8') + stdout_b.write(("* %s\n" % author).encode('utf-8')) else: - print (u"* %s +" % author).encode('utf-8') + stdout_b.write(("* %s +\n" % author).encode('utf-8')) - print """ + stdout_b.write((""" A total of %(count)d people contributed to this release. People with a "+" by their names contributed a patch for the first time. -""" % dict(count=len(authors)) - print ("\nNOTE: Check this list manually! It is automatically generated " - "and some names\n may be missing.") +""" % dict(count=len(authors))).encode('utf-8')) + + stdout_b.write(("\nNOTE: Check this list manually! 
It is automatically generated " + "and some names\n may be missing.\n").encode('utf-8')) #------------------------------------------------------------------------------ # Communicating with Git diff --git a/tools/py3tool.py b/tools/py3tool.py deleted file mode 100755 index a56ede0c7caf..000000000000 --- a/tools/py3tool.py +++ /dev/null @@ -1,351 +0,0 @@ -#!/usr/bin/env python3 -# -*- python -*- -""" -%prog SUBMODULE... - -Hack to pipe submodules of Numpy through 2to3 and build them in-place -one-by-one. - -Example usage: - - python3 tools/py3tool.py testing distutils core - -This will copy files to _py3k/numpy, add a dummy __init__.py and -version.py on the top level, and copy and 2to3 the files of the three -submodules. - -When running py3tool again, only changed files are re-processed, which -makes the test-bugfix cycle faster. - -""" -from optparse import OptionParser -import shutil -import os -import sys -import re -import subprocess -import fnmatch - -if os.environ.get('USE_2TO3CACHE'): - import lib2to3cache - -BASE = os.path.normpath(os.path.join(os.path.dirname(__file__), '..')) -TEMP = os.path.normpath(os.path.join(BASE, '_py3k')) - -SCRIPT_2TO3 = os.path.join(BASE, 'tools', '2to3.py') - -EXTRA_2TO3_FLAGS = { - '*/setup.py': '-x import', - #'numpy/core/code_generators/generate_umath.py': '-x import', - #'numpy/core/code_generators/generate_numpy_api.py': '-x import', - #'numpy/core/code_generators/generate_ufunc_api.py': '-x import', - #'numpy/core/defchararray.py': '-x unicode', - #'numpy/compat/py3k.py': '-x unicode', - #'numpy/ma/timer_comparison.py': 'skip', - #'numpy/distutils/system_info.py': '-x reduce', - #'numpy/f2py/auxfuncs.py': '-x reduce', - #'numpy/lib/arrayterator.py': '-x reduce', - #'numpy/lib/tests/test_arrayterator.py': '-x reduce', - #'numpy/ma/core.py': '-x reduce', - #'numpy/ma/tests/test_core.py': '-x reduce', - #'numpy/ma/tests/test_old_ma.py': '-x reduce', - #'numpy/ma/timer_comparison.py': '-x reduce', - #'numpy/oldnumeric/ma.py': '-x reduce', -} - -def main(): - p = OptionParser(usage=__doc__.strip()) - p.add_option("--clean", "-c", action="store_true", - help="clean source directory") - options, args = p.parse_args() - - if not args: - p.error('no submodules given') - else: - dirs = ['scipy/%s' % x for x in map(os.path.basename, args)] - - # Prepare - if not os.path.isdir(TEMP): - os.makedirs(TEMP) - - # Set up dummy files (for building only submodules) - dummy_files = { - '__init__.py': 'from scipy.version import version as __version__', - 'version.py': 'version = "0.8.0.dev"' - } - - for fn, content in dummy_files.items(): - fn = os.path.join(TEMP, 'scipy', fn) - if not os.path.isfile(fn): - try: - os.makedirs(os.path.dirname(fn)) - except OSError: - pass - f = open(fn, 'wb+') - f.write(content.encode('ascii')) - f.close() - - # Environment - pp = [os.path.abspath(TEMP)] - def getenv(): - env = dict(os.environ) - env.update({'PYTHONPATH': ':'.join(pp)}) - return env - - # Copy - for d in dirs: - src = os.path.join(BASE, d) - dst = os.path.join(TEMP, d) - - # Run 2to3 - sync_2to3(dst=dst, - src=src, - patchfile=os.path.join(TEMP, os.path.basename(d) + '.patch'), - clean=options.clean) - - # Run setup.py, falling back to Pdb post-mortem on exceptions - setup_py = os.path.join(dst, 'setup.py') - if os.path.isfile(setup_py): - code = """\ -import pdb, sys, traceback -p = pdb.Pdb() -try: - import __main__ - __main__.__dict__.update({ - "__name__": "__main__", "__file__": "setup.py", - "__builtins__": __builtins__}) - fp = open("setup.py", "rb") - try: - 
exec(compile(fp.read(), "setup.py", 'exec')) - finally: - fp.close() -except SystemExit: - raise -except: - traceback.print_exc() - t = sys.exc_info()[2] - p.interaction(None, t) -""" - ret = subprocess.call([sys.executable, '-c', code, - 'build_ext', '-i'], - cwd=dst, - env=getenv()) - if ret != 0: - raise RuntimeError("Build failed.") - - # Run nosetests - subprocess.call(['nosetests3', '-v', d], cwd=TEMP) - -def custom_mangling(filename): - import_mangling = [ - os.path.join('cluster', '__init__.py'), - os.path.join('cluster', 'hierarchy.py'), - os.path.join('cluster', 'vq.py'), - os.path.join('fftpack', 'basic.py'), - os.path.join('fftpack', 'pseudo_diffs.py'), - os.path.join('integrate', 'odepack.py'), - os.path.join('integrate', 'quadpack.py'), - os.path.join('integrate', '_ode.py'), - os.path.join('interpolate', 'fitpack.py'), - os.path.join('interpolate', 'fitpack2.py'), - os.path.join('interpolate', 'interpolate.py'), - os.path.join('interpolate', 'interpolate_wrapper.py'), - os.path.join('interpolate', 'ndgriddata.py'), - os.path.join('io', 'array_import.py'), - os.path.join('io', '__init__.py'), - os.path.join('io', 'matlab', 'miobase.py'), - os.path.join('io', 'matlab', 'mio4.py'), - os.path.join('io', 'matlab', 'mio5.py'), - os.path.join('io', 'matlab', 'mio5_params.py'), - os.path.join('linalg', 'basic.py'), - os.path.join('linalg', 'decomp.py'), - os.path.join('linalg', 'lapack.py'), - os.path.join('linalg', 'flinalg.py'), - os.path.join('linalg', 'iterative.py'), - os.path.join('linalg', 'misc.py'), - os.path.join('lib', 'blas', '__init__.py'), - os.path.join('lib', 'lapack', '__init__.py'), - os.path.join('ndimage', 'filters.py'), - os.path.join('ndimage', 'fourier.py'), - os.path.join('ndimage', 'interpolation.py'), - os.path.join('ndimage', 'measurements.py'), - os.path.join('ndimage', 'morphology.py'), - os.path.join('optimize', 'minpack.py'), - os.path.join('optimize', 'zeros.py'), - os.path.join('optimize', 'lbfgsb.py'), - os.path.join('optimize', 'cobyla.py'), - os.path.join('optimize', 'slsqp.py'), - os.path.join('optimize', 'nnls.py'), - os.path.join('signal', '__init__.py'), - os.path.join('signal', 'bsplines.py'), - os.path.join('signal', 'signaltools.py'), - os.path.join('signal', 'spectral.py'), - os.path.join('signal', 'fir_filter_design.py'), - os.path.join('special', '__init__.py'), - os.path.join('special', 'add_newdocs.py'), - os.path.join('special', 'basic.py'), - os.path.join('special', 'lambertw.py'), - os.path.join('special', 'orthogonal.py'), - os.path.join('spatial', '__init__.py'), - os.path.join('spatial', 'distance.py'), - os.path.join('sparse', 'linalg', 'isolve', 'iterative.py'), - os.path.join('sparse', 'linalg', 'dsolve', 'linsolve.py'), - os.path.join('sparse', 'linalg', 'dsolve', 'umfpack', 'umfpack.py'), - os.path.join('sparse', 'linalg', 'eigen', 'arpack', 'arpack.py'), - os.path.join('sparse', 'linalg', 'eigen', 'arpack', 'speigs.py'), - os.path.join('sparse', 'linalg', 'iterative', 'isolve', 'iterative.py'), - os.path.join('sparse', 'csgraph', '__init__.py'), - os.path.join('sparse', 'csgraph', '_validation.py'), - os.path.join('stats', 'stats.py'), - os.path.join('stats', 'distributions.py'), - os.path.join('stats', 'morestats.py'), - os.path.join('stats', 'kde.py'), - os.path.join('stats', 'mstats_basic.py'), - ] - - if any(filename.endswith(x) for x in import_mangling): - print(filename) - f = open(filename, 'r', encoding='utf-8') - text = f.read() - f.close() - for mod in ['_vq', '_hierarchy_wrap', '_fftpack', 'convolve', - 
'_flinalg', 'fblas', 'flapack', 'cblas', 'clapack', - 'calc_lwork', '_cephes', 'specfun', 'orthogonal_eval', - 'lambertw', 'ckdtree', '_distance_wrap', '_logit', - '_ufuncs', '_ufuncs_cxx', - '_minpack', '_zeros', '_lbfgsb', '_cobyla', '_slsqp', - '_nnls', - 'sigtools', 'spline', '_spectral', - '_fitpack', 'dfitpack', '_interpolate', - '_odepack', '_quadpack', 'vode', '_dop', 'lsoda', - 'vonmises_cython', '_rank', - 'futil', 'mvn', - '_nd_image', - 'numpyio', - '_superlu', '_arpack', '_iterative', '_umfpack', - 'interpnd', - 'mio_utils', 'mio5_utils', 'streams', - '_min_spanning_tree', '_shortest_path', '_tools', '_traversal' - ]: - text = re.sub(r'^(\s*)import %s' % mod, - r'\1from . import %s' % mod, - text, flags=re.M) - text = re.sub(r'^(\s*)from %s import' % mod, - r'\1from .%s import' % mod, - text, flags=re.M) - #text = text.replace('from matrixlib', 'from .matrixlib') - f = open(filename, 'w', encoding='utf-8') - f.write(text) - f.close() - -def walk_sync(dir1, dir2, _seen=None): - if _seen is None: - seen = {} - else: - seen = _seen - - if not dir1.endswith(os.path.sep): - dir1 = dir1 + os.path.sep - - # Walk through stuff (which we haven't yet gone through) in dir1 - for root, dirs, files in os.walk(dir1): - sub = root[len(dir1):] - if sub in seen: - dirs = [x for x in dirs if x not in seen[sub][0]] - files = [x for x in files if x not in seen[sub][1]] - seen[sub][0].extend(dirs) - seen[sub][1].extend(files) - else: - seen[sub] = (dirs, files) - if not dirs and not files: - continue - yield os.path.join(dir1, sub), os.path.join(dir2, sub), dirs, files - - if _seen is None: - # Walk through stuff (which we haven't yet gone through) in dir2 - for root2, root1, dirs, files in walk_sync(dir2, dir1, _seen=seen): - yield root1, root2, dirs, files - -def sync_2to3(src, dst, patchfile=None, clean=False): - import lib2to3.main - from io import StringIO - - to_convert = [] - - for src_dir, dst_dir, dirs, files in walk_sync(src, dst): - for fn in dirs + files: - src_fn = os.path.join(src_dir, fn) - dst_fn = os.path.join(dst_dir, fn) - - # skip temporary etc. 
files - if fn.startswith('.#') or fn.endswith('~'): - continue - - # remove non-existing - if os.path.exists(dst_fn) and not os.path.exists(src_fn): - if clean: - if os.path.isdir(dst_fn): - shutil.rmtree(dst_fn) - else: - os.unlink(dst_fn) - continue - - # make directories - if os.path.isdir(src_fn): - if not os.path.isdir(dst_fn): - os.makedirs(dst_fn) - continue - - dst_dir = os.path.dirname(dst_fn) - if os.path.isfile(dst_fn) and not os.path.isdir(dst_dir): - os.makedirs(dst_dir) - - # don't replace up-to-date files - try: - if os.path.isfile(dst_fn) and \ - os.stat(dst_fn).st_mtime >= os.stat(src_fn).st_mtime: - continue - except OSError: - pass - - # copy file - shutil.copyfile(src_fn, dst_fn) - - # add .py files to 2to3 list - if dst_fn.endswith('.py'): - to_convert.append((src_fn, dst_fn)) - - # run 2to3 - flag_sets = {} - for fn, dst_fn in to_convert: - flag = '' - for pat, opt in EXTRA_2TO3_FLAGS.items(): - if fnmatch.fnmatch(fn, pat): - flag = opt - break - flag_sets.setdefault(flag, []).append(dst_fn) - - if patchfile: - p = open(patchfile, 'wb+') - else: - p = open(os.devnull, 'wb') - - for flags, filenames in flag_sets.items(): - if flags == 'skip': - continue - - _old_stdout = sys.stdout - try: - sys.stdout = StringIO() - lib2to3.main.main("lib2to3.fixes", ['-w', '-n'] + flags.split()+filenames) - finally: - sys.stdout = _old_stdout - - for fn, dst_fn in to_convert: - # perform custom mangling - custom_mangling(dst_fn) - - p.close() - -if __name__ == "__main__": - main()
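
A recurring pattern in this patch is replacing the removed execfile() builtin in the setupegg.py, setupeggscons.py and setupsconsegg.py shims with exec(compile(open(path).read(), path, 'exec')). A minimal sketch of that idiom wrapped in a helper; the exec_file name, the namespace argument and its defaults are illustrative additions, not part of the patch:

    def exec_file(path, namespace=None):
        # Read the script, compile it with its real filename so tracebacks
        # still point at `path`, then exec the code object into `namespace`.
        if namespace is None:
            namespace = {'__name__': '__main__', '__file__': path}
        with open(path) as f:
            source = f.read()
        code = compile(source, path, 'exec')
        exec(code, namespace)
        return namespace

    exec_file('setup.py')  # roughly what setupegg.py now does inline

Unlike execfile(), this runs unchanged on Python 2 and 3, and the with block closes the file deterministically, which the inline one-liner in the patch does not.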