From 58853f5184c7f326bdf6a73e64d91ed223d5a674 Mon Sep 17 00:00:00 2001
From: Mike Jarvis
Date: Fri, 4 Jun 2021 15:54:56 -0400
Subject: [PATCH 1/9] Remove python 2.7 and 3.5 from GHA test platforms

---
 .github/workflows/ci.yml | 8 ++++----
 CHANGELOG.rst            | 1 +
 2 files changed, 5 insertions(+), 4 deletions(-)

diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index b9a3c607..3b598d97 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -22,7 +22,7 @@ jobs:
       matrix:
         # First all python versions in basic linux
         os: [ ubuntu-latest ]
-        py: [ 2.7, 3.5, 3.6, 3.7, 3.8, 3.9, pypy3 ]
+        py: [ 3.6, 3.7, 3.8, 3.9, pypy3 ]
         CC: [ gcc ]
         CXX: [ g++ ]

       include:
         # A couple in MacOS
         - os: macos-latest
-          py: 2.7
+          py: 3.7
           CC: cc
           CXX: c++

         - os: macos-latest
-          py: 3.7
+          py: 3.9
           CC: cc
           CXX: c++

         # Check one with clang compiler
         - os: ubuntu-latest
-          py: 3.7
+          py: 3.8
           CC: clang
           CXX: clang++

diff --git a/CHANGELOG.rst b/CHANGELOG.rst
index 6ad78fb6..a2fac2f3 100644
--- a/CHANGELOG.rst
+++ b/CHANGELOG.rst
@@ -9,6 +9,7 @@ whose issue numbers are listed below for the relevant items.

 API Changes
 -----------
+- No longer supports Python 2.7. Supports Python versions 3.6, 3.7, 3.8, 3.9.

 Performance improvements
 ------------------------

From 0eb79226810008e46be37ee1a594143591c7b7e5 Mon Sep 17 00:00:00 2001
From: Mike Jarvis
Date: Fri, 4 Jun 2021 15:55:52 -0400
Subject: [PATCH 2/9] Remove bits that were just for py2.7 compatibility

---
 setup.py              |  2 --
 tests/test_catalog.py |  4 +---
 tests/test_config.py  | 19 ++++++-------------
 tests/test_helper.py  |  8 ++------
 tests/test_mpi.py     | 12 +-----------
 tests/test_mpi3pt.py  |  9 +--------
 tests/test_ng.py      |  3 +--
 tests/test_nk.py      |  3 +--
 tests/test_reader.py  |  4 +---
 tests/test_rperp.py   |  1 -
 treecorr/__init__.py  |  4 ++--
 treecorr/config.py    |  1 -
 treecorr/reader.py    |  2 +-
 13 files changed, 17 insertions(+), 55 deletions(-)

diff --git a/setup.py b/setup.py
index 1dadb179..a2f93984 100644
--- a/setup.py
+++ b/setup.py
@@ -89,8 +89,6 @@ def get_compiler_type(compiler, check_unknown=True, output=False):
         print('compiler version information: ')
         for line in lines:
             print(line.decode().strip())
-    # Python3 needs this decode bit.
-    # Python2.7 doesn't need it, but it works fine.
     line = lines[0].decode(encoding='UTF-8')
     if line.startswith('Configured'):
         line = lines[1].decode(encoding='UTF-8')
diff --git a/tests/test_catalog.py b/tests/test_catalog.py
index fe72efd8..0c4f25c1 100644
--- a/tests/test_catalog.py
+++ b/tests/test_catalog.py
@@ -23,6 +23,7 @@
 from numpy import pi
 import fitsio
 import treecorr
+from unittest import mock

 from test_helper import get_from_wiki, CaptureLog, assert_raises, do_pickle, timer, assert_warns

@@ -900,9 +901,6 @@ def test_ext():
                                 k_col='k', g1_col='g1', g2_col='g2',
                                 ext=1)

-    if sys.version_info < (3,): return  # mock only available on python 3
-    from unittest import mock
-
     # test that the case where we can't slice works
     # by pretending that we are using an old fitsio version,
     # temporarily.
diff --git a/tests/test_config.py b/tests/test_config.py
index e8c0a95e..0ec1649b 100644
--- a/tests/test_config.py
+++ b/tests/test_config.py
@@ -18,6 +18,7 @@
 import logging
 import fitsio
 import numpy as np
+from unittest import mock

 from test_helper import CaptureLog, assert_raises, timer, assert_warns

@@ -337,8 +338,6 @@ def test_check():

     # corr2 has a list of standard aliases
     # It is currently empty, but let's mock it up to test the functionality.
- if sys.version_info < (3,): return # mock only available on python 3 - from unittest import mock with mock.patch('treecorr.corr2_aliases', {'n2_file_name' : 'nn_file_name'}): with assert_warns(FutureWarning): config2 = treecorr.config.check_config(config1.copy(), valid_params, @@ -496,8 +495,6 @@ def test_omp(): # It's hard to tell what happens in the next step, since we can't control what # treecorr._lib.SetOMPThreads does. It depends on whether OpenMP is enabled and # how many cores are available. So let's mock it up. - if sys.version_info < (3,): return # mock only available on python 3 - from unittest import mock with mock.patch('treecorr.util._lib') as _lib: # First mock with OpenMP enables and able to use lots of threads _lib.SetOMPThreads = lambda x: x @@ -564,9 +561,9 @@ def test_gen_read_write(): with assert_raises(ValueError): treecorr.util.gen_read(file_name, file_type='Invalid') - with assert_raises((OSError, IOError)): # IOError on py2.7 + with assert_raises(OSError): treecorr.util.gen_read(file_name, file_type='ASCII') - with assert_raises((OSError, IOError)): + with assert_raises(OSError): treecorr.util.gen_read(file_name, file_type='FITS') # Now some working I/O @@ -665,8 +662,6 @@ def test_gen_read_write(): assert 'assumed to be HDF' in cl.output # Check that errors are reasonable if fitsio not installed. - if sys.version_info < (3,): return # mock only available on python 3 - from unittest import mock with mock.patch.dict(sys.modules, {'fitsio':None}): with assert_raises(ImportError): treecorr.util.gen_write(file_name2, ['a', 'b'], [a,b]) @@ -735,9 +730,9 @@ def test_gen_multi_read_write(): with assert_raises(ValueError): treecorr.util.gen_multi_read(file_name, names, file_type='Invalid') - with assert_raises((OSError, IOError)): + with assert_raises(OSError): treecorr.util.gen_multi_read(file_name, names, file_type='ASCII') - with assert_raises((OSError, IOError)): + with assert_raises(OSError): treecorr.util.gen_multi_read(file_name, names, file_type='FITS') @@ -833,15 +828,13 @@ def test_gen_multi_read_write(): alt_names = ['k1','k2','k3'] with assert_raises(OSError): treecorr.util.gen_multi_read(file_name3, alt_names, logger=cl.logger) - with assert_raises((OSError, IOError)): + with assert_raises(OSError): treecorr.util.gen_multi_read(file_name4, alt_names, logger=cl.logger) if h5py: with assert_raises(OSError): treecorr.util.gen_multi_read(file_name5, alt_names, logger=cl.logger) # Check that errors are reasonable if fitsio not installed. - if sys.version_info < (3,): return # mock only available on python 3 - from unittest import mock with mock.patch.dict(sys.modules, {'fitsio':None}): with assert_raises(ImportError): treecorr.util.gen_multi_write(file_name2, col_names, names, data) diff --git a/tests/test_helper.py b/tests/test_helper.py index e25edc32..036f06ce 100644 --- a/tests/test_helper.py +++ b/tests/test_helper.py @@ -38,6 +38,7 @@ def get_from_wiki(file_name, host=None): except ImportError: from urllib import urlopen import shutil + import ssl print('downloading %s from %s...'%(local_file_name,url)) # urllib.request.urlretrieve(url,local_file_name) @@ -49,13 +50,9 @@ def get_from_wiki(file_name, host=None): # But that can only be done with urlopen, not urlretrieve. So, here is the solution. # cf. 
http://stackoverflow.com/questions/7243750/download-file-from-web-in-python-3 # http://stackoverflow.com/questions/27835619/ssl-certificate-verify-failed-error + context = ssl._create_unverified_context() try: - import ssl - context = ssl._create_unverified_context() u = urlopen(url, context=context) - except (AttributeError, TypeError): - # Note: prior to 2.7.9, there is no such function or even the context keyword. - u = urlopen(url) except urllib.error.HTTPError as e: print('Caught ',e) print('Wait 10 sec and try again.') @@ -161,7 +158,6 @@ def nop(): pass _t = Dummy('nop') assert_raises = getattr(_t, 'assertRaises') -#if sys.version_info > (3,2): if False: # Note: this should work, but at least sometimes it fails with: # RuntimeError: dictionary changed size during iteration diff --git a/tests/test_mpi.py b/tests/test_mpi.py index fbb516dc..572a7f84 100644 --- a/tests/test_mpi.py +++ b/tests/test_mpi.py @@ -14,48 +14,41 @@ from __future__ import print_function import unittest import sys -if sys.version_info > (3,0): - from mockmpi import mock_mpiexec +from mockmpi import mock_mpiexec from test_helper import timer from mpi_test import setup, do_mpi_gg, do_mpi_ng, do_mpi_nk, do_mpi_nn, do_mpi_kk, do_mpi_kg -@unittest.skipIf(sys.version_info < (3, 0), "mock_mpiexec doesn't support python 2") @timer def test_mpi_gg(): output = __name__ == '__main__' mock_mpiexec(4, do_mpi_gg, output) mock_mpiexec(1, do_mpi_gg, output) -@unittest.skipIf(sys.version_info < (3, 0), "mock_mpiexec doesn't support python 2") @timer def test_mpi_ng(): output = __name__ == '__main__' mock_mpiexec(4, do_mpi_ng, output) mock_mpiexec(1, do_mpi_ng, output) -@unittest.skipIf(sys.version_info < (3, 0), "mock_mpiexec doesn't support python 2") @timer def test_mpi_nk(): output = __name__ == '__main__' mock_mpiexec(4, do_mpi_nk, output) mock_mpiexec(1, do_mpi_nk, output) -@unittest.skipIf(sys.version_info < (3, 0), "mock_mpiexec doesn't support python 2") @timer def test_mpi_nn(): output = __name__ == '__main__' mock_mpiexec(4, do_mpi_nn, output) mock_mpiexec(1, do_mpi_nn, output) -@unittest.skipIf(sys.version_info < (3, 0), "mock_mpiexec doesn't support python 2") @timer def test_mpi_kg(): output = __name__ == '__main__' mock_mpiexec(4, do_mpi_kg, output) mock_mpiexec(1, do_mpi_kg, output) -@unittest.skipIf(sys.version_info < (3, 0), "mock_mpiexec doesn't support python 2") @timer def test_mpi_kk(): output = __name__ == '__main__' @@ -63,9 +56,6 @@ def test_mpi_kk(): mock_mpiexec(1, do_mpi_kk, output) if __name__ == '__main__': - if sys.version_info < (3,0): - print("mockmpi does not support python 2") - exit() setup() test_mpi_gg() test_mpi_ng() diff --git a/tests/test_mpi3pt.py b/tests/test_mpi3pt.py index 9f8b1417..8ec4073c 100644 --- a/tests/test_mpi3pt.py +++ b/tests/test_mpi3pt.py @@ -14,36 +14,29 @@ from __future__ import print_function import unittest import sys -if sys.version_info > (3,0): - from mockmpi import mock_mpiexec +from mockmpi import mock_mpiexec from test_helper import timer from mpi_test3pt import * -@unittest.skipIf(sys.version_info < (3, 0), "mock_mpiexec doesn't support python 2") @timer def test_mpi_ggg(): output = __name__ == '__main__' mock_mpiexec(1, do_mpi_ggg, output) mock_mpiexec(4, do_mpi_ggg, output) -@unittest.skipIf(sys.version_info < (3, 0), "mock_mpiexec doesn't support python 2") @timer def test_mpi_kkk(): output = __name__ == '__main__' mock_mpiexec(1, do_mpi_kkk, output) mock_mpiexec(4, do_mpi_kkk, output) -@unittest.skipIf(sys.version_info < (3, 0), "mock_mpiexec doesn't support 
python 2") @timer def test_mpi_kkk2(): output = __name__ == '__main__' mock_mpiexec(4, do_mpi_kkk2, output) if __name__ == '__main__': - if sys.version_info > (3,0): - print("mockmpi does not support python 2") - exit() setup() test_mpi_ggg() test_mpi_kkk() diff --git a/tests/test_ng.py b/tests/test_ng.py index e9d306bf..ef8ae833 100644 --- a/tests/test_ng.py +++ b/tests/test_ng.py @@ -19,6 +19,7 @@ import coord import time import fitsio +from unittest import mock from test_helper import do_pickle, CaptureLog from test_helper import assert_raises, timer, assert_warns @@ -424,8 +425,6 @@ def test_single(): # There is special handling for single-row catalogs when using np.genfromtxt rather # than pandas. So mock it up to make sure we test it. - if sys.version_info < (3,): return # mock only available on python 3 - from unittest import mock treecorr.Catalog._emitted_pandas_warning = False # Reset this, in case already triggered. with mock.patch.dict(sys.modules, {'pandas':None}): with CaptureLog() as cl: diff --git a/tests/test_nk.py b/tests/test_nk.py index 91c81a1a..aaf6b04a 100644 --- a/tests/test_nk.py +++ b/tests/test_nk.py @@ -18,6 +18,7 @@ import sys import coord import fitsio +from unittest import mock from test_helper import do_pickle, CaptureLog from test_helper import assert_raises, timer, assert_warns @@ -367,8 +368,6 @@ def test_single(): # There is special handling for single-row catalogs when using np.genfromtxt rather # than pandas. So mock it up to make sure we test it. - if sys.version_info < (3,): return # mock only available on python 3 - from unittest import mock treecorr.Catalog._emitted_pandas_warning = False # Reset this, in case already triggered. with mock.patch.dict(sys.modules, {'pandas':None}): with CaptureLog() as cl: diff --git a/tests/test_reader.py b/tests/test_reader.py index 3a9add29..e82f1086 100644 --- a/tests/test_reader.py +++ b/tests/test_reader.py @@ -15,6 +15,7 @@ import sys import numpy as np import fitsio +from unittest import mock from treecorr.reader import FitsReader, HdfReader, PandasReader, AsciiReader, ParquetReader from test_helper import get_from_wiki, assert_raises, timer @@ -80,9 +81,6 @@ def test_fits_reader(): d = r.read(['DEC'], np.arange(10), 'AARDWOLF') assert d.size==10 - if sys.version_info < (3,): return # mock only available on python 3 - from unittest import mock - # Again check things not allowed if not in context with assert_raises(RuntimeError): r.read(['RA'], slice(0,10,2), 1) diff --git a/tests/test_rperp.py b/tests/test_rperp.py index 58645825..8bebd680 100644 --- a/tests/test_rperp.py +++ b/tests/test_rperp.py @@ -138,7 +138,6 @@ def test_nn_direct_oldrperp(): # If we set Rperp_alias = 'OldRperp', we can use Rperp. 
# Use mock for this
-    if sys.version_info < (3,): return  # mock only available on python 3
     from unittest import mock
     with mock.patch('treecorr.util.Rperp_alias', 'OldRperp'):
         dd.process(cat1, cat2, metric='Rperp')
diff --git a/treecorr/__init__.py b/treecorr/__init__.py
index 333c9df6..581c1605 100644
--- a/treecorr/__init__.py
+++ b/treecorr/__init__.py
@@ -32,9 +32,9 @@
 if not os.path.exists(lib_file):  # pragma: no cover
     alt_files = glob.glob(os.path.join(os.path.dirname(__file__),'_treecorr*.so'))
     if len(alt_files) == 0:
-        raise IOError("No file '_treecorr.so' found in %s"%treecorr_dir)
+        raise OSError("No file '_treecorr.so' found in %s"%treecorr_dir)
     if len(alt_files) > 1:
-        raise IOError("Multiple files '_treecorr*.so' found in %s: %s"%(treecorr_dir,alt_files))
+        raise OSError("Multiple files '_treecorr*.so' found in %s: %s"%(treecorr_dir,alt_files))
     lib_file = alt_files[0]

 # Load the C functions with cffi
diff --git a/treecorr/config.py b/treecorr/config.py
index 3605e1a7..581b6880 100644
--- a/treecorr/config.py
+++ b/treecorr/config.py
@@ -15,7 +15,6 @@
 .. module:: config
 """

-from __future__ import print_function
 import sys
 import coord
 import numpy as np
diff --git a/treecorr/reader.py b/treecorr/reader.py
index 3fbd35ef..8f20dbf8 100644
--- a/treecorr/reader.py
+++ b/treecorr/reader.py
@@ -202,7 +202,7 @@ def __enter__(self):
                                  delimiter=self.delimiter, max_rows=1)
         self.col_names = []
         if len(data.shape) != 1:  # pragma: no cover
-            raise IOError('Unable to parse the input catalog as a numpy array')
+            raise OSError('Unable to parse the input catalog as a numpy array')
         self.ncols = data.shape[0]
         return self

From 16dc3cf0ab8480d01a0e535980c3307edf8e56e5 Mon Sep 17 00:00:00 2001
From: Mike Jarvis
Date: Fri, 4 Jun 2021 17:06:56 -0400
Subject: [PATCH 3/9] Add depr_pos_kwargs

---
 treecorr/util.py | 57 ++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 57 insertions(+)

diff --git a/treecorr/util.py b/treecorr/util.py
index db25ab99..a2a3dd17 100644
--- a/treecorr/util.py
+++ b/treecorr/util.py
@@ -18,6 +18,9 @@
 import numpy as np
 import os
 import coord
+import functools
+import inspect
+import warnings

 from . import _lib, _ffi, Rperp_alias

@@ -918,3 +921,57 @@ def __get__(self, obj, cls):
         value = self.fget(obj)
         setattr(obj, self.func_name, value)
         return value
+
+
+
+def depr_pos_kwargs(fn):
+    """
+    This decorator will allow the old API, where keyword-only arguments could be given as
+    positional arguments, but it will give a deprecation warning about it.
+
+        @depr_pos_kwargs
+        def func_with_kwargs(a, *, b=3, c=4):
+            ...
+
+        # Expected usage:
+        func_with_kwargs(1, b=5, c=9)
+
+        # This works, but gives a deprecation warning
+        func_with_kwargs(1, 5, 9)
+    """
+    # Note: this is inspired by the legacy_api_wrap decorator by flying-sheep, which does
+    # something similar.
+    # https://github.com/flying-sheep/legacy-api-wrap/blob/master/legacy_api_wrap.py
+    # However, it was reimplemented from scratch by MJ.
+
+    params = inspect.signature(fn).parameters
+    nparams = len(params)
+    nkwargs = len(fn.__kwdefaults__)
+    npos = nparams - nkwargs
+
+    @functools.wraps(fn)
+    def wrapper(*args, **kwargs):
+        if len(args) > npos:
+            # Make sure providing too many params is still a TypeError.
+            if len(args) > nparams:
+                raise TypeError("{} takes at most {} arguments but {} were given.".format(
+                    fn.__name__, nparams, len(args)))
+
+            # Which names need to turn into kwargs?
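+            # E.g. for def f(a, *, b=3, c=4) called as f(1, 5): npos is 1 and
+            # len(args) is 2, so this slice picks out just ['b'].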
+ kw_names = list(params.keys())[npos:len(args)] + + # Warn about deprecated syntax + warnings.warn( + "Use of keyword-only arguments as positional arguments is deprecated in "+ + "the function " + fn.__name__ + ". " + + "The following parameters now require an explicit keyword name: "+ + str(kw_names), FutureWarning) + + # But make it work. + for a, n in zip(args[npos:], kw_names): + kwargs[n] = a + args = args[:npos] + + return fn(*args, **kwargs) + + return wrapper From 369f73dd93ea00b3aec7f3bf03b572280355f9d2 Mon Sep 17 00:00:00 2001 From: Mike Jarvis Date: Fri, 4 Jun 2021 18:22:57 -0400 Subject: [PATCH 4/9] Make most kwarg parameters in Catalog methods, kw-only. --- tests/test_catalog.py | 64 ++++++++++++++++++++++---------- tests/test_patch.py | 8 ++-- treecorr/binnedcorr2.py | 16 +++++--- treecorr/catalog.py | 43 +++++++++++++-------- treecorr/corr2.py | 8 ++-- treecorr/corr3.py | 4 +- treecorr/field.py | 2 +- treecorr/ggcorrelation.py | 23 ++++++++---- treecorr/gggcorrelation.py | 76 +++++++++++++++++++++++++++----------- treecorr/kgcorrelation.py | 16 +++++--- treecorr/kkcorrelation.py | 22 +++++++---- treecorr/kkkcorrelation.py | 76 +++++++++++++++++++++++++++----------- treecorr/ngcorrelation.py | 16 +++++--- treecorr/nkcorrelation.py | 16 +++++--- treecorr/nncorrelation.py | 22 +++++++---- treecorr/nnncorrelation.py | 76 +++++++++++++++++++++++++++----------- 16 files changed, 328 insertions(+), 160 deletions(-) diff --git a/tests/test_catalog.py b/tests/test_catalog.py index 0c4f25c1..c8756048 100644 --- a/tests/test_catalog.py +++ b/tests/test_catalog.py @@ -1575,6 +1575,14 @@ def test_list(): np.testing.assert_almost_equal(cats[k].x, x_list[k]) np.testing.assert_almost_equal(cats[k].y, y_list[k]) + # Providing positional args past list_key is now deprecated. + with assert_warns(FutureWarning): + cats = treecorr.read_catalogs(config, 'file_name', 'file_list', 0, None, None) + np.testing.assert_equal(len(cats), ncats) + for k in range(ncats): + np.testing.assert_almost_equal(cats[k].x, x_list[k]) + np.testing.assert_almost_equal(cats[k].y, y_list[k]) + @timer def test_write(): # Test that writing a Catalog to a file and then reading it back in works correctly @@ -1734,12 +1742,12 @@ def test_field(): t0 = time.time() nfield1 = cat1.getNField() - nfield2 = cat2.getNField(0.01, 1) - nfield3 = cat3.getNField(1,300, logger=logger) + nfield2 = cat2.getNField(min_size=0.01, max_size=1) + nfield3 = cat3.getNField(min_size=1, max_size=300, logger=logger) t1 = time.time() nfield1b = cat1.getNField() - nfield2b = cat2.getNField(0.01, 1) - nfield3b = cat3.getNField(1,300, logger=logger) + nfield2b = cat2.getNField(min_size=0.01, max_size=1) + nfield3b = cat3.getNField(min_size=1, max_size=300, logger=logger) t2 = time.time() assert cat1.nfields.count == 1 assert cat2.nfields.count == 1 @@ -1754,14 +1762,30 @@ def test_field(): print('nfield: ',t1-t0,t2-t1) assert t2-t1 < t1-t0 + # Check warning if not using kwargs for now-kwarg-only params + with assert_warns(FutureWarning): + nfield2c = cat2.getNField(0.01, 1) + with assert_warns(FutureWarning): + nfield2d = cat2.getNField(0.01, max_size=1) + with assert_warns(FutureWarning): + nfield2e = cat2.getNField(0.01, 1, None, False, None, 10, None, None) + with assert_raises(TypeError): + # One too many, so this is still an error. 
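+            # (Counting self, that is one more positional value than the signature
+            # has parameters, so depr_pos_kwargs raises instead of warning.)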
+ nfield2e = cat2.getNField(0.01, 1, None, False, None, 10, None, None, 77) + for k in nfield2.__dict__: + if k != 'data': + assert nfield2c.__dict__[k] == nfield2.__dict__[k] + assert nfield2d.__dict__[k] == nfield2.__dict__[k] + assert nfield2e.__dict__[k] == nfield2.__dict__[k] + t0 = time.time() gfield1 = cat1.getGField() - gfield2 = cat2.getGField(0.01, 1) - gfield3 = cat3.getGField(1,300, logger=logger) + gfield2 = cat2.getGField(min_size=0.01, max_size=1) + gfield3 = cat3.getGField(min_size=1, max_size=300, logger=logger) t1 = time.time() gfield1b = cat1.getGField() - gfield2b = cat2.getGField(0.01, 1) - gfield3b = cat3.getGField(1,300, logger=logger) + gfield2b = cat2.getGField(min_size=0.01, max_size=1) + gfield3b = cat3.getGField(min_size=1, max_size=300, logger=logger) t2 = time.time() assert_raises(TypeError, cat4.getGField) assert cat1.gfields.count == 1 @@ -1778,12 +1802,12 @@ def test_field(): t0 = time.time() kfield1 = cat1.getKField() - kfield2 = cat2.getKField(0.01, 1) - kfield3 = cat3.getKField(1,300, logger=logger) + kfield2 = cat2.getKField(min_size=0.01, max_size=1) + kfield3 = cat3.getKField(min_size=1, max_size=300, logger=logger) t1 = time.time() kfield1b = cat1.getKField() - kfield2b = cat2.getKField(0.01, 1) - kfield3b = cat3.getKField(1,300, logger=logger) + kfield2b = cat2.getKField(min_size=0.01, max_size=1) + kfield3b = cat3.getKField(min_size=1, max_size=300, logger=logger) t2 = time.time() assert_raises(TypeError, cat4.getKField) assert cat1.kfields.count == 1 @@ -1881,12 +1905,12 @@ def test_field(): t0 = time.time() nfield1 = cat1.getNField() - nfield2 = cat1.getNField(0.01, 1) - nfield3 = cat1.getNField(1,300, logger=logger) + nfield2 = cat1.getNField(min_size=0.01, max_size=1) + nfield3 = cat1.getNField(min_size=1, max_size=300, logger=logger) t1 = time.time() nfield1b = cat1.getNField() - nfield2b = cat1.getNField(0.01, 1) - nfield3b = cat1.getNField(1,300, logger=logger) + nfield2b = cat1.getNField(min_size=0.01, max_size=1) + nfield3b = cat1.getNField(min_size=1, max_size=300, logger=logger) t2 = time.time() assert cat1.nfields.count == 3 print('after resize(3) nfield: ',t1-t0,t2-t1) @@ -1919,12 +1943,12 @@ def test_field(): assert cat1.nfields.size == 0 t0 = time.time() nfield1 = cat1.getNField() - nfield2 = cat1.getNField(0.01, 1) - nfield3 = cat1.getNField(1,300, logger=logger) + nfield2 = cat1.getNField(min_size=0.01, max_size=1) + nfield3 = cat1.getNField(min_size=1, max_size=300, logger=logger) t1 = time.time() nfield1b = cat1.getNField() - nfield2b = cat1.getNField(0.01, 1) - nfield3b = cat1.getNField(1,300, logger=logger) + nfield2b = cat1.getNField(min_size=0.01, max_size=1) + nfield3b = cat1.getNField(min_size=1, max_size=300, logger=logger) t2 = time.time() # This time, not much time difference. print('after resize(0) nfield: ',t1-t0,t2-t1) diff --git a/tests/test_patch.py b/tests/test_patch.py index 829d8cbd..ebd00143 100644 --- a/tests/test_patch.py +++ b/tests/test_patch.py @@ -120,8 +120,8 @@ def test_cat_patches(): np.testing.assert_array_equal(catb.y,cat5.patches[i].y[cata.nobj:]) # get_patches from a single patch will return a list with just itself. 
- assert cata.get_patches(False) == [cata] - assert catb.get_patches(True) == [catb] + assert cata.get_patches(low_mem=False) == [cata] + assert catb.get_patches(low_mem=True) == [catb] # Patches start in an unloaded state (by default) cat5b = treecorr.Catalog(file_name5, ra_col=1, dec_col=2, ra_units='rad', dec_units='rad', @@ -204,8 +204,8 @@ def test_cat_patches(): np.testing.assert_array_equal(catb.y,cat6.patches[i].y[cata.nobj:]) # get_patches from a single patch will return a list with just itself. - assert cata.get_patches(False) == [cata] - assert catb.get_patches(True) == [catb] + assert cata.get_patches(low_mem=False) == [cata] + assert catb.get_patches(low_mem=True) == [catb] # 7. Set a single patch number cat7 = treecorr.Catalog(ra=ra, dec=dec, ra_units='rad', dec_units='rad', patch=3) diff --git a/treecorr/binnedcorr2.py b/treecorr/binnedcorr2.py index 526c1261..6afc4cab 100644 --- a/treecorr/binnedcorr2.py +++ b/treecorr/binnedcorr2.py @@ -1029,15 +1029,19 @@ def sample_pairs(self, n, cat1, cat2, min_sep, max_sep, metric=None): # The first one though is definitely possible, so we need to check that. self.logger.debug("In sample_pairs, making default field for cat1") min_size, max_size = self._get_minmax_size() - f1 = cat1.getNField(min_size, max_size, self.split_method, - self.brute is True or self.brute == 1, - self.min_top, self.max_top, self.coords) + f1 = cat1.getNField(min_size=min_size, max_size=max_size, + split_method=self.split_method, + brute=self.brute is True or self.brute == 1, + min_top=self.min_top, max_top=self.max_top, + coords=self.coords) if f2 is None or f2._coords != self._coords: self.logger.debug("In sample_pairs, making default field for cat2") min_size, max_size = self._get_minmax_size() - f2 = cat2.getNField(min_size, max_size, self.split_method, - self.brute is True or self.brute == 2, - self.min_top, self.max_top, self.coords) + f2 = cat2.getNField(min_size=min_size, max_size=max_size, + split_method=self.split_method, + brute=self.brute is True or self.brute == 2, + min_top=self.min_top, max_top=self.max_top, + coords=self.coords) # Apply units to min_sep, max_sep: min_sep *= self._sep_units diff --git a/treecorr/catalog.py b/treecorr/catalog.py index 16e422e8..0b3cdb8f 100644 --- a/treecorr/catalog.py +++ b/treecorr/catalog.py @@ -27,6 +27,7 @@ from .util import parse_file_type, LRU_Cache, gen_write, gen_read, set_omp_threads from .util import double_ptr as dp from .util import long_ptr as lp +from .util import depr_pos_kwargs from .field import NField, KField, GField, NSimpleField, KSimpleField, GSimpleField class Catalog(object): @@ -168,6 +169,8 @@ class Catalog(object): file. Any optional kwargs may be given here in the config dict if desired. Invalid keys in the config dict are ignored. (default: None) + Keyword-only Arguments: + num (int): Which number catalog are we reading. e.g. for NG correlations the catalog for the N has num=0, the one for G has num=1. This is only necessary if you are using a config dict where things like ``x_col`` @@ -214,8 +217,6 @@ class Catalog(object): may instead give patch_centers either as a file name or an array from which the patches will be determined. (default: None) - Keyword Arguments: - file_type (str): What kind of file is the input file. 
Valid options are 'ASCII', 'FITS' 'HDF', or 'Parquet' (default: if the file_name extension starts with .fit, then use 'FITS', or with .hdf, then use 'HDF', or with '.par', @@ -507,7 +508,8 @@ class Catalog(object): } _emitted_pandas_warning = False # Only emit the warning once. Set to True once we have. - def __init__(self, file_name=None, config=None, num=0, logger=None, is_rand=False, + @depr_pos_kwargs + def __init__(self, file_name=None, config=None, *, num=0, logger=None, is_rand=False, x=None, y=None, z=None, ra=None, dec=None, r=None, w=None, wpos=None, flag=None, g1=None, g2=None, k=None, patch=None, patch_centers=None, rng=None, **kwargs): @@ -1623,7 +1625,8 @@ def field(self): # But if the weakref is alive, this returns the field we want. return self._field() - def getNField(self, min_size=0, max_size=None, split_method=None, brute=False, + @depr_pos_kwargs + def getNField(self, *, min_size=0, max_size=None, split_method=None, brute=False, min_top=None, max_top=10, coords=None, logger=None): """Return an `NField` based on the positions in this catalog. @@ -1657,7 +1660,8 @@ def getNField(self, min_size=0, max_size=None, split_method=None, brute=False, return field - def getKField(self, min_size=0, max_size=None, split_method=None, brute=False, + @depr_pos_kwargs + def getKField(self, *, min_size=0, max_size=None, split_method=None, brute=False, min_top=None, max_top=10, coords=None, logger=None): """Return a `KField` based on the k values in this catalog. @@ -1693,7 +1697,8 @@ def getKField(self, min_size=0, max_size=None, split_method=None, brute=False, return field - def getGField(self, min_size=0, max_size=None, split_method=None, brute=False, + @depr_pos_kwargs + def getGField(self, *, min_size=0, max_size=None, split_method=None, brute=False, min_top=None, max_top=10, coords=None, logger=None): """Return a `GField` based on the g1,g2 values in this catalog. @@ -1729,7 +1734,8 @@ def getGField(self, min_size=0, max_size=None, split_method=None, brute=False, return field - def getNSimpleField(self, logger=None): + @depr_pos_kwargs + def getNSimpleField(self, *, logger=None): """Return an `NSimpleField` based on the positions in this catalog. The `NSimpleField` object is cached, so this is efficient to call multiple times. @@ -1746,7 +1752,8 @@ def getNSimpleField(self, logger=None): return self.nsimplefields(logger=logger) - def getKSimpleField(self, logger=None): + @depr_pos_kwargs + def getKSimpleField(self, *, logger=None): """Return a `KSimpleField` based on the k values in this catalog. The `KSimpleField` object is cached, so this is efficient to call multiple times. @@ -1765,7 +1772,8 @@ def getKSimpleField(self, logger=None): return self.ksimplefields(logger=logger) - def getGSimpleField(self, logger=None): + @depr_pos_kwargs + def getGSimpleField(self, *, logger=None): """Return a `GSimpleField` based on the g1,g2 values in this catalog. The `GSimpleField` object is cached, so this is efficient to call multiple times. @@ -2039,7 +2047,8 @@ def read_patches(self, save_patch_dir=None): for i, name in enumerate(file_names)] self.logger.info('Patches created from files %s .. 
%s',file_names[0],file_names[-1]) - def get_patches(self, low_mem=False): + @depr_pos_kwargs + def get_patches(self, *, low_mem=False): """Return a list of Catalog instances each representing a single patch from this Catalog After calling this function once, the patches may be repeatedly accessed by the @@ -2124,7 +2133,8 @@ def get_patches(self, low_mem=False): return self._patches - def write(self, file_name, file_type=None, cat_precision=None): + @depr_pos_kwargs + def write(self, file_name, *, file_type=None, cat_precision=None): """Write the catalog to a file. The position columns are output using the same units as were used when building the @@ -2278,7 +2288,8 @@ def __eq__(self, other): np.array_equal(self.patch, other.patch)) -def read_catalogs(config, key=None, list_key=None, num=0, logger=None, is_rand=None): +@depr_pos_kwargs +def read_catalogs(config, key=None, list_key=None, *, num=0, logger=None, is_rand=None): """Read in a list of catalogs for the given key. key should be the file_name parameter or similar key word. @@ -2333,11 +2344,12 @@ def read_catalogs(config, key=None, list_key=None, num=0, logger=None, is_rand=N file_names = file_names.split() ret = [] for file_name in file_names: - ret += Catalog(file_name, config, num, logger, is_rand).get_patches() + ret += Catalog(file_name, config, num=num, logger=logger, is_rand=is_rand).get_patches() return ret -def calculateVarG(cat_list, low_mem=False): +@depr_pos_kwargs +def calculateVarG(cat_list, *, low_mem=False): """Calculate the overall shear variance from a list of catalogs. The catalogs are assumed to be equivalent, so this is just the average shear @@ -2365,7 +2377,8 @@ def calculateVarG(cat_list, low_mem=False): cat.unload() return varg / sumw -def calculateVarK(cat_list, low_mem=False): +@depr_pos_kwargs +def calculateVarK(cat_list, *, low_mem=False): """Calculate the overall kappa variance from a list of catalogs. The catalogs are assumed to be equivalent, so this is just the average kappa diff --git a/treecorr/corr2.py b/treecorr/corr2.py index ebc9872e..82baa636 100644 --- a/treecorr/corr2.py +++ b/treecorr/corr2.py @@ -131,10 +131,10 @@ def corr2(config, logger=None): set_omp_threads(num_threads, logger) # Read in the input files. Each of these is a list. - cat1 = read_catalogs(config, 'file_name', 'file_list', 0, logger) - cat2 = read_catalogs(config, 'file_name2', 'file_list2', 1, logger) - rand1 = read_catalogs(config, 'rand_file_name', 'rand_file_list', 0, logger) - rand2 = read_catalogs(config, 'rand_file_name2', 'rand_file_list2', 1, logger) + cat1 = read_catalogs(config, 'file_name', 'file_list', num=0, logger=logger) + cat2 = read_catalogs(config, 'file_name2', 'file_list2', num=1, logger=logger) + rand1 = read_catalogs(config, 'rand_file_name', 'rand_file_list', num=0, logger=logger) + rand2 = read_catalogs(config, 'rand_file_name2', 'rand_file_list2', num=1, logger=logger) if len(cat1) == 0: raise TypeError("Either file_name or file_list is required") if len(cat2) == 0: cat2 = None diff --git a/treecorr/corr3.py b/treecorr/corr3.py index 9a8746b0..1202d11a 100644 --- a/treecorr/corr3.py +++ b/treecorr/corr3.py @@ -106,9 +106,9 @@ def corr3(config, logger=None): set_omp_threads(num_threads, logger) # Read in the input files. Each of these is a list. - cat1 = read_catalogs(config, 'file_name', 'file_list', 0, logger) + cat1 = read_catalogs(config, 'file_name', 'file_list', num=0, logger=logger) # TODO: when giving file_name2, file_name3, should now do the real CrossCorrelation process. 
- rand1 = read_catalogs(config, 'rand_file_name', 'rand_file_list', 0, logger) + rand1 = read_catalogs(config, 'rand_file_name', 'rand_file_list', num=0, logger=logger) if len(cat1) == 0: raise TypeError("Either file_name or file_list is required") if len(rand1) == 0: rand1 = None diff --git a/treecorr/field.py b/treecorr/field.py index 0f54eeaa..cf7e8a4f 100644 --- a/treecorr/field.py +++ b/treecorr/field.py @@ -511,7 +511,7 @@ class NField(Field): An NField is typically created from a Catalog object using - >>> nfield = cat.getNField(min_size, max_size, b) + >>> nfield = cat.getNField(min_size=min_size, max_size=max_size) Parameters: cat (Catalog): The catalog from which to make the field. diff --git a/treecorr/ggcorrelation.py b/treecorr/ggcorrelation.py index 8473f5b0..d81ce158 100644 --- a/treecorr/ggcorrelation.py +++ b/treecorr/ggcorrelation.py @@ -206,8 +206,11 @@ def process_auto(self, cat, metric=None, num_threads=None): self._set_num_threads(num_threads) min_size, max_size = self._get_minmax_size() - field = cat.getGField(min_size, max_size, self.split_method, - bool(self.brute), self.min_top, self.max_top, self.coords) + field = cat.getGField(min_size=min_size, max_size=max_size, + split_method=self.split_method, + brute=bool(self.brute), + min_top=self.min_top, max_top=self.max_top, + coords=self.coords) self.logger.info('Starting %d jobs.',field.nTopLevelNodes) _lib.ProcessAuto2(self.corr, field.data, self.output_dots, @@ -242,12 +245,16 @@ def process_cross(self, cat1, cat2, metric=None, num_threads=None): self._set_num_threads(num_threads) min_size, max_size = self._get_minmax_size() - f1 = cat1.getGField(min_size, max_size, self.split_method, - self.brute is True or self.brute == 1, - self.min_top, self.max_top, self.coords) - f2 = cat2.getGField(min_size, max_size, self.split_method, - self.brute is True or self.brute == 2, - self.min_top, self.max_top, self.coords) + f1 = cat1.getGField(min_size=min_size, max_size=max_size, + split_method=self.split_method, + brute=self.brute is True or self.brute == 1, + min_top=self.min_top, max_top=self.max_top, + coords=self.coords) + f2 = cat2.getGField(min_size=min_size, max_size=max_size, + split_method=self.split_method, + brute=self.brute is True or self.brute == 2, + min_top=self.min_top, max_top=self.max_top, + coords=self.coords) self.logger.info('Starting %d jobs.',f1.nTopLevelNodes) _lib.ProcessCross2(self.corr, f1.data, f2.data, self.output_dots, diff --git a/treecorr/gggcorrelation.py b/treecorr/gggcorrelation.py index 4169c86e..a656c9ad 100644 --- a/treecorr/gggcorrelation.py +++ b/treecorr/gggcorrelation.py @@ -301,8 +301,10 @@ def process_auto(self, cat, metric=None, num_threads=None): self._set_num_threads(num_threads) min_size, max_size = self._get_minmax_size() - field = cat.getGField(min_size, max_size, self.split_method, - bool(self.brute), self.min_top, self.max_top, self.coords) + field = cat.getGField(min_size=min_size, max_size=max_size, + split_method=self.split_method, brute=bool(self.brute), + min_top=self.min_top, max_top=self.max_top, + coords=self.coords) self.logger.info('Starting %d jobs.',field.nTopLevelNodes) _lib.ProcessAuto3(self.corr, field.data, self.output_dots, @@ -339,10 +341,16 @@ def process_cross12(self, cat1, cat2, metric=None, num_threads=None): self._set_num_threads(num_threads) min_size, max_size = self._get_minmax_size() - f1 = cat1.getGField(min_size, max_size, self.split_method, - bool(self.brute), self.min_top, self.max_top, self.coords) - f2 = cat2.getGField(min_size, max_size, 
self.split_method, - bool(self.brute), self.min_top, self.max_top, self.coords) + f1 = cat1.getGField(min_size=min_size, max_size=max_size, + split_method=self.split_method, + brute=self.brute is True or self.brute == 1, + min_top=self.min_top, max_top=self.max_top, + coords=self.coords) + f2 = cat2.getGField(min_size=min_size, max_size=max_size, + split_method=self.split_method, + brute=self.brute is True or self.brute == 2, + min_top=self.min_top, max_top=self.max_top, + coords=self.coords) self.logger.info('Starting %d jobs.',f1.nTopLevelNodes) # Note: all 3 correlation objects are the same. Thus, all triangles will be placed @@ -381,12 +389,21 @@ def process_cross(self, cat1, cat2, cat3, metric=None, num_threads=None): self._set_num_threads(num_threads) min_size, max_size = self._get_minmax_size() - f1 = cat1.getGField(min_size, max_size, self.split_method, - bool(self.brute), self.min_top, self.max_top, self.coords) - f2 = cat2.getGField(min_size, max_size, self.split_method, - bool(self.brute), self.min_top, self.max_top, self.coords) - f3 = cat3.getGField(min_size, max_size, self.split_method, - bool(self.brute), self.min_top, self.max_top, self.coords) + f1 = cat1.getGField(min_size=min_size, max_size=max_size, + split_method=self.split_method, + brute=self.brute is True or self.brute == 1, + min_top=self.min_top, max_top=self.max_top, + coords=self.coords) + f2 = cat2.getGField(min_size=min_size, max_size=max_size, + split_method=self.split_method, + brute=self.brute is True or self.brute == 2, + min_top=self.min_top, max_top=self.max_top, + coords=self.coords) + f3 = cat3.getGField(min_size=min_size, max_size=max_size, + split_method=self.split_method, + brute=self.brute is True or self.brute == 3, + min_top=self.min_top, max_top=self.max_top, + coords=self.coords) self.logger.info('Starting %d jobs.',f1.nTopLevelNodes) # Note: all 6 correlation objects are the same. Thus, all triangles will be placed @@ -1318,10 +1335,16 @@ def process_cross12(self, cat1, cat2, metric=None, num_threads=None): self._set_num_threads(num_threads) min_size, max_size = self._get_minmax_size() - f1 = cat1.getGField(min_size, max_size, self.split_method, - bool(self.brute), self.min_top, self.max_top, self.coords) - f2 = cat2.getGField(min_size, max_size, self.split_method, - bool(self.brute), self.min_top, self.max_top, self.coords) + f1 = cat1.getGField(min_size=min_size, max_size=max_size, + split_method=self.split_method, + brute=self.brute is True or self.brute == 1, + min_top=self.min_top, max_top=self.max_top, + coords=self.coords) + f2 = cat2.getGField(min_size=min_size, max_size=max_size, + split_method=self.split_method, + brute=self.brute is True or self.brute == 2, + min_top=self.min_top, max_top=self.max_top, + coords=self.coords) self.logger.info('Starting %d jobs.',f1.nTopLevelNodes) # Note: all 3 correlation objects are the same. 
Thus, all triangles will be placed @@ -1361,12 +1384,21 @@ def process_cross(self, cat1, cat2, cat3, metric=None, num_threads=None): self._set_num_threads(num_threads) min_size, max_size = self._get_minmax_size() - f1 = cat1.getGField(min_size, max_size, self.split_method, - bool(self.brute), self.min_top, self.max_top, self.coords) - f2 = cat2.getGField(min_size, max_size, self.split_method, - bool(self.brute), self.min_top, self.max_top, self.coords) - f3 = cat3.getGField(min_size, max_size, self.split_method, - bool(self.brute), self.min_top, self.max_top, self.coords) + f1 = cat1.getGField(min_size=min_size, max_size=max_size, + split_method=self.split_method, + brute=self.brute is True or self.brute == 1, + min_top=self.min_top, max_top=self.max_top, + coords=self.coords) + f2 = cat2.getGField(min_size=min_size, max_size=max_size, + split_method=self.split_method, + brute=self.brute is True or self.brute == 2, + min_top=self.min_top, max_top=self.max_top, + coords=self.coords) + f3 = cat3.getGField(min_size=min_size, max_size=max_size, + split_method=self.split_method, + brute=self.brute is True or self.brute == 3, + min_top=self.min_top, max_top=self.max_top, + coords=self.coords) self.logger.info('Starting %d jobs.',f1.nTopLevelNodes) _lib.ProcessCross3(self.g1g2g3.corr, self.g1g3g2.corr, diff --git a/treecorr/kgcorrelation.py b/treecorr/kgcorrelation.py index 6098ca83..48fca86d 100644 --- a/treecorr/kgcorrelation.py +++ b/treecorr/kgcorrelation.py @@ -203,12 +203,16 @@ def process_cross(self, cat1, cat2, metric=None, num_threads=None): self._set_num_threads(num_threads) min_size, max_size = self._get_minmax_size() - f1 = cat1.getKField(min_size, max_size, self.split_method, - self.brute is True or self.brute == 1, - self.min_top, self.max_top, self.coords) - f2 = cat2.getGField(min_size, max_size, self.split_method, - self.brute is True or self.brute == 2, - self.min_top, self.max_top, self.coords) + f1 = cat1.getKField(min_size=min_size, max_size=max_size, + split_method=self.split_method, + brute=self.brute is True or self.brute == 1, + min_top=self.min_top, max_top=self.max_top, + coords=self.coords) + f2 = cat2.getGField(min_size=min_size, max_size=max_size, + split_method=self.split_method, + brute=self.brute is True or self.brute == 2, + min_top=self.min_top, max_top=self.max_top, + coords=self.coords) self.logger.info('Starting %d jobs.',f1.nTopLevelNodes) _lib.ProcessCross2(self.corr, f1.data, f2.data, self.output_dots, diff --git a/treecorr/kkcorrelation.py b/treecorr/kkcorrelation.py index 6ba80e73..65bfaedc 100644 --- a/treecorr/kkcorrelation.py +++ b/treecorr/kkcorrelation.py @@ -199,8 +199,10 @@ def process_auto(self, cat, metric=None, num_threads=None): self._set_num_threads(num_threads) min_size, max_size = self._get_minmax_size() - field = cat.getKField(min_size, max_size, self.split_method, - bool(self.brute), self.min_top, self.max_top, self.coords) + field = cat.getKField(min_size=min_size, max_size=max_size, + split_method=self.split_method, brute=bool(self.brute), + min_top=self.min_top, max_top=self.max_top, + coords=self.coords) self.logger.info('Starting %d jobs.',field.nTopLevelNodes) _lib.ProcessAuto2(self.corr, field.data, self.output_dots, @@ -234,12 +236,16 @@ def process_cross(self, cat1, cat2, metric=None, num_threads=None): self._set_num_threads(num_threads) min_size, max_size = self._get_minmax_size() - f1 = cat1.getKField(min_size, max_size, self.split_method, - self.brute is True or self.brute == 1, - self.min_top, self.max_top, self.coords) - f2 
= cat2.getKField(min_size, max_size, self.split_method,
-                            self.brute is True or self.brute == 2,
-                            self.min_top, self.max_top, self.coords)
+        f1 = cat1.getKField(min_size=min_size, max_size=max_size,
+                            split_method=self.split_method,
+                            brute=self.brute is True or self.brute == 1,
+                            min_top=self.min_top, max_top=self.max_top,
+                            coords=self.coords)
+        f2 = cat2.getKField(min_size=min_size, max_size=max_size,
+                            split_method=self.split_method,
+                            brute=self.brute is True or self.brute == 2,
+                            min_top=self.min_top, max_top=self.max_top,
+                            coords=self.coords)

         self.logger.info('Starting %d jobs.',f1.nTopLevelNodes)
         _lib.ProcessCross2(self.corr, f1.data, f2.data, self.output_dots,
diff --git a/treecorr/kkkcorrelation.py b/treecorr/kkkcorrelation.py
index 55aa0105..ba2e312e 100644
--- a/treecorr/kkkcorrelation.py
+++ b/treecorr/kkkcorrelation.py
@@ -232,8 +232,10 @@ def process_auto(self, cat, metric=None, num_threads=None):
         self._set_num_threads(num_threads)

         min_size, max_size = self._get_minmax_size()
-        field = cat.getKField(min_size, max_size, self.split_method,
-                              bool(self.brute), self.min_top, self.max_top, self.coords)
+        field = cat.getKField(min_size=min_size, max_size=max_size,
+                              split_method=self.split_method, brute=bool(self.brute),
+                              min_top=self.min_top, max_top=self.max_top,
+                              coords=self.coords)

         self.logger.info('Starting %d jobs.',field.nTopLevelNodes)
         _lib.ProcessAuto3(self.corr, field.data, self.output_dots,
@@ -270,10 +272,16 @@ def process_cross12(self, cat1, cat2, metric=None, num_threads=None):
         self._set_num_threads(num_threads)

         min_size, max_size = self._get_minmax_size()
-        f1 = cat1.getKField(min_size, max_size, self.split_method,
-                            bool(self.brute), self.min_top, self.max_top, self.coords)
-        f2 = cat2.getKField(min_size, max_size, self.split_method,
-                            bool(self.brute), self.min_top, self.max_top, self.coords)
+        f1 = cat1.getKField(min_size=min_size, max_size=max_size,
+                            split_method=self.split_method,
+                            brute=self.brute is True or self.brute == 1,
+                            min_top=self.min_top, max_top=self.max_top,
+                            coords=self.coords)
+        f2 = cat2.getKField(min_size=min_size, max_size=max_size,
+                            split_method=self.split_method,
+                            brute=self.brute is True or self.brute == 2,
+                            min_top=self.min_top, max_top=self.max_top,
+                            coords=self.coords)

         self.logger.info('Starting %d jobs.',f1.nTopLevelNodes)
         # Note: all 3 correlation objects are the same.
Thus, all triangles will be placed @@ -312,12 +320,21 @@ def process_cross(self, cat1, cat2, cat3, metric=None, num_threads=None): self._set_num_threads(num_threads) min_size, max_size = self._get_minmax_size() - f1 = cat1.getKField(min_size, max_size, self.split_method, - bool(self.brute), self.min_top, self.max_top, self.coords) - f2 = cat2.getKField(min_size, max_size, self.split_method, - bool(self.brute), self.min_top, self.max_top, self.coords) - f3 = cat3.getKField(min_size, max_size, self.split_method, - bool(self.brute), self.min_top, self.max_top, self.coords) + f1 = cat1.getKField(min_size=min_size, max_size=max_size, + split_method=self.split_method, + brute=self.brute is True or self.brute == 1, + min_top=self.min_top, max_top=self.max_top, + coords=self.coords) + f2 = cat2.getKField(min_size=min_size, max_size=max_size, + split_method=self.split_method, + brute=self.brute is True or self.brute == 2, + min_top=self.min_top, max_top=self.max_top, + coords=self.coords) + f3 = cat3.getKField(min_size=min_size, max_size=max_size, + split_method=self.split_method, + brute=self.brute is True or self.brute == 3, + min_top=self.min_top, max_top=self.max_top, + coords=self.coords) self.logger.info('Starting %d jobs.',f1.nTopLevelNodes) # Note: all 6 correlation objects are the same. Thus, all triangles will be placed @@ -812,10 +829,16 @@ def process_cross12(self, cat1, cat2, metric=None, num_threads=None): self._set_num_threads(num_threads) min_size, max_size = self._get_minmax_size() - f1 = cat1.getKField(min_size, max_size, self.split_method, - bool(self.brute), self.min_top, self.max_top, self.coords) - f2 = cat2.getKField(min_size, max_size, self.split_method, - bool(self.brute), self.min_top, self.max_top, self.coords) + f1 = cat1.getKField(min_size=min_size, max_size=max_size, + split_method=self.split_method, + brute=self.brute is True or self.brute == 1, + min_top=self.min_top, max_top=self.max_top, + coords=self.coords) + f2 = cat2.getKField(min_size=min_size, max_size=max_size, + split_method=self.split_method, + brute=self.brute is True or self.brute == 2, + min_top=self.min_top, max_top=self.max_top, + coords=self.coords) self.logger.info('Starting %d jobs.',f1.nTopLevelNodes) # Note: all 3 correlation objects are the same. 
Thus, all triangles will be placed @@ -855,12 +878,21 @@ def process_cross(self, cat1, cat2, cat3, metric=None, num_threads=None): self._set_num_threads(num_threads) min_size, max_size = self._get_minmax_size() - f1 = cat1.getKField(min_size, max_size, self.split_method, - bool(self.brute), self.min_top, self.max_top, self.coords) - f2 = cat2.getKField(min_size, max_size, self.split_method, - bool(self.brute), self.min_top, self.max_top, self.coords) - f3 = cat3.getKField(min_size, max_size, self.split_method, - bool(self.brute), self.min_top, self.max_top, self.coords) + f1 = cat1.getKField(min_size=min_size, max_size=max_size, + split_method=self.split_method, + brute=self.brute is True or self.brute == 1, + min_top=self.min_top, max_top=self.max_top, + coords=self.coords) + f2 = cat2.getKField(min_size=min_size, max_size=max_size, + split_method=self.split_method, + brute=self.brute is True or self.brute == 2, + min_top=self.min_top, max_top=self.max_top, + coords=self.coords) + f3 = cat3.getKField(min_size=min_size, max_size=max_size, + split_method=self.split_method, + brute=self.brute is True or self.brute == 3, + min_top=self.min_top, max_top=self.max_top, + coords=self.coords) self.logger.info('Starting %d jobs.',f1.nTopLevelNodes) _lib.ProcessCross3(self.k1k2k3.corr, self.k1k3k2.corr, diff --git a/treecorr/ngcorrelation.py b/treecorr/ngcorrelation.py index 5f7dab94..9a3ff187 100644 --- a/treecorr/ngcorrelation.py +++ b/treecorr/ngcorrelation.py @@ -215,12 +215,16 @@ def process_cross(self, cat1, cat2, metric=None, num_threads=None): self._set_num_threads(num_threads) min_size, max_size = self._get_minmax_size() - f1 = cat1.getNField(min_size, max_size, self.split_method, - self.brute is True or self.brute == 1, - self.min_top, self.max_top, self.coords) - f2 = cat2.getGField(min_size, max_size, self.split_method, - self.brute is True or self.brute == 2, - self.min_top, self.max_top, self.coords) + f1 = cat1.getNField(min_size=min_size, max_size=max_size, + split_method=self.split_method, + brute=self.brute is True or self.brute == 1, + min_top=self.min_top, max_top=self.max_top, + coords=self.coords) + f2 = cat2.getGField(min_size=min_size, max_size=max_size, + split_method=self.split_method, + brute=self.brute is True or self.brute == 2, + min_top=self.min_top, max_top=self.max_top, + coords=self.coords) self.logger.info('Starting %d jobs.',f1.nTopLevelNodes) _lib.ProcessCross2(self.corr, f1.data, f2.data, self.output_dots, diff --git a/treecorr/nkcorrelation.py b/treecorr/nkcorrelation.py index 6e53996b..0b41fb39 100644 --- a/treecorr/nkcorrelation.py +++ b/treecorr/nkcorrelation.py @@ -214,12 +214,16 @@ def process_cross(self, cat1, cat2, metric=None, num_threads=None): self._set_num_threads(num_threads) min_size, max_size = self._get_minmax_size() - f1 = cat1.getNField(min_size, max_size, self.split_method, - self.brute is True or self.brute == 1, - self.min_top, self.max_top, self.coords) - f2 = cat2.getKField(min_size, max_size, self.split_method, - self.brute is True or self.brute == 2, - self.min_top, self.max_top, self.coords) + f1 = cat1.getNField(min_size=min_size, max_size=max_size, + split_method=self.split_method, + brute=self.brute is True or self.brute == 1, + min_top=self.min_top, max_top=self.max_top, + coords=self.coords) + f2 = cat2.getKField(min_size=min_size, max_size=max_size, + split_method=self.split_method, + brute=self.brute is True or self.brute == 2, + min_top=self.min_top, max_top=self.max_top, + coords=self.coords) self.logger.info('Starting %d 
jobs.',f1.nTopLevelNodes) _lib.ProcessCross2(self.corr, f1.data, f2.data, self.output_dots, diff --git a/treecorr/nncorrelation.py b/treecorr/nncorrelation.py index 370eb31e..57c43c7b 100644 --- a/treecorr/nncorrelation.py +++ b/treecorr/nncorrelation.py @@ -218,8 +218,10 @@ def process_auto(self, cat, metric=None, num_threads=None): self._set_num_threads(num_threads) min_size, max_size = self._get_minmax_size() - field = cat.getNField(min_size, max_size, self.split_method, - bool(self.brute), self.min_top, self.max_top, self.coords) + field = cat.getNField(min_size=min_size, max_size=max_size, + split_method=self.split_method, brute=bool(self.brute), + min_top=self.min_top, max_top=self.max_top, + coords=self.coords) self.logger.info('Starting %d jobs.',field.nTopLevelNodes) _lib.ProcessAuto2(self.corr, field.data, self.output_dots, @@ -254,12 +256,16 @@ def process_cross(self, cat1, cat2, metric=None, num_threads=None): self._set_num_threads(num_threads) min_size, max_size = self._get_minmax_size() - f1 = cat1.getNField(min_size, max_size, self.split_method, - self.brute is True or self.brute == 1, - self.min_top, self.max_top, self.coords) - f2 = cat2.getNField(min_size, max_size, self.split_method, - self.brute is True or self.brute == 2, - self.min_top, self.max_top, self.coords) + f1 = cat1.getNField(min_size=min_size, max_size=max_size, + split_method=self.split_method, + brute=self.brute is True or self.brute == 1, + min_top=self.min_top, max_top=self.max_top, + coords=self.coords) + f2 = cat2.getNField(min_size=min_size, max_size=max_size, + split_method=self.split_method, + brute=self.brute is True or self.brute == 2, + min_top=self.min_top, max_top=self.max_top, + coords=self.coords) self.logger.info('Starting %d jobs.',f1.nTopLevelNodes) _lib.ProcessCross2(self.corr, f1.data, f2.data, self.output_dots, diff --git a/treecorr/nnncorrelation.py b/treecorr/nnncorrelation.py index 4ba04cf7..bb64b22a 100644 --- a/treecorr/nnncorrelation.py +++ b/treecorr/nnncorrelation.py @@ -255,8 +255,10 @@ def process_auto(self, cat, metric=None, num_threads=None): self._set_num_threads(num_threads) min_size, max_size = self._get_minmax_size() - field = cat.getNField(min_size, max_size, self.split_method, - bool(self.brute), self.min_top, self.max_top, self.coords) + field = cat.getNField(min_size=min_size, max_size=max_size, + split_method=self.split_method, brute=bool(self.brute), + min_top=self.min_top, max_top=self.max_top, + coords=self.coords) self.logger.info('Starting %d jobs.',field.nTopLevelNodes) _lib.ProcessAuto3(self.corr, field.data, self.output_dots, @@ -294,10 +296,16 @@ def process_cross12(self, cat1, cat2, metric=None, num_threads=None): self._set_num_threads(num_threads) min_size, max_size = self._get_minmax_size() - f1 = cat1.getNField(min_size, max_size, self.split_method, - bool(self.brute), self.min_top, self.max_top, self.coords) - f2 = cat2.getNField(min_size, max_size, self.split_method, - bool(self.brute), self.min_top, self.max_top, self.coords) + f1 = cat1.getNField(min_size=min_size, max_size=max_size, + split_method=self.split_method, + brute=self.brute is True or self.brute == 1, + min_top=self.min_top, max_top=self.max_top, + coords=self.coords) + f2 = cat2.getNField(min_size=min_size, max_size=max_size, + split_method=self.split_method, + brute=self.brute is True or self.brute == 2, + min_top=self.min_top, max_top=self.max_top, + coords=self.coords) self.logger.info('Starting %d jobs.',f1.nTopLevelNodes) # Note: all 3 correlation objects are the same. 
Thus, all triangles will be placed @@ -337,12 +345,21 @@ def process_cross(self, cat1, cat2, cat3, metric=None, num_threads=None): self._set_num_threads(num_threads) min_size, max_size = self._get_minmax_size() - f1 = cat1.getNField(min_size, max_size, self.split_method, - bool(self.brute), self.min_top, self.max_top, self.coords) - f2 = cat2.getNField(min_size, max_size, self.split_method, - bool(self.brute), self.min_top, self.max_top, self.coords) - f3 = cat3.getNField(min_size, max_size, self.split_method, - bool(self.brute), self.min_top, self.max_top, self.coords) + f1 = cat1.getNField(min_size=min_size, max_size=max_size, + split_method=self.split_method, + brute=self.brute is True or self.brute == 1, + min_top=self.min_top, max_top=self.max_top, + coords=self.coords) + f2 = cat2.getNField(min_size=min_size, max_size=max_size, + split_method=self.split_method, + brute=self.brute is True or self.brute == 2, + min_top=self.min_top, max_top=self.max_top, + coords=self.coords) + f3 = cat3.getNField(min_size=min_size, max_size=max_size, + split_method=self.split_method, + brute=self.brute is True or self.brute == 3, + min_top=self.min_top, max_top=self.max_top, + coords=self.coords) self.logger.info('Starting %d jobs.',f1.nTopLevelNodes) # Note: all 6 correlation objects are the same. Thus, all triangles will be placed @@ -1137,10 +1154,16 @@ def process_cross12(self, cat1, cat2, metric=None, num_threads=None): self._set_num_threads(num_threads) min_size, max_size = self._get_minmax_size() - f1 = cat1.getNField(min_size, max_size, self.split_method, - bool(self.brute), self.min_top, self.max_top, self.coords) - f2 = cat2.getNField(min_size, max_size, self.split_method, - bool(self.brute), self.min_top, self.max_top, self.coords) + f1 = cat1.getNField(min_size=min_size, max_size=max_size, + split_method=self.split_method, + brute=self.brute is True or self.brute == 1, + min_top=self.min_top, max_top=self.max_top, + coords=self.coords) + f2 = cat2.getNField(min_size=min_size, max_size=max_size, + split_method=self.split_method, + brute=self.brute is True or self.brute == 2, + min_top=self.min_top, max_top=self.max_top, + coords=self.coords) self.logger.info('Starting %d jobs.',f1.nTopLevelNodes) # Note: all 3 correlation objects are the same. 
Thus, all triangles will be placed @@ -1185,12 +1208,21 @@ def process_cross(self, cat1, cat2, cat3, metric=None, num_threads=None): self._set_num_threads(num_threads) min_size, max_size = self._get_minmax_size() - f1 = cat1.getNField(min_size, max_size, self.split_method, - bool(self.brute), self.min_top, self.max_top, self.coords) - f2 = cat2.getNField(min_size, max_size, self.split_method, - bool(self.brute), self.min_top, self.max_top, self.coords) - f3 = cat3.getNField(min_size, max_size, self.split_method, - bool(self.brute), self.min_top, self.max_top, self.coords) + f1 = cat1.getNField(min_size=min_size, max_size=max_size, + split_method=self.split_method, + brute=self.brute is True or self.brute == 1, + min_top=self.min_top, max_top=self.max_top, + coords=self.coords) + f2 = cat2.getNField(min_size=min_size, max_size=max_size, + split_method=self.split_method, + brute=self.brute is True or self.brute == 2, + min_top=self.min_top, max_top=self.max_top, + coords=self.coords) + f3 = cat3.getNField(min_size=min_size, max_size=max_size, + split_method=self.split_method, + brute=self.brute is True or self.brute == 3, + min_top=self.min_top, max_top=self.max_top, + coords=self.coords) self.logger.info('Starting %d jobs.',f1.nTopLevelNodes) _lib.ProcessCross3(self.n1n2n3.corr, self.n1n3n2.corr, From 7032545cfca6d3b9367c73a4355d009bc475bc1f Mon Sep 17 00:00:00 2001 From: Mike Jarvis Date: Fri, 4 Jun 2021 22:48:37 -0400 Subject: [PATCH 5/9] Make kwargs kw-only in field.py --- tests/test_kmeans.py | 16 ++++++++-------- treecorr/catalog.py | 38 ++++++++++++++++++++++++++------------ treecorr/field.py | 32 +++++++++++++++++++++----------- treecorr/util.py | 3 +++ 4 files changed, 58 insertions(+), 31 deletions(-) diff --git a/tests/test_kmeans.py b/tests/test_kmeans.py index 34b3aa27..f15fa4e0 100644 --- a/tests/test_kmeans.py +++ b/tests/test_kmeans.py @@ -472,7 +472,7 @@ def test_init_random(): print('3d with init=random') npatch = 10 field = cat.getNField() - cen1 = field.kmeans_initialize_centers(npatch, 'random') + cen1 = field.kmeans_initialize_centers(npatch, init='random') assert cen1.shape == (npatch, 3) p1 = field.kmeans_assign_patches(cen1) print('patches = ',np.unique(p1)) @@ -498,7 +498,7 @@ def test_init_random(): # Use a field with lots of top level cells print('3d with init=random, min_top=10') field = cat.getNField(min_top=10) - cen1 = field.kmeans_initialize_centers(npatch, 'random') + cen1 = field.kmeans_initialize_centers(npatch, init='random') assert cen1.shape == (npatch, 3) p1 = field.kmeans_assign_patches(cen1) print('patches = ',np.unique(p1)) @@ -525,7 +525,7 @@ def test_init_random(): cat = treecorr.Catalog(x=x, y=y) xy = np.array([x, y]).T field = cat.getNField() - cen1 = field.kmeans_initialize_centers(npatch, 'random') + cen1 = field.kmeans_initialize_centers(npatch, init='random') assert cen1.shape == (npatch, 2) p1 = field.kmeans_assign_patches(cen1) print('patches = ',np.unique(p1)) @@ -553,7 +553,7 @@ def test_init_random(): cat = treecorr.Catalog(ra=ra, dec=dec, ra_units='rad', dec_units='rad') xyz = np.array([cat.x, cat.y, cat.z]).T field = cat.getNField() - cen1 = field.kmeans_initialize_centers(npatch, 'random') + cen1 = field.kmeans_initialize_centers(npatch, init='random') assert cen1.shape == (npatch, 3) p1 = field.kmeans_assign_patches(cen1) print('patches = ',np.unique(p1)) @@ -621,7 +621,7 @@ def test_init_kmpp(): print('3d with init=kmeans++') npatch = 10 field = cat.getNField() - cen1 = field.kmeans_initialize_centers(npatch, 'kmeans++') + cen1 = 
field.kmeans_initialize_centers(npatch, init='kmeans++') assert cen1.shape == (npatch, 3) p1 = field.kmeans_assign_patches(cen1) print('patches = ',np.unique(p1)) @@ -647,7 +647,7 @@ def test_init_kmpp(): # Use a field with lots of top level cells print('3d with init=kmeans++, min_top=10') field = cat.getNField(min_top=10) - cen1 = field.kmeans_initialize_centers(npatch, 'kmeans++') + cen1 = field.kmeans_initialize_centers(npatch, init='kmeans++') assert cen1.shape == (npatch, 3) p1 = field.kmeans_assign_patches(cen1) print('patches = ',np.unique(p1)) @@ -674,7 +674,7 @@ def test_init_kmpp(): cat = treecorr.Catalog(x=x, y=y) xy = np.array([x, y]).T field = cat.getNField() - cen1 = field.kmeans_initialize_centers(npatch, 'kmeans++') + cen1 = field.kmeans_initialize_centers(npatch, init='kmeans++') assert cen1.shape == (npatch, 2) p1 = field.kmeans_assign_patches(cen1) print('patches = ',np.unique(p1)) @@ -702,7 +702,7 @@ def test_init_kmpp(): cat = treecorr.Catalog(ra=ra, dec=dec, ra_units='rad', dec_units='rad') xyz = np.array([cat.x, cat.y, cat.z]).T field = cat.getNField() - cen1 = field.kmeans_initialize_centers(npatch, 'kmeans++') + cen1 = field.kmeans_initialize_centers(npatch, init='kmeans++') assert cen1.shape == (npatch, 3) p1 = field.kmeans_assign_patches(cen1) print('patches = ',np.unique(p1)) diff --git a/treecorr/catalog.py b/treecorr/catalog.py index 0b3cdb8f..3444dfdd 100644 --- a/treecorr/catalog.py +++ b/treecorr/catalog.py @@ -1481,8 +1481,14 @@ def set_patch(data, patch_col): def nfields(self): if not hasattr(self, '_nfields'): # Make simple functions that call NField, etc. with self as the first argument. - def get_nfield(*args, **kwargs): - return NField(self, *args, **kwargs) + # Note: LRU_Cache keys on the args, not kwargs, so everything but logger should + # be in args for this function. We convert them to kwargs for the NFields init call. + def get_nfield(min_size, max_size, split_method, brute, min_top, max_top, coords, + rng, logger=None): + return NField(self, min_size=min_size, max_size=max_size, + split_method=split_method, brute=brute, + min_top=min_top, max_top=max_top, coords=coords, + rng=rng, logger=logger) # Now wrap these in LRU_Caches with (initially) just 1 element being cached. 
self._nfields = LRU_Cache(get_nfield, 1) return self._nfields @@ -1490,40 +1496,48 @@ def get_nfield(*args, **kwargs): @property def kfields(self): if not hasattr(self, '_kfields'): - def get_kfield(*args, **kwargs): - return KField(self, *args, **kwargs) + def get_kfield(min_size, max_size, split_method, brute, min_top, max_top, coords, + rng, logger=None): + return KField(self, min_size=min_size, max_size=max_size, + split_method=split_method, brute=brute, + min_top=min_top, max_top=max_top, coords=coords, + rng=rng, logger=logger) self._kfields = LRU_Cache(get_kfield, 1) return self._kfields @property def gfields(self): if not hasattr(self, '_gfields'): - def get_gfield(*args, **kwargs): - return GField(self, *args, **kwargs) + def get_gfield(min_size, max_size, split_method, brute, min_top, max_top, coords, + rng, logger=None): + return GField(self, min_size=min_size, max_size=max_size, + split_method=split_method, brute=brute, + min_top=min_top, max_top=max_top, coords=coords, + rng=rng, logger=logger) self._gfields = LRU_Cache(get_gfield, 1) return self._gfields @property def nsimplefields(self): if not hasattr(self, '_nsimplefields'): - def get_nsimplefield(*args,**kwargs): - return NSimpleField(self,*args,**kwargs) + def get_nsimplefield(logger=None): + return NSimpleField(self, logger=logger) self._nsimplefields = LRU_Cache(get_nsimplefield, 1) return self._nsimplefields @property def ksimplefields(self): if not hasattr(self, '_ksimplefields'): - def get_ksimplefield(*args,**kwargs): - return KSimpleField(self,*args,**kwargs) + def get_ksimplefield(logger=None): + return KSimpleField(self, logger=logger) self._ksimplefields = LRU_Cache(get_ksimplefield, 1) return self._ksimplefields @property def gsimplefields(self): if not hasattr(self, '_gsimplefields'): - def get_gsimplefield(*args,**kwargs): - return GSimpleField(self,*args,**kwargs) + def get_gsimplefield(logger=None): + return GSimpleField(self, logger=logger) self._gsimplefields = LRU_Cache(get_gsimplefield, 1) return self._gsimplefields diff --git a/treecorr/field.py b/treecorr/field.py index cf7e8a4f..cdf05ec1 100644 --- a/treecorr/field.py +++ b/treecorr/field.py @@ -22,6 +22,7 @@ from .util import get_omp_threads, parse_xyzsep, coord_enum from .util import long_ptr as lp from .util import double_ptr as dp +from .util import depr_pos_kwargs def _parse_split_method(split_method): if split_method == 'middle': return 0 @@ -263,7 +264,8 @@ def _get_near(self, x, y, z, sep): _lib.FieldGetNear(self.data, x, y, z, sep, self._d, self._coords, lp(ind), n) return ind - def run_kmeans(self, npatch, max_iter=200, tol=1.e-5, init='tree', alt=False, rng=None): + @depr_pos_kwargs + def run_kmeans(self, npatch, *, max_iter=200, tol=1.e-5, init='tree', alt=False, rng=None): r"""Use k-means algorithm to set patch labels for a field. The k-means algorithm (cf. https://en.wikipedia.org/wiki/K-means_clustering) identifies @@ -374,12 +376,13 @@ def run_kmeans(self, npatch, max_iter=200, tol=1.e-5, init='tree', alt=False, rn spherical geometries. In the latter case, the centers represent (x,y,z) coordinates on the unit sphere. 
""" - centers = self.kmeans_initialize_centers(npatch, init, rng) - self.kmeans_refine_centers(centers, max_iter, tol, alt) + centers = self.kmeans_initialize_centers(npatch, init=init, rng=rng) + self.kmeans_refine_centers(centers, max_iter=max_iter, tol=tol, alt=alt) patches = self.kmeans_assign_patches(centers) return patches, centers - def kmeans_initialize_centers(self, npatch, init='tree', rng=None): + @depr_pos_kwargs + def kmeans_initialize_centers(self, npatch, *, init='tree', rng=None): """Use the field's tree structure to assign good initial centers for a K-Means run. The classic K-Means algorithm involves starting with random points as the initial @@ -435,7 +438,8 @@ def kmeans_initialize_centers(self, npatch, init='tree', rng=None): return centers - def kmeans_refine_centers(self, centers, max_iter=200, tol=1.e-5, alt=False): + @depr_pos_kwargs + def kmeans_refine_centers(self, centers, *, max_iter=200, tol=1.e-5, alt=False): """Fast implementation of the K-Means algorithm The standard K-Means algorithm is as follows @@ -530,7 +534,8 @@ class NField(Field): number generation. (default: None) logger (Logger): A logger file if desired. (default: None) """ - def __init__(self, cat, min_size=0, max_size=None, split_method='mean', brute=False, + @depr_pos_kwargs + def __init__(self, cat, *, min_size=0, max_size=None, split_method='mean', brute=False, min_top=None, max_top=10, coords=None, rng=None, logger=None): if logger: if cat.name != '': @@ -596,7 +601,8 @@ class KField(Field): number generation. (default: None) logger (Logger): A logger file if desired. (default: None) """ - def __init__(self, cat, min_size=0, max_size=None, split_method='mean', brute=False, + @depr_pos_kwargs + def __init__(self, cat, *, min_size=0, max_size=None, split_method='mean', brute=False, min_top=None, max_top=10, coords=None, rng=None, logger=None): if logger: if cat.name != '': @@ -660,7 +666,8 @@ class GField(Field): number generation. (default: None) logger (Logger): A logger file if desired. (default: None) """ - def __init__(self, cat, min_size=0, max_size=None, split_method='mean', brute=False, + @depr_pos_kwargs + def __init__(self, cat, *, min_size=0, max_size=None, split_method='mean', brute=False, min_top=None, max_top=10, coords=None, rng=None, logger=None): if logger: if cat.name != '': @@ -740,7 +747,8 @@ class NSimpleField(SimpleField): cat (Catalog): The catalog from which to make the field. logger (Logger): A logger file if desired. (default: None) """ - def __init__(self, cat, logger=None): + @depr_pos_kwargs + def __init__(self, cat, *, logger=None): if logger: if cat.name != '': logger.info('Building NSimpleField from cat %s',cat.name) @@ -784,7 +792,8 @@ class KSimpleField(SimpleField): cat (Catalog): The catalog from which to make the field. logger (Logger): A logger file if desired. (default: None) """ - def __init__(self, cat, logger=None): + @depr_pos_kwargs + def __init__(self, cat, *, logger=None): if logger: if cat.name != '': logger.info('Building KSimpleField from cat %s',cat.name) @@ -829,7 +838,8 @@ class GSimpleField(SimpleField): cat (Catalog): The catalog from which to make the field. logger (Logger): A logger file if desired. 
(default: None) """ - def __init__(self, cat, logger=None): + @depr_pos_kwargs + def __init__(self, cat, *, logger=None): if logger: if cat.name != '': logger.info('Building GSimpleField from cat %s',cat.name) diff --git a/treecorr/util.py b/treecorr/util.py index a2a3dd17..74d3e2af 100644 --- a/treecorr/util.py +++ b/treecorr/util.py @@ -946,6 +946,9 @@ def func_with_kwargs(a, *, b=3, c=4): params = inspect.signature(fn).parameters nparams = len(params) + # NB. This will trigger a TypeError on initial use if there are no kw-only parameters. + # This is actually a feature, since it's probably a sign that the developer forgot to + # add the *, item to the parameter list. This decorator doesn't make sense without it. nkwargs = len(fn.__kwdefaults__) npos = nparams - nkwargs From 372555be7f51091b6d4413b9ea30347d7f60ff56 Mon Sep 17 00:00:00 2001 From: Mike Jarvis Date: Sat, 5 Jun 2021 01:46:33 -0400 Subject: [PATCH 6/9] Convert kwargs to kw-only for BinnedCorr2 and all 2pt Correlation classes --- tests/test_gg.py | 13 +++++- tests/test_ng.py | 22 ++++++++-- tests/test_nk.py | 12 ++++-- tests/test_nn.py | 40 +++++++++++-------- tests/test_patch.py | 84 +++++++++++++++++++-------------------- tests/test_patch3pt.py | 18 ++++----- tests/test_periodic.py | 2 +- tests/test_rperp.py | 4 +- treecorr/binnedcorr2.py | 35 +++++++++------- treecorr/binnedcorr3.py | 2 +- treecorr/corr2.py | 40 +++++++++---------- treecorr/ggcorrelation.py | 38 ++++++++++++------ treecorr/kgcorrelation.py | 23 ++++++----- treecorr/kkcorrelation.py | 25 +++++++----- treecorr/ngcorrelation.py | 42 ++++++++++++-------- treecorr/nkcorrelation.py | 28 ++++++++----- treecorr/nncorrelation.py | 42 ++++++++++++-------- treecorr/util.py | 7 +--- 18 files changed, 281 insertions(+), 196 deletions(-) diff --git a/tests/test_gg.py b/tests/test_gg.py index 991ce016..48e9fdc6 100644 --- a/tests/test_gg.py +++ b/tests/test_gg.py @@ -667,7 +667,7 @@ def test_mapsq(): # (We provide the range where the results worked out well above.) R = gg.rnom[16::2] print('R = ',R) - mapsq, mapsq_im, mxsq, mxsq_im, varmapsq = gg.calculateMapSq(R) + mapsq, mapsq_im, mxsq, mxsq_im, varmapsq = gg.calculateMapSq(R=R) true_mapsq = true_mapsq[16::2] print('mapsq = ',mapsq) print('true_mapsq = ',true_mapsq) @@ -679,6 +679,15 @@ def test_mapsq(): print('max = ',max(abs(mxsq))) np.testing.assert_allclose(mxsq, 0., atol=3.e-8) + # Giving R as a positional argument is currently still allowed, but deprecated. + with assert_warns(FutureWarning): + mapsq_2, mapsq_im_2, mxsq_2, mxsq_im_2, varmapsq_2 = gg.calculateMapSq(R) + np.testing.assert_array_equal(mapsq_2, mapsq) + np.testing.assert_array_equal(mapsq_im_2, mapsq_im) + np.testing.assert_array_equal(mxsq_2, mxsq) + np.testing.assert_array_equal(mxsq_im_2, mxsq_im) + np.testing.assert_array_equal(varmapsq_2, varmapsq) + mapsq_file = 'output/gg_m2b.txt' gg.writeMapSq(mapsq_file, R=R, precision=16) data = np.genfromtxt(mapsq_file, names=True) @@ -750,7 +759,7 @@ def test_mapsq(): # (We provide the range where the results worked out well above.) 
R = gg.rnom[6:40:4] print('R = ',R) - gamsq, vargamsq, gamsq_e, gamsq_b, vargamsq_eb = gg.calculateGamSq(R, eb=True) + gamsq, vargamsq, gamsq_e, gamsq_b, vargamsq_eb = gg.calculateGamSq(R=R, eb=True) true_gamsq = true_gamsq[6:40:4] print('gamsq_e = ',gamsq_e) print('true_gamsq = ',true_gamsq) diff --git a/tests/test_ng.py b/tests/test_ng.py index ef8ae833..be3a222b 100644 --- a/tests/test_ng.py +++ b/tests/test_ng.py @@ -677,7 +677,7 @@ def test_ng(): verbose=1) rg.process(rand_cat, source_cat) print('rg.xi = ',rg.xi) - xi, xi_im, varxi = ng.calculateXi(rg) + xi, xi_im, varxi = ng.calculateXi(rg=rg) print('compensated xi = ',xi) print('compensated xi_im = ',xi_im) print('true_gammat = ',true_gt) @@ -689,6 +689,13 @@ def test_ng(): np.testing.assert_allclose(xi, true_gt, rtol=0.1) np.testing.assert_allclose(xi_im, 0, atol=5.e-3) + # rg is still allowed as a positional argument, but deprecated + with assert_warns(FutureWarning): + xi_2, xi_im_2, varxi_2 = ng.calculateXi(rg) + np.testing.assert_array_equal(xi_2, xi) + np.testing.assert_array_equal(xi_im_2, xi_im) + np.testing.assert_array_equal(varxi_2, varxi) + # Check that we get the same result using the corr2 function: lens_cat.write(os.path.join('data','ng_lens.fits')) source_cat.write(os.path.join('data','ng_source.fits')) @@ -730,7 +737,7 @@ def test_ng(): np.testing.assert_almost_equal(data['npairs'], ng.npairs) out_file_name2 = os.path.join('output','ng_out2.fits') - ng.write(out_file_name2, rg) + ng.write(out_file_name2, rg=rg) data = fitsio.read(out_file_name2) np.testing.assert_almost_equal(data['r_nom'], np.exp(ng.logr)) np.testing.assert_almost_equal(data['meanr'], ng.meanr) @@ -886,7 +893,7 @@ def test_nmap(): np.testing.assert_allclose(data['sig_nmap'], np.sqrt(varnmap), rtol=1.e-8) fits_name = os.path.join('output', 'ng_norm.zzz') - ng.writeNorm(fits_name, gg, dd, rr, file_type='fits') + ng.writeNorm(fits_name, gg=gg, dd=dd, rr=rr, file_type='fits') data = fitsio.read(fits_name) np.testing.assert_allclose(data['NMap'], nmap, rtol=1.e-6) np.testing.assert_allclose(data['NMx'], nmx, atol=1.e-6) @@ -898,6 +905,13 @@ def test_nmap(): np.testing.assert_allclose(data['NMap_norm'], nmap_norm, rtol=1.e-6) np.testing.assert_allclose(data['Nsq_Mapsq'], napsq_mapsq, rtol=1.e-6) + with assert_warns(FutureWarning): + # This one in particular is worth checking, since some kw-only args don't have defaults, + # so it didn't actually work with my original implementation of depr_pos_kwargs + ng.writeNorm(fits_name, gg, dd, rr, file_type='fits') + data2 = fitsio.read(fits_name) + np.testing.assert_array_equal(data2, data) + fits_name = os.path.join('output', 'ng_nmap2.fits') ng.writeNMap(fits_name, R=R, rg=rg) data = fitsio.read(fits_name) @@ -1315,7 +1329,7 @@ def test_varxi(): print('Compensated:') - all_xis = [ng.calculateXi(rg) for (ng,rg) in zip(all_ngs, all_rgs)] + all_xis = [ng.calculateXi(rg=rg) for (ng,rg) in zip(all_ngs, all_rgs)] mean_wt = np.mean([ng.weight for ng in all_ngs], axis=0) mean_xi = np.mean([xi[0] for xi in all_xis], axis=0) var_xi = np.var([xi[0] for xi in all_xis], axis=0) diff --git a/tests/test_nk.py b/tests/test_nk.py index aaf6b04a..03f7ab93 100644 --- a/tests/test_nk.py +++ b/tests/test_nk.py @@ -427,7 +427,7 @@ def test_nk(): verbose=1) rk.process(rand_cat, source_cat) print('rk.xi = ',rk.xi) - xi, varxi = nk.calculateXi(rk) + xi, varxi = nk.calculateXi(rk=rk) print('compensated xi = ',xi) print('true_kappa = ',true_k) print('ratio = ',xi / true_k) @@ -437,6 +437,12 @@ def test_nk(): # to the smallish number of
lenses, not to edge effects np.testing.assert_allclose(nk.xi, true_k, rtol=0.05, atol=1.e-3) + # rk is still allowed as a positional argument, but deprecated + with assert_warns(FutureWarning): + xi_2, varxi_2 = nk.calculateXi(rk) + np.testing.assert_array_equal(xi_2, xi) + np.testing.assert_array_equal(varxi_2, varxi) + # Check that we get the same result using the corr2 function lens_cat.write(os.path.join('data','nk_lens.fits')) source_cat.write(os.path.join('data','nk_source.fits')) @@ -475,7 +481,7 @@ def test_nk(): np.testing.assert_almost_equal(data['npairs'], nk.npairs) out_file_name2 = os.path.join('output','nk_out2.fits') - nk.write(out_file_name2, rk) + nk.write(out_file_name2, rk=rk) data = fitsio.read(out_file_name2) np.testing.assert_almost_equal(data['r_nom'], np.exp(nk.logr)) np.testing.assert_almost_equal(data['meanr'], nk.meanr) @@ -571,7 +577,7 @@ def test_varxi(): print('Compensated:') - all_xis = [nk.calculateXi(rk) for (nk,rk) in zip(all_nks, all_rks)] + all_xis = [nk.calculateXi(rk=rk) for (nk,rk) in zip(all_nks, all_rks)] mean_wt = np.mean([nk.weight for nk in all_nks], axis=0) mean_xi = np.mean([xi[0] for xi in all_xis], axis=0) var_xi = np.var([xi[0] for xi in all_xis], axis=0) diff --git a/tests/test_nn.py b/tests/test_nn.py index c491018c..ed5e5e46 100644 --- a/tests/test_nn.py +++ b/tests/test_nn.py @@ -481,13 +481,19 @@ def test_direct_count(): rr = treecorr.NNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins, brute=True, verbose=0) rr.process(rcat1,rcat2) - xi, varxi = dd.calculateXi(rr) + xi, varxi = dd.calculateXi(rr=rr) # After calling calculateXi, you can access the result via attributes np.testing.assert_array_equal(xi, dd.xi) np.testing.assert_array_equal(varxi, dd.varxi) np.testing.assert_array_equal(varxi, dd.cov.diagonal()) + # rr is still allowed as a positional argument, but deprecated + with assert_warns(FutureWarning): + xi_2, varxi_2 = dd.calculateXi(rr) + np.testing.assert_array_equal(xi_2, xi) + np.testing.assert_array_equal(varxi_2, varxi) + # First do this via the corr2 function. config = treecorr.config.read_config('configs/nn_direct.yaml') logger = treecorr.config.setup_logger(0) @@ -1117,7 +1123,7 @@ def test_direct_linear(): rd = treecorr.NNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins, brute=True, bin_type='Linear', verbose=0) rd.process(rcat1,cat2) - xi, varxi = dd.calculateXi(rr, dr, rd) + xi, varxi = dd.calculateXi(rr=rr, dr=dr, rd=rd) # After calling calculateXi, you can access the result via attributes np.testing.assert_array_equal(xi, dd.xi) @@ -1237,7 +1243,7 @@ def test_nn(): r = dd.meanr true_xi = 0.25/np.pi * (L/s)**2 * np.exp(-0.25*r**2/s**2) - 1. - xi, varxi = dd.calculateXi(rr,dr) + xi, varxi = dd.calculateXi(rr=rr,dr=dr) print('xi = ',xi) print('true_xi = ',true_xi) print('ratio = ',xi / true_xi) @@ -1250,7 +1256,7 @@ def test_nn(): np.testing.assert_allclose(np.log(np.abs(xi)), np.log(np.abs(true_xi)), atol=0.1*tol_factor) - simple_xi, simple_varxi = dd.calculateXi(rr) + simple_xi, simple_varxi = dd.calculateXi(rr=rr) print('simple xi = ',simple_xi) print('max rel diff = ',max(abs((simple_xi - true_xi)/true_xi))) # The simple calculation (i.e. 
dd/rr-1, rather than (dd-2dr+rr)/rr as above) is only @@ -1293,7 +1299,7 @@ def test_nn(): np.testing.assert_allclose(corr2_output['xi'], xi, rtol=1.e-3) # Check the read function (not at very high accuracy for the ASCII I/O) - dd.calculateXi(rr,dr) # reset this to the better calculation + dd.calculateXi(rr=rr, dr=dr) # reset this to the better calculation dd2 = treecorr.NNCorrelation(bin_size=0.1, min_sep=1., max_sep=25., sep_units='arcmin') dd2.read(out_file_name) np.testing.assert_allclose(dd2.logr, dd.logr, rtol=1.e-3) @@ -1320,7 +1326,7 @@ def test_nn(): np.testing.assert_almost_equal(header['tot'], dd.tot) out_file_name2 = os.path.join('output','nn_out2.fits') - dd.write(out_file_name2, rr) + dd.write(out_file_name2, rr=rr) data = fitsio.read(out_file_name2) np.testing.assert_almost_equal(data['r_nom'], np.exp(dd.logr)) np.testing.assert_almost_equal(data['meanr'], dd.meanr) @@ -1333,7 +1339,7 @@ def test_nn(): np.testing.assert_almost_equal(header['tot'], dd.tot) out_file_name3 = os.path.join('output','nn_out3.fits') - dd.write(out_file_name3, rr, dr) + dd.write(out_file_name3, rr=rr, dr=dr) data = fitsio.read(out_file_name3) np.testing.assert_almost_equal(data['r_nom'], np.exp(dd.logr)) np.testing.assert_almost_equal(data['meanr'], dd.meanr) @@ -1382,7 +1388,7 @@ def test_nn(): dd.write(out_file_name3, rd=dr) # Check the read function - dd.calculateXi(rr,dr) # gets xi, varxi back in dd + dd.calculateXi(rr=rr, dr=dr) # gets xi, varxi back in dd dd2 = treecorr.NNCorrelation(bin_size=0.1, min_sep=1., max_sep=25., sep_units='arcmin') dd2.read(out_file_name1) np.testing.assert_almost_equal(dd2.logr, dd.logr) @@ -1525,7 +1531,7 @@ def test_3d(): r = dd.meanr true_xi = 1./(8.*np.pi**1.5) * (L/s)**3 * np.exp(-0.25*r**2/s**2) - 1. - simple_xi, varxi = dd.calculateXi(rr) + simple_xi, varxi = dd.calculateXi(rr=rr) print('simple xi = ',simple_xi) print('true_xi = ',true_xi) print('max rel diff = ',max(abs((simple_xi - true_xi)/true_xi))) @@ -1533,7 +1539,7 @@ def test_3d(): np.testing.assert_allclose(np.log(np.abs(simple_xi)), np.log(np.abs(true_xi)), rtol=0.1*tol_factor) - xi, varxi = dd.calculateXi(rr,dr) + xi, varxi = dd.calculateXi(rr=rr, dr=dr) print('xi = ',xi) print('true_xi = ',true_xi) print('ratio = ',xi / true_xi) @@ -1574,7 +1580,7 @@ def test_3d(): dd.process(cat) rr.process(rand) dr.process(cat,rand) - xi, varxi = dd.calculateXi(rr,dr) + xi, varxi = dd.calculateXi(rr=rr, dr=dr) np.testing.assert_allclose(xi, true_xi, rtol=0.1*tol_factor) np.testing.assert_allclose(np.log(np.abs(xi)), np.log(np.abs(true_xi)), rtol=0.1*tol_factor) @@ -1609,7 +1615,7 @@ def test_list(): rr.process(rand_cats) print('rr.npairs = ',rr.npairs) - xi, varxi = dd.calculateXi(rr) + xi, varxi = dd.calculateXi(rr=rr) print('xi = ',xi) # Now do the same thing with one big catalog for each. 
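A note on the mechanism being exercised here: the FutureWarning asserted throughout these tests is emitted by the depr_pos_kwargs decorator added to treecorr/util.py in PATCH 5. The following is only a minimal sketch of how such a decorator can work (TreeCorr's actual implementation differs in its details); it counts how many parameters may legally be passed positionally and, when a caller exceeds that, warns and remaps the extras onto the keyword-only names::

    import functools
    import inspect
    import warnings

    def depr_pos_kwargs(fn):
        # Sketch only: permit keyword-only arguments to be given positionally
        # for now, but emit a FutureWarning when they are.
        params = inspect.signature(fn).parameters
        # Keyword-only parameter names, in declaration order.
        kwonly = [name for name, p in params.items()
                  if p.kind == inspect.Parameter.KEYWORD_ONLY]
        # Number of arguments that may still be passed positionally (incl. self).
        npos = sum(p.kind in (inspect.Parameter.POSITIONAL_ONLY,
                              inspect.Parameter.POSITIONAL_OR_KEYWORD)
                   for p in params.values())

        @functools.wraps(fn)
        def wrapper(*args, **kwargs):
            if len(args) > npos:
                extra = args[npos:]
                names = kwonly[:len(extra)]
                warnings.warn(
                    "Arguments %s to %s should now be given as keyword arguments."
                    % (names, fn.__name__), FutureWarning)
                kwargs.update(zip(names, extra))
                args = args[:npos]
            return fn(*args, **kwargs)
        return wrapper

Taking the keyword-only names from the signature, rather than from fn.__kwdefaults__, also covers the case flagged in the writeNorm test above, where some keyword-only arguments have no defaults.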
@@ -1619,7 +1625,7 @@ def test_list(): rand_catx = treecorr.Catalog(x=rx.reshape( (nobj*ncats,) ), y=ry.reshape( (nobj*ncats,) )) ddx.process(data_catx) rrx.process(rand_catx) - xix, varxix = ddx.calculateXi(rrx) + xix, varxix = ddx.calculateXi(rr=rrx) print('ddx.npairs = ',ddx.npairs) print('rrx.npairs = ',rrx.npairs) @@ -1874,7 +1880,7 @@ def test_varxi(): print('Uncompensated:') - all_xis = [dd.calculateXi(rr) for dd,rr in zip(all_dds, all_rrs)] + all_xis = [dd.calculateXi(rr=rr) for dd,rr in zip(all_dds, all_rrs)] mean_wt = np.mean([dd.weight for dd in all_dds], axis=0) mean_np = np.mean([dd.npairs for dd in all_dds], axis=0) mean_xi = np.mean([xi[0] for xi in all_xis], axis=0) @@ -1893,7 +1899,7 @@ def test_varxi(): print('Compensated:') - all_xis = [dd.calculateXi(rr, dr) for dd,dr,rr in zip(all_dds, all_drs, all_rrs)] + all_xis = [dd.calculateXi(rr=rr, dr=dr) for dd,dr,rr in zip(all_dds, all_drs, all_rrs)] mean_wt = np.mean([dd.weight for dd in all_dds], axis=0) mean_np = np.mean([dd.npairs for dd in all_dds], axis=0) mean_xi = np.mean([xi[0] for xi in all_xis], axis=0) @@ -1912,7 +1918,7 @@ def test_varxi(): print('Compensated with both dr and rd:') - all_xis = [dd.calculateXi(rr, dr, dr) for dd,dr,rr in zip(all_dds, all_drs, all_rrs)] + all_xis = [dd.calculateXi(rr=rr, dr=dr, rd=dr) for dd,dr,rr in zip(all_dds, all_drs, all_rrs)] mean_wt = np.mean([dd.weight for dd in all_dds], axis=0) mean_xi = np.mean([xi[0] for xi in all_xis], axis=0) var_xi = np.var([xi[0] for xi in all_xis], axis=0) @@ -1929,7 +1935,7 @@ def test_varxi(): print('Compensated with just rd') - all_xis = [dd.calculateXi(rr, rd=dr) for dd,dr,rr in zip(all_dds, all_drs, all_rrs)] + all_xis = [dd.calculateXi(rr=rr, rd=dr) for dd,dr,rr in zip(all_dds, all_drs, all_rrs)] mean_wt = np.mean([dd.weight for dd in all_dds], axis=0) mean_xi = np.mean([xi[0] for xi in all_xis], axis=0) var_xi = np.var([xi[0] for xi in all_xis], axis=0) diff --git a/tests/test_patch.py b/tests/test_patch.py index ebd00143..226ba735 100644 --- a/tests/test_patch.py +++ b/tests/test_patch.py @@ -1044,7 +1044,7 @@ def test_ng_jk(): print('Time for processing RG = ',t1-t0) ng4 = ng3.copy() - ng4.calculateXi(rg4) + ng4.calculateXi(rg=rg4) print('xi = ',ng4.xi) print('varxi = ',ng4.varxi) print('ratio = ',ng4.varxi / var_xi) @@ -1070,7 +1070,7 @@ def test_ng_jk(): print('Time for processing RG = ',t1-t0) ng5 = ng3.copy() - ng5.calculateXi(rg5) + ng5.calculateXi(rg=rg5) print('xi = ',ng5.xi) print('varxi = ',ng5.varxi) print('ratio = ',ng5.varxi / var_xi) @@ -1164,8 +1164,8 @@ def test_nn_jk(): nr = treecorr.NNCorrelation(bin_size=0.3, min_sep=10., max_sep=30.) 
nn.process(cat) nr.process(cat, rand_cat) - xia, varxi = nn.calculateXi(rr) - xib, varxi = nn.calculateXi(rr,nr) + xia, varxi = nn.calculateXi(rr=rr) + xib, varxi = nn.calculateXi(rr=rr, dr=nr) all_xia.append(xia) all_xib.append(xib) @@ -1204,9 +1204,9 @@ def test_nn_jk(): t1 = time.time() nr1.process(cat, rand_cat) t2 = time.time() - xia1, varxia1 = nn1.calculateXi(rr) + xia1, varxia1 = nn1.calculateXi(rr=rr) t3 = time.time() - xib1, varxib1 = nn1.calculateXi(rr,nr1) + xib1, varxib1 = nn1.calculateXi(rr=rr, dr=nr1) t4 = time.time() print('Time for non-patch processing = ',t1-t0, t2-t1, t3-t2, t4-t3) @@ -1244,9 +1244,9 @@ def test_nn_jk(): t1 = time.time() nr2.process(catp, rand_cat) t2 = time.time() - xia2, varxia2 = nn2.calculateXi(rr) + xia2, varxia2 = nn2.calculateXi(rr=rr) t3 = time.time() - xib2, varxib2 = nn2.calculateXi(rr,nr2) + xib2, varxib2 = nn2.calculateXi(rr=rr, dr=nr2) t4 = time.time() print('Time for shot processing = ',t1-t0, t2-t1, t3-t2, t4-t3) print('nn2.weight = ',nn2.weight) @@ -1275,9 +1275,9 @@ def test_nn_jk(): t1 = time.time() nr3.process(catp, rand_cat) t2 = time.time() - xia3, varxia3 = nn3.calculateXi(rr) + xia3, varxia3 = nn3.calculateXi(rr=rr) t3 = time.time() - xib3, varxib3 = nn3.calculateXi(rr,nr3) + xib3, varxib3 = nn3.calculateXi(rr=rr, dr=nr3) t4 = time.time() print('Time for jackknife processing = ',t1-t0, t2-t1, t3-t2, t4-t3) print('xia = ',xia3) @@ -1317,14 +1317,14 @@ def test_nn_jk(): print('Time for cross processing = ',t1-t0) np.testing.assert_allclose(nn3.weight, 2*nn2.weight) rn3.process(rand_cat, catp) - xic3, varxic3 = nn3.calculateXi(rr,rd=rn3) + xic3, varxic3 = nn3.calculateXi(rr=rr, rd=rn3) print('xic = ',xic3) print('varxic = ',varxic3) print('ratio = ',varxic3 / var_xib) print('ratio = ',varxic3 / varxib3) np.testing.assert_allclose(xic3, xib3) np.testing.assert_allclose(varxic3, varxib3) - xid3, varxid3 = nn3.calculateXi(rr,dr=nr3,rd=rn3) + xid3, varxid3 = nn3.calculateXi(rr=rr, dr=nr3, rd=rn3) print('xid = ',xid3) print('varxid = ',varxid3) print('ratio = ',varxid3 / var_xib) @@ -1351,13 +1351,13 @@ def test_nn_jk(): np.testing.assert_allclose(nn4.weight, nn2.weight) # Save the initial results dict so we test feature of adding additional result keys in dr or rd. res = nn4.results.copy() - xia4, varxia4 = nn4.calculateXi(rr4) + xia4, varxia4 = nn4.calculateXi(rr=rr4) nn4.results = res.copy() - xib4, varxib4 = nn4.calculateXi(rr4,dr=nr4) + xib4, varxib4 = nn4.calculateXi(rr=rr4, dr=nr4) nn4.results = res.copy() - xic4, varxic4 = nn4.calculateXi(rr4,rd=rn4) + xic4, varxic4 = nn4.calculateXi(rr=rr4, rd=rn4) nn4.results = res.copy() - xid4, varxid4 = nn4.calculateXi(rr4,dr=nr4,rd=rn4) + xid4, varxid4 = nn4.calculateXi(rr=rr4, dr=nr4, rd=rn4) print('xia = ',xia4) print('xib = ',xib4) print('xic = ',xic4) @@ -1379,15 +1379,15 @@ def test_nn_jk(): # Check some invalid parameters # randoms need patches, at least for d part. 
with assert_raises(RuntimeError): - nn3.calculateXi(rr,dr=nr1) + nn3.calculateXi(rr=rr, dr=nr1) with assert_raises(RuntimeError): - nn3.calculateXi(rr,dr=rn3) + nn3.calculateXi(rr=rr, dr=rn3) with assert_raises(RuntimeError): - nn3.calculateXi(rr,rd=nr3) + nn3.calculateXi(rr=rr, rd=nr3) with assert_raises(RuntimeError): - nn3.calculateXi(rr,dr=nr3,rd=nr3) + nn3.calculateXi(rr=rr, dr=nr3, rd=nr3) with assert_raises(RuntimeError): - nn3.calculateXi(rr,dr=rn3,rd=rn3) + nn3.calculateXi(rr=rr, dr=rn3, rd=rn3) # Not run on patches, but need patches with assert_raises(ValueError): nn1.estimate_cov('jackknife') @@ -1411,17 +1411,17 @@ def test_nn_jk(): rn6.process(rand_catp7, catp7) nr6.process(catp7, rand_catp7) with assert_raises(RuntimeError): - nn6.calculateXi(rr4) + nn6.calculateXi(rr=rr4) with assert_raises(RuntimeError): - nn6.calculateXi(rr6, dr=nr4) + nn6.calculateXi(rr=rr6, dr=nr4) with assert_raises(RuntimeError): - nn6.calculateXi(rr6, rd=rn4) + nn6.calculateXi(rr=rr6, rd=rn4) with assert_raises(RuntimeError): - nn6.calculateXi(rr6, dr=nr4, rd=rn6) + nn6.calculateXi(rr=rr6, dr=nr4, rd=rn6) with assert_raises(RuntimeError): - nn6.calculateXi(rr6, dr=nr6, rd=rn4) + nn6.calculateXi(rr=rr6, dr=nr6, rd=rn4) with assert_raises(RuntimeError): - nn6.calculateXi(rr4, dr=nr6, rd=rn6) + nn6.calculateXi(rr=rr4, dr=nr6, rd=rn6) @timer def test_kappa_jk(): @@ -1535,7 +1535,7 @@ def test_kappa_jk(): print('Time for processing RK = ',t1-t0) nk2 = nk.copy() - nk2.calculateXi(rk2) + nk2.calculateXi(rk=rk2) print('xi = ',nk2.xi) print('varxi = ',nk2.varxi) print('ratio = ',nk2.varxi / var_nk_xi) @@ -1551,7 +1551,7 @@ def test_kappa_jk(): print('Time for processing RK = ',t1-t0) nk3 = nk.copy() - nk3.calculateXi(rk3) + nk3.calculateXi(rk=rk3) print('xi = ',nk3.xi) print('varxi = ',nk3.varxi) print('ratio = ',nk3.varxi / var_nk_xi) @@ -1984,7 +1984,7 @@ def make_gals(): print('Time for processing RG = ',t1-t0) ng3b = ng3.copy() - ng3b.calculateXi(rg3) + ng3b.calculateXi(rg=rg3) print('xi = ',ng3b.xi) print('varxi = ',ng3b.varxi) print('ratio = ',ng3b.varxi / var_xi) @@ -2076,7 +2076,7 @@ def test_brute_jk(): # Repeat with randoms. rk = treecorr.NKCorrelation(bin_size=0.3, min_sep=10., max_sep=30., brute=True) rk.process(rand_lens_cat, source_cat) - nk.calculateXi(rk) + nk.calculateXi(rk=rk) print('With randoms:') print('nk = ',nk.xi) print('var = ',nk.varxi) @@ -2095,7 +2095,7 @@ def test_brute_jk(): nk1.process(lens_cat1, source_cat1) rk1 = treecorr.NKCorrelation(bin_size=0.3, min_sep=10., max_sep=30., brute=True) rk1.process(rand_lens_cat1, source_cat1) - nk1.calculateXi(rk1) + nk1.calculateXi(rk=rk1) xi_list.append(nk1.xi) xi_list = np.array(xi_list) C = np.cov(xi_list.T, bias=True) * (len(xi_list)-1) @@ -2173,7 +2173,7 @@ def test_brute_jk(): # Repeat NG with randoms. 
rg = treecorr.NGCorrelation(bin_size=0.3, min_sep=10., max_sep=30., brute=True) rg.process(rand_lens_cat, source_cat) - ng.calculateXi(rg) + ng.calculateXi(rg=rg) xi_list = [] for i in range(npatch): @@ -2189,7 +2189,7 @@ def test_brute_jk(): ng1.process(lens_cat1, source_cat1) rg1 = treecorr.NGCorrelation(bin_size=0.3, min_sep=10., max_sep=30., brute=True) rg1.process(rand_lens_cat1, source_cat1) - ng1.calculateXi(rg1) + ng1.calculateXi(rg=rg1) xi_list.append(ng1.xi) xi_list = np.array(xi_list) C = np.cov(xi_list.T, bias=True) * (len(xi_list)-1) @@ -2239,14 +2239,14 @@ def test_brute_jk(): rd1.process(rand_lens_cat1, source_cat1) dr1 = treecorr.NNCorrelation(bin_size=0.3, min_sep=10., max_sep=30., bin_slop=0) dr1.process(lens_cat1, rand_source_cat1) - xi1_list.append(dd1.calculateXi(rr1)[0]) - xi2_list.append(dd1.calculateXi(rr1,dr=dr1)[0]) - xi3_list.append(dd1.calculateXi(rr1,rd=rd1)[0]) - xi4_list.append(dd1.calculateXi(rr1,dr=dr1,rd=rd1)[0]) + xi1_list.append(dd1.calculateXi(rr=rr1)[0]) + xi2_list.append(dd1.calculateXi(rr=rr1, dr=dr1)[0]) + xi3_list.append(dd1.calculateXi(rr=rr1, rd=rd1)[0]) + xi4_list.append(dd1.calculateXi(rr=rr1, dr=dr1, rd=rd1)[0]) print('(DD-RR)/RR') xi1_list = np.array(xi1_list) - xi1, varxi1 = dd.calculateXi(rr) + xi1, varxi1 = dd.calculateXi(rr=rr) varxi = np.diagonal(np.cov(xi1_list.T, bias=True)) * (len(xi1_list)-1) print('treecorr jackknife varxi = ',varxi1) print('direct jackknife varxi = ',varxi) @@ -2254,7 +2254,7 @@ def test_brute_jk(): print('(DD-2DR+RR)/RR') xi2_list = np.array(xi2_list) - xi2, varxi2 = dd.calculateXi(rr, dr=dr) + xi2, varxi2 = dd.calculateXi(rr=rr, dr=dr) varxi = np.diagonal(np.cov(xi2_list.T, bias=True)) * (len(xi2_list)-1) print('treecorr jackknife varxi = ',varxi2) print('direct jackknife varxi = ',varxi) @@ -2262,7 +2262,7 @@ def test_brute_jk(): print('(DD-2RD+RR)/RR') xi3_list = np.array(xi3_list) - xi3, varxi3 = dd.calculateXi(rr, rd=rd) + xi3, varxi3 = dd.calculateXi(rr=rr, rd=rd) varxi = np.diagonal(np.cov(xi3_list.T, bias=True)) * (len(xi3_list)-1) print('treecorr jackknife varxi = ',varxi3) print('direct jackknife varxi = ',varxi) @@ -2270,7 +2270,7 @@ def test_brute_jk(): print('(DD-DR-RD+RR)/RR') xi4_list = np.array(xi4_list) - xi4, varxi4 = dd.calculateXi(rr, rd=rd, dr=dr) + xi4, varxi4 = dd.calculateXi(rr=rr, rd=rd, dr=dr) varxi = np.diagonal(np.cov(xi4_list.T, bias=True)) * (len(xi4_list)-1) print('treecorr jackknife varxi = ',varxi4) print('direct jackknife varxi = ',varxi) diff --git a/tests/test_patch3pt.py b/tests/test_patch3pt.py index 9aefa532..820c9eb9 100644 --- a/tests/test_patch3pt.py +++ b/tests/test_patch3pt.py @@ -1248,25 +1248,25 @@ def cc_zeta(corrs): np.testing.assert_allclose(zeta_s3, zeta_s1.ravel(), rtol=0.05 * tol_factor) print('jackknife:') - cov = treecorr.estimate_multi_cov([dddc,rrrc], 'jackknife', cc_zeta) + cov = treecorr.estimate_multi_cov([dddc,rrrc], 'jackknife', func=cc_zeta) print(np.diagonal(cov)) print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_nnns)))) np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_nnns), atol=0.9*tol_factor) print('sample:') - cov = treecorr.estimate_multi_cov([dddc,rrrc], 'sample', cc_zeta) + cov = treecorr.estimate_multi_cov([dddc,rrrc], 'sample', func=cc_zeta) print(np.diagonal(cov)) print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_nnns)))) np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_nnns), atol=1.2*tol_factor) print('marked:') - cov = 
treecorr.estimate_multi_cov([dddc,rrrc], 'marked_bootstrap', cc_zeta) + cov = treecorr.estimate_multi_cov([dddc,rrrc], 'marked_bootstrap', func=cc_zeta) print(np.diagonal(cov)) print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_nnns)))) np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_nnns), atol=1.5*tol_factor) print('bootstrap:') - cov = treecorr.estimate_multi_cov([dddc,rrrc], 'bootstrap', cc_zeta) + cov = treecorr.estimate_multi_cov([dddc,rrrc], 'bootstrap', func=cc_zeta) print(np.diagonal(cov)) print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_nnns)))) np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_nnns), atol=0.6*tol_factor) @@ -1282,25 +1282,25 @@ def cc_zeta(corrs): np.testing.assert_allclose(zeta_s3, zeta_s1.ravel(), rtol=0.05 * tol_factor) print('jackknife:') - cov = treecorr.estimate_multi_cov([dddc,rrrc], 'jackknife', cc_zeta) + cov = treecorr.estimate_multi_cov([dddc,rrrc], 'jackknife', func=cc_zeta) print(np.diagonal(cov)) print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_nnns)))) np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_nnns), atol=0.9*tol_factor) print('sample:') - cov = treecorr.estimate_multi_cov([dddc,rrrc], 'sample', cc_zeta) + cov = treecorr.estimate_multi_cov([dddc,rrrc], 'sample', func=cc_zeta) print(np.diagonal(cov)) print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_nnns)))) np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_nnns), atol=1.1*tol_factor) print('marked:') - cov = treecorr.estimate_multi_cov([dddc,rrrc], 'marked_bootstrap', cc_zeta) + cov = treecorr.estimate_multi_cov([dddc,rrrc], 'marked_bootstrap', func=cc_zeta) print(np.diagonal(cov)) print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_nnns)))) np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_nnns), atol=1.5*tol_factor) print('bootstrap:') - cov = treecorr.estimate_multi_cov([dddc,rrrc], 'bootstrap', cc_zeta) + cov = treecorr.estimate_multi_cov([dddc,rrrc], 'bootstrap', func=cc_zeta) print(np.diagonal(cov)) print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_nnns)))) np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_nnns), atol=0.6*tol_factor) @@ -1436,7 +1436,7 @@ def test_brute_jk(): ggg_map3_list = np.array(ggg_map3_list) varmap3 = np.diagonal(np.cov(ggg_map3_list.T, bias=True)) * (len(ggg_map3_list)-1) covmap3 = treecorr.estimate_multi_cov([ggg], 'jackknife', - lambda corrs: corrs[0].calculateMap3()[0]) + func=lambda corrs: corrs[0].calculateMap3()[0]) print('GGG: treecorr jackknife varmap3 = ',np.diagonal(covmap3)) print('GGG: direct jackknife varmap3 = ',varmap3) np.testing.assert_allclose(np.diagonal(covmap3), varmap3) diff --git a/tests/test_periodic.py b/tests/test_periodic.py index 9fa76c56..1c0b97ef 100644 --- a/tests/test_periodic.py +++ b/tests/test_periodic.py @@ -89,7 +89,7 @@ def test_direct_count(): rr = treecorr.NNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins, bin_slop=0, verbose=0, xperiod=Lx, yperiod=Ly) rr.process(rcat1,rcat2, metric='Periodic') - xi, varxi = dd.calculateXi(rr) + xi, varxi = dd.calculateXi(rr=rr) print('xi = ',xi) # Do this via the corr2 function. 
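For downstream users, the net effect of the changes exercised in these tests is simply that ancillary inputs like rr, dr, and rd now go by keyword. A short usage sketch of the old and new calling styles (the catalogs here are invented purely for illustration)::

    import warnings
    import numpy as np
    import treecorr

    # Made-up data, just to have something to process.
    rng = np.random.RandomState(1234)
    cat = treecorr.Catalog(x=rng.uniform(0, 100, 1000), y=rng.uniform(0, 100, 1000))
    rand = treecorr.Catalog(x=rng.uniform(0, 100, 1000), y=rng.uniform(0, 100, 1000))

    dd = treecorr.NNCorrelation(min_sep=1., max_sep=25., bin_size=0.1)
    rr = treecorr.NNCorrelation(min_sep=1., max_sep=25., bin_size=0.1)
    dd.process(cat)
    rr.process(rand)

    # New style: the random-pair counts must be named.
    xi, varxi = dd.calculateXi(rr=rr)

    # Old style: still works for now, but raises a FutureWarning.
    with warnings.catch_warnings(record=True) as w:
        warnings.simplefilter('always')
        dd.calculateXi(rr)
        assert any(issubclass(wi.category, FutureWarning) for wi in w)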
diff --git a/tests/test_rperp.py b/tests/test_rperp.py index 8bebd680..bd94cf07 100644 --- a/tests/test_rperp.py +++ b/tests/test_rperp.py @@ -285,8 +285,8 @@ def test_rperp_minmax(): print('dr2 npairs = ',dr2.npairs[2:-2]) np.testing.assert_allclose(dr1.npairs, dr2.npairs[2:-2], rtol=1.e-6) - xi1, varxi1 = dd1.calculateXi(rr1, dr1) - xi2, varxi2 = dd2.calculateXi(rr2, dr2) + xi1, varxi1 = dd1.calculateXi(rr=rr1, dr=dr1) + xi2, varxi2 = dd2.calculateXi(rr=rr2, dr=dr2) print('xi1 = ',xi1) print('xi2 = ',xi2[2:-2]) np.testing.assert_allclose(xi1, xi2[2:-2], rtol=1.e-6) diff --git a/treecorr/binnedcorr2.py b/treecorr/binnedcorr2.py index 6afc4cab..7bc6412c 100644 --- a/treecorr/binnedcorr2.py +++ b/treecorr/binnedcorr2.py @@ -23,6 +23,7 @@ from . import _lib from .config import merge_config, setup_logger, get from .util import parse_metric, metric_enum, coord_enum, set_omp_threads, lazy_property +from .util import depr_pos_kwargs class Namespace(object): pass @@ -271,7 +272,8 @@ class BinnedCorr2(object): 'How many threads should be used. num_threads <= 0 means auto based on num cores.'), } - def __init__(self, config=None, logger=None, rng=None, **kwargs): + @depr_pos_kwargs + def __init__(self, config=None, *, logger=None, rng=None, **kwargs): self._corr = None # Do this first to make sure we always have it for __del__ self.config = merge_config(config,kwargs,BinnedCorr2._valid_params) if logger is None: @@ -635,7 +637,7 @@ def is_my_job(my_indices, i, j, n): return ret if len(cat1) == 1 and cat1[0].npatch == 1: - self.process_auto(cat1[0],metric,num_threads) + self.process_auto(cat1[0], metric=metric, num_threads=num_threads) else: # When patch processing, keep track of the pair-wise results. if self.npatch1 == 1: @@ -658,7 +660,7 @@ def is_my_job(my_indices, i, j, n): if is_my_job(my_indices, i, i, n): temp._clear() self.logger.info('Process patch %d auto',i) - temp.process_auto(c1,metric,num_threads) + temp.process_auto(c1, metric=metric, num_threads=num_threads) if (i,i) not in self.results: self.results[(i,i)] = temp.copy() else: @@ -670,7 +672,7 @@ def is_my_job(my_indices, i, j, n): temp._clear() if not self._trivially_zero(c1,c2,metric): self.logger.info('Process patches %d,%d cross',i,j) - temp.process_cross(c1,c2,metric,num_threads) + temp.process_cross(c1, c2, metric=metric, num_threads=num_threads) else: self.logger.info('Skipping %d,%d pair, which are too far apart ' + 'for this set of separations',i,j) @@ -733,9 +735,9 @@ def is_my_job(my_indices, i, j, n1, n2): for c1,c2 in zip(cat1,cat2): if c1.ntot != c2.ntot: raise ValueError("Number of objects must be equal for pairwise.") - self.process_pairwise(c1,c2,metric,num_threads) + self.process_pairwise(c1, c2, metric=metric, num_threads=num_threads) elif len(cat1) == 1 and len(cat2) == 1 and cat1[0].npatch == 1 and cat2[0].npatch == 1: - self.process_cross(cat1[0],cat2[0],metric,num_threads) + self.process_cross(cat1[0], cat2[0], metric=metric, num_threads=num_threads) else: # When patch processing, keep track of the pair-wise results. 
if self.npatch1 == 1: @@ -767,7 +769,7 @@ def is_my_job(my_indices, i, j, n1, n2): temp._clear() if not self._trivially_zero(c1,c2,metric): self.logger.info('Process patches %d,%d cross',i,j) - temp.process_cross(c1,c2,metric,num_threads) + temp.process_cross(c1, c2, metric=metric, num_threads=num_threads) else: self.logger.info('Skipping %d,%d pair, which are too far apart ' + 'for this set of separations',i,j) @@ -814,7 +816,8 @@ def getWeight(self): """ return self.weight.ravel() - def estimate_cov(self, method, func=None): + @depr_pos_kwargs + def estimate_cov(self, method, *, func=None): """Estimate the covariance matrix based on the data This function will calculate an estimate of the covariance matrix according to the @@ -887,7 +890,7 @@ def estimate_cov(self, method, func=None): all_func = lambda corrs: func(corrs[0]) else: all_func = None - return estimate_multi_cov([self], method, all_func) + return estimate_multi_cov([self], method=method, func=all_func) def _set_num_threads(self, num_threads): if num_threads is None: @@ -961,7 +964,8 @@ def _get_minmax_size(self): # (And for the max_size, always split 10 levels for the top-level cells.) return 0., 0. - def sample_pairs(self, n, cat1, cat2, min_sep, max_sep, metric=None): + @depr_pos_kwargs + def sample_pairs(self, n, cat1, cat2, *, min_sep, max_sep, metric=None): """Return a random sample of n pairs whose separations fall between min_sep and max_sep. This would typically be used to get some random subset of the indices of pairs that @@ -999,9 +1003,11 @@ def sample_pairs(self, n, cat1, cat2, min_sep, max_sep, metric=None): cat2 (Catalog): The catalog from which to sample the second object of each pair. (This may be the same as cat1.) min_sep (float): The minimum separation for the returned pairs (modulo some slop - allowed by the bin_slop parameter). + allowed by the bin_slop parameter). (Note: keyword name is required + for this parameter: min_sep=min_sep) max_sep (float): The maximum separation for the returned pairs (modulo some slop - allowed by the bin_slop parameter). + allowed by the bin_slop parameter). (Note: keyword name is required + for this parameter: max_sep=max_sep) metric (str): Which metric to use. See `Metrics` for details. (default: self.metric, or 'Euclidean' if not set yet) @@ -1156,7 +1162,8 @@ def _bootstrap_pairs(self, indx): return ret -def estimate_multi_cov(corrs, method, func=None): +@depr_pos_kwargs +def estimate_multi_cov(corrs, method, *, func=None): """Estimate the covariance matrix of multiple statistics. This is like the method `BinnedCorr2.estimate_cov`, except that it will accommodate @@ -1187,7 +1194,7 @@ def estimate_multi_cov(corrs, method, func=None): along with the GG xi+ and xi- from the same area, using jackknife covariance estimation, you would write:: - >>> cov = treecorr.estimate_multi_cov([ng,gg], 'jackknife') + >>> cov = treecorr.estimate_multi_cov([ng,gg], method='jackknife') In all cases, the relevant processing needs to already have been completed and finalized.
And for all methods other than 'shot', the processing should have involved an appropriate diff --git a/treecorr/binnedcorr3.py b/treecorr/binnedcorr3.py index 7f486128..101f6a7b 100644 --- a/treecorr/binnedcorr3.py +++ b/treecorr/binnedcorr3.py @@ -1100,7 +1100,7 @@ def estimate_cov(self, method, func=None): all_func = lambda corrs: func(corrs[0]) else: all_func = None - return estimate_multi_cov([self], method, all_func) + return estimate_multi_cov([self], method, func=all_func) def _set_num_threads(self, num_threads): if num_threads is None: diff --git a/treecorr/corr2.py b/treecorr/corr2.py index 82baa636..6aff5413 100644 --- a/treecorr/corr2.py +++ b/treecorr/corr2.py @@ -147,7 +147,7 @@ def corr2(config, logger=None): # Do GG correlation function if necessary if 'gg_file_name' in config or 'm2_file_name' in config: logger.warning("Performing GG calculations...") - gg = GGCorrelation(config,logger) + gg = GGCorrelation(config, logger=logger) gg.process(cat1,cat2) logger.info("Done GG calculations.") if 'gg_file_name' in config: @@ -162,7 +162,7 @@ def corr2(config, logger=None): if cat2 is None: raise TypeError("file_name2 is required for ng correlation") logger.warning("Performing NG calculations...") - ng = NGCorrelation(config,logger) + ng = NGCorrelation(config, logger=logger) ng.process(cat1,cat2) logger.info("Done NG calculation.") @@ -172,12 +172,12 @@ def corr2(config, logger=None): if config.get('ng_statistic',None) == 'compensated': raise TypeError("rand_files is required for ng_statistic = compensated") elif config.get('ng_statistic','compensated') == 'compensated': - rg = NGCorrelation(config,logger) + rg = NGCorrelation(config, logger=logger) rg.process(rand1,cat2) logger.info("Done RG calculation.") if 'ng_file_name' in config: - ng.write(config['ng_file_name'], rg) + ng.write(config['ng_file_name'], rg=rg) logger.warning("Wrote NG correlation to %s",config['ng_file_name']) if 'nm_file_name' in config: ng.writeNMap(config['nm_file_name'], rg=rg, m2_uform=config['m2_uform'], @@ -185,17 +185,17 @@ def corr2(config, logger=None): logger.warning("Wrote NMap values to %s",config['nm_file_name']) if 'norm_file_name' in config: - gg = GGCorrelation(config,logger) + gg = GGCorrelation(config, logger=logger) gg.process(cat2) logger.info("Done GG calculation for norm") - dd = NNCorrelation(config,logger) + dd = NNCorrelation(config, logger=logger) dd.process(cat1) logger.info("Done DD calculation for norm") - rr = NNCorrelation(config,logger) + rr = NNCorrelation(config, logger=logger) rr.process(rand1) logger.info("Done RR calculation for norm") if config['nn_statistic'] == 'compensated': - dr = NNCorrelation(config,logger) + dr = NNCorrelation(config, logger=logger) dr.process(cat1,rand1) logger.info("Done DR calculation for norm") else: @@ -207,7 +207,7 @@ def corr2(config, logger=None): # Do NN correlation function if necessary if 'nn_file_name' in config: logger.warning("Performing DD calculations...") - dd = NNCorrelation(config,logger) + dd = NNCorrelation(config, logger=logger) dd.process(cat1,cat2) logger.info("Done DD calculations.") @@ -218,38 +218,38 @@ def corr2(config, logger=None): rr = None elif cat2 is None: logger.warning("Performing RR calculations...") - rr = NNCorrelation(config,logger) + rr = NNCorrelation(config, logger=logger) rr.process(rand1) logger.info("Done RR calculations.") if config['nn_statistic'] == 'compensated': logger.warning("Performing DR calculations...") - dr = NNCorrelation(config,logger) + dr = NNCorrelation(config, logger=logger) 
dr.process(cat1,rand1) logger.info("Done DR calculations.") else: if rand2 is None: raise TypeError("rand_file_name2 is required when file_name2 is given") logger.warning("Performing RR calculations...") - rr = NNCorrelation(config,logger) + rr = NNCorrelation(config, logger=logger) rr.process(rand1,rand2) logger.info("Done RR calculations.") if config['nn_statistic'] == 'compensated': logger.warning("Performing DR calculations...") - dr = NNCorrelation(config,logger) + dr = NNCorrelation(config, logger=logger) dr.process(cat1,rand2) logger.info("Done DR calculations.") - rd = NNCorrelation(config,logger) + rd = NNCorrelation(config, logger=logger) rd.process(rand1,cat2) logger.info("Done RD calculations.") - dd.write(config['nn_file_name'],rr,dr,rd) + dd.write(config['nn_file_name'], rr=rr, dr=dr, rd=rd) logger.warning("Wrote NN correlation to %s",config['nn_file_name']) # Do KK correlation function if necessary if 'kk_file_name' in config: logger.warning("Performing KK calculations...") - kk = KKCorrelation(config,logger) + kk = KKCorrelation(config, logger=logger) kk.process(cat1,cat2) logger.info("Done KK calculations.") kk.write(config['kk_file_name']) @@ -260,7 +260,7 @@ def corr2(config, logger=None): if cat2 is None: raise TypeError("file_name2 is required for nk correlation") logger.warning("Performing NK calculations...") - nk = NKCorrelation(config,logger) + nk = NKCorrelation(config, logger=logger) nk.process(cat1,cat2) logger.info("Done NK calculation.") @@ -269,11 +269,11 @@ def corr2(config, logger=None): if config.get('nk_statistic',None) == 'compensated': raise TypeError("rand_files is required for nk_statistic = compensated") elif config.get('nk_statistic','compensated') == 'compensated': - rk = NKCorrelation(config,logger) + rk = NKCorrelation(config, logger=logger) rk.process(rand1,cat2) logger.info("Done RK calculation.") - nk.write(config['nk_file_name'], rk) + nk.write(config['nk_file_name'], rk=rk) logger.warning("Wrote NK correlation to %s",config['nk_file_name']) # Do KG correlation function if necessary @@ -281,7 +281,7 @@ def corr2(config, logger=None): if cat2 is None: raise TypeError("file_name2 is required for kg correlation") logger.warning("Performing KG calculations...") - kg = KGCorrelation(config,logger) + kg = KGCorrelation(config, logger=logger) kg.process(cat1,cat2) logger.info("Done KG calculation.") kg.write(config['kg_file_name']) diff --git a/treecorr/ggcorrelation.py b/treecorr/ggcorrelation.py index d81ce158..9b531efe 100644 --- a/treecorr/ggcorrelation.py +++ b/treecorr/ggcorrelation.py @@ -22,6 +22,8 @@ from .binnedcorr2 import BinnedCorr2 from .util import double_ptr as dp from .util import gen_read, gen_write +from .util import depr_pos_kwargs + class GGCorrelation(BinnedCorr2): r"""This class handles the calculation and storage of a 2-point shear-shear correlation @@ -95,10 +97,11 @@ class GGCorrelation(BinnedCorr2): **kwargs: See the documentation for `BinnedCorr2` for the list of allowed keyword arguments, which may be passed either directly or in the config dict. """ - def __init__(self, config=None, logger=None, **kwargs): + @depr_pos_kwargs + def __init__(self, config=None, *, logger=None, **kwargs): """Initialize `GGCorrelation`. See class doc for details. 
""" - BinnedCorr2.__init__(self, config, logger, **kwargs) + BinnedCorr2.__init__(self, config, logger=logger, **kwargs) self._ro._d1 = 3 # GData self._ro._d2 = 3 # GData @@ -180,7 +183,8 @@ def copy(self): def __repr__(self): return 'GGCorrelation(config=%r)'%self.config - def process_auto(self, cat, metric=None, num_threads=None): + @depr_pos_kwargs + def process_auto(self, cat, *, metric=None, num_threads=None): """Process a single catalog, accumulating the auto-correlation. This accumulates the weighted sums into the bins, but does not finalize @@ -217,7 +221,8 @@ def process_auto(self, cat, metric=None, num_threads=None): field._d, self._coords, self._bintype, self._metric) - def process_cross(self, cat1, cat2, metric=None, num_threads=None): + @depr_pos_kwargs + def process_cross(self, cat1, cat2, *, metric=None, num_threads=None): """Process a single pair of catalogs, accumulating the cross-correlation. This accumulates the weighted sums into the bins, but does not finalize @@ -261,7 +266,8 @@ def process_cross(self, cat1, cat2, metric=None, num_threads=None): f1._d, f2._d, self._coords, self._bintype, self._metric) - def process_pairwise(self, cat1, cat2, metric=None, num_threads=None): + @depr_pos_kwargs + def process_pairwise(self, cat1, cat2, *, metric=None, num_threads=None): """Process a single pair of catalogs, accumulating the cross-correlation, only using the corresponding pairs of objects in each catalog. @@ -409,7 +415,8 @@ def _sum(self, others): np.sum([c.weight for c in others], axis=0, out=self.weight) np.sum([c.npairs for c in others], axis=0, out=self.npairs) - def process(self, cat1, cat2=None, metric=None, num_threads=None, comm=None, low_mem=False, + @depr_pos_kwargs + def process(self, cat1, cat2=None, *, metric=None, num_threads=None, comm=None, low_mem=False, initialize=True, finalize=True): """Compute the correlation function. @@ -466,7 +473,8 @@ def process(self, cat1, cat2=None, metric=None, num_threads=None, comm=None, low self.finalize(varg1,varg2) - def write(self, file_name, file_type=None, precision=None): + @depr_pos_kwargs + def write(self, file_name, *, file_type=None, precision=None): r"""Write the correlation function to the file, file_name. The output file will include the following columns: @@ -519,7 +527,8 @@ def write(self, file_name, file_type=None, precision=None): params=params, precision=precision, file_type=file_type, logger=self.logger) - def read(self, file_name, file_type=None): + @depr_pos_kwargs + def read(self, file_name, *, file_type=None): """Read in values from a file. This should be a file that was written by TreeCorr, preferably a FITS file, so there @@ -567,7 +576,8 @@ def read(self, file_name, file_type=None): self._ro.bin_type = params['bin_type'].strip() - def calculateMapSq(self, R=None, m2_uform=None): + @depr_pos_kwargs + def calculateMapSq(self, *, R=None, m2_uform=None): r"""Calculate the aperture mass statistics from the correlation function. .. math:: @@ -677,7 +687,8 @@ def calculateMapSq(self, R=None, m2_uform=None): return mapsq, mapsq_im, mxsq, mxsq_im, varmapsq - def calculateGamSq(self, R=None, eb=False): + @depr_pos_kwargs + def calculateGamSq(self, *, R=None, eb=False): r"""Calculate the tophat shear variance from the correlation function. .. 
math:: @@ -758,7 +769,8 @@ def calculateGamSq(self, R=None, eb=False): return gamsq, vargamsq, gamsq_e, gamsq_b, vargamsq_e - def writeMapSq(self, file_name, R=None, m2_uform=None, file_type=None, precision=None): + @depr_pos_kwargs + def writeMapSq(self, file_name, *, R=None, m2_uform=None, file_type=None, precision=None): r"""Write the aperture mass statistics based on the correlation function to the file, file_name. @@ -800,8 +812,8 @@ def writeMapSq(self, file_name, R=None, m2_uform=None, file_type=None, precision if R is None: R = self.rnom - mapsq, mapsq_im, mxsq, mxsq_im, varmapsq = self.calculateMapSq(R, m2_uform=m2_uform) - gamsq, vargamsq = self.calculateGamSq(R) + mapsq, mapsq_im, mxsq, mxsq_im, varmapsq = self.calculateMapSq(R=R, m2_uform=m2_uform) + gamsq, vargamsq = self.calculateGamSq(R=R) if precision is None: precision = self.config.get('precision', 4) diff --git a/treecorr/kgcorrelation.py b/treecorr/kgcorrelation.py index 48fca86d..d931a5de 100644 --- a/treecorr/kgcorrelation.py +++ b/treecorr/kgcorrelation.py @@ -22,6 +22,7 @@ from .binnedcorr2 import BinnedCorr2 from .util import double_ptr as dp from .util import gen_read, gen_write +from .util import depr_pos_kwargs class KGCorrelation(BinnedCorr2): @@ -96,10 +97,11 @@ class KGCorrelation(BinnedCorr2): **kwargs: See the documentation for `BinnedCorr2` for the list of allowed keyword arguments, which may be passed either directly or in the config dict. """ - def __init__(self, config=None, logger=None, **kwargs): + @depr_pos_kwargs + def __init__(self, config=None, *, logger=None, **kwargs): """Initialize `KGCorrelation`. See class doc for details. """ - BinnedCorr2.__init__(self, config, logger, **kwargs) + BinnedCorr2.__init__(self, config, logger=logger, **kwargs) self._ro._d1 = 2 # KData self._ro._d2 = 3 # GData @@ -175,7 +177,8 @@ def copy(self): def __repr__(self): return 'KGCorrelation(config=%r)'%self.config - def process_cross(self, cat1, cat2, metric=None, num_threads=None): + @depr_pos_kwargs + def process_cross(self, cat1, cat2, *, metric=None, num_threads=None): """Process a single pair of catalogs, accumulating the cross-correlation. This accumulates the weighted sums into the bins, but does not finalize @@ -218,8 +221,8 @@ def process_cross(self, cat1, cat2, metric=None, num_threads=None): _lib.ProcessCross2(self.corr, f1.data, f2.data, self.output_dots, f1._d, f2._d, self._coords, self._bintype, self._metric) - - def process_pairwise(self, cat1, cat2, metric=None, num_threads=None): + @depr_pos_kwargs + def process_pairwise(self, cat1, cat2, *, metric=None, num_threads=None): """Process a single pair of catalogs, accumulating the cross-correlation, only using the corresponding pairs of objects in each catalog. @@ -343,7 +346,8 @@ def _sum(self, others): np.sum([c.weight for c in others], axis=0, out=self.weight) np.sum([c.npairs for c in others], axis=0, out=self.npairs) - def process(self, cat1, cat2, metric=None, num_threads=None, comm=None, low_mem=False, + @depr_pos_kwargs + def process(self, cat1, cat2, *, metric=None, num_threads=None, comm=None, low_mem=False, initialize=True, finalize=True): """Compute the correlation function. 
@@ -387,7 +391,8 @@ def process(self, cat1, cat2, metric=None, num_threads=None, comm=None, low_mem= self.logger.info("varg = %f: sig_sn (per component) = %f",varg,math.sqrt(varg)) self.finalize(vark,varg) - def write(self, file_name, file_type=None, precision=None): + @depr_pos_kwargs + def write(self, file_name, *, file_type=None, precision=None): r"""Write the correlation function to the file, file_name. The output file will include the following columns: @@ -435,8 +440,8 @@ def write(self, file_name, file_type=None, precision=None): self.weight, self.npairs ], params=params, precision=precision, file_type=file_type, logger=self.logger) - - def read(self, file_name, file_type=None): + @depr_pos_kwargs + def read(self, file_name, *, file_type=None): """Read in values from a file. This should be a file that was written by TreeCorr, preferably a FITS file, so there diff --git a/treecorr/kkcorrelation.py b/treecorr/kkcorrelation.py index 65bfaedc..80b77276 100644 --- a/treecorr/kkcorrelation.py +++ b/treecorr/kkcorrelation.py @@ -22,6 +22,7 @@ from .binnedcorr2 import BinnedCorr2 from .util import double_ptr as dp from .util import gen_read, gen_write +from .util import depr_pos_kwargs class KKCorrelation(BinnedCorr2): @@ -96,10 +97,11 @@ class KKCorrelation(BinnedCorr2): **kwargs: See the documentation for `BinnedCorr2` for the list of allowed keyword arguments, which may be passed either directly or in the config dict. """ - def __init__(self, config=None, logger=None, **kwargs): + @depr_pos_kwargs + def __init__(self, config=None, *, logger=None, **kwargs): """Initialize `KKCorrelation`. See class doc for details. """ - BinnedCorr2.__init__(self, config, logger, **kwargs) + BinnedCorr2.__init__(self, config, logger=logger, **kwargs) self._ro._d1 = 2 # KData self._ro._d2 = 2 # KData @@ -173,7 +175,8 @@ def copy(self): def __repr__(self): return 'KKCorrelation(config=%r)'%self.config - def process_auto(self, cat, metric=None, num_threads=None): + @depr_pos_kwargs + def process_auto(self, cat, *, metric=None, num_threads=None): """Process a single catalog, accumulating the auto-correlation. This accumulates the weighted sums into the bins, but does not finalize @@ -208,7 +211,8 @@ def process_auto(self, cat, metric=None, num_threads=None): _lib.ProcessAuto2(self.corr, field.data, self.output_dots, field._d, self._coords, self._bintype, self._metric) - def process_cross(self, cat1, cat2, metric=None, num_threads=None): + @depr_pos_kwargs + def process_cross(self, cat1, cat2, *, metric=None, num_threads=None): """Process a single pair of catalogs, accumulating the cross-correlation. This accumulates the weighted sums into the bins, but does not finalize @@ -251,7 +255,8 @@ def process_cross(self, cat1, cat2, metric=None, num_threads=None): _lib.ProcessCross2(self.corr, f1.data, f2.data, self.output_dots, f1._d, f2._d, self._coords, self._bintype, self._metric) - def process_pairwise(self, cat1, cat2, metric=None, num_threads=None): + @depr_pos_kwargs + def process_pairwise(self, cat1, cat2, *, metric=None, num_threads=None): """Process a single pair of catalogs, accumulating the cross-correlation, only using the corresponding pairs of objects in each catalog. 
@@ -371,7 +376,8 @@ def _sum(self, others): np.sum([c.weight for c in others], axis=0, out=self.weight) np.sum([c.npairs for c in others], axis=0, out=self.npairs) - def process(self, cat1, cat2=None, metric=None, num_threads=None, comm=None, low_mem=False, + @depr_pos_kwargs + def process(self, cat1, cat2=None, *, metric=None, num_threads=None, comm=None, low_mem=False, initialize=True, finalize=True): """Compute the correlation function. @@ -427,7 +433,8 @@ def process(self, cat1, cat2=None, metric=None, num_threads=None, comm=None, low self.logger.info("vark2 = %f: sig_k = %f",vark2,math.sqrt(vark2)) self.finalize(vark1,vark2) - def write(self, file_name, file_type=None, precision=None): + @depr_pos_kwargs + def write(self, file_name, *, file_type=None, precision=None): r"""Write the correlation function to the file, file_name. The output file will include the following columns: @@ -471,8 +478,8 @@ def write(self, file_name, file_type=None, precision=None): self.xi, np.sqrt(self.varxi), self.weight, self.npairs ], params=params, precision=precision, file_type=file_type, logger=self.logger) - - def read(self, file_name, file_type=None): + @depr_pos_kwargs + def read(self, file_name, *, file_type=None): """Read in values from a file. This should be a file that was written by TreeCorr, preferably a FITS file, so there diff --git a/treecorr/ngcorrelation.py b/treecorr/ngcorrelation.py index 9a3ff187..3f8ea786 100644 --- a/treecorr/ngcorrelation.py +++ b/treecorr/ngcorrelation.py @@ -22,6 +22,7 @@ from .binnedcorr2 import BinnedCorr2 from .util import double_ptr as dp from .util import gen_read, gen_write +from .util import depr_pos_kwargs class NGCorrelation(BinnedCorr2): @@ -94,10 +95,11 @@ class NGCorrelation(BinnedCorr2): **kwargs: See the documentation for `BinnedCorr2` for the list of allowed keyword arguments, which may be passed either directly or in the config dict. """ - def __init__(self, config=None, logger=None, **kwargs): + @depr_pos_kwargs + def __init__(self, config=None, *, logger=None, **kwargs): """Initialize `NGCorrelation`. See class doc for details. """ - BinnedCorr2.__init__(self, config, logger, **kwargs) + BinnedCorr2.__init__(self, config, logger=logger, **kwargs) self._ro._d1 = 1 # NData self._ro._d2 = 3 # GData @@ -187,7 +189,8 @@ def copy(self): def __repr__(self): return 'NGCorrelation(config=%r)'%self.config - def process_cross(self, cat1, cat2, metric=None, num_threads=None): + @depr_pos_kwargs + def process_cross(self, cat1, cat2, *, metric=None, num_threads=None): """Process a single pair of catalogs, accumulating the cross-correlation. This accumulates the weighted sums into the bins, but does not finalize @@ -230,8 +233,8 @@ def process_cross(self, cat1, cat2, metric=None, num_threads=None): _lib.ProcessCross2(self.corr, f1.data, f2.data, self.output_dots, f1._d, f2._d, self._coords, self._bintype, self._metric) - - def process_pairwise(self, cat1, cat2, metric=None, num_threads=None): + @depr_pos_kwargs + def process_pairwise(self, cat1, cat2, *, metric=None, num_threads=None): """Process a single pair of catalogs, accumulating the cross-correlation, only using the corresponding pairs of objects in each catalog. 
@@ -365,7 +368,8 @@ def _sum(self, others): self.xi_im = self.raw_xi_im self.varxi = self.raw_varxi - def process(self, cat1, cat2, metric=None, num_threads=None, comm=None, low_mem=False, + @depr_pos_kwargs + def process(self, cat1, cat2, *, metric=None, num_threads=None, comm=None, low_mem=False, initialize=True, finalize=True): """Compute the correlation function. @@ -408,7 +412,8 @@ def process(self, cat1, cat2, metric=None, num_threads=None, comm=None, low_mem= self.logger.info("varg = %f: sig_sn (per component) = %f",varg,math.sqrt(varg)) self.finalize(varg) - def calculateXi(self, rg=None): + @depr_pos_kwargs + def calculateXi(self, *, rg=None): r"""Calculate the correlation function possibly given another correlation function that uses random points for the foreground objects. @@ -477,7 +482,8 @@ def _calculate_xi_from_pairs(self, pairs): self._rg._calculate_xi_from_pairs(pairs) self.xi -= self._rg.xi - def write(self, file_name, rg=None, file_type=None, precision=None): + @depr_pos_kwargs + def write(self, file_name, *, rg=None, file_type=None, precision=None): r"""Write the correlation function to the file, file_name. - If rg is None, the simple correlation function :math:`\langle \gamma_T\rangle` is used. @@ -519,7 +525,7 @@ def write(self, file_name, rg=None, file_type=None, precision=None): """ self.logger.info('Writing NG correlations to %s',file_name) - xi, xi_im, varxi = self.calculateXi(rg) + xi, xi_im, varxi = self.calculateXi(rg=rg) if precision is None: precision = self.config.get('precision', 4) @@ -533,8 +539,8 @@ def write(self, file_name, rg=None, file_type=None, precision=None): xi, xi_im, np.sqrt(varxi), self.weight, self.npairs ], params=params, precision=precision, file_type=file_type, logger=self.logger) - - def read(self, file_name, file_type=None): + @depr_pos_kwargs + def read(self, file_name, *, file_type=None): """Read in values from a file. This should be a file that was written by TreeCorr, preferably a FITS file, so there @@ -576,7 +582,8 @@ def read(self, file_name, file_type=None): self.raw_xi_im = self.xi_im self.raw_varxi = self.varxi - def calculateNMap(self, R=None, rg=None, m2_uform=None): + @depr_pos_kwargs + def calculateNMap(self, *, R=None, rg=None, m2_uform=None): r"""Calculate the aperture mass statistics from the correlation function. .. math:: @@ -649,7 +656,7 @@ def calculateNMap(self, R=None, rg=None, m2_uform=None): Tx[s<2.] += 18./np.pi * ssqa * np.arccos(sa/2.) Tx *= ssq - xi, xi_im, varxi = self.calculateXi(rg) + xi, xi_im, varxi = self.calculateXi(rg=rg) # Now do the integral by taking the matrix products. # Note that dlogr = bin_size @@ -664,8 +671,9 @@ def calculateNMap(self, R=None, rg=None, m2_uform=None): return nmap, nmx, varnmap - - def writeNMap(self, file_name, R=None, rg=None, m2_uform=None, file_type=None, precision=None): + @depr_pos_kwargs + def writeNMap(self, file_name, *, R=None, rg=None, m2_uform=None, file_type=None, + precision=None): r"""Write the cross correlation of the foreground galaxy counts with the aperture mass based on the correlation function to the file, file_name. 
@@ -712,8 +720,8 @@ def writeNMap(self, file_name, R=None, rg=None, m2_uform=None, file_type=None, p [ R, nmap, nmx, np.sqrt(varnmap) ], precision=precision, file_type=file_type, logger=self.logger) - - def writeNorm(self, file_name, gg, dd, rr, R=None, dr=None, rg=None, + @depr_pos_kwargs + def writeNorm(self, file_name, *, gg, dd, rr, R=None, dr=None, rg=None, m2_uform=None, file_type=None, precision=None): r"""Write the normalized aperture mass cross-correlation to the file, file_name. diff --git a/treecorr/nkcorrelation.py b/treecorr/nkcorrelation.py index 0b41fb39..4e154010 100644 --- a/treecorr/nkcorrelation.py +++ b/treecorr/nkcorrelation.py @@ -22,6 +22,8 @@ from .binnedcorr2 import BinnedCorr2 from .util import double_ptr as dp from .util import gen_read, gen_write +from .util import depr_pos_kwargs + class NKCorrelation(BinnedCorr2): r"""This class handles the calculation and storage of a 2-point count-kappa correlation @@ -98,10 +100,11 @@ class NKCorrelation(BinnedCorr2): **kwargs: See the documentation for `BinnedCorr2` for the list of allowed keyword arguments, which may be passed either directly or in the config dict. """ - def __init__(self, config=None, logger=None, **kwargs): + @depr_pos_kwargs + def __init__(self, config=None, *, logger=None, **kwargs): """Initialize `NKCorrelation`. See class doc for details. """ - BinnedCorr2.__init__(self, config, logger, **kwargs) + BinnedCorr2.__init__(self, config, logger=logger, **kwargs) self._ro._d1 = 1 # NData self._ro._d2 = 2 # KData @@ -186,7 +189,8 @@ def copy(self): def __repr__(self): return 'NKCorrelation(config=%r)'%self.config - def process_cross(self, cat1, cat2, metric=None, num_threads=None): + @depr_pos_kwargs + def process_cross(self, cat1, cat2, *, metric=None, num_threads=None): """Process a single pair of catalogs, accumulating the cross-correlation. This accumulates the weighted sums into the bins, but does not finalize @@ -229,8 +233,8 @@ def process_cross(self, cat1, cat2, metric=None, num_threads=None): _lib.ProcessCross2(self.corr, f1.data, f2.data, self.output_dots, f1._d, f2._d, self._coords, self._bintype, self._metric) - - def process_pairwise(self, cat1, cat2, metric=None, num_threads=None): + @depr_pos_kwargs + def process_pairwise(self, cat1, cat2, *, metric=None, num_threads=None): """Process a single pair of catalogs, accumulating the cross-correlation, only using the corresponding pairs of objects in each catalog. @@ -354,7 +358,8 @@ def _sum(self, others): self.xi = self.raw_xi self.varxi = self.raw_varxi - def process(self, cat1, cat2, metric=None, num_threads=None, comm=None, low_mem=False, + @depr_pos_kwargs + def process(self, cat1, cat2, *, metric=None, num_threads=None, comm=None, low_mem=False, initialize=True, finalize=True): """Compute the correlation function. @@ -397,7 +402,8 @@ def process(self, cat1, cat2, metric=None, num_threads=None, comm=None, low_mem= self.logger.info("vark = %f: sig_k = %f",vark,math.sqrt(vark)) self.finalize(vark) - def calculateXi(self, rk=None): + @depr_pos_kwargs + def calculateXi(self, *, rk=None): r"""Calculate the correlation function possibly given another correlation function that uses random points for the foreground objects. @@ -463,7 +469,8 @@ def _calculate_xi_from_pairs(self, pairs): self._rk._calculate_xi_from_pairs(pairs) self.xi -= self._rk.xi - def write(self, file_name, rk=None, file_type=None, precision=None): + @depr_pos_kwargs + def write(self, file_name, *, rk=None, file_type=None, precision=None): r"""Write the correlation function to the file, file_name. 
- If rk is None, the simple correlation function :math:`\langle \kappa \rangle(R)` is @@ -504,7 +510,7 @@ def write(self, file_name, rk=None, file_type=None, precision=None): """ self.logger.info('Writing NK correlations to %s',file_name) - xi, varxi = self.calculateXi(rk) + xi, varxi = self.calculateXi(rk=rk) if precision is None: precision = self.config.get('precision', 4) @@ -518,8 +524,8 @@ def write(self, file_name, rk=None, file_type=None, precision=None): xi, np.sqrt(varxi), self.weight, self.npairs ], params=params, precision=precision, file_type=file_type, logger=self.logger) - - def read(self, file_name, file_type=None): + @depr_pos_kwargs + def read(self, file_name, *, file_type=None): """Read in values from a file. This should be a file that was written by TreeCorr, preferably a FITS file, so there diff --git a/treecorr/nncorrelation.py b/treecorr/nncorrelation.py index 57c43c7b..6cef58dc 100644 --- a/treecorr/nncorrelation.py +++ b/treecorr/nncorrelation.py @@ -21,6 +21,8 @@ from .binnedcorr2 import BinnedCorr2 from .util import double_ptr as dp from .util import gen_read, gen_write, lazy_property +from .util import depr_pos_kwargs + class NNCorrelation(BinnedCorr2): r"""This class handles the calculation and storage of a 2-point count-count correlation @@ -74,8 +76,8 @@ class NNCorrelation(BinnedCorr2): >>> rr.process... # Likewise for random-random correlations >>> dr.process... # If desired, also do data-random correlations >>> rd.process... # For cross-correlations, also do the reverse. - >>> nn.write(file_name,rr,dr,rd) # Write out to a file. - >>> xi,varxi = nn.calculateXi(rr,dr,rd) # Or get the correlation function directly. + >>> nn.write(file_name,rr=rr,dr=dr,rd=rd) # Write out to a file. + >>> xi,varxi = nn.calculateXi(rr=rr,dr=dr,rd=rd) # Or get correlation function directly. Parameters: config (dict): A configuration dict that can be used to pass in kwargs if desired. @@ -88,10 +90,11 @@ class NNCorrelation(BinnedCorr2): **kwargs: See the documentation for `BinnedCorr2` for the list of allowed keyword arguments, which may be passed either directly or in the config dict. """ - def __init__(self, config=None, logger=None, **kwargs): + @depr_pos_kwargs + def __init__(self, config=None, *, logger=None, **kwargs): """Initialize `NNCorrelation`. See class doc for details. """ - BinnedCorr2.__init__(self, config, logger, **kwargs) + BinnedCorr2.__init__(self, config, logger=logger, **kwargs) self._ro._d1 = 1 # NData self._ro._d2 = 1 # NData @@ -193,7 +196,8 @@ def _zero_copy(self, tot): def __repr__(self): return 'NNCorrelation(config=%r)'%self.config - def process_auto(self, cat, metric=None, num_threads=None): + @depr_pos_kwargs + def process_auto(self, cat, *, metric=None, num_threads=None): """Process a single catalog, accumulating the auto-correlation. This accumulates the auto-correlation for the given catalog. After @@ -228,8 +232,8 @@ def process_auto(self, cat, metric=None, num_threads=None): field._d, self._coords, self._bintype, self._metric) self.tot += 0.5 * cat.sumw**2 - - def process_cross(self, cat1, cat2, metric=None, num_threads=None): + @depr_pos_kwargs + def process_cross(self, cat1, cat2, *, metric=None, num_threads=None): """Process a single pair of catalogs, accumulating the cross-correlation. This accumulates the cross-correlation for the given catalogs. 
After @@ -272,8 +276,8 @@ def process_cross(self, cat1, cat2, metric=None, num_threads=None): f1._d, f2._d, self._coords, self._bintype, self._metric) self.tot += cat1.sumw*cat2.sumw - - def process_pairwise(self, cat1, cat2, metric=None, num_threads=None): + @depr_pos_kwargs + def process_pairwise(self, cat1, cat2, *, metric=None, num_threads=None): """Process a single pair of catalogs, accumulating the cross-correlation, only using the corresponding pairs of objects in each catalog. @@ -408,7 +412,8 @@ def _add_tot(self, i, j, c1, c2): # to save some time. self.results[(i,j)] = self._zero_copy(tot) - def process(self, cat1, cat2=None, metric=None, num_threads=None, comm=None, low_mem=False, + @depr_pos_kwargs + def process(self, cat1, cat2=None, *, metric=None, num_threads=None, comm=None, low_mem=False, initialize=True, finalize=True): """Compute the correlation function. @@ -478,7 +483,8 @@ def getWeight(self): else: return self.tot - def calculateXi(self, rr, dr=None, rd=None): + @depr_pos_kwargs + def calculateXi(self, *, rr, dr=None, rd=None): r"""Calculate the correlation function given another correlation function of random points using the same mask, and possibly cross correlations of the data and random. @@ -695,7 +701,8 @@ def _calculate_xi_from_pairs(self, pairs): self.xi = xi / denom self._rr_weight = denom - def write(self, file_name, rr=None, dr=None, rd=None, file_type=None, precision=None): + @depr_pos_kwargs + def write(self, file_name, *, rr=None, dr=None, rd=None, file_type=None, precision=None): r"""Write the correlation function to the file, file_name. rr is the `NNCorrelation` function for random points. @@ -760,7 +767,7 @@ def write(self, file_name, rr=None, dr=None, rd=None, file_type=None, precision= if rd is not None: raise TypeError("rr must be provided if rd is not None") else: - xi, varxi = self.calculateXi(rr,dr,rd) + xi, varxi = self.calculateXi(rr=rr, dr=dr, rd=rd) col_names += [ 'xi','sigma_xi','DD','RR' ] columns += [ xi, np.sqrt(varxi), @@ -786,8 +793,8 @@ def write(self, file_name, rr=None, dr=None, rd=None, file_type=None, precision= file_name, col_names, columns, params=params, precision=precision, file_type=file_type, logger=self.logger) - - def read(self, file_name, file_type=None): + @depr_pos_kwargs + def read(self, file_name, *, file_type=None): """Read in values from a file. This should be a file that was written by TreeCorr, preferably a FITS file, so there @@ -827,7 +834,8 @@ def read(self, file_name, file_type=None): self.xi = data['xi'] self.varxi = data['sigma_xi']**2 - def calculateNapSq(self, rr, R=None, dr=None, rd=None, m2_uform=None): + @depr_pos_kwargs + def calculateNapSq(self, *, rr, R=None, dr=None, rd=None, m2_uform=None): r"""Calculate the corrollary to the aperture mass statistics for counts. .. math:: @@ -901,7 +909,7 @@ def calculateNapSq(self, rr, R=None, dr=None, rd=None, m2_uform=None): 120. + ssqa*(2320. + ssqa*(-754. + ssqa*(132. - 9.*ssqa)))) Tp *= ssq - xi, varxi = self.calculateXi(rr,dr,rd) + xi, varxi = self.calculateXi(rr=rr, dr=dr, rd=rd) # Now do the integral by taking the matrix products. # Note that dlogr = bin_size diff --git a/treecorr/util.py b/treecorr/util.py index 74d3e2af..5f55ba69 100644 --- a/treecorr/util.py +++ b/treecorr/util.py @@ -946,11 +946,8 @@ def func_with_kwargs(a, *, b=3, c=4): params = inspect.signature(fn).parameters nparams = len(params) - # NB. This will trigger a TypeError on initial use if there are no kw-only parameters. 
- # This is actually a feature, since it's probably a sign that the developer forgot to - # add the *, item to the parameter list. This decorator doesn't make sense without it. - nkwargs = len(fn.__kwdefaults__) - npos = nparams - nkwargs + npos = np.sum([p.kind in [p.POSITIONAL_ONLY, p.POSITIONAL_OR_KEYWORD] for p in params.values()]) + assert nparams > npos # Otherwise developer probably forgot to add the * to the signature! @functools.wraps(fn) def wrapper(*args, **kwargs): From b33ed2ec9a9a86454ec7f751b73725c893996395 Mon Sep 17 00:00:00 2001 From: Mike Jarvis Date: Sat, 5 Jun 2021 02:36:36 -0400 Subject: [PATCH 7/9] Convert kwargs to kw-only for BinnedCorr3 and all 3pt Correlation classes --- tests/test_nnn.py | 49 +++++++++++++++------------- tests/test_patch3pt.py | 36 ++++++++++----------- treecorr/binnedcorr3.py | 30 ++++++++++------- treecorr/corr3.py | 14 ++++---- treecorr/gggcorrelation.py | 64 ++++++++++++++++++++++-------------- treecorr/kkkcorrelation.py | 56 ++++++++++++++++++++------------ treecorr/nnncorrelation.py | 66 +++++++++++++++++++++++--------------- 7 files changed, 186 insertions(+), 129 deletions(-) diff --git a/tests/test_nnn.py b/tests/test_nnn.py index 83e1bbc7..0ae85178 100644 --- a/tests/test_nnn.py +++ b/tests/test_nnn.py @@ -18,7 +18,7 @@ import coord import fitsio -from test_helper import get_script_name, do_pickle, assert_raises, CaptureLog, timer +from test_helper import get_script_name, do_pickle, assert_raises, CaptureLog, timer, assert_warns from test_helper import is_ccw, is_ccw_3d @timer @@ -633,7 +633,7 @@ def test_direct_count_auto(): min_v=min_v, max_v=max_v, nvbins=nvbins, brute=True, verbose=0, rng=rng) rrr.process(rcat) - zeta, varzeta = ddd.calculateZeta(rrr) + zeta, varzeta = ddd.calculateZeta(rrr=rrr) # Semi-gratuitous check of BinnedCorr3.rng access. assert rrr.rng is rng @@ -695,7 +695,12 @@ def test_direct_count_auto(): brute=True, verbose=0) drr.process(cat, rcat) rdd.process(rcat, cat) - zeta, varzeta = ddd.calculateZeta(rrr,drr,rdd) + zeta, varzeta = ddd.calculateZeta(rrr=rrr, drr=drr, rdd=rdd) + + with assert_warns(FutureWarning): + zeta2, varzeta2 = ddd.calculateZeta(rrr, drr, rdd) + np.testing.assert_array_equal(zeta2, zeta) + np.testing.assert_array_equal(varzeta2, varzeta) config['nnn_statistic'] = 'compensated' treecorr.corr3(config, logger) @@ -2134,7 +2139,7 @@ def test_nnn(): #print('d3 = ',d3) true_zeta = (1./(12.*np.pi**2)) * (L/s)**4 * np.exp(-(d1**2+d2**2+d3**2)/(6.*s**2)) - 1. - zeta, varzeta = ddd.calculateZeta(rrr) + zeta, varzeta = ddd.calculateZeta(rrr=rrr) print('zeta = ',zeta) print('true_zeta = ',true_zeta) print('ratio = ',zeta / true_zeta) @@ -2177,7 +2182,7 @@ def test_nnn(): np.testing.assert_almost_equal(header['tot']/ddd.tot, 1.) out_file_name2 = os.path.join('output','nnn_out2.fits') - ddd.write(out_file_name2, rrr) + ddd.write(out_file_name2, rrr=rrr) data = fitsio.read(out_file_name2) np.testing.assert_almost_equal(data['r_nom'], np.exp(ddd.logr).flatten()) np.testing.assert_almost_equal(data['u_nom'], ddd.u.flatten()) @@ -2248,7 +2253,7 @@ def test_nnn(): print('Skipping hdf5 output file, since h5py not installed.') else: out_file_name3 = os.path.join('output','nnn_out3.hdf5') - ddd.write(out_file_name3, rrr) + ddd.write(out_file_name3, rrr=rrr) with h5py.File(out_file_name3, 'r') as hdf: data = hdf['/'] np.testing.assert_almost_equal(data['r_nom'], np.exp(ddd.logr).flatten()) @@ -2296,35 +2301,35 @@ def test_nnn(): # Test compensated zeta # First just check the mechanics. 
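# For reference, a minimal self-contained sketch of the pattern implemented by the
# depr_pos_kwargs decorator used throughout these patches (cf. the treecorr/util.py
# hunk above): count the positional parameters in the wrapped signature, and when a
# caller passes more positional arguments than that, shift the extras into keyword
# arguments and emit a FutureWarning.  Details beyond the util.py hunk (the names and
# warning text here) are illustrative, not TreeCorr's exact code.
import functools
import inspect
import warnings

def depr_pos_kwargs_sketch(fn):
    params = inspect.signature(fn).parameters
    pos = [name for name, p in params.items()
           if p.kind in (p.POSITIONAL_ONLY, p.POSITIONAL_OR_KEYWORD)]
    kwonly = [name for name, p in params.items() if p.kind == p.KEYWORD_ONLY]
    assert kwonly  # Otherwise the developer probably forgot the * in the signature.

    @functools.wraps(fn)
    def wrapper(*args, **kwargs):
        if len(args) > len(pos):
            warnings.warn("These arguments are now keyword-only; passing them "
                          "positionally is deprecated.", FutureWarning)
            # Map the extra positional values onto the keyword-only names in order.
            kwargs.update(zip(kwonly, args[len(pos):]))
            args = args[:len(pos)]
        return fn(*args, **kwargs)
    return wrapper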
# If we don't actually do all the cross terms, then compensated is the same as simple. - zeta2, varzeta2 = ddd.calculateZeta(rrr,drr=rrr,rdd=rrr) + zeta2, varzeta2 = ddd.calculateZeta(rrr=rrr, drr=rrr, rdd=rrr) print('fake compensated zeta = ',zeta2) np.testing.assert_allclose(zeta2, zeta) # Error to not have one of rrr, drr, rdd. with assert_raises(TypeError): - ddd.calculateZeta(drr=rrr,rdd=rrr) + ddd.calculateZeta(drr=rrr, rdd=rrr) with assert_raises(TypeError): - ddd.calculateZeta(rrr,rdd=rrr) + ddd.calculateZeta(rrr=rrr, rdd=rrr) with assert_raises(TypeError): - ddd.calculateZeta(rrr,drr=rrr) + ddd.calculateZeta(rrr=rrr, drr=rrr) rrr2 = treecorr.NNNCorrelation(min_sep=min_sep, max_sep=max_sep, nbins=nbins, min_u=min_u, max_u=max_u, min_v=min_v, max_v=max_v, nubins=nubins, nvbins=nvbins, sep_units='arcmin') # Error if any of them haven't been run yet. with assert_raises(ValueError): - ddd.calculateZeta(rrr2,drr=rrr,rdd=rrr) + ddd.calculateZeta(rrr=rrr2, drr=rrr, rdd=rrr) with assert_raises(ValueError): - ddd.calculateZeta(rrr,drr=rrr2,rdd=rrr) + ddd.calculateZeta(rrr=rrr, drr=rrr2, rdd=rrr) with assert_raises(ValueError): - ddd.calculateZeta(rrr,drr=rrr,rdd=rrr2) + ddd.calculateZeta(rrr=rrr, drr=rrr, rdd=rrr2) out_file_name3 = os.path.join('output','nnn_out3.fits') with assert_raises(TypeError): - ddd.write(out_file_name3,drr=rrr,rdd=rrr) + ddd.write(out_file_name3, drr=rrr, rdd=rrr) with assert_raises(TypeError): - ddd.write(out_file_name3,rrr=rrr,rdd=rrr) + ddd.write(out_file_name3, rrr=rrr, rdd=rrr) with assert_raises(TypeError): - ddd.write(out_file_name3,rrr=rrr,drr=rrr) + ddd.write(out_file_name3, rrr=rrr, drr=rrr) # It's too slow to test the real calculation in nosetests runs, so we stop here if not main. if __name__ != '__main__': @@ -2339,7 +2344,7 @@ def test_nnn(): drr.process(cat,rand) rdd.process(rand,cat) - zeta, varzeta = ddd.calculateZeta(rrr,drr,rdd) + zeta, varzeta = ddd.calculateZeta(rrr=rrr, drr=drr, rdd=rdd) print('compensated zeta = ',zeta) xi1 = (1./(4.*np.pi)) * (L/s)**2 * np.exp(-d1**2/(4.*s**2)) - 1. @@ -2358,7 +2363,7 @@ def test_nnn(): np.testing.assert_allclose(np.log(np.abs(zeta)), np.log(np.abs(true_zeta)), atol=0.1*tol_factor) out_file_name3 = os.path.join('output','nnn_out3.fits') - ddd.write(out_file_name3, rrr,drr,rdd) + ddd.write(out_file_name3, rrr=rrr, drr=drr, rdd=rdd) data = fitsio.read(out_file_name3) np.testing.assert_almost_equal(data['r_nom'], np.exp(ddd.logr).flatten()) np.testing.assert_almost_equal(data['u_nom'], ddd.u.flatten()) @@ -2523,7 +2528,7 @@ def test_3d(): true_zeta = ((1./(24.*np.sqrt(3)*np.pi**3)) * (L/s)**6 * np.exp(-(d1**2+d2**2+d3**2)/(6.*s**2)) - 1.) 
- zeta, varzeta = ddd.calculateZeta(rrr) + zeta, varzeta = ddd.calculateZeta(rrr=rrr) print('zeta = ',zeta.flatten()) print('true_zeta = ',true_zeta.flatten()) print('ratio = ',(zeta / true_zeta).flatten()) @@ -2551,7 +2556,7 @@ def test_3d(): rand = treecorr.Catalog(x=rx, y=ry, z=rz) ddd.process(cat) rrr.process(rand) - zeta, varzeta = ddd.calculateZeta(rrr) + zeta, varzeta = ddd.calculateZeta(rrr=rrr) np.testing.assert_allclose(zeta, true_zeta, rtol=0.1*tol_factor) np.testing.assert_allclose(np.log(np.abs(zeta)), np.log(np.abs(true_zeta)), atol=0.1*tol_factor) @@ -2622,8 +2627,8 @@ def test_list(): np.testing.assert_allclose(rrr.ntri, rrrx.ntri, rtol=0.1) np.testing.assert_allclose(rrr.tot, rrrx.tot) - zeta, varzeta = ddd.calculateZeta(rrr) - zetax, varzetax = dddx.calculateZeta(rrrx) + zeta, varzeta = ddd.calculateZeta(rrr=rrr) + zetax, varzetax = dddx.calculateZeta(rrr=rrrx) print('zeta = ',zeta) print('zetax = ',zetax) #print('ratio = ',zeta/zetax) diff --git a/tests/test_patch3pt.py b/tests/test_patch3pt.py index 820c9eb9..1395f90f 100644 --- a/tests/test_patch3pt.py +++ b/tests/test_patch3pt.py @@ -928,8 +928,8 @@ def test_nnn_jk(): ddd.process(cat) rdd.process(rand_cat, cat) drr.process(cat, rand_cat) - zeta_s, _ = ddd.calculateZeta(rrr) - zeta_c, _ = ddd.calculateZeta(rrr, drr, rdd) + zeta_s, _ = ddd.calculateZeta(rrr=rrr) + zeta_c, _ = ddd.calculateZeta(rrr=rrr, drr=drr, rdd=rdd) print('simple: ',zeta_s.ravel()) print('compensated: ',zeta_c.ravel()) all_nnns.append(zeta_s.ravel()) @@ -984,8 +984,8 @@ def test_nnn_jk(): ddd.process(cat) rdd.process(rand_cat, cat) drr.process(cat, rand_cat) - zeta_s1, var_zeta_s1 = ddd.calculateZeta(rrr) - zeta_c1, var_zeta_c1 = ddd.calculateZeta(rrr, drr, rdd) + zeta_s1, var_zeta_s1 = ddd.calculateZeta(rrr=rrr) + zeta_c1, var_zeta_c1 = ddd.calculateZeta(rrr=rrr, drr=drr, rdd=rdd) print('DDD:',ddd.tot) print(ddd.ntri.ravel()) print('simple: ') @@ -1025,7 +1025,7 @@ def test_nnn_jk(): with assert_raises(RuntimeError): dddp.estimate_cov('jackknife') - zeta_s2, var_zeta_s2 = dddp.calculateZeta(rrr) + zeta_s2, var_zeta_s2 = dddp.calculateZeta(rrr=rrr) print('DDD:',dddp.tot) print(dddp.ntri.ravel()) print('simple: ') @@ -1071,7 +1071,7 @@ def test_nnn_jk(): print('max log(ratio) = ',np.max(np.abs(np.log(np.diagonal(cov))-np.log(var_nnns)))) np.testing.assert_allclose(np.log(np.diagonal(cov)), np.log(var_nnns), atol=2.2*tol_factor) - zeta_c2, var_zeta_c2 = dddp.calculateZeta(rrr, drrp, rddp) + zeta_c2, var_zeta_c2 = dddp.calculateZeta(rrr=rrr, drr=drrp, rdd=rddp) print('compensated: ') print('DRR:',drrp.tot) print(drrp.ntri.ravel()) @@ -1118,7 +1118,7 @@ def test_nnn_jk(): drrp.process(catp, rand_catp) rddp.process(rand_catp, catp) print('simple: ') - zeta_s2, var_zeta_s2 = dddp.calculateZeta(rrrp) + zeta_s2, var_zeta_s2 = dddp.calculateZeta(rrr=rrrp) print('DDD:',dddp.tot) print(dddp.ntri.ravel()) print(zeta_s2.ravel()) @@ -1168,7 +1168,7 @@ def test_nnn_jk(): t0 = time.time() print('compensated: ') - zeta_c2, var_zeta_c2 = dddp.calculateZeta(rrrp, drrp, rddp) + zeta_c2, var_zeta_c2 = dddp.calculateZeta(rrr=rrrp, drr=drrp, rdd=rddp) print('DRR:',drrp.tot) print(drrp.ntri.ravel()) print('RDD:',rddp.tot) @@ -1239,7 +1239,7 @@ def cc_zeta(corrs): d1._sum(d._all) r1 = r.n1n2n3.copy() r1._sum(r._all) - zeta, _ = d1.calculateZeta(r1) + zeta, _ = d1.calculateZeta(rrr=r1) return zeta.ravel() print('simple: ') @@ -1474,12 +1474,12 @@ def test_brute_jk(): drr1.process(cat1, rand_cat1) rdd1.process(rand_cat1, cat1) rrr1.process(rand_cat1) - 
zeta1_list.append(ddd1.calculateZeta(rrr1)[0].ravel()) - zeta2_list.append(ddd1.calculateZeta(rrr1, drr1, rdd1)[0].ravel()) + zeta1_list.append(ddd1.calculateZeta(rrr=rrr1)[0].ravel()) + zeta2_list.append(ddd1.calculateZeta(rrr=rrr1, drr=drr1, rdd=rdd1)[0].ravel()) print('simple') zeta1_list = np.array(zeta1_list) - zeta2, varzeta2 = ddd.calculateZeta(rrr) + zeta2, varzeta2 = ddd.calculateZeta(rrr=rrr) varzeta1 = np.diagonal(np.cov(zeta1_list.T, bias=True)) * (len(zeta1_list)-1) print('NNN: treecorr jackknife varzeta = ',ddd.varzeta.ravel()) print('NNN: direct jackknife varzeta = ',varzeta1) @@ -1488,7 +1488,7 @@ def test_brute_jk(): print('compensated') print(zeta2_list) zeta2_list = np.array(zeta2_list) - zeta2, varzeta2 = ddd.calculateZeta(rrr, drr=drr, rdd=rdd) + zeta2, varzeta2 = ddd.calculateZeta(rrr=rrr, drr=drr, rdd=rdd) varzeta2 = np.diagonal(np.cov(zeta2_list.T, bias=True)) * (len(zeta2_list)-1) print('NNN: treecorr jackknife varzeta = ',ddd.varzeta.ravel()) print('NNN: direct jackknife varzeta = ',varzeta2) @@ -1504,15 +1504,15 @@ def test_brute_jk(): drr3.process(cat3, rand_cat3) rdd3.process(rand_cat3, cat3) with assert_raises(RuntimeError): - ddd.calculateZeta(rrr3) + ddd.calculateZeta(rrr=rrr3) with assert_raises(RuntimeError): - ddd.calculateZeta(rrr3, drr, rdd) + ddd.calculateZeta(rrr=rrr3, drr=drr, rdd=rdd) with assert_raises(RuntimeError): - ddd.calculateZeta(rrr, drr3, rdd3) + ddd.calculateZeta(rrr=rrr, drr=drr3, rdd=rdd3) with assert_raises(RuntimeError): - ddd.calculateZeta(rrr, drr, rdd3) + ddd.calculateZeta(rrr=rrr, drr=drr, rdd=rdd3) with assert_raises(RuntimeError): - ddd.calculateZeta(rrr, drr3, rdd) + ddd.calculateZeta(rrr=rrr, drr=drr3, rdd=rdd) @timer diff --git a/treecorr/binnedcorr3.py b/treecorr/binnedcorr3.py index 101f6a7b..1801e672 100644 --- a/treecorr/binnedcorr3.py +++ b/treecorr/binnedcorr3.py @@ -23,6 +23,7 @@ from . import _lib from .config import merge_config, setup_logger, get from .util import parse_metric, metric_enum, coord_enum, set_omp_threads, lazy_property +from .util import depr_pos_kwargs from .binnedcorr2 import estimate_multi_cov class Namespace(object): @@ -285,7 +286,8 @@ class BinnedCorr3(object): 'How many threads should be used. num_threads <= 0 means auto based on num cores.'), } - def __init__(self, config=None, logger=None, rng=None, **kwargs): + @depr_pos_kwargs + def __init__(self, config=None, *, logger=None, rng=None, **kwargs): self._corr = None # Do this first to make sure we always have it for __del__ self.config = merge_config(config,kwargs,BinnedCorr3._valid_params) if logger is None: @@ -673,7 +675,7 @@ def is_my_job(my_indices, i, j, k, n): return False if len(cat1) == 1 and cat1[0].npatch == 1: - self.process_auto(cat1[0], metric, num_threads) + self.process_auto(cat1[0], metric=metric, num_threads=num_threads) else: # When patch processing, keep track of the pair-wise results. @@ -697,7 +699,7 @@ def is_my_job(my_indices, i, j, k, n): if is_my_job(my_indices, i, i, i, n): temp.clear() self.logger.info('Process patch %d auto',i) - temp.process_auto(c1,metric,num_threads) + temp.process_auto(c1, metric=metric, num_threads=num_threads) if (i,i,i) in self.results and self.results[(i,i,i)].nonzero: self.results[(i,i,i)] += temp else: @@ -712,7 +714,7 @@ def is_my_job(my_indices, i, j, k, n): # One point in c1, 2 in c2. 
if not self._trivially_zero(c1,c2,c2,metric): self.logger.info('Process patches %d,%d cross12',i,j) - temp.process_cross12(c1,c2, metric, num_threads) + temp.process_cross12(c1, c2, metric=metric, num_threads=num_threads) else: self.logger.info('Skipping %d,%d pair, which are too far apart ' + 'for this set of separations',i,j) @@ -730,7 +732,7 @@ def is_my_job(my_indices, i, j, k, n): # One point in c2, 2 in c1. if not self._trivially_zero(c1,c1,c2,metric): self.logger.info('Process patches %d,%d cross12',j,i) - temp.process_cross12(c2,c1, metric, num_threads) + temp.process_cross12(c2, c1, metric=metric, num_threads=num_threads) if temp.nonzero: if (i,i,j) in self.results and self.results[(i,i,j)].nonzero: self.results[(i,i,j)] += temp @@ -749,7 +751,8 @@ def is_my_job(my_indices, i, j, k, n): if not self._trivially_zero(c1,c2,c3,metric): self.logger.info('Process patches %d,%d,%d cross',i,j,k) - temp.process_cross(c1,c2,c3, metric, num_threads) + temp.process_cross(c1, c2, c3, metric=metric, + num_threads=num_threads) else: self.logger.info('Skipping %d,%d,%d, which are too far apart ' + 'for this set of separations',i,j,k) @@ -819,7 +822,7 @@ def is_my_job(my_indices, i, j, k, n1, n2): return ret if len(cat1) == 1 and len(cat2) == 1 and cat1[0].npatch == 1 and cat2[0].npatch == 1: - self.process_cross12(cat1[0], cat2[0], metric, num_threads) + self.process_cross12(cat1[0], cat2[0], metric=metric, num_threads=num_threads) else: # When patch processing, keep track of the pair-wise results. if self.npatch1 == 1: @@ -852,7 +855,7 @@ def is_my_job(my_indices, i, j, k, n1, n2): # One point in c1, 2 in c2. if not self._trivially_zero(c1,c2,c2,metric): self.logger.info('Process patches %d,%d cross12',i,j) - temp.process_cross12(c1,c2, metric, num_threads) + temp.process_cross12(c1, c2, metric=metric, num_threads=num_threads) else: self.logger.info('Skipping %d,%d pair, which are too far apart ' + 'for this set of separations',i,j) @@ -874,7 +877,8 @@ def is_my_job(my_indices, i, j, k, n1, n2): if not self._trivially_zero(c1,c2,c3,metric): self.logger.info('Process patches %d,%d,%d cross',i,j,k) - temp.process_cross(c1,c2,c3, metric, num_threads) + temp.process_cross(c1, c2, c3, metric=metric, + num_threads=num_threads) else: self.logger.info('Skipping %d,%d,%d, which are too far apart ' + 'for this set of separations',i,j,k) @@ -934,7 +938,7 @@ def is_my_job(my_indices, i, j, k, n1, n2, n3): if (len(cat1) == 1 and len(cat2) == 1 and len(cat3) == 1 and cat1[0].npatch == 1 and cat2[0].npatch == 1 and cat3[0].npatch == 1): - self.process_cross(cat1[0],cat2[0],cat3[0], metric, num_threads) + self.process_cross(cat1[0], cat2[0], cat3[0], metric=metric, num_threads=num_threads) else: # When patch processing, keep track of the pair-wise results. 
if self.npatch1 == 1: @@ -972,7 +976,8 @@ def is_my_job(my_indices, i, j, k, n1, n2, n3): temp.clear() if not self._trivially_zero(c1,c2,c3,metric): self.logger.info('Process patches %d,%d,%d cross',i,j,k) - temp.process_cross(c1,c2,c3, metric, num_threads) + temp.process_cross(c1, c2, c3, metric=metric, + num_threads=num_threads) else: self.logger.info('Skipping %d,%d,%d, which are too far apart ' + 'for this set of separations',i,j,k) @@ -1027,7 +1032,8 @@ def getWeight(self): """ return self.weight.ravel() - def estimate_cov(self, method, func=None): + @depr_pos_kwargs + def estimate_cov(self, method, *, func=None): """Estimate the covariance matrix based on the data This function will calculate an estimate of the covariance matrix according to the diff --git a/treecorr/corr3.py b/treecorr/corr3.py index 1202d11a..f6343ecd 100644 --- a/treecorr/corr3.py +++ b/treecorr/corr3.py @@ -117,7 +117,7 @@ def corr3(config, logger=None): # Do GGG correlation function if necessary if 'ggg_file_name' in config or 'm3_file_name' in config: logger.warning("Performing GGG calculations...") - ggg = GGGCorrelation(config,logger) + ggg = GGGCorrelation(config, logger=logger) ggg.process(cat1) logger.info("Done GGG calculations.") if 'ggg_file_name' in config: @@ -130,7 +130,7 @@ def corr3(config, logger=None): # Do NNN correlation function if necessary if 'nnn_file_name' in config: logger.warning("Performing DDD calculations...") - ddd = NNNCorrelation(config,logger) + ddd = NNNCorrelation(config, logger=logger) ddd.process(cat1) logger.info("Done DDD calculations.") @@ -141,26 +141,26 @@ def corr3(config, logger=None): rrr = None else: logger.warning("Performing RRR calculations...") - rrr = NNNCorrelation(config,logger) + rrr = NNNCorrelation(config, logger=logger) rrr.process(rand1) logger.info("Done RRR calculations.") if rrr is not None and config['nnn_statistic'] == 'compensated': logger.warning("Performing DRR calculations...") - drr = NNNCorrelation(config,logger) + drr = NNNCorrelation(config, logger=logger) drr.process(cat1,rand1) logger.info("Done DRR calculations.") logger.warning("Performing DDR calculations...") - rdd = NNNCorrelation(config,logger) + rdd = NNNCorrelation(config, logger=logger) rdd.process(rand1,cat1) logger.info("Done DDR calculations.") - ddd.write(config['nnn_file_name'],rrr,drr,rdd) + ddd.write(config['nnn_file_name'], rrr=rrr, drr=drr, rdd=rdd) logger.warning("Wrote NNN correlation to %s",config['nnn_file_name']) # Do KKK correlation function if necessary if 'kkk_file_name' in config: logger.warning("Performing KKK calculations...") - kkk = KKKCorrelation(config,logger) + kkk = KKKCorrelation(config, logger=logger) kkk.process(cat1) logger.info("Done KKK calculations.") kkk.write(config['kkk_file_name']) diff --git a/treecorr/gggcorrelation.py b/treecorr/gggcorrelation.py index a656c9ad..64bd5e47 100644 --- a/treecorr/gggcorrelation.py +++ b/treecorr/gggcorrelation.py @@ -22,6 +22,7 @@ from .binnedcorr3 import BinnedCorr3 from .util import double_ptr as dp from .util import gen_read, gen_write, gen_multi_read, gen_multi_write +from .util import depr_pos_kwargs class GGGCorrelation(BinnedCorr3): @@ -141,10 +142,11 @@ class GGGCorrelation(BinnedCorr3): **kwargs: See the documentation for `BinnedCorr3` for the list of allowed keyword arguments, which may be passed either directly or in the config dict. 
""" - def __init__(self, config=None, logger=None, **kwargs): + @depr_pos_kwargs + def __init__(self, config=None, *, logger=None, **kwargs): """Initialize `GGGCorrelation`. See class doc for details. """ - BinnedCorr3.__init__(self, config, logger, **kwargs) + BinnedCorr3.__init__(self, config, logger=logger, **kwargs) self._ro._d1 = 3 # GData self._ro._d2 = 3 # GData @@ -276,7 +278,8 @@ def copy(self): def __repr__(self): return 'GGGCorrelation(config=%r)'%self.config - def process_auto(self, cat, metric=None, num_threads=None): + @depr_pos_kwargs + def process_auto(self, cat, *, metric=None, num_threads=None): """Process a single catalog, accumulating the auto-correlation. This accumulates the auto-correlation for the given catalog. After @@ -310,7 +313,8 @@ def process_auto(self, cat, metric=None, num_threads=None): _lib.ProcessAuto3(self.corr, field.data, self.output_dots, field._d, self._coords, self._bintype, self._metric) - def process_cross12(self, cat1, cat2, metric=None, num_threads=None): + @depr_pos_kwargs + def process_cross12(self, cat1, cat2, *, metric=None, num_threads=None): """Process two catalogs, accumulating the 3pt cross-correlation, where one of the points in each triangle come from the first catalog, and two come from the second. @@ -360,7 +364,8 @@ def process_cross12(self, cat1, cat2, metric=None, num_threads=None): f1._d, f2._d, self._coords, self._bintype, self._metric) - def process_cross(self, cat1, cat2, cat3, metric=None, num_threads=None): + @depr_pos_kwargs + def process_cross(self, cat1, cat2, cat3, *, metric=None, num_threads=None): """Process a set of three catalogs, accumulating the 3pt cross-correlation. This accumulates the cross-correlation for the given catalogs as part of a larger @@ -568,7 +573,8 @@ def _sum(self, others): np.sum([c.weight for c in others], axis=0, out=self.weight) np.sum([c.ntri for c in others], axis=0, out=self.ntri) - def process(self, cat1, cat2=None, cat3=None, metric=None, num_threads=None, + @depr_pos_kwargs + def process(self, cat1, cat2=None, cat3=None, *, metric=None, num_threads=None, comm=None, low_mem=False, initialize=True, finalize=True): """Compute the 3pt correlation function. @@ -672,7 +678,8 @@ def getWeight(self): """ return np.concatenate([self.weight.ravel()] * 4) - def write(self, file_name, file_type=None, precision=None): + @depr_pos_kwargs + def write(self, file_name, *, file_type=None, precision=None): r"""Write the correlation function to the file, file_name. As described in the doc string for `GGGCorrelation`, we use the "natural components" of @@ -755,7 +762,8 @@ def write(self, file_name, file_type=None, precision=None): file_name, col_names, columns, params=params, precision=precision, file_type=file_type, logger=self.logger) - def read(self, file_name, file_type=None): + @depr_pos_kwargs + def read(self, file_name, *, file_type=None): """Read in values from a file. This should be a file that was written by TreeCorr, preferably a FITS file, so there @@ -915,7 +923,8 @@ def _calculateT(cls, s, t, k1, k2, k3): return T0, T1, T2, T3 - def calculateMap3(self, R=None, k2=1, k3=1): + @depr_pos_kwargs + def calculateMap3(self, *, R=None, k2=1, k3=1): r"""Calculate the skewness of the aperture mass from the correlation function. The equations for this come from Jarvis, Bernstein & Jain (2004, MNRAS, 352). 
@@ -1116,7 +1125,8 @@ def calculateMap3(self, R=None, k2=1, k3=1): return map3, mapmapmx, mapmxmap, mxmapmap, mxmxmap, mxmapmx, mapmxmx, mx3, var - def writeMap3(self, file_name, R=None, file_type=None, precision=None): + @depr_pos_kwargs + def writeMap3(self, file_name, *, R=None, file_type=None, precision=None): r"""Write the aperture mass skewness based on the correlation function to the file, file_name. @@ -1147,7 +1157,7 @@ def writeMap3(self, file_name, R=None, file_type=None, precision=None): if R is None: R = self.rnom1d - stats = self.calculateMap3(R) + stats = self.calculateMap3(R=R) if precision is None: precision = self.config.get('precision', 4) @@ -1233,21 +1243,22 @@ class GGGCrossCorrelation(BinnedCorr3): **kwargs: See the documentation for `BinnedCorr3` for the list of allowed keyword arguments, which may be passed either directly or in the config dict. """ - def __init__(self, config=None, logger=None, **kwargs): + @depr_pos_kwargs + def __init__(self, config=None, *, logger=None, **kwargs): """Initialize `GGGCrossCorrelation`. See class doc for details. """ - BinnedCorr3.__init__(self, config, logger, **kwargs) + BinnedCorr3.__init__(self, config, logger=logger, **kwargs) self._ro._d1 = 3 # GData self._ro._d2 = 3 # GData self._ro._d3 = 3 # GData - self.g1g2g3 = GGGCorrelation(config, logger, **kwargs) - self.g1g3g2 = GGGCorrelation(config, logger, **kwargs) - self.g2g1g3 = GGGCorrelation(config, logger, **kwargs) - self.g2g3g1 = GGGCorrelation(config, logger, **kwargs) - self.g3g1g2 = GGGCorrelation(config, logger, **kwargs) - self.g3g2g1 = GGGCorrelation(config, logger, **kwargs) + self.g1g2g3 = GGGCorrelation(config, logger=logger, **kwargs) + self.g1g3g2 = GGGCorrelation(config, logger=logger, **kwargs) + self.g2g1g3 = GGGCorrelation(config, logger=logger, **kwargs) + self.g2g3g1 = GGGCorrelation(config, logger=logger, **kwargs) + self.g3g1g2 = GGGCorrelation(config, logger=logger, **kwargs) + self.g3g2g1 = GGGCorrelation(config, logger=logger, **kwargs) self._all = [self.g1g2g3, self.g1g3g2, self.g2g1g3, self.g2g3g1, self.g3g1g2, self.g3g2g1] self.logger.debug('Finished building GGGCrossCorr') @@ -1296,7 +1307,8 @@ def copy(self): def __repr__(self): return 'GGGCrossCorrelation(config=%r)'%self.config - def process_cross12(self, cat1, cat2, metric=None, num_threads=None): + @depr_pos_kwargs + def process_cross12(self, cat1, cat2, *, metric=None, num_threads=None): """Process two catalogs, accumulating the 3pt cross-correlation, where one of the points in each triangle come from the first catalog, and two come from the second. @@ -1354,7 +1366,8 @@ def process_cross12(self, cat1, cat2, metric=None, num_threads=None): f1._d, f2._d, self._coords, self._bintype, self._metric) - def process_cross(self, cat1, cat2, cat3, metric=None, num_threads=None): + @depr_pos_kwargs + def process_cross(self, cat1, cat2, cat3, *, metric=None, num_threads=None): """Process a set of three catalogs, accumulating the 3pt cross-correlation. This accumulates the cross-correlation for the given catalogs. After @@ -1470,7 +1483,8 @@ def _sum(self, others): for i, ggg in enumerate(self._all): ggg._sum([c._all[i] for c in others]) - def process(self, cat1, cat2, cat3=None, metric=None, num_threads=None, + @depr_pos_kwargs + def process(self, cat1, cat2, cat3=None, *, metric=None, num_threads=None, comm=None, low_mem=False, initialize=True, finalize=True): """Accumulate the cross-correlation of the points in the given Catalogs: cat1, cat2, cat3. 
@@ -1561,7 +1575,8 @@ def getWeight(self): """ return np.concatenate([ggg.getWeight() for ggg in self._all]) - def write(self, file_name, file_type=None, precision=None): + @depr_pos_kwargs + def write(self, file_name, *, file_type=None, precision=None): r"""Write the cross-correlation functions to the file, file_name. Parameters: @@ -1597,7 +1612,8 @@ def write(self, file_name, file_type=None, precision=None): file_name, col_names, group_names, columns, params=params, precision=precision, file_type=file_type, logger=self.logger) - def read(self, file_name, file_type=None): + @depr_pos_kwargs + def read(self, file_name, *, file_type=None): """Read in values from a file. This should be a file that was written by TreeCorr, preferably a FITS file, so there diff --git a/treecorr/kkkcorrelation.py b/treecorr/kkkcorrelation.py index ba2e312e..838a4e1b 100644 --- a/treecorr/kkkcorrelation.py +++ b/treecorr/kkkcorrelation.py @@ -22,6 +22,7 @@ from .binnedcorr3 import BinnedCorr3 from .util import double_ptr as dp from .util import gen_read, gen_write, gen_multi_read, gen_multi_write +from .util import depr_pos_kwargs class KKKCorrelation(BinnedCorr3): @@ -108,10 +109,11 @@ class KKKCorrelation(BinnedCorr3): **kwargs: See the documentation for `BinnedCorr3` for the list of allowed keyword arguments, which may be passed either directly or in the config dict. """ - def __init__(self, config=None, logger=None, **kwargs): + @depr_pos_kwargs + def __init__(self, config=None, *, logger=None, **kwargs): """Initialize `KKKCorrelation`. See class doc for details. """ - BinnedCorr3.__init__(self, config, logger, **kwargs) + BinnedCorr3.__init__(self, config, logger=logger, **kwargs) self._ro._d1 = 2 # KData self._ro._d2 = 2 # KData @@ -207,7 +209,8 @@ def copy(self): def __repr__(self): return 'KKKCorrelation(config=%r)'%self.config - def process_auto(self, cat, metric=None, num_threads=None): + @depr_pos_kwargs + def process_auto(self, cat, *, metric=None, num_threads=None): """Process a single catalog, accumulating the auto-correlation. This accumulates the auto-correlation for the given catalog. After @@ -241,7 +244,8 @@ def process_auto(self, cat, metric=None, num_threads=None): _lib.ProcessAuto3(self.corr, field.data, self.output_dots, field._d, self._coords, self._bintype, self._metric) - def process_cross12(self, cat1, cat2, metric=None, num_threads=None): + @depr_pos_kwargs + def process_cross12(self, cat1, cat2, *, metric=None, num_threads=None): """Process two catalogs, accumulating the 3pt cross-correlation, where one of the points in each triangle come from the first catalog, and two come from the second. @@ -291,7 +295,8 @@ def process_cross12(self, cat1, cat2, metric=None, num_threads=None): f1._d, f2._d, self._coords, self._bintype, self._metric) - def process_cross(self, cat1, cat2, cat3, metric=None, num_threads=None): + @depr_pos_kwargs + def process_cross(self, cat1, cat2, cat3, *, metric=None, num_threads=None): """Process a set of three catalogs, accumulating the 3pt cross-correlation. 
This accumulates the cross-correlation for the given catalogs as part of a larger @@ -458,7 +463,8 @@ def _sum(self, others): np.sum([c.weight for c in others], axis=0, out=self.weight) np.sum([c.ntri for c in others], axis=0, out=self.ntri) - def process(self, cat1, cat2=None, cat3=None, metric=None, num_threads=None, + @depr_pos_kwargs + def process(self, cat1, cat2=None, cat3=None, *, metric=None, num_threads=None, comm=None, low_mem=False, initialize=True, finalize=True): """Compute the 3pt correlation function. @@ -541,7 +547,8 @@ def process(self, cat1, cat2=None, cat3=None, metric=None, num_threads=None, self.logger.info("vark3 = %f: sig_k = %f",vark3,math.sqrt(vark3)) self.finalize(vark1,vark2,vark3) - def write(self, file_name, file_type=None, precision=None): + @depr_pos_kwargs + def write(self, file_name, *, file_type=None, precision=None): r"""Write the correlation function to the file, file_name. The output file will include the following columns: @@ -606,7 +613,8 @@ def write(self, file_name, file_type=None, precision=None): file_name, col_names, columns, params=params, precision=precision, file_type=file_type, logger=self.logger) - def read(self, file_name, file_type=None): + @depr_pos_kwargs + def read(self, file_name, *, file_type=None): """Read in values from a file. This should be a file that was written by TreeCorr, preferably a FITS file, so there @@ -727,21 +735,22 @@ class KKKCrossCorrelation(BinnedCorr3): **kwargs: See the documentation for `BinnedCorr3` for the list of allowed keyword arguments, which may be passed either directly or in the config dict. """ - def __init__(self, config=None, logger=None, **kwargs): + @depr_pos_kwargs + def __init__(self, config=None, *, logger=None, **kwargs): """Initialize `KKKCrossCorrelation`. See class doc for details. """ - BinnedCorr3.__init__(self, config, logger, **kwargs) + BinnedCorr3.__init__(self, config, logger=logger, **kwargs) self._ro._d1 = 2 # KData self._ro._d2 = 2 # KData self._ro._d3 = 2 # KData - self.k1k2k3 = KKKCorrelation(config, logger, **kwargs) - self.k1k3k2 = KKKCorrelation(config, logger, **kwargs) - self.k2k1k3 = KKKCorrelation(config, logger, **kwargs) - self.k2k3k1 = KKKCorrelation(config, logger, **kwargs) - self.k3k1k2 = KKKCorrelation(config, logger, **kwargs) - self.k3k2k1 = KKKCorrelation(config, logger, **kwargs) + self.k1k2k3 = KKKCorrelation(config, logger=logger, **kwargs) + self.k1k3k2 = KKKCorrelation(config, logger=logger, **kwargs) + self.k2k1k3 = KKKCorrelation(config, logger=logger, **kwargs) + self.k2k3k1 = KKKCorrelation(config, logger=logger, **kwargs) + self.k3k1k2 = KKKCorrelation(config, logger=logger, **kwargs) + self.k3k2k1 = KKKCorrelation(config, logger=logger, **kwargs) self._all = [self.k1k2k3, self.k1k3k2, self.k2k1k3, self.k2k3k1, self.k3k1k2, self.k3k2k1] self.logger.debug('Finished building KKKCrossCorr') @@ -790,7 +799,8 @@ def copy(self): def __repr__(self): return 'KKKCrossCorrelation(config=%r)'%self.config - def process_cross12(self, cat1, cat2, metric=None, num_threads=None): + @depr_pos_kwargs + def process_cross12(self, cat1, cat2, *, metric=None, num_threads=None): """Process two catalogs, accumulating the 3pt cross-correlation, where one of the points in each triangle come from the first catalog, and two come from the second. 
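# Illustrative call pattern for the 3pt process methods under this change
# (kkk, cat1, cat2 are assumed to be an existing KKKCrossCorrelation and two Catalogs):
#     kkk.process_cross12(cat1, cat2, metric='Euclidean', num_threads=4)
# Passing metric or num_threads positionally still works via depr_pos_kwargs,
# but now emits a FutureWarning.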
@@ -848,7 +858,8 @@ def process_cross12(self, cat1, cat2, metric=None, num_threads=None): f1._d, f2._d, self._coords, self._bintype, self._metric) - def process_cross(self, cat1, cat2, cat3, metric=None, num_threads=None): + @depr_pos_kwargs + def process_cross(self, cat1, cat2, cat3, *, metric=None, num_threads=None): """Process a set of three catalogs, accumulating the 3pt cross-correlation. This accumulates the cross-correlation for the given catalogs. After @@ -964,7 +975,8 @@ def _sum(self, others): for i, kkk in enumerate(self._all): kkk._sum([c._all[i] for c in others]) - def process(self, cat1, cat2, cat3=None, metric=None, num_threads=None, + @depr_pos_kwargs + def process(self, cat1, cat2, cat3=None, *, metric=None, num_threads=None, comm=None, low_mem=False, initialize=True, finalize=True): """Accumulate the cross-correlation of the points in the given Catalogs: cat1, cat2, cat3. @@ -1055,7 +1067,8 @@ def getWeight(self): """ return np.concatenate([kkk.getWeight() for kkk in self._all]) - def write(self, file_name, file_type=None, precision=None): + @depr_pos_kwargs + def write(self, file_name, *, file_type=None, precision=None): r"""Write the cross-correlation functions to the file, file_name. Parameters: @@ -1087,7 +1100,8 @@ def write(self, file_name, file_type=None, precision=None): file_name, col_names, group_names, columns, params=params, precision=precision, file_type=file_type, logger=self.logger) - def read(self, file_name, file_type=None): + @depr_pos_kwargs + def read(self, file_name, *, file_type=None): """Read in values from a file. This should be a file that was written by TreeCorr, preferably a FITS file, so there diff --git a/treecorr/nnncorrelation.py b/treecorr/nnncorrelation.py index bb64b22a..d4c72eed 100644 --- a/treecorr/nnncorrelation.py +++ b/treecorr/nnncorrelation.py @@ -21,6 +21,8 @@ from .binnedcorr3 import BinnedCorr3 from .util import double_ptr as dp from .util import gen_read, gen_write, gen_multi_read, gen_multi_write, lazy_property +from .util import depr_pos_kwargs + class NNNCorrelation(BinnedCorr3): """This class handles the calculation and storage of a 2-point count-count correlation @@ -86,8 +88,8 @@ class NNNCorrelation(BinnedCorr3): >>> rrr.process(rand) # Likewise for random-random correlations >>> drr.process(cat,rand) # If desired, also do data-random correlations >>> rdd.process(rand,cat) # Also with two data and one random - >>> nnn.write(file_name,rrr,drr,...) # Write out to a file. - >>> zeta,varzeta = nnn.calculateZeta(rrr,drr,rdd) # Or get the 3pt function directly. + >>> nnn.write(file_name,rrr=rrr,drr=drr,...) # Write out to a file. + >>> zeta,varzeta = nnn.calculateZeta(rrr=rrr,drr=drr,rdd=rdd) # Or get zeta directly. Parameters: config (dict): A configuration dict that can be used to pass in kwargs if desired. @@ -100,10 +102,11 @@ class NNNCorrelation(BinnedCorr3): **kwargs: See the documentation for `BinnedCorr3` for the list of allowed keyword arguments, which may be passed either directly or in the config dict. """ - def __init__(self, config=None, logger=None, **kwargs): + @depr_pos_kwargs + def __init__(self, config=None, *, logger=None, **kwargs): """Initialize `NNNCorrelation`. See class doc for details. 
""" - BinnedCorr3.__init__(self, config, logger, **kwargs) + BinnedCorr3.__init__(self, config, logger=logger, **kwargs) self._ro._d1 = 1 # NData self._ro._d2 = 1 # NData @@ -230,7 +233,8 @@ def _zero_copy(self, tot): def __repr__(self): return 'NNNCorrelation(config=%r)'%self.config - def process_auto(self, cat, metric=None, num_threads=None): + @depr_pos_kwargs + def process_auto(self, cat, *, metric=None, num_threads=None): """Process a single catalog, accumulating the auto-correlation. This accumulates the auto-correlation for the given catalog. After @@ -265,7 +269,8 @@ def process_auto(self, cat, metric=None, num_threads=None): field._d, self._coords, self._bintype, self._metric) self.tot += (1./6.) * cat.sumw**3 - def process_cross12(self, cat1, cat2, metric=None, num_threads=None): + @depr_pos_kwargs + def process_cross12(self, cat1, cat2, *, metric=None, num_threads=None): """Process two catalogs, accumulating the 3pt cross-correlation, where one of the points in each triangle come from the first catalog, and two come from the second. @@ -316,7 +321,8 @@ def process_cross12(self, cat1, cat2, metric=None, num_threads=None): self._bintype, self._metric) self.tot += cat1.sumw * cat2.sumw**2 / 2. - def process_cross(self, cat1, cat2, cat3, metric=None, num_threads=None): + @depr_pos_kwargs + def process_cross(self, cat1, cat2, cat3, *, metric=None, num_threads=None): """Process a set of three catalogs, accumulating the 3pt cross-correlation. This accumulates the cross-correlation for the given catalogs as part of a larger @@ -503,7 +509,8 @@ def __iadd__(self, other): self.ntri[:] += other.ntri[:] return self - def process(self, cat1, cat2=None, cat3=None, metric=None, num_threads=None, + @depr_pos_kwargs + def process(self, cat1, cat2=None, cat3=None, *, metric=None, num_threads=None, comm=None, low_mem=False, initialize=True, finalize=True): """Accumulate the 3pt correlation of the points in the given Catalog(s). @@ -588,7 +595,8 @@ def getWeight(self): else: return self.tot - def calculateZeta(self, rrr, drr=None, rdd=None): + @depr_pos_kwargs + def calculateZeta(self, *, rrr, drr=None, rdd=None): r"""Calculate the 3pt function given another 3pt function of random points using the same mask, and possibly cross correlations of the data and random. @@ -802,7 +810,8 @@ def _calculate_xi_from_pairs(self, pairs): self.zeta = zeta / denom self._rrr_weight = denom - def write(self, file_name, rrr=None, drr=None, rdd=None, file_type=None, precision=None): + @depr_pos_kwargs + def write(self, file_name, *, rrr=None, drr=None, rdd=None, file_type=None, precision=None): r"""Write the correlation function to the file, file_name. Normally, at least rrr should be provided, but if this is None, then only the @@ -894,7 +903,7 @@ def write(self, file_name, rrr=None, drr=None, rdd=None, file_type=None, precisi columns += [ self.weight, self.ntri ] else: # This will check for other invalid combinations of rrr, drr, etc. - zeta, varzeta = self.calculateZeta(rrr,drr,rdd) + zeta, varzeta = self.calculateZeta(rrr=rrr, drr=drr, rdd=rdd) col_names += [ 'zeta','sigma_zeta','DDD','RRR' ] columns += [ zeta, np.sqrt(varzeta), @@ -916,7 +925,8 @@ def write(self, file_name, rrr=None, drr=None, rdd=None, file_type=None, precisi file_name, col_names, columns, params=params, precision=precision, file_type=file_type, logger=self.logger) - def read(self, file_name, file_type=None): + @depr_pos_kwargs + def read(self, file_name, *, file_type=None): """Read in values from a file. 
         This should be a file that was written by TreeCorr, preferably a FITS file, so there
@@ -1036,21 +1046,22 @@ class NNNCrossCorrelation(BinnedCorr3):
         **kwargs:           See the documentation for `BinnedCorr3` for the list of allowed keyword
                             arguments, which may be passed either directly or in the config dict.
     """
-    def __init__(self, config=None, logger=None, **kwargs):
+    @depr_pos_kwargs
+    def __init__(self, config=None, *, logger=None, **kwargs):
         """Initialize `NNNCrossCorrelation`. See class doc for details.
         """
-        BinnedCorr3.__init__(self, config, logger, **kwargs)
+        BinnedCorr3.__init__(self, config, logger=logger, **kwargs)
         self._ro._d1 = 1  # NData
         self._ro._d2 = 1  # NData
         self._ro._d3 = 1  # NData
-        self.n1n2n3 = NNNCorrelation(config, logger, **kwargs)
-        self.n1n3n2 = NNNCorrelation(config, logger, **kwargs)
-        self.n2n1n3 = NNNCorrelation(config, logger, **kwargs)
-        self.n2n3n1 = NNNCorrelation(config, logger, **kwargs)
-        self.n3n1n2 = NNNCorrelation(config, logger, **kwargs)
-        self.n3n2n1 = NNNCorrelation(config, logger, **kwargs)
+        self.n1n2n3 = NNNCorrelation(config, logger=logger, **kwargs)
+        self.n1n3n2 = NNNCorrelation(config, logger=logger, **kwargs)
+        self.n2n1n3 = NNNCorrelation(config, logger=logger, **kwargs)
+        self.n2n3n1 = NNNCorrelation(config, logger=logger, **kwargs)
+        self.n3n1n2 = NNNCorrelation(config, logger=logger, **kwargs)
+        self.n3n2n1 = NNNCorrelation(config, logger=logger, **kwargs)
         self._all = [self.n1n2n3, self.n1n3n2, self.n2n1n3, self.n2n3n1, self.n3n1n2, self.n3n2n1]
         self.tot = 0.
@@ -1115,7 +1126,8 @@ def _zero_copy(self, tot):
     def __repr__(self):
         return 'NNNCrossCorrelation(config=%r)'%self.config
 
-    def process_cross12(self, cat1, cat2, metric=None, num_threads=None):
+    @depr_pos_kwargs
+    def process_cross12(self, cat1, cat2, *, metric=None, num_threads=None):
         """Process two catalogs, accumulating the 3pt cross-correlation, where one of the
         points in each triangle come from the first catalog, and two come from the second.
 
@@ -1178,7 +1190,8 @@ def process_cross12(self, cat1, cat2, metric=None, num_threads=None):
         self.n2n3n1.tot += tot
         self.tot += tot
 
-    def process_cross(self, cat1, cat2, cat3, metric=None, num_threads=None):
+    @depr_pos_kwargs
+    def process_cross(self, cat1, cat2, cat3, *, metric=None, num_threads=None):
         """Process a set of three catalogs, accumulating the 3pt cross-correlation.
 
         This accumulates the cross-correlation for the given catalogs. After
@@ -1319,7 +1332,8 @@ def getWeight(self):
         """
         return 1.
 
-    def process(self, cat1, cat2, cat3=None, metric=None, num_threads=None,
+    @depr_pos_kwargs
+    def process(self, cat1, cat2, cat3=None, *, metric=None, num_threads=None,
                 comm=None, low_mem=False, initialize=True, finalize=True):
         """Accumulate the cross-correlation of the points in the given Catalogs: cat1, cat2, cat3.
 
@@ -1384,7 +1398,8 @@ def process(self, cat1, cat2, cat3=None, metric=None, num_threads=None,
 
         self.finalize()
 
-    def write(self, file_name, file_type=None, precision=None):
+    @depr_pos_kwargs
+    def write(self, file_name, *, file_type=None, precision=None):
         r"""Write the correlation function to the file, file_name.
 
         Parameters:
@@ -1415,7 +1430,8 @@ def write(self, file_name, file_type=None, precision=None):
             file_name, col_names, group_names, columns,
             params=params, precision=precision, file_type=file_type, logger=self.logger)
 
-    def read(self, file_name, file_type=None):
+    @depr_pos_kwargs
+    def read(self, file_name, *, file_type=None):
         """Read in values from a file.
 
         This should be a file that was written by TreeCorr, preferably a FITS file, so there
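
A short sketch of the keyword-only convention this patch introduces for the NNN classes.
This is illustrative only: cat, rand, and file_name are stand-ins, and the binning
parameters are invented for the example, not taken from the patch.

    >>> import treecorr
    >>> ddd = treecorr.NNNCorrelation(min_sep=1., max_sep=10., nbins=10)
    >>> ddd.process(cat)        # The catalogs themselves are still positional.
    >>> rrr = treecorr.NNNCorrelation(min_sep=1., max_sep=10., nbins=10)
    >>> rrr.process(rand)
    >>> zeta, varzeta = ddd.calculateZeta(rrr=rrr)  # rrr is now keyword-only.
    >>> ddd.write(file_name, rrr=rrr)               # So are file_type, precision, etc.
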
From 067a0593b0287a346d44b976b50fc5de4a732dc9 Mon Sep 17 00:00:00 2001
From: Mike Jarvis
Date: Sat, 5 Jun 2021 03:08:22 -0400
Subject: [PATCH 8/9] Add to CHANGELOG

---
 CHANGELOG.rst | 8 +++++++-
 1 file changed, 7 insertions(+), 1 deletion(-)

diff --git a/CHANGELOG.rst b/CHANGELOG.rst
index a2fac2f3..291625ba 100644
--- a/CHANGELOG.rst
+++ b/CHANGELOG.rst
@@ -6,10 +6,16 @@ See the listing below for the complete list of new features and changes.
 `_ whose issue numbers are listed below for the relevant items.
 
+Starting with this version, TreeCorr no longer supports Python 2.7.
+We currently support Python versions 3.6, 3.7, 3.8, 3.9.
+
+
 API Changes
 -----------
 
-- No longer supports Python 2.7. Supports Python versions 3.6, 3.7, 3,8, 3.9.
+- Many function parameters are now keyword-only. The old syntax allowing these parameters
+  to be positional still works, but is deprecated. (#129)
+
 
 Performance improvements
 ------------------------
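
As the CHANGELOG entry above says, the old positional syntax still works but is
deprecated. A sketch of what a caller sees, continuing the sketch above (the specific
warning class is an assumption here, not something the patch pins down):

    >>> import warnings
    >>> with warnings.catch_warnings(record=True) as w:
    ...     warnings.simplefilter('always')
    ...     zeta, varzeta = ddd.calculateZeta(rrr)  # Deprecated positional form.
    >>> len(w)   # One deprecation warning was recorded (assumed FutureWarning).
    1
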
From e2da4f7dc94da10e93fd93167de558068248191b Mon Sep 17 00:00:00 2001
From: Mike Jarvis
Date: Sat, 5 Jun 2021 03:52:12 -0400
Subject: [PATCH 9/9] init arg of kmeans_initialize_centers is clear enough
 without keyword. Allow it as positional.

---
 tests/test_kmeans.py | 16 ++++++++--------
 treecorr/field.py    |  2 +-
 2 files changed, 9 insertions(+), 9 deletions(-)

diff --git a/tests/test_kmeans.py b/tests/test_kmeans.py
index f15fa4e0..34b3aa27 100644
--- a/tests/test_kmeans.py
+++ b/tests/test_kmeans.py
@@ -472,7 +472,7 @@ def test_init_random():
     print('3d with init=random')
     npatch = 10
     field = cat.getNField()
-    cen1 = field.kmeans_initialize_centers(npatch, init='random')
+    cen1 = field.kmeans_initialize_centers(npatch, 'random')
     assert cen1.shape == (npatch, 3)
     p1 = field.kmeans_assign_patches(cen1)
     print('patches = ',np.unique(p1))
@@ -498,7 +498,7 @@ def test_init_random():
     # Use a field with lots of top level cells
     print('3d with init=random, min_top=10')
     field = cat.getNField(min_top=10)
-    cen1 = field.kmeans_initialize_centers(npatch, init='random')
+    cen1 = field.kmeans_initialize_centers(npatch, 'random')
     assert cen1.shape == (npatch, 3)
     p1 = field.kmeans_assign_patches(cen1)
     print('patches = ',np.unique(p1))
@@ -525,7 +525,7 @@ def test_init_random():
     cat = treecorr.Catalog(x=x, y=y)
     xy = np.array([x, y]).T
     field = cat.getNField()
-    cen1 = field.kmeans_initialize_centers(npatch, init='random')
+    cen1 = field.kmeans_initialize_centers(npatch, 'random')
     assert cen1.shape == (npatch, 2)
     p1 = field.kmeans_assign_patches(cen1)
     print('patches = ',np.unique(p1))
@@ -553,7 +553,7 @@ def test_init_random():
     cat = treecorr.Catalog(ra=ra, dec=dec, ra_units='rad', dec_units='rad')
     xyz = np.array([cat.x, cat.y, cat.z]).T
     field = cat.getNField()
-    cen1 = field.kmeans_initialize_centers(npatch, init='random')
+    cen1 = field.kmeans_initialize_centers(npatch, 'random')
     assert cen1.shape == (npatch, 3)
     p1 = field.kmeans_assign_patches(cen1)
     print('patches = ',np.unique(p1))
@@ -621,7 +621,7 @@ def test_init_kmpp():
     print('3d with init=kmeans++')
     npatch = 10
     field = cat.getNField()
-    cen1 = field.kmeans_initialize_centers(npatch, init='kmeans++')
+    cen1 = field.kmeans_initialize_centers(npatch, 'kmeans++')
     assert cen1.shape == (npatch, 3)
     p1 = field.kmeans_assign_patches(cen1)
     print('patches = ',np.unique(p1))
@@ -647,7 +647,7 @@ def test_init_kmpp():
     # Use a field with lots of top level cells
     print('3d with init=kmeans++, min_top=10')
     field = cat.getNField(min_top=10)
-    cen1 = field.kmeans_initialize_centers(npatch, init='kmeans++')
+    cen1 = field.kmeans_initialize_centers(npatch, 'kmeans++')
     assert cen1.shape == (npatch, 3)
     p1 = field.kmeans_assign_patches(cen1)
     print('patches = ',np.unique(p1))
@@ -674,7 +674,7 @@ def test_init_kmpp():
     cat = treecorr.Catalog(x=x, y=y)
     xy = np.array([x, y]).T
     field = cat.getNField()
-    cen1 = field.kmeans_initialize_centers(npatch, init='kmeans++')
+    cen1 = field.kmeans_initialize_centers(npatch, 'kmeans++')
     assert cen1.shape == (npatch, 2)
     p1 = field.kmeans_assign_patches(cen1)
     print('patches = ',np.unique(p1))
@@ -702,7 +702,7 @@ def test_init_kmpp():
     cat = treecorr.Catalog(ra=ra, dec=dec, ra_units='rad', dec_units='rad')
     xyz = np.array([cat.x, cat.y, cat.z]).T
     field = cat.getNField()
-    cen1 = field.kmeans_initialize_centers(npatch, init='kmeans++')
+    cen1 = field.kmeans_initialize_centers(npatch, 'kmeans++')
     assert cen1.shape == (npatch, 3)
     p1 = field.kmeans_assign_patches(cen1)
     print('patches = ',np.unique(p1))
diff --git a/treecorr/field.py b/treecorr/field.py
index cdf05ec1..ffe56d20 100644
--- a/treecorr/field.py
+++ b/treecorr/field.py
@@ -382,7 +382,7 @@ def run_kmeans(self, npatch, *, max_iter=200, tol=1.e-5, init='tree', alt=False,
         return patches, centers
 
     @depr_pos_kwargs
-    def kmeans_initialize_centers(self, npatch, *, init='tree', rng=None):
+    def kmeans_initialize_centers(self, npatch, init='tree', *, rng=None):
         """Use the field's tree structure to assign good initial centers for a K-Means run.
 
         The classic K-Means algorithm involves starting with random points as the initial