use positional-only parameters
xflr6 committed Jul 16, 2023
1 parent 8afb62c commit 2d51825
Showing 30 changed files with 243 additions and 241 deletions.
2 changes: 1 addition & 1 deletion CHANGES.rst
@@ -7,7 +7,7 @@ Version 2.6.2 (in development)

Relax `models.IsoRetirement.remedy` constraint: nullable except for `reason='split'`.

-Drop Python 3.7 support.
+Drop Python 3.7 support. Use PEP 570 positional-only parameters.


Version 2.6.1
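The two halves of that changelog entry are linked: the `/` marker from PEP 570 is a
syntax error on Python 3.7, so dropping 3.7 support is what makes positional-only
parameters available. A minimal sketch of the semantics (the function is
illustrative, not from the repository):

    def greet(name, /, *, punctuation='!'):
        # `name` is positional-only, `punctuation` is keyword-only
        return f'Hello, {name}{punctuation}'

    greet('Ada')                   # OK
    greet('Ada', punctuation='?')  # OK
    # greet(name='Ada')            # TypeError: 'name' is positional-only
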
14 changes: 7 additions & 7 deletions tests/helpers.py
@@ -3,13 +3,13 @@
MB = 2**20


-def pairwise(iterable):
+def pairwise(iterable, /):
a, b = itertools.tee(iterable)
next(b, None)
return zip(a, b)


-def get_assert_head(items, *, n):
+def get_assert_head(items, /, *, n):
head = list(itertools.islice(items, n))

assert head
@@ -18,33 +18,33 @@ def get_assert_head(items, *, n):
return head


-def assert_nonempty_string(obj):
+def assert_nonempty_string(obj, /):
assert obj is not None
assert isinstance(obj, str)


-def assert_nonempty_string_tuple(obj):
+def assert_nonempty_string_tuple(obj, /):
assert obj is not None
assert isinstance(obj, tuple)
assert all(isinstance(o, str) for o in obj)
assert obj
assert all(obj)


-def assert_nonempty_dict(obj):
+def assert_nonempty_dict(obj, /):
assert obj is not None
assert isinstance(obj, dict)
assert obj


-def assert_file_size_between(path, min, max, *, unit=MB):
+def assert_file_size_between(path, /, min, max, *, unit=MB):
assert path is not None
assert path.exists()
assert path.is_file()
assert min * unit <= path.stat().st_size <= max * unit


-def assert_valid_languoids(items, *, n):
+def assert_valid_languoids(items, /, *, n):
for path, languoid in get_assert_head(items, n=n):
assert_nonempty_string_tuple(path)
assert_nonempty_dict(languoid)
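Taking `pairwise` above as a worked example: the only caller-visible effect of the
added `/` is that the argument can no longer be passed by name (body copied from
the diff, call sites illustrative):

    import itertools

    def pairwise(iterable, /):
        a, b = itertools.tee(iterable)
        next(b, None)           # advance the second iterator by one
        return zip(a, b)

    list(pairwise('abc'))       # [('a', 'b'), ('b', 'c')]
    # pairwise(iterable='abc')  # TypeError since this commit
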
8 changes: 4 additions & 4 deletions treedb/_globals.py
@@ -60,8 +60,8 @@
RecordType = typing.Mapping[str, typing.Mapping[str, RecordValueType]]


-def filepath_tuple(file_path: str,
-                   *, sep=FILE_PATH_SEP) -> typing.Tuple[str]:
+def filepath_tuple(file_path: str, /, *,
+                   sep=FILE_PATH_SEP) -> typing.Tuple[str]:
path_parts = file_path.split(sep)
return tuple(path_parts)

@@ -78,7 +78,7 @@ class RecordItem(typing.NamedTuple):
record: RecordType

@classmethod
-def from_filepath_record(cls, file_path: str, languoid):
+def from_filepath_record(cls, file_path: str, languoid, /):
return cls(filepath_tuple(file_path), languoid)


@@ -121,5 +121,5 @@ class LanguoidItem(typing.NamedTuple):
languoid: LanguoidType

@classmethod
-def from_filepath_languoid(cls, file_path: str, languoid):
+def from_filepath_languoid(cls, file_path: str, languoid, /):
return cls(filepath_tuple(file_path), languoid)
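The `/` in `filepath_tuple` is safe because both classmethods above already pass
`file_path` positionally. A sketch, assuming FILE_PATH_SEP is '/' (the constant's
value is not shown in this diff):

    FILE_PATH_SEP = '/'  # assumed value, for illustration only

    def filepath_tuple(file_path: str, /, *, sep=FILE_PATH_SEP):
        path_parts = file_path.split(sep)
        return tuple(path_parts)

    filepath_tuple('abin1243/aari1239/md.ini')  # illustrative path
    # ('abin1243', 'aari1239', 'md.ini')
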
12 changes: 6 additions & 6 deletions treedb/_proxies.py
@@ -37,7 +37,7 @@ class PathProxy(Proxy):
.
"""

-def __init__(self, path=None):
+def __init__(self, path=None, /):
self.path = path

def __fspath__(self):
@@ -78,15 +78,15 @@ class EngineProxy(Proxy, sa.engine.Engine):
>>> EngineProxy(future=True)
<treedb._proxies.EngineProxy>
"""
-def __init__(self, engine=None, *, future):
+def __init__(self, engine=None, /, *, future):
self.engine = engine
self.future = future

-def _create_engine(self, url):
+def _create_engine(self, url, /):
log.debug('sqlalchemy.create_engine(%r)', url)
self.engine = sa.create_engine(url, future=self.future)

-def connect(self, close_with_result=False, **kwargs):
+def connect(self, *, close_with_result: bool = False, **kwargs):
return self.engine.connect(**kwargs)

def dispose(self):
@@ -157,7 +157,7 @@ def __repr__(self):
f' filename={name!r}{parent}'
f' size={self.file_size()!r}>')

-def file_with_suffix(self, suffix):
+def file_with_suffix(self, suffix, /):
if self.file is None:
name = f'{self.memory_write_path.name}{suffix}'
return self.memory_write_path.with_name(name)
@@ -170,7 +170,7 @@ def file_mtime(self):
return (datetime.datetime.fromtimestamp(self.file.stat().st_mtime)
if self.file_exists() else None)

-def file_size(self, as_megabytes=False):
+def file_size(self, *, as_megabytes: bool = False):
if self.file_exists():
result = self.file.stat().st_size
if as_megabytes:
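Note that two changes in this file go the other way: `connect` and `file_size` put
their boolean flags behind `*` (keyword-only) rather than before `/`
(positional-only). A sketch contrasting the two markers (illustrative function,
not from the repository):

    def f(pos_only, /, pos_or_kw, *, kw_only=None):
        return pos_only, pos_or_kw, kw_only

    f(1, 2, kw_only=3)            # OK -> (1, 2, 3)
    f(1, pos_or_kw=2)             # OK -> (1, 2, None)
    # f(pos_only=1, pos_or_kw=2)  # TypeError: 'pos_only' is positional-only
    # f(1, 2, 3)                  # TypeError: takes 2 positional arguments
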
57 changes: 29 additions & 28 deletions treedb/_tools.py
@@ -48,7 +48,7 @@
log = logging.getLogger(__name__)


-def uniqued(iterable):
+def uniqued(iterable, /):
"""Return list of unique hashable elements preserving order.
>>> uniqued('spamham')
@@ -58,10 +58,10 @@ def uniqued(iterable):
return [i for i in iterable if i not in seen and not seen.add(i)]


-def next_count(start: int = 0, step: int = 1):
+def next_count(*, start: int = 0, step: int = 1):
"""Return a callable returning ascending ints.
->>> nxt = next_count(1)
+>>> nxt = next_count(start=1)
>>> nxt()
1
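
`next_count` is the one signature here whose doctest had to change: `start` (and
`step`) became keyword-only, so positional calls like `next_count(1)` now fail. A
runnable sketch, assuming the body wraps itertools.count (the diff does not show it):

    import functools
    import itertools

    def next_count(*, start: int = 0, step: int = 1):
        count = itertools.count(start, step)
        return functools.partial(next, count)

    nxt = next_count(start=1)
    nxt(), nxt(), nxt()  # (1, 2, 3)
    # next_count(1)      # TypeError: takes 0 positional arguments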
@@ -115,8 +115,8 @@ def groupby_attrgetter(*attrnames):
return functools.partial(itertools.groupby, key=key)


-def islice_limit(iterable,
-                 *, limit: typing.Optional[int] = None,
+def islice_limit(iterable, /, *,
+                 limit: typing.Optional[int] = None,
offset: typing.Optional[int] = 0):
"""Return a slice from iterable applying limit and offset.
@@ -142,7 +142,7 @@ def islice_limit(iterable,
return iterable


-def iterslices(iterable, *, size: int):
+def iterslices(iterable, /, *, size: int):
"""Yield iterable in chunks of maximal size.
>>> [tuple(chunk) for chunk in iterslices('bacon', size=2)]
@@ -153,7 +153,7 @@ def iterslices(iterable, *, size: int):
return iter(lambda: list(next_slice()), [])


-def walk_scandir(top, *,
+def walk_scandir(top, /, *,
verbose: bool = False,
sortkey=operator.attrgetter('name')) -> typing.Iterator[os.DirEntry]:
"""Yield os.DirEntry objects for all files under top."""
@@ -188,8 +188,9 @@ def walk_scandir(top, *,
stack.extend(reversed(dirs))


-def pipe_json_lines(file, documents=None, *,
-                    delete_present: bool = True, autocompress: bool = True,
+def pipe_json_lines(file, documents=None, /, *,
+                    delete_present: bool = True,
+                    autocompress: bool = True,
newline: typing.Optional[str] = '\n',
sort_keys: bool = True,
compact: bool = True,
@@ -230,7 +231,7 @@ def pipe_json_lines(file, documents=None, *,
return pipe_json(lines, dump=False, **json_kwargs)


-def pipe_json(documents, *, dump: bool,
+def pipe_json(documents, /, *, dump: bool,
sort_keys: bool = True,
compact: bool = False,
indent: typing.Optional[int] = None,
@@ -265,7 +266,7 @@ def itercodec(docs):
return itercodec(documents)


-def pipe_lines(file, lines=None, *, newline: typing.Optional[str] = None,
+def pipe_lines(file, lines=None, /, *, newline: typing.Optional[str] = None,
delete_present: bool = False, autocompress: bool = True):
open_func, result, hashobj = get_open_result(file,
write=lines is not None,
@@ -291,7 +292,7 @@ def iterlines():
return iterlines()


-def write_wrapped(hashsum, f, lines, *, buflines: int = 1_000):
+def write_wrapped(hashsum, f, lines, /, *, buflines: int = 1_000):
write_line = functools.partial(print, file=f)
buf = f.buffer
total = 0
@@ -306,7 +307,7 @@ def write_wrapped(hashsum, f, lines, *, buflines: int = 1_000):
return total


-def write_lines(file, lines):
+def write_lines(file, lines, /):
r"""
>>> with io.StringIO() as f:
@@ -324,7 +325,7 @@ def write_lines(file, lines):
return total


-def path_from_filename(filename, *args, expanduser: bool = True):
+def path_from_filename(filename, /, *args, expanduser: bool = True):
if hasattr(filename, 'open'):
assert not args
result = filename
@@ -336,7 +337,7 @@ def path_from_filename(filename, *args, expanduser: bool = True):
return result


-def get_open_result(file, *, write: bool = False,
+def get_open_result(file, /, *, write: bool = False,
delete_present: bool = False, autocompress: bool = False,
newline: typing.Optional[str] = None,
_encoding: str = 'utf-8'):
@@ -389,7 +390,7 @@ def open_func():
return open_func, result, hashobj


-def get_open_module(filepath, autocompress: bool = False):
+def get_open_module(filepath, /, *, autocompress: bool = False):
file = path_from_filename(filepath)

suffix = file.suffix.lower()
@@ -403,7 +404,7 @@ def get_open_module(filepath, autocompress: bool = False):
return result


-def sha256sum(file, *, raw: bool = False, autocompress: bool = True,
+def sha256sum(file, /, *, raw: bool = False, autocompress: bool = True,
hash_file_string: bool = False,
file_string_encoding: str = ENCODING):
"""
@@ -426,12 +427,12 @@ def sha256sum(file, *, raw: bool = False, autocompress: bool = True,
return hashobj if raw else hashobj.hexdigest()


-def update_hashobj(hashobj, file, *, chunksize: int = 2**16): # 64 KiB
+def update_hashobj(hashobj, file, /, *, chunksize: int = 2**16): # 64 KiB
for chunk in iter(functools.partial(file.read, chunksize), b''):
hashobj.update(chunk)


-def run(cmd, *, capture_output: bool = False,
+def run(cmd, /, *, capture_output: bool = False,
unpack: bool = False, cwd=None, check: bool = False,
encoding: str = ENCODING):
log.info('subprocess.run(%r)', cmd)
@@ -480,19 +481,19 @@ class Ordering(dict):
_missing = float('inf')

@classmethod
-def fromlist(cls, keys, *, start_index: int = 0):
+def fromlist(cls, keys, /, *, start_index: int = 0):
return cls((k, i) for i, k in enumerate(uniqued(keys), start=start_index))

-def __missing__(self, key):
+def __missing__(self, key, /):
return self._missing

-def _sortkey(self, key):
+def _sortkey(self, key, /):
return self[key], key

-def sorted(self, keys):
+def sorted(self, keys, /):
return sorted(keys, key=self._sortkey)

-def sorted_enumerate(self, keys, start: int = 0):
+def sorted_enumerate(self, keys, /, *, start: int = 0):
keyed = sorted((self[key], key) for key in keys)
return ((i, key) for i, (_, key) in enumerate(keyed, start=start))

@@ -511,7 +512,7 @@ class ConfigParser(configparser.ConfigParser):
_header = None

@classmethod
-def from_file(cls, filename, *, encoding=ENCODING, **kwargs):
+def from_file(cls, filename, /, *, encoding=ENCODING, **kwargs):
path = path_from_filename(filename)
if cls._basename is not None and path.name != cls._basename:
raise RuntimeError(f'unexpected filename {path!r}'
@@ -522,18 +523,18 @@ def from_file(cls, filename, *, encoding=ENCODING, **kwargs):
inst.read_file(f)
return inst

-def __init__(self, *, defaults=None, **kwargs):
+def __init__(self, /, *, defaults=None, **kwargs):
for k, v in self._init_defaults.items():
kwargs.setdefault(k, v)
super().__init__(defaults=defaults, **kwargs)

-def to_dict(self, *, sort_sections: bool = False,
+def to_dict(self, /, *, sort_sections: bool = False,
_default_section: str = configparser.DEFAULTSECT):
items = sorted(self.items()) if sort_sections else self.items()
return {name: dict(section) for name, section in items
if name != _default_section}

-def to_file(self, filename, *, encoding=ENCODING):
+def to_file(self, filename, /, *, encoding=ENCODING):
path = path_from_filename(filename)
with path.open('wt', encoding=encoding, newline=self._newline) as f:
if self._header is not None:
10 changes: 5 additions & 5 deletions treedb/backend/_basics.py
@@ -61,7 +61,7 @@ def print_versions(*, engine=ENGINE, file=None) -> None:
engine=engine)


-def set_engine(filename, *,
+def set_engine(filename, /, *,
resolve: bool = False,
require: bool = False,
title: typing.Optional[str] = None,
@@ -145,12 +145,12 @@ def connect(*, bind=ENGINE,
return conn


-def scalar(statement, *args, bind=ENGINE, **kwargs):
+def scalar(statement, /, *args, bind=ENGINE, **kwargs):
with connect(bind=bind) as conn:
return conn.scalar(statement, *args, **kwargs)


-def iterrows(query, *, mappings=False, bind=ENGINE):
+def iterrows(query, /, *, mappings=False, bind=ENGINE):
with connect(bind=bind) as conn:
result = conn.execute(query)

@@ -160,7 +160,7 @@ def iterrows(query, *, mappings=False, bind=ENGINE):
yield from result


-def expression_compile(expression, *, literal_binds=True):
+def expression_compile(expression, /, *, literal_binds=True):
"""Return literal compiled expression."""
return expression.compile(compile_kwargs={'literal_binds': literal_binds})

@@ -176,6 +176,6 @@ def json_object(*, sort_keys_: bool,
return sa.type_coerce(obj, sa.JSON) if load_json_ else obj


-def json_datetime(date):
+def json_datetime(date, /):
date = sa.func.replace(date, ' ', 'T')
return sa.func.replace(date, '.000000', '')
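
A `/` before `*args`/`**kwargs`, as in `scalar` above, also frees the parameter's
own name for use as a keyword argument. An illustrative helper (not from the
repository) showing the collision the marker prevents:

    def tag(name, /, **attrs):
        rendered = ' '.join(f'{k}="{v}"' for k, v in attrs.items())
        return f'<{name} {rendered}>'

    tag('input', name='q', type='text')
    # '<input name="q" type="text">' -- a TypeError without the `/`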
