Change occurrences of % and format() to f-strings
DimitriPapadopoulos committed May 31, 2023
1 parent 4132f36 commit 1a48e00
Showing 13 changed files with 83 additions and 83 deletions.
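Every hunk below applies the same mechanical rewrite: %-interpolation and str.format() calls become f-strings, with format specs (such as :,) and conversions (such as !r) carried over unchanged. As a rough standalone illustration of the pattern (the names here are invented for the example, not taken from the diff):

    name, count = 'arr_0', 1234

    # old styles, as removed throughout this commit
    assert 'copy %s' % name == 'copy arr_0'
    assert 'copy {}'.format(name) == 'copy arr_0'

    # new style: the expression is evaluated inline
    assert f'copy {name}' == 'copy arr_0'

    # format specs and conversions transfer verbatim
    assert '{:,}'.format(count) == f'{count:,}' == '1,234'
    assert '{!r}'.format(name) == f'{name!r}' == "'arr_0'"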
2 changes: 1 addition & 1 deletion zarr/_storage/absstore.py
@@ -77,7 +77,7 @@ def __init__(self, container=None, prefix='', account_name=None, account_key=Non
             from azure.storage.blob import ContainerClient
             blob_service_kwargs = blob_service_kwargs or {}
             client = ContainerClient(
-                "https://{}.blob.core.windows.net/".format(account_name), container,
+                f"https://{account_name}.blob.core.windows.net/", container,
                 credential=account_key, **blob_service_kwargs
             )

2 changes: 1 addition & 1 deletion zarr/_storage/store.py
@@ -227,7 +227,7 @@ def _validate_key(self, key: str):
             # TODO: Possibly allow key == ".zmetadata" too if we write a
             # consolidated metadata spec corresponding to this?
         ):
-            raise ValueError("keys starts with unexpected value: `{}`".format(key))
+            raise ValueError(f"keys starts with unexpected value: `{key}`")
 
         if key.endswith('/'):
             raise ValueError("keys may not end in /")
20 changes: 10 additions & 10 deletions zarr/convenience.py
@@ -248,7 +248,7 @@ def save_group(store: StoreLike, *args, zarr_version=None, path=None, **kwargs):
     try:
         grp = _create_group(_store, path=path, overwrite=True, zarr_version=zarr_version)
         for i, arr in enumerate(args):
-            k = 'arr_{}'.format(i)
+            k = f'arr_{i}'
             grp.create_dataset(k, data=arr, overwrite=True, zarr_version=zarr_version)
         for k, arr in kwargs.items():
             grp.create_dataset(k, data=arr, overwrite=True, zarr_version=zarr_version)
@@ -517,9 +517,9 @@ def _log_copy_summary(log, dry_run, n_copied, n_skipped, n_bytes_copied):
         message = 'dry run: '
     else:
         message = 'all done: '
-    message += '{:,} copied, {:,} skipped'.format(n_copied, n_skipped)
+    message += f'{n_copied:,} copied, {n_skipped:,} skipped'
     if not dry_run:
-        message += ', {:,} bytes copied'.format(n_bytes_copied)
+        message += f', {n_bytes_copied:,} bytes copied'
     log(message)


@@ -710,14 +710,14 @@ def copy_store(source, dest, source_path='', dest_path='', excludes=None,

         # take action
         if do_copy:
-            log('copy {}'.format(descr))
+            log(f'copy {descr}')
             if not dry_run:
                 data = source[source_key]
                 n_bytes_copied += buffer_size(data)
                 dest[dest_key] = data
             n_copied += 1
         else:
-            log('skip {}'.format(descr))
+            log(f'skip {descr}')
             n_skipped += 1
 
     # log a final message with a summary of what happened
@@ -728,7 +728,7 @@

 def _check_dest_is_group(dest):
     if not hasattr(dest, 'create_dataset'):
-        raise ValueError('dest must be a group, got {!r}'.format(dest))
+        raise ValueError(f'dest must be a group, got {dest!r}')
 
 
 def copy(source, dest, name=None, shallow=False, without_attrs=False, log=None,
@@ -915,7 +915,7 @@ def _copy(log, source, dest, name, root, shallow, without_attrs, if_exists,
         if do_copy:
 
             # log a message about what we're going to do
-            log('copy {} {} {}'.format(source.name, source.shape, source.dtype))
+            log(f'copy {source.name} {source.shape} {source.dtype}')
 
             if not dry_run:
 
@@ -986,7 +986,7 @@ def _copy(log, source, dest, name, root, shallow, without_attrs, if_exists,
             n_copied += 1
 
         else:
-            log('skip {} {} {}'.format(source.name, source.shape, source.dtype))
+            log(f'skip {source.name} {source.shape} {source.dtype}')
             n_skipped += 1
 
     elif root or not shallow:
@@ -1008,7 +1008,7 @@ def _copy(log, source, dest, name, root, shallow, without_attrs, if_exists,
         if do_copy:
 
             # log action
-            log('copy {}'.format(source.name))
+            log(f'copy {source.name}')
 
             if not dry_run:
 
@@ -1045,7 +1045,7 @@ def _copy(log, source, dest, name, root, shallow, without_attrs, if_exists,
             n_copied += 1
 
         else:
-            log('skip {}'.format(source.name))
+            log(f'skip {source.name}')
             n_skipped += 1
 
     return n_copied, n_skipped, n_bytes_copied
8 changes: 4 additions & 4 deletions zarr/core.py
@@ -2256,7 +2256,7 @@ def _encode_chunk(self, chunk):

     def __repr__(self):
         t = type(self)
-        r = '<{}.{}'.format(t.__module__, t.__name__)
+        r = f'<{t.__module__}.{t.__name__}'
         if self.name:
             r += ' %r' % self.name
         r += ' %s' % str(self.shape)
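The %r and %s context lines above are untouched by this hunk. For reference, a small standalone sketch of their f-string equivalents (invented values, not part of the commit):

    name, shape = '/foo', (100, 100)
    r = ''
    r += ' %r' % name        # %-formatting with repr()
    r += ' %s' % str(shape)  # %-formatting with str()
    # f-string spelling: !r for repr, str() is implicit
    assert r == f' {name!r} {shape}'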
@@ -2297,11 +2297,11 @@ def info_items(self):
     def _info_items_nosync(self):
 
         def typestr(o):
-            return '{}.{}'.format(type(o).__module__, type(o).__name__)
+            return f'{type(o).__module__}.{type(o).__name__}'
 
         def bytestr(n):
             if n > 2**10:
-                return '{} ({})'.format(n, human_readable_size(n))
+                return f'{n} ({human_readable_size(n)})'
             else:
                 return str(n)
 
@@ -2342,7 +2342,7 @@ def bytestr(n):
             ('Storage ratio', '%.1f' % (self.nbytes / self.nbytes_stored)),
         ]
         items += [
-            ('Chunks initialized', '{}/{}'.format(self.nchunks_initialized, self.nchunks))
+            ('Chunks initialized', f'{self.nchunks_initialized}/{self.nchunks}')
         ]
 
         return items
4 changes: 2 additions & 2 deletions zarr/hierarchy.py
@@ -316,7 +316,7 @@ def __len__(self):

     def __repr__(self):
         t = type(self)
-        r = '<{}.{}'.format(t.__module__, t.__name__)
+        r = f'<{t.__module__}.{t.__name__}'
         if self.name:
             r += ' %r' % self.name
         if self._read_only:
@@ -335,7 +335,7 @@ def __exit__(self, exc_type, exc_val, exc_tb):
     def info_items(self):
 
         def typestr(o):
-            return '{}.{}'.format(type(o).__module__, type(o).__name__)
+            return f'{type(o).__module__}.{type(o).__name__}'
 
         items = []
 
2 changes: 1 addition & 1 deletion zarr/indexing.py
@@ -880,7 +880,7 @@ def check_fields(fields, dtype):
             # multiple field selection
             out_dtype = np.dtype([(f, dtype[f]) for f in fields])
         except KeyError as e:
-            raise IndexError("invalid 'fields' argument, field not found: {!r}".format(e))
+            raise IndexError(f"invalid 'fields' argument, field not found: {e!r}")
         else:
             return out_dtype
     else:
2 changes: 1 addition & 1 deletion zarr/storage.py
@@ -2807,7 +2807,7 @@ def __init__(self, prefix='zarr', dimension_separator=None, **kwargs):
         self.client = redis.Redis(**kwargs)
 
     def _key(self, key):
-        return '{prefix}:{key}'.format(prefix=self._prefix, key=key)
+        return f'{self._prefix}:{key}'
 
     def __getitem__(self, key):
         return self.client[self._key(key)]
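This hunk converts the keyword-argument form of str.format(): the f-string simply inlines the expressions that were bound to the named placeholders. A minimal standalone check of the equivalence (values invented for the example):

    prefix, key = 'zarr', 'meta/.zgroup'
    assert '{prefix}:{key}'.format(prefix=prefix, key=key) == f'{prefix}:{key}'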
2 changes: 1 addition & 1 deletion zarr/tests/test_convenience.py
@@ -81,7 +81,7 @@ def test_open_array(path_type, zarr_version):

     # path not found
     with pytest.raises(ValueError):
-        open('doesnotexist', mode='r')
+        open('doesnotexist')
 
 
 @pytest.mark.parametrize("zarr_version", _VERSIONS)
6 changes: 3 additions & 3 deletions zarr/tests/test_core.py
@@ -1155,7 +1155,7 @@ def test_dtypes(self):
         # datetime, timedelta
         for base_type in 'Mm':
             for resolution in 'D', 'us', 'ns':
-                dtype = '{}8[{}]'.format(base_type, resolution)
+                dtype = f'{base_type}8[{resolution}]'
                 z = self.create_array(shape=100, dtype=dtype, fill_value=0)
                 assert z.dtype == np.dtype(dtype)
                 a = np.random.randint(np.iinfo('i8').min, np.iinfo('i8').max,
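The converted f-string in this hunk assembles NumPy dtype strings such as 'M8[D]' (datetime64 with day resolution). A quick standalone check of what the expression produces (illustrative only):

    import numpy as np

    for base_type in 'Mm':  # datetime64 / timedelta64
        for resolution in ('D', 'us', 'ns'):
            dtype = f'{base_type}8[{resolution}]'  # e.g. 'M8[D]'
            assert np.dtype(dtype).kind == base_type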
@@ -1348,7 +1348,7 @@ def compare_arrays(expected, actual, item_dtype):

         # convenience API
         for item_type in 'int', '<u4':
-            z = self.create_array(shape=data.shape, dtype='array:{}'.format(item_type))
+            z = self.create_array(shape=data.shape, dtype=f'array:{item_type}')
             assert z.dtype == object
             assert isinstance(z.filters[0], VLenArray)
             assert z.filters[0].dtype == np.dtype(item_type)
@@ -1987,7 +1987,7 @@ def test_object_arrays_vlen_array(self):
         # convenience API
         for item_type in 'int', '<u4':
             with pytest.raises(ValueError):
-                self.create_array(shape=data.shape, dtype='array:{}'.format(item_type))
+                self.create_array(shape=data.shape, dtype=f'array:{item_type}')
 
     def test_object_arrays_danger(self):
         # Cannot hacking out object codec as N5 doesn't allow object codecs
