Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Change occurrences of % and format() to f-strings #1423

Merged
merged 3 commits into from Feb 16, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Jump to
Jump to file
Failed to load files.
Diff view
Diff view
3 changes: 3 additions & 0 deletions docs/release.rst
Expand Up @@ -18,6 +18,9 @@ Release notes
Unreleased
----------

* Change occurrences of % and format() to f-strings.
By :user:`Dimitri Papadopoulos Orfanos <DimitriPapadopoulos>` :issue:`1423`.

.. _release_2.17.0:

2.17.0
Expand Down
6 changes: 3 additions & 3 deletions zarr/_storage/absstore.py
Expand Up @@ -84,7 +84,7 @@ def __init__(

blob_service_kwargs = blob_service_kwargs or {}
client = ContainerClient(
"https://{}.blob.core.windows.net/".format(account_name),
f"https://{account_name}.blob.core.windows.net/",
container,
credential=account_key,
**blob_service_kwargs,
Expand Down Expand Up @@ -141,7 +141,7 @@ def __getitem__(self, key):
try:
return self.client.download_blob(blob_name).readall()
except ResourceNotFoundError:
raise KeyError("Blob %s not found" % blob_name)
raise KeyError(f"Blob {blob_name} not found")

def __setitem__(self, key, value):
value = ensure_bytes(value)
Expand All @@ -154,7 +154,7 @@ def __delitem__(self, key):
try:
self.client.delete_blob(self._append_path_to_prefix(key))
except ResourceNotFoundError:
raise KeyError("Blob %s not found" % key)
raise KeyError(f"Blob {key} not found")

def __eq__(self, other):
return (
Expand Down
2 changes: 1 addition & 1 deletion zarr/_storage/store.py
Expand Up @@ -227,7 +227,7 @@ def _validate_key(self, key: str):
# TODO: Possibly allow key == ".zmetadata" too if we write a
# consolidated metadata spec corresponding to this?
):
raise ValueError("keys starts with unexpected value: `{}`".format(key))
raise ValueError(f"key starts with unexpected value: `{key}`")

if key.endswith("/"):
raise ValueError("keys may not end in /")
Expand Down
2 changes: 1 addition & 1 deletion zarr/_storage/v3.py
Expand Up @@ -569,7 +569,7 @@ def __init__(self, store: StoreLike, metadata_key=meta_root + "consolidated/.zme
consolidated_format = meta.get("zarr_consolidated_format", None)
if consolidated_format != 1:
raise MetadataError(
"unsupported zarr consolidated metadata format: %s" % consolidated_format
f"unsupported zarr consolidated metadata format: {consolidated_format}"
)

# decode metadata
Expand Down
44 changes: 18 additions & 26 deletions zarr/convenience.py
Expand Up @@ -259,7 +259,7 @@ def save_group(store: StoreLike, *args, zarr_version=None, path=None, **kwargs):
try:
grp = _create_group(_store, path=path, overwrite=True, zarr_version=zarr_version)
for i, arr in enumerate(args):
k = "arr_{}".format(i)
k = f"arr_{i}"
grp.create_dataset(k, data=arr, overwrite=True, zarr_version=zarr_version)
for k, arr in kwargs.items():
grp.create_dataset(k, data=arr, overwrite=True, zarr_version=zarr_version)
Expand Down Expand Up @@ -499,7 +499,7 @@ def __init__(self, log):
self.log_file = log
else:
raise TypeError(
"log must be a callable function, file path or " "file-like object, found %r" % log
Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

This string was cut in half, looks like a left-over from #1459.

f"log must be a callable function, file path or file-like object, found {log!r}"
)

def __enter__(self):
Expand All @@ -526,9 +526,9 @@ def _log_copy_summary(log, dry_run, n_copied, n_skipped, n_bytes_copied):
message = "dry run: "
else:
message = "all done: "
message += "{:,} copied, {:,} skipped".format(n_copied, n_skipped)
message += f"{n_copied:,} copied, {n_skipped:,} skipped"
if not dry_run:
message += ", {:,} bytes copied".format(n_bytes_copied)
message += f", {n_bytes_copied:,} bytes copied"
log(message)


Expand Down Expand Up @@ -657,9 +657,7 @@ def copy_store(
# check if_exists parameter
valid_if_exists = ["raise", "replace", "skip"]
if if_exists not in valid_if_exists:
raise ValueError(
"if_exists must be one of {!r}; found {!r}".format(valid_if_exists, if_exists)
)
raise ValueError(f"if_exists must be one of {valid_if_exists!r}; found {if_exists!r}")

# setup counting variables
n_copied = n_skipped = n_bytes_copied = 0
Expand Down Expand Up @@ -720,20 +718,20 @@ def copy_store(
if if_exists != "replace":
if dest_key in dest:
if if_exists == "raise":
raise CopyError("key {!r} exists in destination".format(dest_key))
raise CopyError(f"key {dest_key!r} exists in destination")
elif if_exists == "skip":
do_copy = False

# take action
if do_copy:
log("copy {}".format(descr))
log(f"copy {descr}")
if not dry_run:
data = source[source_key]
n_bytes_copied += buffer_size(data)
dest[dest_key] = data
n_copied += 1
else:
log("skip {}".format(descr))
log(f"skip {descr}")
n_skipped += 1

# log a final message with a summary of what happened
Expand All @@ -744,7 +742,7 @@ def copy_store(

def _check_dest_is_group(dest):
if not hasattr(dest, "create_dataset"):
raise ValueError("dest must be a group, got {!r}".format(dest))
raise ValueError(f"dest must be a group, got {dest!r}")


def copy(
Expand Down Expand Up @@ -910,11 +908,9 @@ def _copy(log, source, dest, name, root, shallow, without_attrs, if_exists, dry_
# check if_exists parameter
valid_if_exists = ["raise", "replace", "skip", "skip_initialized"]
if if_exists not in valid_if_exists:
raise ValueError(
"if_exists must be one of {!r}; found {!r}".format(valid_if_exists, if_exists)
)
raise ValueError(f"if_exists must be one of {valid_if_exists!r}; found {if_exists!r}")
if dest_h5py and if_exists == "skip_initialized":
raise ValueError("{!r} can only be used when copying to zarr".format(if_exists))
raise ValueError(f"{if_exists!r} can only be used when copying to zarr")

# determine name to copy to
if name is None:
Expand All @@ -934,9 +930,7 @@ def _copy(log, source, dest, name, root, shallow, without_attrs, if_exists, dry_
exists = dest is not None and name in dest
if exists:
if if_exists == "raise":
raise CopyError(
"an object {!r} already exists in destination " "{!r}".format(name, dest.name)
)
raise CopyError(f"an object {name!r} already exists in destination {dest.name!r}")
elif if_exists == "skip":
do_copy = False
elif if_exists == "skip_initialized":
Expand All @@ -947,7 +941,7 @@ def _copy(log, source, dest, name, root, shallow, without_attrs, if_exists, dry_
# take action
if do_copy:
# log a message about what we're going to do
log("copy {} {} {}".format(source.name, source.shape, source.dtype))
Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Perhaps log("{}".format(arg)) should actually be changed to log("%s", arg), so that string formatting interpolation is not executed if logging is not enabled.

See also:

log(f"copy {source.name} {source.shape} {source.dtype}")

if not dry_run:
# clear the way
Expand Down Expand Up @@ -1015,7 +1009,7 @@ def _copy(log, source, dest, name, root, shallow, without_attrs, if_exists, dry_
n_copied += 1

else:
log("skip {} {} {}".format(source.name, source.shape, source.dtype))
log(f"skip {source.name} {source.shape} {source.dtype}")
n_skipped += 1

elif root or not shallow:
Expand All @@ -1026,16 +1020,14 @@ def _copy(log, source, dest, name, root, shallow, without_attrs, if_exists, dry_
exists_array = dest is not None and name in dest and hasattr(dest[name], "shape")
if exists_array:
if if_exists == "raise":
raise CopyError(
"an array {!r} already exists in destination " "{!r}".format(name, dest.name)
)
raise CopyError(f"an array {name!r} already exists in destination {dest.name!r}")
elif if_exists == "skip":
do_copy = False

# take action
if do_copy:
# log action
log("copy {}".format(source.name))
log(f"copy {source.name}")

if not dry_run:
# clear the way
Expand Down Expand Up @@ -1078,7 +1070,7 @@ def _copy(log, source, dest, name, root, shallow, without_attrs, if_exists, dry_
n_copied += 1

else:
log("skip {}".format(source.name))
log(f"skip {source.name}")
n_skipped += 1

return n_copied, n_skipped, n_bytes_copied
Expand Down Expand Up @@ -1327,7 +1319,7 @@ def open_consolidated(store: StoreLike, metadata_key=".zmetadata", mode="r+", **
store, storage_options=kwargs.get("storage_options"), mode=mode, zarr_version=zarr_version
)
if mode not in {"r", "r+"}:
raise ValueError("invalid mode, expected either 'r' or 'r+'; found {!r}".format(mode))
raise ValueError(f"invalid mode, expected either 'r' or 'r+'; found {mode!r}")

path = kwargs.pop("path", None)
if store._store_version == 2:
Expand Down
20 changes: 10 additions & 10 deletions zarr/core.py
Expand Up @@ -2396,11 +2396,11 @@ def _encode_chunk(self, chunk):

def __repr__(self):
t = type(self)
r = "<{}.{}".format(t.__module__, t.__name__)
r = f"<{t.__module__}.{t.__name__}"
if self.name:
r += " %r" % self.name
r += " %s" % str(self.shape)
r += " %s" % self.dtype
r += f" {self.name!r}"
r += f" {str(self.shape)}"
r += f" {self.dtype}"
if self._read_only:
r += " read-only"
r += ">"
Expand Down Expand Up @@ -2436,11 +2436,11 @@ def info_items(self):

def _info_items_nosync(self):
def typestr(o):
    """Return the fully-qualified type name of *o*, e.g. ``builtins.int``."""
    # The pre-PR ``"{}.{}".format(...)`` line was removed; only the
    # f-string form remains.
    return f"{type(o).__module__}.{type(o).__name__}"

def bytestr(n):
    """Format a byte count *n* as a string.

    For values over 1 KiB (2**10) the human-readable size is appended in
    parentheses; smaller values are rendered as the plain integer.
    """
    if n > 2**10:
        # NOTE(review): human_readable_size is defined elsewhere in the
        # module — assumed to return a string like "1.0K"; confirm there.
        return f"{n} ({human_readable_size(n)})"
    else:
        return str(n)

Expand All @@ -2451,7 +2451,7 @@ def bytestr(n):
items += [("Name", self.name)]
items += [
("Type", typestr(self)),
("Data type", "%s" % self.dtype),
("Data type", str(self.dtype)),
("Shape", str(self.shape)),
("Chunk shape", str(self.chunks)),
("Order", self.order),
Expand All @@ -2461,7 +2461,7 @@ def bytestr(n):
# filters
if self.filters:
for i, f in enumerate(self.filters):
items += [("Filter [%s]" % i, repr(f))]
items += [(f"Filter [{i}]", repr(f))]

# compressor
items += [("Compressor", repr(self.compressor))]
Expand All @@ -2478,9 +2478,9 @@ def bytestr(n):
if self.nbytes_stored > 0:
items += [
("No. bytes stored", bytestr(self.nbytes_stored)),
("Storage ratio", "%.1f" % (self.nbytes / self.nbytes_stored)),
("Storage ratio", f"{self.nbytes / self.nbytes_stored:.1f}"),
]
items += [("Chunks initialized", "{}/{}".format(self.nchunks_initialized, self.nchunks))]
items += [("Chunks initialized", f"{self.nchunks_initialized}/{self.nchunks}")]

return items

Expand Down
4 changes: 2 additions & 2 deletions zarr/creation.py
Expand Up @@ -287,7 +287,7 @@ def _kwargs_compat(compressor, fill_value, kwargs):
compressor = compression

else:
raise ValueError("bad value for compression: %r" % compression)
raise ValueError(f"bad value for compression: {compression!r}")

# handle 'fillvalue'
if "fillvalue" in kwargs:
Expand All @@ -297,7 +297,7 @@ def _kwargs_compat(compressor, fill_value, kwargs):

# ignore other keyword arguments
for k in kwargs:
warn("ignoring keyword argument %r" % k)
warn(f"ignoring keyword argument {k!r}")

return compressor, fill_value

Expand Down
4 changes: 1 addition & 3 deletions zarr/errors.py
Expand Up @@ -67,9 +67,7 @@ def __init__(self):


def err_too_many_indices(selection, shape):
    """Raise ``IndexError`` reporting that *selection* supplies more indices
    than *shape* has dimensions.

    Both arguments only need to support ``len()``.
    """
    raise IndexError(f"too many indices for array; expected {len(shape)}, got {len(selection)}")


class VindexInvalidSelectionError(_BaseZarrIndexError):
Expand Down
14 changes: 6 additions & 8 deletions zarr/hierarchy.py
Expand Up @@ -340,9 +340,9 @@ def __len__(self):

def __repr__(self):
t = type(self)
r = "<{}.{}".format(t.__module__, t.__name__)
r = f"<{t.__module__}.{t.__name__}"
if self.name:
r += " %r" % self.name
r += f" {self.name!r}"
if self._read_only:
r += " read-only"
r += ">"
Expand All @@ -358,7 +358,7 @@ def __exit__(self, exc_type, exc_val, exc_tb):

def info_items(self):
def typestr(o):
    """Return the fully-qualified type name of *o*, e.g. ``builtins.dict``."""
    # Stale pre-PR ``"{}.{}".format(...)`` duplicate removed; keep the
    # f-string form only.
    return f"{type(o).__module__}.{type(o).__name__}"

items = []

Expand Down Expand Up @@ -1157,17 +1157,15 @@ def _require_dataset_nosync(self, name, shape, dtype=None, exact=False, **kwargs
shape = normalize_shape(shape)
if shape != a.shape:
raise TypeError(
"shape do not match existing array; expected {}, got {}".format(a.shape, shape)
f"shape do not match existing array; expected {a.shape}, got {shape}"
)
dtype = np.dtype(dtype)
if exact:
if dtype != a.dtype:
raise TypeError(
"dtypes do not match exactly; expected {}, got {}".format(a.dtype, dtype)
)
raise TypeError(f"dtypes do not match exactly; expected {a.dtype}, got {dtype}")
else:
if not np.can_cast(dtype, a.dtype):
raise TypeError("dtypes ({}, {}) cannot be safely cast".format(dtype, a.dtype))
raise TypeError(f"dtypes ({dtype}, {a.dtype}) cannot be safely cast")
return a

else:
Expand Down