Fix UP031: Use format specifiers instead of percent format
maresb committed Apr 30, 2024
1 parent 75b5706 commit 27bd9aa
Showing 15 changed files with 102 additions and 96 deletions.
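For reference, Ruff's UP031 rule flags printf-style `%` interpolation and asks for modern format specifiers. A minimal sketch of the transformation this commit applies throughout (the variable names here are invented, not taken from the diff):

```python
name = "example"
count = 3

# Before: printf-style interpolation, flagged by UP031
msg_old = "processed %s (%d items)" % (name, count)

# After: an f-string interpolates the same values inline
msg_new = f"processed {name} ({count} items)"

assert msg_old == msg_new == "processed example (3 items)"
```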
4 changes: 2 additions & 2 deletions pytensor/bin/pytensor_cache.py
@@ -75,9 +75,9 @@ def main():
if items:
_logger.warning(
"There remain elements in the cache dir that you may "
"need to erase manually. The cache dir is:\n %s\n"
f"need to erase manually. The cache dir is:\n {config.compiledir}\n"
'You can also call "pytensor-cache purge" to '
"remove everything from that directory." % config.compiledir
"remove everything from that directory."
)
_logger.debug(f"Remaining elements ({len(items)}): {', '.join(items)}")
elif sys.argv[1] == "list":
2 changes: 1 addition & 1 deletion pytensor/breakpoint.py
@@ -105,7 +105,7 @@ def perform(self, node, inputs, output_storage):
except Exception:
raise ValueError(
"Some of the inputs to the PdbBreakpoint op "
"'%s' could not be casted to NumPy arrays" % self.name
f"'{self.name}' could not be casted to NumPy arrays"
)

print("\n")
2 changes: 1 addition & 1 deletion pytensor/compile/builders.py
@@ -709,7 +709,7 @@ def _recompute_rop_op(self):
if not isinstance(roverrides_l, list):
raise TypeError(
"Rop overriding function should return a list, "
'got "%s"' % type(roverrides_l)
f'got "{type(roverrides_l)}"'
)
all_rops_l, all_rops_ov_l = zip(
*[
2 changes: 1 addition & 1 deletion pytensor/compile/ops.py
@@ -252,7 +252,7 @@ def __hash__(self):
return hash(type(self)) ^ hash(self.__fn)

def __str__(self):
return "FromFunctionOp{%s}" % self.__fn.__name__
return f"FromFunctionOp{{{self.__fn.__name__}}}"

def perform(self, node, inputs, outputs):
outs = self.__fn(*inputs)
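One wrinkle in the `__str__` change above: literal braces inside an f-string must be doubled, so the braces that used to surround `%s` become `{{...}}` around the interpolated name. A small sketch of that escaping rule (the function name is made up):

```python
def _example_fn():  # hypothetical stand-in for self.__fn
    pass

# '{{' and '}}' render as literal braces; the inner {...} interpolates
label = f"FromFunctionOp{{{_example_fn.__name__}}}"
assert label == "FromFunctionOp{_example_fn}"
```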
37 changes: 21 additions & 16 deletions pytensor/link/c/params_type.py
@@ -262,8 +262,13 @@ def __init__(self, params_type, **kwargs):
self.__dict__.update(__params_type__=params_type, __signatures__=None)

def __repr__(self):
return "Params(%s)" % ", ".join(
[(f"{k}:{type(self[k]).__name__}:{self[k]}") for k in sorted(self.keys())]
return "Params({})".format(
", ".join(
[
(f"{k}:{type(self[k]).__name__}:{self[k]}")
for k in sorted(self.keys())
]
)
)

def __getattr__(self, key):
@@ -346,13 +351,11 @@ def __init__(self, **kwargs):
for attribute_name in kwargs:
if re.match("^[A-Za-z_][A-Za-z0-9_]*$", attribute_name) is None:
raise AttributeError(
'ParamsType: attribute "%s" should be a valid identifier.'
% attribute_name
f'ParamsType: attribute "{attribute_name}" should be a valid identifier.'
)
if attribute_name in c_cpp_keywords:
raise SyntaxError(
'ParamsType: "%s" is a potential C/C++ keyword and should not be used as attribute name.'
% attribute_name
f'ParamsType: "{attribute_name}" is a potential C/C++ keyword and should not be used as attribute name.'
)
type_instance = kwargs[attribute_name]
type_name = type_instance.__class__.__name__
@@ -424,8 +427,10 @@ def __getattr__(self, key):
return super().__getattr__(self, key)

def __repr__(self):
return "ParamsType<%s>" % ", ".join(
[(f"{self.fields[i]}:{self.types[i]}") for i in range(self.length)]
return "ParamsType<{}>".format(
", ".join(
[(f"{self.fields[i]}:{self.types[i]}") for i in range(self.length)]
)
)

def __eq__(self, other):
@@ -733,18 +738,18 @@ def c_support_code(self, **kwargs):
struct_cleanup = "\n".join(c_cleanup_list)
struct_extract = "\n\n".join(c_extract_list)
struct_extract_method = """
void extract(PyObject* object, int field_pos) {
switch(field_pos) {
void extract(PyObject* object, int field_pos) {{
switch(field_pos) {{
// Extraction cases.
%s
{}
// Default case.
default:
PyErr_Format(PyExc_TypeError, "ParamsType: no extraction defined for a field %%d.", field_pos);
PyErr_Format(PyExc_TypeError, "ParamsType: no extraction defined for a field %d.", field_pos);
this->setErrorOccurred();
break;
}
}
""" % (
}}
}}
""".format(
"\n".join(
[
("case %d: extract_%s(object); break;" % (i, self.fields[i]))
@@ -866,7 +871,7 @@ def c_extract(self, name, sub, check_input=True, **kwargs):
struct_name=self.name,
length=self.length,
fail=sub["fail"],
fields_list='"%s"' % '", "'.join(self.fields),
fields_list='"{}"'.format('", "'.join(self.fields)),
)
)

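The `c_support_code` change above replaces a `%`-formatted C template with `str.format`, which swaps two kinds of escaping at once: the C braces must now be doubled (`{{` / `}}`), while the `%d` handed to `PyErr_Format` no longer needs to be written `%%d`. A cut-down sketch of the same idea, with invented field names:

```python
fields = ["alpha", "beta"]  # hypothetical field names

template = """
switch(field_pos) {{
    {}
    default:
        PyErr_Format(PyExc_TypeError, "no extraction defined for field %d.", field_pos);
}}
""".format(
    "\n    ".join(
        f"case {i}: extract_{name}(object); break;" for i, name in enumerate(fields)
    )
)
print(template)  # doubled braces come out as literal C braces
```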
9 changes: 7 additions & 2 deletions pytensor/link/utils.py
@@ -355,8 +355,13 @@ def raise_with_op(
+ f"\nInputs values: {scalar_values}"
)
if verbosity == "high":
detailed_err_msg += "\nInputs type_num: %s" % str(
[getattr(getattr(i[0], "dtype", ""), "num", "") for i in thunk.inputs]
detailed_err_msg += "\nInputs type_num: {}".format(
str(
[
getattr(getattr(i[0], "dtype", ""), "num", "")
for i in thunk.inputs
]
)
)

detailed_err_msg += f"\nOutputs clients: {clients}\n"
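In the `raise_with_op` change above, the explicit `str(...)` wrapper is carried over from the old `%s` call; with `str.format` it is redundant for a list, since an empty placeholder already falls back to `str()`, though it is harmless. For instance, with hypothetical type numbers:

```python
type_nums = [7, 12]  # hypothetical NumPy type numbers

# Both forms produce "Inputs type_num: [7, 12]"
assert "Inputs type_num: {}".format(type_nums) == "Inputs type_num: {}".format(str(type_nums))
```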
2 changes: 1 addition & 1 deletion pytensor/scalar/basic.py
@@ -475,7 +475,7 @@ def c_extract(self, name, sub, check_input=True, **kwargs):
sub,
name=name,
dtype=specs[1],
pyarr_type="Py%sArrType_Type" % specs[2],
pyarr_type=f"Py{specs[2]}ArrType_Type",
)
)
else:
15 changes: 8 additions & 7 deletions pytensor/tensor/blas.py
@@ -180,9 +180,9 @@ def __init__(self, inplace):

def __str__(self):
if self.inplace:
return "%s{inplace}" % self.__class__.__name__
return f"{self.__class__.__name__}{{inplace}}"
else:
return "%s{no_inplace}" % self.__class__.__name__
return f"{self.__class__.__name__}{{no_inplace}}"

def make_node(self, y, alpha, A, x, beta):
y = ptb.as_tensor_variable(y)
@@ -279,9 +279,9 @@ def __init__(self, destructive):

def __str__(self):
if self.destructive:
return "%s{destructive}" % self.__class__.__name__
return f"{self.__class__.__name__}{{destructive}}"
else:
return "%s{non-destructive}" % self.__class__.__name__
return f"{self.__class__.__name__}{{non-destructive}}"

def make_node(self, A, alpha, x, y):
A = ptb.as_tensor_variable(A)
@@ -1811,9 +1811,10 @@ def contiguous(var, ndim):
f"{strides}[{i}] > 0 && {strides}[{i}] % type_size == 0"
for i in range(1, ndim)
),
"(%s)"
% " || ".join(
f"{strides}[{i}] == type_size" for i in range(1, ndim)
"({})".format(
" || ".join(
f"{strides}[{i}] == type_size" for i in range(1, ndim)
)
),
]
)
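In the `contiguous` helper above, the `%`-joined stride condition becomes a `str.format` call wrapped around a generator join. A reduced sketch with hypothetical names:

```python
ndim = 3
strides = "PyArray_STRIDES(x)"  # hypothetical C expression for the strides array

cond = "({})".format(
    " || ".join(f"{strides}[{i}] == type_size" for i in range(1, ndim))
)
assert cond == "(PyArray_STRIDES(x)[1] == type_size || PyArray_STRIDES(x)[2] == type_size)"
```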
4 changes: 2 additions & 2 deletions pytensor/tensor/elemwise.py
@@ -1098,14 +1098,14 @@ def _c_all(self, node, nodename, inames, onames, sub):
all_broadcastable = all(s == 1 for s in var.type.shape)
cond1 = " && ".join(
[
"PyArray_ISCONTIGUOUS(%s)" % arr
f"PyArray_ISCONTIGUOUS({arr})"
for arr, var in z
if not all_broadcastable
]
)
cond2 = " && ".join(
[
"PyArray_ISFORTRAN(%s)" % arr
f"PyArray_ISFORTRAN({arr})"
for arr, var in z
if not all_broadcastable
]
24 changes: 12 additions & 12 deletions pytensor/tensor/extra_ops.py
@@ -652,8 +652,8 @@ def make_node(self, x, repeats):
if repeats.dtype in numpy_unsupported_dtypes:
raise TypeError(
(
"dtypes %s are not supported by numpy.repeat "
"for the 'repeats' parameter, " % str(numpy_unsupported_dtypes)
f"dtypes {numpy_unsupported_dtypes!s} are not supported by numpy.repeat "
"for the 'repeats' parameter, "
),
repeats.dtype,
)
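The `{numpy_unsupported_dtypes!s}` replacement above uses an explicit `!s` conversion, mirroring what `%s` did for a non-string value. A quick illustration with a made-up list of dtype names:

```python
unsupported = ["uint32", "uint64"]  # hypothetical dtype names

old = "dtypes %s are not supported" % (unsupported,)
new = f"dtypes {unsupported!s} are not supported"
assert old == new == "dtypes ['uint32', 'uint64'] are not supported"
```

The `!s` is optional for a plain list, since an empty format spec already falls back to `str()`, but it keeps the conversion explicit.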
@@ -882,8 +882,8 @@ def make_node(self, a, val):
val = ptb.as_tensor_variable(val)
if a.ndim < 2:
raise TypeError(
"%s: first parameter must have at least"
" two dimensions" % self.__class__.__name__
f"{self.__class__.__name__}: first parameter must have at least"
" two dimensions"
)
elif val.ndim != 0:
raise TypeError(
@@ -892,8 +892,8 @@ def make_node(self, a, val):
val = ptb.cast(val, dtype=upcast(a.dtype, val.dtype))
if val.dtype != a.dtype:
raise TypeError(
"%s: type of second parameter must be the same as"
" the first's" % self.__class__.__name__
f"{self.__class__.__name__}: type of second parameter must be the same as"
" the first's"
)
return Apply(self, [a, val], [a.type()])

@@ -926,8 +926,8 @@ def grad(self, inp, cost_grad):
return [None, None]
elif a.ndim > 2:
raise NotImplementedError(
"%s: gradient is currently implemented"
" for matrices only" % self.__class__.__name__
f"{self.__class__.__name__}: gradient is currently implemented"
" for matrices only"
)
wr_a = fill_diagonal(grad, 0) # valid for any number of dimensions
# diag is only valid for matrices
@@ -984,8 +984,8 @@ def make_node(self, a, val, offset):
offset = ptb.as_tensor_variable(offset)
if a.ndim != 2:
raise TypeError(
"%s: first parameter must have exactly"
" two dimensions" % self.__class__.__name__
f"{self.__class__.__name__}: first parameter must have exactly"
" two dimensions"
)
elif val.ndim != 0:
raise TypeError(
@@ -998,8 +998,8 @@ def make_node(self, a, val, offset):
val = ptb.cast(val, dtype=upcast(a.dtype, val.dtype))
if val.dtype != a.dtype:
raise TypeError(
"%s: type of second parameter must be the same"
" as the first's" % self.__class__.__name__
f"{self.__class__.__name__}: type of second parameter must be the same"
" as the first's"
)
elif offset.dtype not in integer_dtypes:
raise TypeError(
11 changes: 5 additions & 6 deletions pytensor/tensor/fft.py
@@ -20,8 +20,7 @@ def make_node(self, a, s=None):
a = as_tensor_variable(a)
if a.ndim < 2:
raise TypeError(
"%s: input must have dimension > 2, with first dimension batches"
% self.__class__.__name__
f"{self.__class__.__name__}: input must have dimension > 2, with first dimension batches"
)

if s is None:
@@ -31,8 +30,8 @@ def make_node(self, a, s=None):
s = as_tensor_variable(s)
if s.dtype not in integer_dtypes:
raise TypeError(
"%s: length of the transformed axis must be"
" of type integer" % self.__class__.__name__
f"{self.__class__.__name__}: length of the transformed axis must be"
" of type integer"
)
return Apply(self, [a, s], [self.output_type(a)()])

@@ -92,8 +91,8 @@ def make_node(self, a, s=None):
s = as_tensor_variable(s)
if s.dtype not in integer_dtypes:
raise TypeError(
"%s: length of the transformed axis must be"
" of type integer" % self.__class__.__name__
f"{self.__class__.__name__}: length of the transformed axis must be"
" of type integer"
)
return Apply(self, [a, s], [self.output_type(a)()])

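Several of the rewritten messages in extra_ops.py and fft.py keep their original two-line layout, relying on implicit string concatenation: an f-string and a plain literal placed side by side are joined at compile time. A sketch with an invented class name:

```python
class CURFFTOp:  # hypothetical stand-in for an FFT Op class
    def message(self):
        # Adjacent string literals (f-string + plain string) concatenate
        return (
            f"{self.__class__.__name__}: length of the transformed axis must be"
            " of type integer"
        )

assert CURFFTOp().message() == "CURFFTOp: length of the transformed axis must be of type integer"
```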
2 changes: 1 addition & 1 deletion pytensor/tensor/io.py
@@ -28,7 +28,7 @@ def __init__(self, dtype, shape, mmap_mode=None):
if mmap_mode not in (None, "c"):
raise ValueError(
"The only supported values for mmap_mode "
"are None and 'c', got %s" % mmap_mode
f"are None and 'c', got {mmap_mode}"
)
self.mmap_mode = mmap_mode

2 changes: 1 addition & 1 deletion pytensor/tensor/math.py
@@ -1540,7 +1540,7 @@ def __init__(self, axis=None):

def __str__(self):
if self.axis is not None:
return "Mean{%s}" % (", ".join(str(x) for x in self.axis))
return "Mean{{{}}}".format(", ".join(str(x) for x in self.axis))
else:
return "Mean"

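The `Mean.__str__` change above is one of the harder ones to read: in `"Mean{{{}}}"`, the outer doubled braces are literal `{` and `}`, and the inner `{}` is the placeholder. A sketch with a hypothetical axis tuple:

```python
axis = (0, 2)  # hypothetical reduction axes

label = "Mean{{{}}}".format(", ".join(str(x) for x in axis))
assert label == "Mean{0, 2}"
```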
2 changes: 1 addition & 1 deletion pytensor/tensor/subtensor.py
@@ -2174,7 +2174,7 @@ def __str__(self):
else:
msg += ",inc"

return self.__class__.__name__ + "{%s}" % msg
return self.__class__.__name__ + f"{{{msg}}}"

def make_node(self, x, y, ilist):
x_ = as_tensor_variable(x)