DM-23173: change to binary operators at beginning of new line #125

Merged · 3 commits · Apr 9, 2020
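For context on the ticket title (this sketch is illustrative, not part of the diff): PEP 8 now recommends breaking long expressions before binary operators so that each operator starts its continuation line, and these commits move the code to that convention alongside an f-string cleanup.

first_term, second_term, third_term = 1, 2, 3

# Old style: each operator ends its line (the style being removed).
total = (first_term +
         second_term +
         third_term)

# Style adopted by this ticket: the operator begins the continuation line.
total = (first_term
         + second_term
         + third_term)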
2 changes: 1 addition & 1 deletion examples/argumentParser.py
@@ -75,4 +75,4 @@ class ExampleConfig(pexConfig.Config):
parsedCmd = parser.parse_args(config=config)
pcDict = parsedCmd.__dict__
for key in sorted(pcDict):
print("parsedCmd.%s=%r" % (key, pcDict[key]))
print(f"parsedCmd.{key}={pcDict[key]!r}")
81 changes: 40 additions & 41 deletions python/lsst/pipe/base/argumentParser.py
@@ -149,7 +149,7 @@ def castDataIds(self, butler):
try:
idKeyTypeDict = butler.getKeys(datasetType=self.datasetType, level=self.level)
except KeyError as e:
msg = "Cannot get keys for datasetType %s at level %s" % (self.datasetType, self.level)
msg = f"Cannot get keys for datasetType {self.datasetType} at level {self.level}"
raise KeyError(msg) from e

for dataDict in self.idList:
@@ -161,15 +161,15 @@ def castDataIds(self, butler):
keyType = str

log = lsstLog.Log.getDefaultLogger()
log.warn("Unexpected ID %s; guessing type is \"%s\"" %
(key, 'str' if keyType == str else keyType))
log.warn("Unexpected ID %s; guessing type is \"%s\"",
key, 'str' if keyType == str else keyType)
idKeyTypeDict[key] = keyType

if keyType != str:
try:
castVal = keyType(strVal)
except Exception:
raise TypeError("Cannot cast value %r to %s for ID key %r" % (strVal, keyType, key,))
raise TypeError(f"Cannot cast value {strVal!r} to {keyType} for ID key {key}")
dataDict[key] = castVal

def makeDataRefList(self, namespace):
@@ -227,7 +227,7 @@ class DataIdArgument:

def __init__(self, name, datasetType, level, doMakeDataRefList=True, ContainerClass=DataIdContainer):
if name.startswith("-"):
raise RuntimeError("Name %s must not start with -" % (name,))
raise RuntimeError(f"Name {name} must not start with -")
self.name = name
self.datasetType = datasetType
self.level = level
@@ -357,9 +357,9 @@ def addArgument(self, parser, idName):
-----
Also sets the `name` attribute if it is currently `None`.
"""
help = self.help if self.help else "dataset type for %s" % (idName,)
help = self.help if self.help else f"dataset type for {idName}"
if self.name is None:
self.name = "--%s_dstype" % (idName,)
self.name = f"--{idName}_dstype"
requiredDict = dict()
if self.name.startswith("-"):
requiredDict = dict(required=self.default is None)
@@ -400,7 +400,7 @@ def getDatasetType(self, namespace):
try:
value = getattr(value, key)
except KeyError:
raise RuntimeError("Cannot find config parameter %r" % (self.name,))
raise RuntimeError(f"Cannot find config parameter {self.name!r}")
return value


@@ -451,13 +451,12 @@ def __init__(self, name, usage="%(prog)s input [options]", **kwargs):
formatter_class=argparse.RawDescriptionHelpFormatter,
**kwargs)
self.add_argument(metavar='input', dest="rawInput",
help="path to input data repository, relative to $%s" % (DEFAULT_INPUT_NAME,))
help=f"path to input data repository, relative to ${DEFAULT_INPUT_NAME}")
self.add_argument("--calib", dest="rawCalib",
help="path to input calibration repository, relative to $%s" %
(DEFAULT_CALIB_NAME,))
help=f"path to input calibration repository, relative to ${DEFAULT_CALIB_NAME}")
self.add_argument("--output", dest="rawOutput",
help="path to output data repository (need not exist), relative to $%s" %
(DEFAULT_OUTPUT_NAME,))
help="path to output data repository (need not exist), "
f"relative to ${DEFAULT_OUTPUT_NAME}")
self.add_argument("--rerun", dest="rawRerun", metavar="[INPUT:]OUTPUT",
help="rerun name: sets OUTPUT to ROOT/rerun/OUTPUT; "
"optionally sets ROOT to ROOT/rerun/INPUT")
@@ -553,9 +552,9 @@ def add_id_argument(self, name, datasetType, help, level=None, doMakeDataRefList
argName = name.lstrip("-")

if argName in self._dataIdArgDict:
raise RuntimeError("Data ID argument %s already exists" % (name,))
raise RuntimeError(f"Data ID argument {name} already exists")
if argName in set(("camera", "config", "butler", "log", "obsPkg")):
raise RuntimeError("Data ID argument %s is a reserved name" % (name,))
raise RuntimeError(f"Data ID argument {name} is a reserved name")

self.add_argument(name, nargs="*", action=IdValueAction, help=help,
metavar="KEY=VALUE1[^VALUE2[^VALUE3...]")
@@ -620,20 +619,20 @@ def parse_args(self, config, args=None, log=None, override=None):
if len(args) == 1 and args[0] in ("-h", "--help"):
self.exit()
else:
self.exit("%s: error: Must specify input as first argument" % self.prog)
self.exit(f"{self.prog}: error: Must specify input as first argument")

# Note that --rerun may change namespace.input, but if it does
# we verify that the new input has the same mapper class.
namespace = argparse.Namespace()
namespace.input = _fixPath(DEFAULT_INPUT_NAME, args[0])
if not os.path.isdir(namespace.input):
self.error("Error: input=%r not found" % (namespace.input,))
self.error(f"Error: input={namespace.input!r} not found")

namespace.config = config
namespace.log = log if log is not None else lsstLog.Log.getDefaultLogger()
mapperClass = dafPersist.Butler.getMapperClass(namespace.input)
if mapperClass is None:
self.error("Error: no mapper specified for input repo %r" % (namespace.input,))
self.error(f"Error: no mapper specified for input repo {namespace.input!r}")

namespace.camera = mapperClass.getCameraName()
namespace.obsPkg = mapperClass.getPackageName()
@@ -696,7 +695,7 @@ def parse_args(self, config, args=None, log=None, override=None):
if "data" in namespace.show:
for dataIdName in self._dataIdArgDict.keys():
for dataRef in getattr(namespace, dataIdName).refList:
print("%s dataRef.dataId = %s" % (dataIdName, dataRef.dataId))
print(f"{dataIdName} dataRef.dataId = {dataRef.dataId}")

if namespace.show and "run" not in namespace.show:
sys.exit(0)
@@ -706,7 +705,7 @@ def parse_args(self, config, args=None, log=None, override=None):
import debug
assert debug # silence pyflakes
except ImportError:
sys.stderr.write("Warning: no 'debug' module found\n")
print("Warning: no 'debug' module found", file=sys.stderr)
namespace.debug = False

del namespace.loglevel
@@ -763,7 +762,7 @@ def _parseDirectories(self, namespace):
namespace.input = os.path.realpath(os.path.join(namespace.output, "_parent"))
modifiedInput = True
else:
self.error("Error: invalid argument for --rerun: %s" % namespace.rerun)
self.error(f"Error: invalid argument for --rerun: {namespace.rerun}")
if modifiedInput and dafPersist.Butler.getMapperClass(namespace.input) != mapperClass:
self.error("Error: input directory specified by --rerun must have the same mapper as INPUT")
else:
@@ -953,9 +952,9 @@ def getTaskDict(config, taskDict=None, baseName=""):
if hasattr(field, "value") and hasattr(field, "target"):
subConfig = field.value
if isinstance(subConfig, pexConfig.Config):
subBaseName = "%s.%s" % (baseName, fieldName) if baseName else fieldName
subBaseName = f"{baseName}.{fieldName}" if baseName else fieldName
try:
taskName = "%s.%s" % (field.target.__module__, field.target.__name__)
taskName = f"{field.target.__module__}.{field.target.__name__}"
except Exception:
taskName = repr(field.target)
taskDict[subBaseName] = taskName
@@ -1018,8 +1017,8 @@ def __init__(self, pattern):
self._pattern = re.compile(fnmatch.translate(pattern))
else:
if pattern != pattern.lower():
print(u"Matching \"%s\" without regard to case "
"(append :NOIGNORECASE to prevent this)" % (pattern,), file=sys.stdout)
print(f"Matching {pattern!r} without regard to case "
"(append :NOIGNORECASE to prevent this)", file=sys.stdout)
self._pattern = re.compile(fnmatch.translate(pattern), re.IGNORECASE)

def write(self, showStr):
@@ -1028,7 +1027,7 @@ def write(self, showStr):
# at "=" for string matching
matchStr = showStr.split("\n")[-1].split("=")[0]
if self._pattern.search(matchStr):
print(u"\n" + showStr)
print("\n" + showStr)

fd = FilteredStream(pattern)
else:
@@ -1054,15 +1053,15 @@ def write(self, showStr):
try:
hconfig = getattr(hconfig, cpt)
except AttributeError:
print("Error: configuration %s has no subconfig %s" %
(".".join(["config"] + cpath[:i]), cpt), file=sys.stderr)
config_path = ".".join(["config"] + cpath[:i])
print(f"Error: configuration {config_path} has no subconfig {cpt}", file=sys.stderr)
error = True

try:
print(pexConfig.history.format(hconfig, cname))
except KeyError:
print("Error: %s has no field %s" % (".".join(["config"] + cpath), cname),
file=sys.stderr)
config_path = ".".join(["config"] + cpath)
print(f"Error: {config_path} has no field {cname}", file=sys.stderr)
error = True

if error:
@@ -1075,8 +1074,8 @@ def write(self, showStr):
elif showCommand == "tasks":
showTaskHierarchy(config)
else:
print(u"Unknown value for show: %s (choose from '%s')" %
(what, "', '".join("config[=XXX] data history=XXX tasks run".split())), file=sys.stderr)
choices = "', '".join("config[=XXX] data history=XXX tasks run".split())
print(f"Unknown value for show: {what} (choose from {choices!r})", file=sys.stderr)
sys.exit(1)

if exit and "run" not in showOpts:
@@ -1091,13 +1090,13 @@ def showTaskHierarchy(config):
config : `lsst.pex.config.Config`
Configuration to process.
"""
print(u"Subtasks:")
print("Subtasks:")
taskDict = getTaskDict(config=config)

fieldNameList = sorted(taskDict.keys())
for fieldName in fieldNameList:
taskName = taskDict[fieldName]
print(u"%s: %s" % (fieldName, taskName))
print(f"{fieldName}: {taskName}")


class ConfigValueAction(argparse.Action):
@@ -1124,22 +1123,22 @@ def __call__(self, parser, namespace, values, option_string):
for nameValue in values:
name, sep, valueStr = nameValue.partition("=")
if not valueStr:
parser.error("%s value %s must be in form name=value" % (option_string, nameValue))
parser.error(f"{option_string} value {nameValue} must be in form name=value")

# see if setting the string value works; if not, try eval
try:
setDottedAttr(namespace.config, name, valueStr)
except AttributeError:
parser.error("no config field: %s" % (name,))
parser.error(f"no config field: {name}")
except Exception:
try:
value = eval(valueStr, {})
except Exception:
parser.error("cannot parse %r as a value for %s" % (valueStr, name))
parser.error(f"cannot parse {valueStr!r} as a value for {name}")
try:
setDottedAttr(namespace.config, name, value)
except Exception as e:
parser.error("cannot set config.%s=%r: %s" % (name, value, e))
parser.error(f"cannot set config.{name}={value!r}: {e}")


class ConfigFileAction(argparse.Action):
@@ -1167,7 +1166,7 @@ def __call__(self, parser, namespace, values, option_string=None):
try:
namespace.config.load(configfile)
except Exception as e:
parser.error("cannot load config file %r: %s" % (configfile, e))
parser.error(f"cannot load config file {configfile!r}: {e}")


class IdValueAction(argparse.Action):
@@ -1225,7 +1224,7 @@ def __call__(self, parser, namespace, values, option_string):
for nameValue in values:
name, sep, valueStr = nameValue.partition("=")
if name in idDict:
parser.error("%s appears multiple times in one ID argument: %s" % (name, option_string))
parser.error(f"{name} appears multiple times in one ID argument: {option_string}")
idDict[name] = []
for v in valueStr.split("^"):
mat = re.search(r"^(\d+)\.\.(\d+)(?::(\d+))?$", v)
@@ -1278,7 +1277,7 @@ def __call__(self, parser, namespace, values, option_string):
if logLevelUpr in permittedLevelSet:
logLevel = getattr(lsstLog.Log, logLevelUpr)
else:
parser.error("loglevel=%r not one of %s" % (levelStr, permittedLevelList))
parser.error(f"loglevel={levelStr!r} not one of {permittedLevelList}")
if component is None:
namespace.log.setLevel(logLevel)
else:
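One change in this file is more than cosmetic: the log.warn call now hands its arguments to the logger rather than pre-formatting the message with %. The same deferred-formatting idiom, sketched here with Python's standard logging module (the lsstLog call in the hunk above takes format arguments the same way):

import logging

logging.basicConfig(level=logging.ERROR)  # WARNING messages are filtered out
log = logging.getLogger("example")

key, keyType = "visit", int

# Eager: the string is built even though the record is then discarded.
log.warning("Unexpected ID %s; guessing type is \"%s\"" % (key, keyType))

# Lazy, as in this diff: the arguments are only interpolated if the
# message actually passes the level check.
log.warning("Unexpected ID %s; guessing type is \"%s\"", key, keyType)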
28 changes: 13 additions & 15 deletions python/lsst/pipe/base/cmdLineTask.py
@@ -81,7 +81,7 @@ def profile(filename, log=None):
profile.disable()
profile.dump_stats(filename)
if log is not None:
log.info("cProfile stats written to %s" % filename)
log.info("cProfile stats written to %s", filename)


class TaskRunner:
@@ -679,17 +679,17 @@ def writeConfig(self, butler, clobber=False, doBackup=True):
try:
oldConfig = butler.get(configName, immediate=True)
except Exception as exc:
raise type(exc)("Unable to read stored config file %s (%s); consider using --clobber-config" %
(configName, exc))
raise type(exc)(f"Unable to read stored config file {configName} (exc); "
"consider using --clobber-config")

def logConfigMismatch(msg):
self.log.fatal("Comparing configuration: %s", msg)

if not self.config.compare(oldConfig, shortcut=False, output=logConfigMismatch):
raise TaskError(
("Config does not match existing task config %r on disk; tasks configurations " +
"must be consistent within the same output repo (override with --clobber-config)") %
(configName,))
f"Config does not match existing task config {configName!r} on disk; "
"tasks configurations must be consistent within the same output repo "
"(override with --clobber-config)")
else:
butler.put(self.config, configName)

@@ -722,9 +722,9 @@ def writeSchemas(self, butler, clobber=False, doBackup=True):
oldSchema = butler.get(schemaDataset, immediate=True).getSchema()
if not oldSchema.compare(catalog.getSchema(), afwTable.Schema.IDENTICAL):
raise TaskError(
("New schema does not match schema %r on disk; schemas must be " +
" consistent within the same output repo (override with --clobber-config)") %
(dataset,))
f"New schema does not match schema {dataset!r} on disk; "
"schemas must be consistent within the same output repo "
"(override with --clobber-config)")
else:
butler.put(catalog, schemaDataset)

@@ -779,18 +779,16 @@ def writePackageVersions(self, butler, clobber=False, doBackup=True, dataset="pa
try:
old = butler.get(dataset, immediate=True)
except Exception as exc:
raise type(exc)("Unable to read stored version dataset %s (%s); "
"consider using --clobber-versions or --no-versions" %
(dataset, exc))
raise type(exc)(f"Unable to read stored version dataset {dataset} ({exc}); "
"consider using --clobber-versions or --no-versions")
# Note that because we can only detect python modules that have been imported, the stored
# list of products may be more or less complete than what we have now. What's important is
# that the products that are in common have the same version.
diff = packages.difference(old)
if diff:
versions_str = "; ".join(f"{pkg}: {diff[pkg][1]} vs {diff[pkg][0]}" for pkg in diff)
raise TaskError(
"Version mismatch (" +
"; ".join("%s: %s vs %s" % (pkg, diff[pkg][1], diff[pkg][0]) for pkg in diff) +
"); consider using --clobber-versions or --no-versions")
f"Version mismatch ({versions_str}); consider using --clobber-versions or --no-versions")
# Update the old set of packages in case we have more packages that haven't been persisted.
extra = packages.extra(old)
if extra:
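Several hunks in this file wrap long messages by splitting an f-string into adjacent literals, relying on implicit string concatenation. The idiom has a classic pitfall: leave out the braces (or the f prefix on a fragment containing placeholders) and the placeholder is emitted as literal text instead of its value. A hypothetical standalone sketch, not taken from the repo:

configName = "processCcd_config"
exc = RuntimeError("permission denied")

# Adjacent string literals concatenate at compile time; only fragments
# containing {placeholders} need the f prefix.
msg = (f"Unable to read stored config file {configName} ({exc}); "
       "consider using --clobber-config")

# Pitfall: without the braces, the name is emitted literally.
bad = (f"Unable to read stored config file {configName} (exc); "
       "consider using --clobber-config")

assert "(permission denied)" in msg and "(exc)" in bad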
8 changes: 4 additions & 4 deletions python/lsst/pipe/base/struct.py
@@ -78,9 +78,9 @@ def __safeAdd(self, name, val):
Raised if name already exists or starts with ``__`` (two underscores).
"""
if hasattr(self, name):
raise RuntimeError("Item %s already exists" % (name,))
raise RuntimeError(f"Item {name!r} already exists")
if name.startswith("__"):
raise RuntimeError("Item name %r invalid; must not begin with __" % (name,))
raise RuntimeError(f"Item name {name!r} invalid; must not begin with __")
setattr(self, name, val)

def getDict(self):
@@ -137,5 +137,5 @@ def __len__(self):
return len(self.__dict__)

def __repr__(self):
itemList = ["%s=%r" % (name, val) for name, val in self.getDict().items()]
return "%s(%s)" % (self.__class__.__name__, "; ".join(itemList))
itemsStr = "; ".join(f"{name}={val}" for name, val in self.getDict().items())
return f"{self.__class__.__name__}({itemsStr})"