DM-28480: Update for new flake8 #72

Merged 4 commits on Jan 27, 2021
8 changes: 5 additions & 3 deletions .github/workflows/lint.yaml
@@ -1,8 +1,10 @@
name: lint

on:
- push
- pull_request
push:
branches:
- master
pull_request:

jobs:
lint:
@@ -13,7 +15,7 @@ jobs:
- name: Set up Python
uses: actions/setup-python@v2
with:
python-version: 3.7
python-version: 3.8

- name: Install
run: pip install -r <(curl https://raw.githubusercontent.com/lsst/linting/master/requirements.txt)
52 changes: 26 additions & 26 deletions python/lsst/cp/pipe/makeBrighterFatterKernel.py
@@ -70,7 +70,7 @@ class MakeBrighterFatterKernelTaskConfig(pexConfig.Config):
)
isrDesirableSteps = pexConfig.ListField(
dtype=str,
doc="isr operations that it is advisable to perform, but are not mission-critical." +
doc="isr operations that it is advisable to perform, but are not mission-critical."
" WARNs are logged for any of these found to be False.",
default=['doBias', 'doDark', 'doCrosstalk', 'doDefect', 'doLinearize']
)
@@ -142,7 +142,7 @@ class MakeBrighterFatterKernelTaskConfig(pexConfig.Config):
)
xcorrCheckRejectLevel = pexConfig.Field(
dtype=float,
doc="Sanity check level for the sum of the input cross-correlations. Arrays which " +
doc="Sanity check level for the sum of the input cross-correlations. Arrays which "
"sum to greater than this are discarded before the clipped mean is calculated.",
default=2.0
)
@@ -158,7 +158,7 @@ class MakeBrighterFatterKernelTaskConfig(pexConfig.Config):
)
nSigmaClipKernelGen = pexConfig.Field(
dtype=float,
doc="Number of sigma to clip to during pixel-wise clipping when generating the kernel. See " +
doc="Number of sigma to clip to during pixel-wise clipping when generating the kernel. See "
"the generateKernel docstring for more info.",
default=4
)
@@ -184,7 +184,7 @@ class MakeBrighterFatterKernelTaskConfig(pexConfig.Config):
)
biasCorr = pexConfig.Field(
dtype=float,
doc="An empirically determined correction factor, used to correct for the sigma-clipping of" +
doc="An empirically determined correction factor, used to correct for the sigma-clipping of"
" a non-Gaussian distribution. Post DM-15277, code will exist here to calculate appropriate values",
default=0.9241
)
@@ -195,7 +195,7 @@ class MakeBrighterFatterKernelTaskConfig(pexConfig.Config):
)
fixPtcThroughOrigin = pexConfig.Field(
dtype=bool,
doc="Constrain the fit of the photon transfer curve to go through the origin when measuring" +
doc="Constrain the fit of the photon transfer curve to go through the origin when measuring"
"the gain?",
default=True
)
@@ -210,13 +210,13 @@ class MakeBrighterFatterKernelTaskConfig(pexConfig.Config):
)
ignoreAmpsForAveraging = pexConfig.ListField(
dtype=str,
doc="List of amp names to ignore when averaging the amplifier kernels into the detector" +
doc="List of amp names to ignore when averaging the amplifier kernels into the detector"
" kernel. Only relevant for level = AMP",
default=[]
)
backgroundWarnLevel = pexConfig.Field(
dtype=float,
doc="Log warnings if the mean of the fitted background is found to be above this level after " +
doc="Log warnings if the mean of the fitted background is found to be above this level after "
"differencing image pair.",
default=0.1
)
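Note: most of the `doc` string edits in this file follow one pattern: the trailing `+` between string literals is dropped and Python's implicit concatenation of adjacent literals is used instead, which removes the line break after a binary operator that the updated flake8 appears to flag (pycodestyle W504). A minimal sketch of the before/after, reusing the first doc string above:

```python
# Adjacent string literals are concatenated at compile time, so the explicit
# "+" at the end of the line is redundant; dropping it avoids W504
# (line break after binary operator).
docOld = ("isr operations that it is advisable to perform, but are not mission-critical." +
          " WARNs are logged for any of these found to be False.")
docNew = ("isr operations that it is advisable to perform, but are not mission-critical."
          " WARNs are logged for any of these found to be False.")
assert docOld == docNew
```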
@@ -435,22 +435,22 @@ def validateIsrConfig(self):

for configParam in self.config.isrMandatorySteps:
if configDict[configParam] is False:
raise RuntimeError('Must set config.isr.%s to True '
'for brighter-fatter kernel calculation' % configParam)
raise RuntimeError(f'Must set config.isr.{configParam} to True '
'for brighter-fatter kernel calculation')

for configParam in self.config.isrForbiddenSteps:
if configDict[configParam] is True:
raise RuntimeError('Must set config.isr.%s to False '
'for brighter-fatter kernel calculation' % configParam)
raise RuntimeError(f'Must set config.isr.{configParam} to False '
'for brighter-fatter kernel calculation')

for configParam in self.config.isrDesirableSteps:
if configParam not in configDict:
self.log.info('Failed to find key %s in the isr config dict. You probably want ' +
'to set the equivalent for your obs_package to True.' % configParam)
self.log.info('Failed to find key %s in the isr config dict. You probably want '
'to set the equivalent for your obs_package to True.', configParam)
continue
if configDict[configParam] is False:
self.log.warn('Found config.isr.%s set to False for brighter-fatter kernel calculation. '
'It is probably desirable to have this set to True' % configParam)
'It is probably desirable to have this set to True', configParam)

# subtask settings
if not self.config.isr.assembleCcd.doTrim:
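Note: the `validateIsrConfig` hunk is more than a reflow. In the removed `log.info` call the `%` operator bound only to the second literal, which has no placeholder, so hitting that branch would have raised a TypeError ("not all arguments converted during string formatting"); the replacement passes `configParam` as a lazy logger argument instead, and the `RuntimeError` messages become f-strings. A self-contained sketch of the new logging pattern (the logger name and value below are made up):

```python
import logging

log = logging.getLogger("cp.pipe.example")   # hypothetical logger for illustration
configParam = "doBias"                       # hypothetical missing isr key

# Implicit literal concatenation plus a lazy argument: the message is only
# interpolated if an INFO record is actually emitted, and no "%" is applied
# to a literal that lacks a placeholder.
log.info('Failed to find key %s in the isr config dict. You probably want '
         'to set the equivalent for your obs_package to True.', configParam)
```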
@@ -1239,8 +1239,8 @@ def _iterativeRegression(self, x, y, fixThroughOrigin=False, nSigmaClip=None, ma
res = (y - slope * x) / x
resMean = afwMath.makeStatistics(res, afwMath.MEANCLIP, sctrl).getValue()
resStd = np.sqrt(afwMath.makeStatistics(res, afwMath.VARIANCECLIP, sctrl).getValue())
index = np.where((res > (resMean + nSigmaClip*resStd)) |
(res < (resMean - nSigmaClip*resStd)))
index = np.where((res > (resMean + nSigmaClip*resStd))
| (res < (resMean - nSigmaClip*resStd)))
self.log.debug("%.3f %.3f %.3f %.3f" % (resMean, resStd, np.max(res), nSigmaClip))
if np.shape(np.where(index))[1] == 0 or (nIter >= maxIter): # run out of points or iters
break
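Note: the remaining edits in this file, and the similar ones in measureCrosstalk.py and astierCovPtcFit.py further down, only move binary operators (`|`, `&`, `+`) from the end of a wrapped line to the start of the continuation line; the expressions themselves are unchanged. This matches PEP 8's preference for breaking before binary operators, which the updated flake8 configuration appears to enforce via W504. A runnable sketch of the new wrapping, with made-up numbers:

```python
import numpy as np

res = np.array([-3.0, 0.5, 2.5])             # made-up residuals
resMean, resStd, nSigmaClip = 0.0, 1.0, 2.0

# Break *before* the "|" operator; behaviour is identical to the removed
# form that had "|" at the end of the first line.
index = np.where((res > (resMean + nSigmaClip*resStd))
                 | (res < (resMean - nSigmaClip*resStd)))
print(index[0])   # -> [0 2], the points outside the clipping range
```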
@@ -1439,8 +1439,8 @@ def successiveOverRelax(self, source, maxIter=None, eLevel=None):
# Calculate the initial error
for i in range(1, func.shape[0] - 1):
for j in range(1, func.shape[1] - 1):
resid[i, j] = (func[i, j - 1] + func[i, j + 1] + func[i - 1, j] +
func[i + 1, j] - 4*func[i, j] - source[i - 1, j - 1])
resid[i, j] = (func[i, j - 1] + func[i, j + 1] + func[i - 1, j]
+ func[i + 1, j] - 4*func[i, j] - source[i - 1, j - 1])
inError = np.sum(np.abs(resid))

# Iterate until convergence
@@ -1454,24 +1454,24 @@ def successiveOverRelax(self, source, maxIter=None, eLevel=None):
if nIter%2 == 0:
for i in range(1, func.shape[0] - 1, 2):
for j in range(1, func.shape[1] - 1, 2):
resid[i, j] = float(func[i, j-1] + func[i, j + 1] + func[i - 1, j] +
func[i + 1, j] - 4.0*func[i, j] - dx*dx*source[i - 1, j - 1])
resid[i, j] = float(func[i, j-1] + func[i, j + 1] + func[i - 1, j]
+ func[i + 1, j] - 4.0*func[i, j] - dx*dx*source[i - 1, j - 1])
func[i, j] += omega*resid[i, j]*.25
for i in range(2, func.shape[0] - 1, 2):
for j in range(2, func.shape[1] - 1, 2):
resid[i, j] = float(func[i, j - 1] + func[i, j + 1] + func[i - 1, j] +
func[i + 1, j] - 4.0*func[i, j] - dx*dx*source[i - 1, j - 1])
resid[i, j] = float(func[i, j - 1] + func[i, j + 1] + func[i - 1, j]
+ func[i + 1, j] - 4.0*func[i, j] - dx*dx*source[i - 1, j - 1])
func[i, j] += omega*resid[i, j]*.25
else:
for i in range(1, func.shape[0] - 1, 2):
for j in range(2, func.shape[1] - 1, 2):
resid[i, j] = float(func[i, j - 1] + func[i, j + 1] + func[i - 1, j] +
func[i + 1, j] - 4.0*func[i, j] - dx*dx*source[i - 1, j - 1])
resid[i, j] = float(func[i, j - 1] + func[i, j + 1] + func[i - 1, j]
+ func[i + 1, j] - 4.0*func[i, j] - dx*dx*source[i - 1, j - 1])
func[i, j] += omega*resid[i, j]*.25
for i in range(2, func.shape[0] - 1, 2):
for j in range(1, func.shape[1] - 1, 2):
resid[i, j] = float(func[i, j - 1] + func[i, j + 1] + func[i - 1, j] +
func[i + 1, j] - 4.0*func[i, j] - dx*dx*source[i - 1, j - 1])
resid[i, j] = float(func[i, j - 1] + func[i, j + 1] + func[i - 1, j]
+ func[i + 1, j] - 4.0*func[i, j] - dx*dx*source[i - 1, j - 1])
func[i, j] += omega*resid[i, j]*.25
outError = np.sum(np.abs(resid))
if outError < inError*eLevel:
10 changes: 5 additions & 5 deletions python/lsst/cp/pipe/measureCrosstalk.py
@@ -220,9 +220,9 @@ def run(self, inputExp, sourceExps=[]):
sourceAmpName = sourceAmp.getName()
sourceAmpImage = sourceIm[sourceAmp.getBBox()]
sourceMask = sourceAmpImage.mask.array
select = ((sourceMask & detected > 0) &
(sourceMask & bad == 0) &
np.isfinite(sourceAmpImage.image.array))
select = ((sourceMask & detected > 0)
& (sourceMask & bad == 0)
& np.isfinite(sourceAmpImage.image.array))
count = np.sum(select)
self.log.debug(" Source amplifier: %s", sourceAmpName)

@@ -606,8 +606,8 @@ def measureCrosstalkCoefficients(self, ratios, rejIter, rejSigma):
else:
correctionFactor = self.sigmaClipCorrection(rejSigma)
calib.coeffErr[ii][jj] = np.std(values) * correctionFactor
calib.coeffValid[ii][jj] = (np.abs(calib.coeffs[ii][jj]) >
calib.coeffErr[ii][jj] / np.sqrt(calib.coeffNum[ii][jj]))
calib.coeffValid[ii][jj] = (np.abs(calib.coeffs[ii][jj])
> calib.coeffErr[ii][jj] / np.sqrt(calib.coeffNum[ii][jj]))

if calib.coeffNum[ii][jj] > 1:
self.debugRatios('measure', ratios, ordering[ii], ordering[jj],
4 changes: 2 additions & 2 deletions python/lsst/cp/pipe/ptc/astierCovPtcFit.py
@@ -399,8 +399,8 @@ def evalCovModel(self, mu=None):
# assumes that mu is 1d
bigMu = mu[:, np.newaxis, np.newaxis]*gain
# c(=a*b in Astier+19) also has a contribution to the last term, that is absent for now.
covModel = (bigMu/(gain*gain)*(a1*bigMu+2./3.*(bigMu*bigMu)*(a2 + c1) +
(1./3.*a3 + 5./6.*ac)*(bigMu*bigMu*bigMu)) + noise[np.newaxis, :, :]/gain**2)
covModel = (bigMu/(gain*gain)*(a1*bigMu+2./3.*(bigMu*bigMu)*(a2 + c1)
+ (1./3.*a3 + 5./6.*ac)*(bigMu*bigMu*bigMu)) + noise[np.newaxis, :, :]/gain**2)
# add the Poisson term, and the read out noise (variance)
covModel[:, 0, 0] += mu/gain

4 changes: 2 additions & 2 deletions python/lsst/cp/pipe/ptc/cpSolvePtcTask.py
@@ -524,7 +524,7 @@ def fitPtc(self, dataset):
if dataset.ptcFitType:
ptcFitType = dataset.ptcFitType
else:
raise RuntimeError(f"ptcFitType is None of empty in PTC dataset.")
raise RuntimeError("ptcFitType is None of empty in PTC dataset.")
matrixSide = self.config.maximumRangeCovariancesAstier
nanMatrix = np.empty((matrixSide, matrixSide))
nanMatrix[:] = np.nan
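Note: the one-character change in `fitPtc` drops the `f` prefix from a string that has no placeholders; recent pyflakes/flake8 releases report that as F541 (f-string without any placeholders). A minimal illustration (the function and message below are hypothetical, not from this module):

```python
def checkFitType(ptcFitType):
    """Illustrative only: reject an unset PTC fit type."""
    if not ptcFitType:
        # Plain string literal: adding an "f" prefix here would trip F541
        # because the message contains no {placeholders}.
        raise RuntimeError("ptcFitType is missing from the PTC dataset.")
    return ptcFitType
```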
@@ -615,7 +615,7 @@ def errFunc(p, x, y):
varVecFinal = varVecOriginal[mask]

if Counter(mask)[False] > 0:
self.log.info((f"Number of points discarded in PTC of amplifier {ampName}:" +
self.log.info((f"Number of points discarded in PTC of amplifier {ampName}:"
f" {Counter(mask)[False]} out of {len(meanVecOriginal)}"))

if (len(meanVecFinal) < len(parsIniPtc)):
18 changes: 9 additions & 9 deletions python/lsst/cp/pipe/ptc/plotPtc.py
@@ -126,7 +126,7 @@ def run(self, filenameFull, datasetPtc, linearizer=None, log=None):
elif ptcFitType in ["EXPAPPROXIMATION", "POLYNOMIAL"]:
self._plotStandardPtc(datasetPtc, ptcFitType, pdfPages)
else:
raise RuntimeError(f"The input dataset had an invalid dataset.ptcFitType: {ptcFitType}. \n" +
raise RuntimeError(f"The input dataset had an invalid dataset.ptcFitType: {ptcFitType}. \n"
"Options: 'FULLCOVARIANCE', EXPAPPROXIMATION, or 'POLYNOMIAL'.")
if linearizer:
self._plotLinearizer(datasetPtc, linearizer, pdfPages)
@@ -306,11 +306,11 @@ def plotCovariances(mu, covs, covsModel, covsWeights, covsNoB, covsModelNoB, cov
chi2FullModelNoBVar = calculateWeightedReducedChi2(varVecFinalNoB, varVecModelFinalNoB,
varWeightsFinalNoB, len(meanVecFinalNoB),
3)
stringLegend = (f"Gain: {gain:.4} e/ADU \n" +
f"Noise: {noise:.4} e \n" +
r"$a_{00}$: %.3e 1/e"%aCoeffs[0, 0] +
"\n" + r"$b_{00}$: %.3e 1/e"%bCoeffs[0, 0] +
f"\nLast in fit: {meanVecFinal[-1]:.7} ADU ")
stringLegend = (f"Gain: {gain:.4} e/ADU \n"
f"Noise: {noise:.4} e \n"
+ r"$a_{00}$: %.3e 1/e"%aCoeffs[0, 0] + "\n"
+ r"$b_{00}$: %.3e 1/e"%bCoeffs[0, 0]
+ f"\nLast in fit: {meanVecFinal[-1]:.7} ADU ")
minMeanVecFinal = np.nanmin(meanVecFinal)
maxMeanVecFinal = np.nanmax(meanVecFinal)
deltaXlim = maxMeanVecFinal - minMeanVecFinal
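Note: the `stringLegend` rewrite (repeated in plotPtcGen2.py below) combines the two patterns noted earlier: adjacent f-string and raw-string literals are joined implicitly, while the `%`-formatted pieces are expressions rather than literals, so they keep an explicit `+`, now placed at the start of the continuation line. A self-contained sketch with made-up values:

```python
gain, noise = 1.5, 5.0                           # made-up values for illustration
a00, b00, lastMean = 1.2e-6, 3.4e-7, 98765.4321

# f-strings and raw strings concatenate implicitly; the %-formatted terms
# are expressions, so they still need "+", placed at the start of the line.
stringLegend = (f"Gain: {gain:.4} e/ADU \n"
                f"Noise: {noise:.4} e \n"
                + r"$a_{00}$: %.3e 1/e" % a00 + "\n"
                + r"$b_{00}$: %.3e 1/e" % b00
                + f"\nLast in fit: {lastMean:.7} ADU ")
print(stringLegend)
```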
@@ -390,10 +390,10 @@ def plotCovariances(mu, covs, covsModel, covsWeights, covsNoB, covsModelNoB, cov
pdfPages.savefig(f2)
fResCov00.suptitle("Residuals (data-model) for Cov00 (Var)", fontsize=supTitleFontSize)
pdfPages.savefig(fResCov00)
fCov01.suptitle("Cov01 as in Astier+19 (nearest parallel neighbor covariance) \n" +
fCov01.suptitle("Cov01 as in Astier+19 (nearest parallel neighbor covariance) \n"
" Fit: Eq. 20, Astier+19", fontsize=supTitleFontSize)
pdfPages.savefig(fCov01)
fCov10.suptitle("Cov10 as in Astier+19 (nearest serial neighbor covariance) \n" +
fCov10.suptitle("Cov10 as in Astier+19 (nearest serial neighbor covariance) \n"
"Fit: Eq. 20, Astier+19", fontsize=supTitleFontSize)
pdfPages.savefig(fCov10)

@@ -1039,7 +1039,7 @@ def _plotLinearizer(self, dataset, linearizer, pdfPages):
f.suptitle("Linearity \n Fit: Polynomial (degree: %g)"
% (len(pars)-1),
fontsize=supTitleFontSize)
f2.suptitle(r"Fractional NL residual" + "\n" +
f2.suptitle(r"Fractional NL residual" "\n"
r"$100\times \frac{(k_0 + k_1*Time-\mu)}{k_0+k_1*Time}$",
fontsize=supTitleFontSize)
pdfPages.savefig(f)
18 changes: 9 additions & 9 deletions python/lsst/cp/pipe/ptc/plotPtcGen2.py
@@ -153,7 +153,7 @@ def run(self, filenameFull, datasetPtc, linearizer=None, log=None):
elif ptcFitType in ["EXPAPPROXIMATION", "POLYNOMIAL"]:
self._plotStandardPtc(datasetPtc, ptcFitType, pdfPages)
else:
raise RuntimeError(f"The input dataset had an invalid dataset.ptcFitType: {ptcFitType}. \n" +
raise RuntimeError(f"The input dataset had an invalid dataset.ptcFitType: {ptcFitType}. \n"
"Options: 'FULLCOVARIANCE', EXPAPPROXIMATION, or 'POLYNOMIAL'.")
if linearizer:
self._plotLinearizer(datasetPtc, linearizer, pdfPages)
@@ -333,11 +333,11 @@ def plotCovariances(mu, covs, covsModel, covsWeights, covsNoB, covsModelNoB, cov
chi2FullModelNoBVar = calculateWeightedReducedChi2(varVecFinalNoB, varVecModelFinalNoB,
varWeightsFinalNoB, len(meanVecFinalNoB),
3)
stringLegend = (f"Gain: {gain:.4} e/ADU \n" +
f"Noise: {noise:.4} e \n" +
r"$a_{00}$: %.3e 1/e"%aCoeffs[0, 0] +
"\n" + r"$b_{00}$: %.3e 1/e"%bCoeffs[0, 0] +
f"\nLast in fit: {meanVecFinal[-1]:.7} ADU ")
stringLegend = (f"Gain: {gain:.4} e/ADU \n"
f"Noise: {noise:.4} e \n"
+ r"$a_{00}$: %.3e 1/e"%aCoeffs[0, 0] + "\n"
+ r"$b_{00}$: %.3e 1/e"%bCoeffs[0, 0]
+ f"\nLast in fit: {meanVecFinal[-1]:.7} ADU ")
minMeanVecFinal = np.nanmin(meanVecFinal)
maxMeanVecFinal = np.nanmax(meanVecFinal)
deltaXlim = maxMeanVecFinal - minMeanVecFinal
@@ -417,10 +417,10 @@ def plotCovariances(mu, covs, covsModel, covsWeights, covsNoB, covsModelNoB, cov
pdfPages.savefig(f2)
fResCov00.suptitle("Residuals (data-model) for Cov00 (Var)", fontsize=supTitleFontSize)
pdfPages.savefig(fResCov00)
fCov01.suptitle("Cov01 as in Astier+19 (nearest parallel neighbor covariance) \n" +
fCov01.suptitle("Cov01 as in Astier+19 (nearest parallel neighbor covariance) \n"
" Fit: Eq. 20, Astier+19", fontsize=supTitleFontSize)
pdfPages.savefig(fCov01)
fCov10.suptitle("Cov10 as in Astier+19 (nearest serial neighbor covariance) \n" +
fCov10.suptitle("Cov10 as in Astier+19 (nearest serial neighbor covariance) \n"
"Fit: Eq. 20, Astier+19", fontsize=supTitleFontSize)
pdfPages.savefig(fCov10)

@@ -1066,7 +1066,7 @@ def _plotLinearizer(self, dataset, linearizer, pdfPages):
f.suptitle("Linearity \n Fit: Polynomial (degree: %g)"
% (len(pars)-1),
fontsize=supTitleFontSize)
f2.suptitle(r"Fractional NL residual" + "\n" +
f2.suptitle(r"Fractional NL residual" "\n"
r"$100\times \frac{(k_0 + k_1*Time-\mu)}{k_0+k_1*Time}$",
fontsize=supTitleFontSize)
pdfPages.savefig(f)