maint: RF101 Bugbear lint checks must be selected
nvictus committed Jun 20, 2024
1 parent 1cfd8cd commit 19470b4
Showing 10 changed files with 30 additions and 22 deletions.
bioframe/core/arrops.py (8 additions, 2 deletions)
@@ -4,6 +4,8 @@
 import numpy as np
 import pandas as pd
 
+INT64_MAX = np.iinfo(np.int64).max
+
 
 def natsort_key(s, _NS_REGEX=re.compile(r"(\d+)", re.U)):
     return tuple([int(x) if x.isdigit() else x for x in _NS_REGEX.split(s) if x])
@@ -210,6 +212,7 @@ def _overlap_intervals_legacy(starts1, ends1, starts2, ends2, closed=False, sort
             "One of the inputs is provided as pandas.Series and its index "
             "will be ignored.",
             SyntaxWarning,
+            stacklevel=2,
         )
 
     starts1 = np.asarray(starts1)
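The added `stacklevel=2` arguments, here and in the similar hunks below, fix Bugbear B028 (no explicit stacklevel): by default `warnings.warn` attributes the warning to the line inside the library that emitted it, while `stacklevel=2` makes the report point at the caller. A minimal sketch of the effect (the `check` function is illustrative, not bioframe API):

    import warnings

    def check(values):
        # stacklevel=2 attributes the warning to check()'s caller,
        # not to this line inside the library.
        warnings.warn("index will be ignored", SyntaxWarning, stacklevel=2)

    check([1, 2, 3])  # the reported file/line is this call site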
@@ -308,6 +311,7 @@ def overlap_intervals(starts1, ends1, starts2, ends2, closed=False, sort=False):
             "One of the inputs is provided as pandas.Series and its index "
             "will be ignored.",
             SyntaxWarning,
+            stacklevel=2,
         )
 
     starts1 = np.asarray(starts1)
@@ -442,6 +446,7 @@ def merge_intervals(starts, ends, min_dist=0):
             "One of the inputs is provided as pandas.Series and its index "
             "will be ignored.",
             SyntaxWarning,
+            stacklevel=2,
         )
 
     starts = np.asarray(starts)
@@ -473,7 +478,7 @@ def merge_intervals(starts, ends, min_dist=0):
 def complement_intervals(
     starts,
     ends,
-    bounds=(0, np.iinfo(np.int64).max),
+    bounds=(0, INT64_MAX),
 ):
     _, merged_starts, merged_ends = merge_intervals(starts, ends, min_dist=0)
 
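Replacing the inline `np.iinfo(np.int64).max` call in this default argument with the module-level `INT64_MAX` constant (added in the first hunk) addresses Bugbear B008, which flags function calls in argument defaults. The call was benign here because the result is a constant, but default expressions are evaluated once at definition time, so the rule guards against stateful or mutable calls. A schematic contrast (function names are illustrative):

    import numpy as np

    INT64_MAX = np.iinfo(np.int64).max  # evaluated once, explicitly, at import

    # Flagged by B008: a function call hidden in the default.
    def complement_flagged(bounds=(0, np.iinfo(np.int64).max)):
        return bounds

    # Preferred: reference a named module constant instead.
    def complement_clean(bounds=(0, INT64_MAX)):
        return bounds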
@@ -534,6 +539,7 @@ def _closest_intervals_nooverlap(
             "One of the inputs is provided as pandas.Series "
             "and its index will be ignored.",
             SyntaxWarning,
+            stacklevel=2,
         )
 
     starts1 = np.asarray(starts1)
@@ -776,7 +782,7 @@ def stack_intervals(starts, ends):
 
     occupancy = np.zeros(2, dtype=bool)
     levels = -1 * np.ones(n, dtype=np.int64)
-    for border, border_id in zip(borders, border_ids):
+    for border_id in border_ids:
         interval_id = np.abs(border_id) - 1
         if border_id > 0:
             if occupancy.sum() == occupancy.shape[0]:
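Dropping `border` from the `stack_intervals` loop fixes Bugbear B007 (unused loop control variable): the body reads only `border_id`, so zipping in `borders` just obscured what the loop consumes. A generic example of the rule:

    pairs = [("a", 1), ("b", 2)]

    # B007: `key` is bound but never read in the loop body.
    for key, value in pairs:
        print(value)

    # Clean: bind only what the body uses.
    for _, value in pairs:
        print(value)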
bioframe/core/checks.py (2 additions, 2 deletions)
@@ -310,9 +310,9 @@ def is_contained(
         # ek1 = end is the default value
         # sk1 = start is the default value
         assert (df_view_assigned[sk1] >= df_view_assigned[sk2 + "_"]).all()
-    except AssertionError:
+    except AssertionError as e:
         if raise_errors:
-            raise AssertionError("df not contained in view_df")
+            raise AssertionError("df not contained in view_df") from e
         else:
            return False
     return True
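This and the similar edits below implement Bugbear B904: when re-raising inside an `except` block, chain explicitly with `raise ... from err`, so the traceback marks the new exception as a deliberate translation of the original rather than an accident during handling. A small self-contained sketch (the function is illustrative):

    def parse_positive(text):
        try:
            value = int(text)
        except ValueError as e:
            # "from e" yields "The above exception was the direct cause
            # of the following exception" in the traceback.
            raise ValueError(f"not a positive integer: {text!r}") from e
        if value <= 0:
            raise ValueError("must be positive")
        return value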
bioframe/core/construction.py (2 additions, 2 deletions)
@@ -165,8 +165,8 @@ def from_any(regions, fill_null=False, name_col="name", cols=None):
                 else:
                     ends.append(out_df[ek1].values[i])
             out_df[ek1] = ends
-        except Exception:
-            raise ValueError("could not fill ends with provided chromsizes")
+        except Exception as e:
+            raise ValueError("could not fill ends with provided chromsizes") from e
 
     return out_df
 
bioframe/core/stringops.py (2 additions, 2 deletions)
@@ -231,8 +231,8 @@ def parse_region(
     if chromsizes is not None:
         try:
             clen = chromsizes[chrom]
-        except KeyError:
-            raise ValueError(f"Unknown sequence label: {chrom}")
+        except KeyError as e:
+            raise ValueError(f"Unknown sequence label: {chrom}") from e
         if end is None:
             end = clen
 
bioframe/extras.py (3 additions, 3 deletions)
@@ -185,7 +185,7 @@ def digest(fasta_records, enzyme):
         import Bio.Restriction as biorst
         import Bio.Seq as bioseq
     except ImportError:
-        raise ImportError("Biopython is required to use digest")
+        raise ImportError("Biopython is required to use digest") from None
 
     # http://biopython.org/DIST/docs/cookbook/Restriction.html#mozTocId447698
     if not isinstance(fasta_records, dict):
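For optional-dependency guards like this one, the commit chains with `from None` rather than `from e`: the underlying ImportError adds nothing beyond "the package is missing", so suppressing the implicit exception context keeps the user-facing traceback to the single actionable message. The pattern, in isolation:

    try:
        import Bio.Restriction as biorst  # optional dependency
    except ImportError:
        # "from None" suppresses the chained ImportError context.
        raise ImportError("Biopython is required to use digest") from None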
@@ -196,8 +196,8 @@ def digest(fasta_records, enzyme):
     chroms = fasta_records.keys()
     try:
         cut_finder = getattr(biorst, enzyme).search
-    except AttributeError:
-        raise ValueError(f"Unknown enzyme name: {enzyme}")
+    except AttributeError as e:
+        raise ValueError(f"Unknown enzyme name: {enzyme}") from e
 
     def _each(chrom):
         seq = bioseq.Seq(str(fasta_records[chrom][:]))
bioframe/io/bed.py (2 additions, 1 deletion)
@@ -701,7 +701,8 @@ def to_bed(
             warnings.warn(
                 f"Standard column {col} contains null values. "
                 "These will be replaced with the uninformative value "
-                f"{BED_FIELD_FILLVALUES[col]}."
+                f"{BED_FIELD_FILLVALUES[col]}.",
+                stacklevel=2,
             )
             bed[col] = df[col].fillna(BED_FIELD_FILLVALUES[col])
         else:
bioframe/io/fileops.py (7 additions, 7 deletions)
@@ -70,7 +70,7 @@ def read_table(filepath_or, schema=None, schema_is_strict=False, **kwargs):
         kwargs.setdefault("names", SCHEMAS[schema])
     except (KeyError, TypeError):
         if isinstance(schema, str):
-            raise ValueError(f"TSV schema not found: '{schema}'")
+            raise ValueError(f"TSV schema not found: '{schema}'") from None
         kwargs.setdefault("names", schema)
     df = pd.read_csv(filepath_or, **kwargs)
     if schema_is_strict:
@@ -167,7 +167,7 @@ def read_tabix(fp, chrom=None, start=None, end=None):
     try:
         import pysam
     except ImportError:
-        raise ImportError("pysam is required to use `read_tabix`")
+        raise ImportError("pysam is required to use `read_tabix`") from None
 
     with closing(pysam.TabixFile(fp)) as f:
         names = list(f.header) or None
@@ -242,7 +242,7 @@ def read_alignments(fp, chrom=None, start=None, end=None):
     try:
         import pysam
     except ImportError:
-        raise ImportError("pysam is required to use `read_alignments`")
+        raise ImportError("pysam is required to use `read_alignments`") from None
 
     ext = os.path.splitext(fp)[1]
     if ext == '.sam':
@@ -343,7 +343,7 @@ def load_fasta(filepath_or, engine="pysam", **kwargs):
         try:
             import pysam
         except ImportError:
-            raise ImportError("pysam is required to use engine='pysam'")
+            raise ImportError("pysam is required to use engine='pysam'") from None
 
         if is_multifile:
             for onefile in filepath_or:
@@ -359,7 +359,7 @@ def load_fasta(filepath_or, engine="pysam", **kwargs):
         try:
             import pyfaidx
         except ImportError:
-            raise ImportError("pyfaidx is required to use engine='pyfaidx'")
+            raise ImportError("pyfaidx is required to use engine='pyfaidx'") from None
 
         if is_multifile:
             for onefile in filepath_or:
@@ -518,7 +518,7 @@ def to_bigwig(df, chromsizes, outpath, value_field=None, path_to_binary=None):
                 "Pass it as 'path_to_binary' parameter to bioframe.to_bigwig or "
                 "install it with, for example, conda install -y -c bioconda "
                 "ucsc-bedgraphtobigwig "
-            )
+            ) from None
     elif path_to_binary.endswith("bedGraphToBigWig"):
         if not os.path.isfile(path_to_binary) and os.access(path_to_binary, os.X_OK):
             raise ValueError(
@@ -599,7 +599,7 @@ def to_bigbed(df, chromsizes, outpath, schema="bed6", path_to_binary=None):
                 "Pass it as 'path_to_binary' parameter to bioframe.to_bigbed or "
                 "install it with, for example, conda install -y -c bioconda "
                 "ucsc-bedtobigbed "
-            )
+            ) from None
     elif path_to_binary.endswith("bedToBigBed"):
         if not os.path.isfile(path_to_binary) and os.access(path_to_binary, os.X_OK):
             raise ValueError(
bioframe/io/resources.py (1 addition, 1 deletion)
@@ -222,7 +222,7 @@ def fetch_centromeres(db: str, provider: str = "local") -> pd.DataFrame:
         ("centromeres", client.fetch_centromeres),
     ]
 
-    for schema, fetcher in fetchers:
+    for schema, fetcher in fetchers:  # noqa: B007
         try:
             df = fetcher()
             break
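Unlike the earlier B007 fix in arrops.py, here the loop variable stays and the warning is suppressed with a targeted `# noqa: B007`, presumably because `schema` is not read inside the loop body but its binding from the successful iteration is wanted after the `break`. Schematically (the fetchers below are stand-ins, not bioframe's API):

    def fetch_a():
        raise OSError("unavailable")

    def fetch_b():
        return {"rows": 42}

    df, schema = None, None
    for schema, fetcher in [("a", fetch_a), ("b", fetch_b)]:  # noqa: B007
        try:
            df = fetcher()
            break  # `schema` keeps the label of the fetcher that succeeded
        except OSError:
            continue

    print(schema, df)  # -> b {'rows': 42}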
bioframe/sandbox/parquet_io.py (2 additions, 2 deletions)
@@ -40,7 +40,7 @@ def to_parquet(
         import pyarrow as pa
         import pyarrow.parquet
     except ImportError:
-        raise ImportError("Saving to parquet requires the `pyarrow` package")
+        raise ImportError("Saving to parquet requires the `pyarrow` package") from None
 
     if isinstance(pieces, pd.DataFrame):
         pieces = (pieces,)
@@ -101,7 +101,7 @@ def read_parquet(filepath, columns=None, iterator=False, **kwargs):
         except ImportError:
             raise ImportError(
                 "Iterating over Parquet data requires the `pyarrow` package."
-            )
+            ) from None
 
         class ParquetFileIterator(ParquetFile):
             def __iter__(self):
pyproject.toml (1 addition, 0 deletions)
@@ -80,6 +80,7 @@ exclude = [
 
 [tool.ruff.lint]
 extend-select = [
+    "B",  # bugbear
     # "C",  # mccabe complexity
     # "D",  # pydocstyle
     "E",  # style errors