MNT: Test on 3.11 and 3.12 (#999)
* MNT: Test on 3.11 and 3.12

* [RUN] pyupgrade --py38-plus **/*.py

* MNT: Reflect minimum Python support in black config
effigies committed Nov 20, 2023
1 parent a422dc3 commit 1c3f93e
Showing 20 changed files with 136 additions and 64 deletions.
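Most of this diff comes from the `pyupgrade --py38-plus` run. As a rough sketch (not the tool's full rule set), the two most common rewrites in this commit are dropping the redundant "r" mode from open() and using zero-argument super(); the file name below is a placeholder:

import json

# "r" is open()'s default mode, so pyupgrade drops it.
with open("pyproject.toml") as f:  # was: open("pyproject.toml", "r")
    contents = f.read()

class CustomEncoder(json.JSONEncoder):
    # Serializes sets as lists, as in tedana/io.py.
    def default(self, obj):
        if isinstance(obj, set):
            return list(obj)
        # Zero-argument super() replaces super(CustomEncoder, self).
        return super().default(obj)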
72 changes: 72 additions & 0 deletions .circleci/config.yml
@@ -117,6 +117,74 @@ jobs:
           paths:
             - src/coverage/.coverage.py310

+  unittest_311:
+    docker:
+      - image: continuumio/miniconda3
+    working_directory: /tmp/src/tedana
+    steps:
+      - checkout
+      - restore_cache:
+          key: conda-py311-v1-{{ checksum "pyproject.toml" }}
+      - run:
+          name: Generate environment
+          command: |
+            apt-get update
+            apt-get install -yqq make
+            if [ ! -d /opt/conda/envs/tedana_py311 ]; then
+              conda create -yq -n tedana_py311 python=3.11
+              source activate tedana_py311
+              pip install .[tests]
+            fi
+      - run:
+          name: Running unit tests
+          command: |
+            source activate tedana_py311
+            make unittest
+            mkdir /tmp/src/coverage
+            mv /tmp/src/tedana/.coverage /tmp/src/coverage/.coverage.py311
+      - save_cache:
+          key: conda-py311-v1-{{ checksum "pyproject.toml" }}
+          paths:
+            - /opt/conda/envs/tedana_py311
+      - persist_to_workspace:
+          root: /tmp
+          paths:
+            - src/coverage/.coverage.py311
+
+  unittest_312:
+    docker:
+      - image: continuumio/miniconda3
+    working_directory: /tmp/src/tedana
+    steps:
+      - checkout
+      - restore_cache:
+          key: conda-py312-v1-{{ checksum "pyproject.toml" }}
+      - run:
+          name: Generate environment
+          command: |
+            apt-get update
+            apt-get install -yqq make
+            if [ ! -d /opt/conda/envs/tedana_py312 ]; then
+              conda create -yq -n tedana_py312 python=3.12
+              source activate tedana_py312
+              pip install .[tests]
+            fi
+      - run:
+          name: Running unit tests
+          command: |
+            source activate tedana_py312
+            make unittest
+            mkdir /tmp/src/coverage
+            mv /tmp/src/tedana/.coverage /tmp/src/coverage/.coverage.py312
+      - save_cache:
+          key: conda-py312-v1-{{ checksum "pyproject.toml" }}
+          paths:
+            - /opt/conda/envs/tedana_py312
+      - persist_to_workspace:
+          root: /tmp
+          paths:
+            - src/coverage/.coverage.py312

style_check:
docker:
- image: continuumio/miniconda3
@@ -310,11 +378,15 @@ workflows:
       - makeenv_38
       - unittest_39
       - unittest_310
+      - unittest_311
+      - unittest_312
       - merge_coverage:
           requires:
             - unittest_38
             - unittest_39
             - unittest_310
+            - unittest_311
+            - unittest_312
             - three-echo
             - four-echo
             - five-echo
1 change: 0 additions & 1 deletion docs/conf.py
@@ -1,5 +1,4 @@
 #!/usr/bin/env python3
-# -*- coding: utf-8 -*-
 #
 # tedana documentation build configuration file, created by
 # sphinx-quickstart
4 changes: 3 additions & 1 deletion pyproject.toml
@@ -15,6 +15,8 @@ classifiers = [
     "Programming Language :: Python :: 3.8",
     "Programming Language :: Python :: 3.9",
     "Programming Language :: Python :: 3.10",
+    "Programming Language :: Python :: 3.11",
+    "Programming Language :: Python :: 3.12",
 ]
license = {file = "LICENSE"}
requires-python = ">=3.8"
@@ -101,7 +103,7 @@ version-file = "tedana/_version.py"

[tool.black]
line-length = 99
-target-version = ['py37']
+target-version = ['py38']
include = '\.pyi?$'
exclude = '''
1 change: 0 additions & 1 deletion tedana/__init__.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
"""Tedana: A Python package for TE-dependent analysis of multi-echo data."""
2 changes: 1 addition & 1 deletion tedana/bibtex.py
@@ -177,7 +177,7 @@ def get_description_references(description):
A string containing BibTeX entries, limited only to the citations in the description.
"""
bibtex_file = op.join(get_resource_path(), "references.bib")
-    with open(bibtex_file, "r") as fo:
+    with open(bibtex_file) as fo:
bibtex_string = fo.read()

braces_idx = find_braces(bibtex_string)
6 changes: 3 additions & 3 deletions tedana/io.py
@@ -50,7 +50,7 @@ def default(self, obj):
if isinstance(obj, set):
return list(obj)

-        return super(CustomEncoder, self).default(obj)
+        return super().default(obj)


class OutputGenerator:
@@ -454,7 +454,7 @@ def load_json(path: str) -> dict:
FileNotFoundError if the file does not exist
IsADirectoryError if the path is a directory instead of a file
"""
-    with open(path, "r") as f:
+    with open(path) as f:
try:
data = json.load(f)
except json.decoder.JSONDecodeError:
@@ -943,7 +943,7 @@ def fname_to_component_list(fname: str) -> List[int]:
else:
raise ValueError(f"Cannot determine a components column in file {fname}")

-    with open(fname, "r") as fp:
+    with open(fname) as fp:
contents = fp.read()
return str_to_component_list(contents)

10 changes: 5 additions & 5 deletions tedana/reporting/html_report.py
@@ -40,7 +40,7 @@ def _generate_buttons(out_dir, io_generator):

buttons_template_name = "report_carpet_buttons_template.html"
buttons_template_path = resource_path.joinpath(buttons_template_name)
-    with open(str(buttons_template_path), "r") as buttons_file:
+    with open(str(buttons_template_path)) as buttons_file:
buttons_tpl = Template(buttons_file.read())

buttons_html = buttons_tpl.substitute(
@@ -87,7 +87,7 @@ def _update_template_bokeh(bokeh_id, info_table, about, prefix, references, boke

body_template_name = "report_body_template.html"
body_template_path = resource_path.joinpath(body_template_name)
-    with open(str(body_template_path), "r") as body_file:
+    with open(str(body_template_path)) as body_file:
body_tpl = Template(body_file.read())
body = body_tpl.substitute(
content=bokeh_id,
@@ -114,7 +114,7 @@ def _save_as_html(body):
resource_path = Path(__file__).resolve().parent.joinpath("data", "html")
head_template_name = "report_head_template.html"
head_template_path = resource_path.joinpath(head_template_name)
-    with open(str(head_template_path), "r") as head_file:
+    with open(str(head_template_path)) as head_file:
head_tpl = Template(head_file.read())

html = head_tpl.substitute(version=__version__, bokehversion=bokehversion, body=body)
@@ -127,7 +127,7 @@ def _generate_info_table(info_dict):

info_template_name = "report_info_table_template.html"
info_template_path = resource_path.joinpath(info_template_name)
-    with open(str(info_template_path), "r") as info_file:
+    with open(str(info_template_path)) as info_file:
info_tpl = Template(info_file.read())

info_dict = info_dict["GeneratedBy"][0]
@@ -273,7 +273,7 @@ def get_elbow_val(elbow_prefix):
with open(opj(io_generator.out_dir, f"{io_generator.prefix}report.txt"), "r+") as f:
about = f.read()

-    with open(opj(io_generator.out_dir, f"{io_generator.prefix}references.bib"), "r") as f:
+    with open(opj(io_generator.out_dir, f"{io_generator.prefix}references.bib")) as f:
references = f.read()

# Read info table
10 changes: 5 additions & 5 deletions tedana/selection/component_selector.py
@@ -106,7 +106,7 @@ def validate_tree(tree):
raise TreeError("\n" + f"Decision tree missing required fields: {missing_keys}")

# Warn if unused fields exist
-    unused_keys = set(tree.keys()) - set(tree_expected_keys) - set(["used_metrics"])
+    unused_keys = set(tree.keys()) - set(tree_expected_keys) - {"used_metrics"}
# Make sure some fields don't trigger a warning; hacky, sorry
ok_to_not_use = (
"reconstruct_from",
@@ -133,7 +133,7 @@ def validate_tree(tree):
continue

# Get a functions parameters and compare to parameters defined in the tree
-        pos = set([p for p, i in sig.parameters.items() if i.default is inspect.Parameter.empty])
+        pos = {p for p, i in sig.parameters.items() if i.default is inspect.Parameter.empty}
kwargs = set(sig.parameters.keys()) - pos

missing_pos = pos - set(node.get("parameters").keys()) - defaults
@@ -194,11 +194,11 @@ def validate_tree(tree):
if node.get("kwargs") is not None:
tagset = set()
if "tag_if_true" in node.get("kwargs").keys():
-                tagset.update(set([node["kwargs"]["tag_if_true"]]))
+                tagset.update({node["kwargs"]["tag_if_true"]})
if "tag_if_false" in node.get("kwargs").keys():
-                tagset.update(set([node["kwargs"]["tag_if_false"]]))
+                tagset.update({node["kwargs"]["tag_if_false"]})
if "tag" in node.get("kwargs").keys():
-                tagset.update(set([node["kwargs"]["tag"]]))
+                tagset.update({node["kwargs"]["tag"]})
undefined_classification_tags = tagset.difference(set(tree.get("classification_tags")))
if undefined_classification_tags:
LGR.warning(
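The set([...]) → {...} rewrites above are behavior-preserving: a set literal or set comprehension simply skips building the intermediate list. A quick illustration (the example function is hypothetical):

import inspect

def example(a, b=1):
    pass

# Set literal vs. set() around a list: same value, one fewer allocation.
assert set(["used_metrics"]) == {"used_metrics"}

# Likewise, set(<list comprehension>) becomes a set comprehension.
sig = inspect.signature(example)
pos = {p for p, i in sig.parameters.items() if i.default is inspect.Parameter.empty}
assert pos == {"a"}  # only "a" lacks a default value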
14 changes: 7 additions & 7 deletions tedana/selection/selection_nodes.py
@@ -523,7 +523,7 @@ def dec_variance_lessthan_thresholds(
"""
outputs = {
"decision_node_idx": selector.current_node_idx,
"used_metrics": set([var_metric]),
"used_metrics": {var_metric},
"node_label": None,
"n_true": None,
"n_false": None,
@@ -647,7 +647,7 @@ def calc_median(
"decision_node_idx": selector.current_node_idx,
"node_label": None,
label_name: None,
"used_metrics": set([metric_name]),
"used_metrics": {metric_name},
"calc_cross_comp_metrics": [label_name],
}

@@ -736,7 +736,7 @@ def calc_kappa_elbow(
"decision_node_idx": selector.current_node_idx,
"node_label": None,
"n_echos": selector.n_echos,
"used_metrics": set(["kappa"]),
"used_metrics": {"kappa"},
"calc_cross_comp_metrics": [
"kappa_elbow_kundu",
"kappa_allcomps_elbow",
@@ -874,7 +874,7 @@ def calc_rho_elbow(
"rho_unclassified_elbow",
"elbow_f05",
],
"used_metrics": set(["kappa", "rho", "variance explained"]),
"used_metrics": {"kappa", "rho", "variance explained"},
elbow_name: None,
"rho_allcomps_elbow": None,
"rho_unclassified_elbow": None,
@@ -1124,8 +1124,8 @@ def dec_reclassify_high_var_comps(
# predefine all outputs that should be logged
outputs = {
"decision_node_idx": selector.current_node_idx,
"used_metrics": set(["variance explained"]),
"used_cross_comp_metrics": set(["varex_upper_p"]),
"used_metrics": {"variance explained"},
"used_cross_comp_metrics": {"varex_upper_p"},
"node_label": None,
"n_true": None,
"n_false": None,
@@ -1273,7 +1273,7 @@ def calc_varex_thresh(
"node_label": None,
varex_name: None,
"num_highest_var_comps": num_highest_var_comps,
"used_metrics": set(["variance explained"]),
"used_metrics": {"variance explained"},
}
if (
isinstance(percentile_thresh, (int, float))
6 changes: 3 additions & 3 deletions tedana/selection/selection_utils.py
@@ -268,7 +268,7 @@ def comptable_classification_changer(
for idx in changeidx:
tmpstr = selector.component_table.loc[idx, "classification_tags"]
if tmpstr == "" or isinstance(tmpstr, float):
-            tmpset = set([tag_if])
+            tmpset = {tag_if}
else:
tmpset = set(tmpstr.split(","))
tmpset.update([tag_if])
@@ -633,11 +633,11 @@ def kappa_elbow_kundu(component_table, n_echos, comps2use=None):
kappa_nonsig_elbow = getelbow(kappas_nonsig, return_val=True)

kappa_elbow = np.min((kappa_nonsig_elbow, kappa_allcomps_elbow))
LGR.info(("Calculating kappa elbow based on min of all and nonsig components."))
LGR.info("Calculating kappa elbow based on min of all and nonsig components.")
else:
kappa_elbow = kappa_allcomps_elbow
kappa_nonsig_elbow = None
LGR.info(("Calculating kappa elbow based on all components."))
LGR.info("Calculating kappa elbow based on all components.")

# Calculating varex_upper_p
# Upper limit for variance explained is median across components with high
4 changes: 2 additions & 2 deletions tedana/tests/test_combine.py
@@ -39,7 +39,7 @@ def test_make_optcom():
n_voxels, n_echos, n_trs = 20, 3, 10
n_mask = 5
data = np.random.random((n_voxels, n_echos, n_trs))
-    mask = np.zeros((n_voxels)).astype(bool)
+    mask = np.zeros(n_voxels).astype(bool)
mask[:n_mask] = True
tes = np.array([10, 20, 30]) # E

@@ -49,7 +49,7 @@
assert comb.shape == (n_voxels, n_trs)

# Voxel-wise T2* estimates
-    t2s = np.random.random((n_voxels))
+    t2s = np.random.random(n_voxels)
comb = combine.make_optcom(data, tes, mask, t2s=t2s, combmode="t2s")
assert comb.shape == (n_voxels, n_trs)

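The np.zeros((n_voxels)) → np.zeros(n_voxels) changes in these tests are purely cosmetic: (n_voxels) is a parenthesized integer, not a tuple, so both spellings already produced the same 1-D array. A short check, for illustration:

import numpy as np

n_voxels = 20
a = np.zeros((n_voxels))  # redundant parentheses; still a scalar argument
b = np.zeros(n_voxels)
assert a.shape == b.shape == (n_voxels,)  # a real 1-tuple needs the trailing comma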
4 changes: 2 additions & 2 deletions tedana/tests/test_component_selector.py
@@ -300,8 +300,8 @@ def test_are_only_necessary_metrics_used_warning():
selector = component_selector.ComponentSelector("minimal", sample_comptable())

# warning when an element of necessary_metrics was not in used_metrics
selector.tree["used_metrics"] = set(["A", "B", "C"])
selector.necessary_metrics = set(["B", "C", "D"])
selector.tree["used_metrics"] = {"A", "B", "C"}
selector.necessary_metrics = {"B", "C", "D"}
selector.are_only_necessary_metrics_used()


10 changes: 5 additions & 5 deletions tedana/tests/test_decay.py
@@ -67,7 +67,7 @@ def test__apply_t2s_floor():
n_voxels, n_echos, n_trs = 100, 5, 25
echo_times = np.array([2, 23, 54, 75, 96])
me_data = np.random.random((n_voxels, n_echos, n_trs))
-    t2s = np.random.random((n_voxels)) * 1000
+    t2s = np.random.random(n_voxels) * 1000
t2s[t2s < 1] = 1 # Crop at 1 ms to be safe
t2s[0] = 0.001

@@ -100,7 +100,7 @@ def test_smoke_fit_decay():
n_echos = 5
n_times = 20
data = np.random.random((n_samples, n_echos, n_times))
-    tes = np.random.random((n_echos)).tolist()
+    tes = np.random.random(n_echos).tolist()
mask = np.ones(n_samples, dtype=int)
mask[n_samples // 2 :] = 0
adaptive_mask = np.random.randint(2, n_echos, size=(n_samples)) * mask
@@ -126,7 +126,7 @@ def test_smoke_fit_decay_curvefit():
n_echos = 5
n_times = 20
data = np.random.random((n_samples, n_echos, n_times))
-    tes = np.random.random((n_echos)).tolist()
+    tes = np.random.random(n_echos).tolist()
mask = np.ones(n_samples, dtype=int)
mask[n_samples // 2 :] = 0
adaptive_mask = np.random.randint(2, n_echos, size=(n_samples)) * mask
@@ -150,7 +150,7 @@ def test_smoke_fit_decay_ts():
n_echos = 5
n_times = 20
data = np.random.random((n_samples, n_echos, n_times))
-    tes = np.random.random((n_echos)).tolist()
+    tes = np.random.random(n_echos).tolist()
mask = np.ones(n_samples, dtype=int)
mask[n_samples // 2 :] = 0
adaptive_mask = np.random.randint(2, n_echos, size=(n_samples)) * mask
@@ -176,7 +176,7 @@ def test_smoke_fit_decay_curvefit_ts():
n_echos = 5
n_times = 20
data = np.random.random((n_samples, n_echos, n_times))
-    tes = np.random.random((n_echos)).tolist()
+    tes = np.random.random(n_echos).tolist()
mask = np.ones(n_samples, dtype=int)
mask[n_samples // 2 :] = 0
adaptive_mask = np.random.randint(2, n_echos, size=(n_samples)) * mask
(Diffs for the remaining changed files are not shown.)
