From 1c3f93e9e83f0cd5667eac246bb989f2362ee15f Mon Sep 17 00:00:00 2001
From: Chris Markiewicz
Date: Mon, 20 Nov 2023 17:45:59 -0500
Subject: [PATCH] MNT: Test on 3.11 and 3.12 (#999)

* MNT: Test on 3.11 and 3.12

* [RUN] pyupgrade --py38-plus **/*.py

* MNT: Reflect minimum Python support in black config
---
 .circleci/config.yml                    | 72 +++++++++++++++++++++++++
 docs/conf.py                            |  1 -
 pyproject.toml                          |  4 +-
 tedana/__init__.py                      |  1 -
 tedana/bibtex.py                        |  2 +-
 tedana/io.py                            |  6 +--
 tedana/reporting/html_report.py         | 10 ++--
 tedana/selection/component_selector.py  | 10 ++--
 tedana/selection/selection_nodes.py     | 14 ++---
 tedana/selection/selection_utils.py     |  6 +--
 tedana/tests/test_combine.py            |  4 +-
 tedana/tests/test_component_selector.py |  4 +-
 tedana/tests/test_decay.py              | 10 ++--
 tedana/tests/test_integration.py        |  4 +-
 tedana/tests/test_io.py                 | 12 ++---
 tedana/tests/test_selection_nodes.py    |  6 +--
 tedana/tests/test_stats.py              | 16 +++---
 tedana/tests/test_utils.py              |  8 +--
 tedana/workflows/ica_reclassify.py      |  2 +-
 tedana/workflows/tedana.py              |  8 +--
 20 files changed, 136 insertions(+), 64 deletions(-)

diff --git a/.circleci/config.yml b/.circleci/config.yml
index 765cc65fb..da16d5e89 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -117,6 +117,74 @@ jobs:
           paths:
             - src/coverage/.coverage.py310

+  unittest_311:
+    docker:
+      - image: continuumio/miniconda3
+    working_directory: /tmp/src/tedana
+    steps:
+      - checkout
+      - restore_cache:
+          key: conda-py311-v1-{{ checksum "pyproject.toml" }}
+      - run:
+          name: Generate environment
+          command: |
+            apt-get update
+            apt-get install -yqq make
+            if [ ! -d /opt/conda/envs/tedana_py311 ]; then
+              conda create -yq -n tedana_py311 python=3.11
+              source activate tedana_py311
+              pip install .[tests]
+            fi
+      - run:
+          name: Running unit tests
+          command: |
+            source activate tedana_py311
+            make unittest
+            mkdir /tmp/src/coverage
+            mv /tmp/src/tedana/.coverage /tmp/src/coverage/.coverage.py311
+      - save_cache:
+          key: conda-py311-v1-{{ checksum "pyproject.toml" }}
+          paths:
+            - /opt/conda/envs/tedana_py311
+      - persist_to_workspace:
+          root: /tmp
+          paths:
+            - src/coverage/.coverage.py311
+
+  unittest_312:
+    docker:
+      - image: continuumio/miniconda3
+    working_directory: /tmp/src/tedana
+    steps:
+      - checkout
+      - restore_cache:
+          key: conda-py312-v1-{{ checksum "pyproject.toml" }}
+      - run:
+          name: Generate environment
+          command: |
+            apt-get update
+            apt-get install -yqq make
+            if [ ! -d /opt/conda/envs/tedana_py312 ]; then
+              conda create -yq -n tedana_py312 python=3.12
+              source activate tedana_py312
+              pip install .[tests]
+            fi
+      - run:
+          name: Running unit tests
+          command: |
+            source activate tedana_py312
+            make unittest
+            mkdir /tmp/src/coverage
+            mv /tmp/src/tedana/.coverage /tmp/src/coverage/.coverage.py312
+      - save_cache:
+          key: conda-py312-v1-{{ checksum "pyproject.toml" }}
+          paths:
+            - /opt/conda/envs/tedana_py312
+      - persist_to_workspace:
+          root: /tmp
+          paths:
+            - src/coverage/.coverage.py312
+
   style_check:
     docker:
       - image: continuumio/miniconda3
@@ -310,11 +378,15 @@ workflows:
       - makeenv_38
       - unittest_39
       - unittest_310
+      - unittest_311
+      - unittest_312
       - merge_coverage:
          requires:
            - unittest_38
            - unittest_39
            - unittest_310
+           - unittest_311
+           - unittest_312
            - three-echo
            - four-echo
            - five-echo
diff --git a/docs/conf.py b/docs/conf.py
index 0abb14feb..c05d1e17b 100644
--- a/docs/conf.py
+++ b/docs/conf.py
@@ -1,5 +1,4 @@
 #!/usr/bin/env python3
-# -*- coding: utf-8 -*-
 #
 # tedana documentation build configuration file, created by
 # sphinx-quickstart
diff --git a/pyproject.toml b/pyproject.toml
index ee1f4c6d3..d2f19f7f3 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -15,6 +15,8 @@ classifiers = [
     "Programming Language :: Python :: 3.8",
     "Programming Language :: Python :: 3.9",
     "Programming Language :: Python :: 3.10",
+    "Programming Language :: Python :: 3.11",
+    "Programming Language :: Python :: 3.12",
 ]
 license = {file = "LICENSE"}
 requires-python = ">=3.8"
@@ -101,7 +103,7 @@ version-file = "tedana/_version.py"

 [tool.black]
 line-length = 99
-target-version = ['py37']
+target-version = ['py38']
 include = '\.pyi?$'
 exclude = '''
diff --git a/tedana/__init__.py b/tedana/__init__.py
index b28484ada..2a33e353d 100644
--- a/tedana/__init__.py
+++ b/tedana/__init__.py
@@ -1,4 +1,3 @@
-# -*- coding: utf-8 -*-
 # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
 # vi: set ft=python sts=4 ts=4 sw=4 et:
 """Tedana: A Python package for TE-dependent analysis of multi-echo data."""
diff --git a/tedana/bibtex.py b/tedana/bibtex.py
index b884bde4f..e7531f538 100644
--- a/tedana/bibtex.py
+++ b/tedana/bibtex.py
@@ -177,7 +177,7 @@ def get_description_references(description):
         A string containing BibTeX entries, limited only to the citations in the description.
""" bibtex_file = op.join(get_resource_path(), "references.bib") - with open(bibtex_file, "r") as fo: + with open(bibtex_file) as fo: bibtex_string = fo.read() braces_idx = find_braces(bibtex_string) diff --git a/tedana/io.py b/tedana/io.py index d4342fe04..ce65cfdbd 100644 --- a/tedana/io.py +++ b/tedana/io.py @@ -50,7 +50,7 @@ def default(self, obj): if isinstance(obj, set): return list(obj) - return super(CustomEncoder, self).default(obj) + return super().default(obj) class OutputGenerator: @@ -454,7 +454,7 @@ def load_json(path: str) -> dict: FileNotFoundError if the file does not exist IsADirectoryError if the path is a directory instead of a file """ - with open(path, "r") as f: + with open(path) as f: try: data = json.load(f) except json.decoder.JSONDecodeError: @@ -943,7 +943,7 @@ def fname_to_component_list(fname: str) -> List[int]: else: raise ValueError(f"Cannot determine a components column in file {fname}") - with open(fname, "r") as fp: + with open(fname) as fp: contents = fp.read() return str_to_component_list(contents) diff --git a/tedana/reporting/html_report.py b/tedana/reporting/html_report.py index bc2f31309..bd3eb88ae 100644 --- a/tedana/reporting/html_report.py +++ b/tedana/reporting/html_report.py @@ -40,7 +40,7 @@ def _generate_buttons(out_dir, io_generator): buttons_template_name = "report_carpet_buttons_template.html" buttons_template_path = resource_path.joinpath(buttons_template_name) - with open(str(buttons_template_path), "r") as buttons_file: + with open(str(buttons_template_path)) as buttons_file: buttons_tpl = Template(buttons_file.read()) buttons_html = buttons_tpl.substitute( @@ -87,7 +87,7 @@ def _update_template_bokeh(bokeh_id, info_table, about, prefix, references, boke body_template_name = "report_body_template.html" body_template_path = resource_path.joinpath(body_template_name) - with open(str(body_template_path), "r") as body_file: + with open(str(body_template_path)) as body_file: body_tpl = Template(body_file.read()) body = body_tpl.substitute( content=bokeh_id, @@ -114,7 +114,7 @@ def _save_as_html(body): resource_path = Path(__file__).resolve().parent.joinpath("data", "html") head_template_name = "report_head_template.html" head_template_path = resource_path.joinpath(head_template_name) - with open(str(head_template_path), "r") as head_file: + with open(str(head_template_path)) as head_file: head_tpl = Template(head_file.read()) html = head_tpl.substitute(version=__version__, bokehversion=bokehversion, body=body) @@ -127,7 +127,7 @@ def _generate_info_table(info_dict): info_template_name = "report_info_table_template.html" info_template_path = resource_path.joinpath(info_template_name) - with open(str(info_template_path), "r") as info_file: + with open(str(info_template_path)) as info_file: info_tpl = Template(info_file.read()) info_dict = info_dict["GeneratedBy"][0] @@ -273,7 +273,7 @@ def get_elbow_val(elbow_prefix): with open(opj(io_generator.out_dir, f"{io_generator.prefix}report.txt"), "r+") as f: about = f.read() - with open(opj(io_generator.out_dir, f"{io_generator.prefix}references.bib"), "r") as f: + with open(opj(io_generator.out_dir, f"{io_generator.prefix}references.bib")) as f: references = f.read() # Read info table diff --git a/tedana/selection/component_selector.py b/tedana/selection/component_selector.py index 37d0b3a15..769dea776 100644 --- a/tedana/selection/component_selector.py +++ b/tedana/selection/component_selector.py @@ -106,7 +106,7 @@ def validate_tree(tree): raise TreeError("\n" + f"Decision tree missing required 
fields: {missing_keys}") # Warn if unused fields exist - unused_keys = set(tree.keys()) - set(tree_expected_keys) - set(["used_metrics"]) + unused_keys = set(tree.keys()) - set(tree_expected_keys) - {"used_metrics"} # Make sure some fields don't trigger a warning; hacky, sorry ok_to_not_use = ( "reconstruct_from", @@ -133,7 +133,7 @@ def validate_tree(tree): continue # Get a functions parameters and compare to parameters defined in the tree - pos = set([p for p, i in sig.parameters.items() if i.default is inspect.Parameter.empty]) + pos = {p for p, i in sig.parameters.items() if i.default is inspect.Parameter.empty} kwargs = set(sig.parameters.keys()) - pos missing_pos = pos - set(node.get("parameters").keys()) - defaults @@ -194,11 +194,11 @@ def validate_tree(tree): if node.get("kwargs") is not None: tagset = set() if "tag_if_true" in node.get("kwargs").keys(): - tagset.update(set([node["kwargs"]["tag_if_true"]])) + tagset.update({node["kwargs"]["tag_if_true"]}) if "tag_if_false" in node.get("kwargs").keys(): - tagset.update(set([node["kwargs"]["tag_if_false"]])) + tagset.update({node["kwargs"]["tag_if_false"]}) if "tag" in node.get("kwargs").keys(): - tagset.update(set([node["kwargs"]["tag"]])) + tagset.update({node["kwargs"]["tag"]}) undefined_classification_tags = tagset.difference(set(tree.get("classification_tags"))) if undefined_classification_tags: LGR.warning( diff --git a/tedana/selection/selection_nodes.py b/tedana/selection/selection_nodes.py index 6b5a1e6cb..2427b3348 100644 --- a/tedana/selection/selection_nodes.py +++ b/tedana/selection/selection_nodes.py @@ -523,7 +523,7 @@ def dec_variance_lessthan_thresholds( """ outputs = { "decision_node_idx": selector.current_node_idx, - "used_metrics": set([var_metric]), + "used_metrics": {var_metric}, "node_label": None, "n_true": None, "n_false": None, @@ -647,7 +647,7 @@ def calc_median( "decision_node_idx": selector.current_node_idx, "node_label": None, label_name: None, - "used_metrics": set([metric_name]), + "used_metrics": {metric_name}, "calc_cross_comp_metrics": [label_name], } @@ -736,7 +736,7 @@ def calc_kappa_elbow( "decision_node_idx": selector.current_node_idx, "node_label": None, "n_echos": selector.n_echos, - "used_metrics": set(["kappa"]), + "used_metrics": {"kappa"}, "calc_cross_comp_metrics": [ "kappa_elbow_kundu", "kappa_allcomps_elbow", @@ -874,7 +874,7 @@ def calc_rho_elbow( "rho_unclassified_elbow", "elbow_f05", ], - "used_metrics": set(["kappa", "rho", "variance explained"]), + "used_metrics": {"kappa", "rho", "variance explained"}, elbow_name: None, "rho_allcomps_elbow": None, "rho_unclassified_elbow": None, @@ -1124,8 +1124,8 @@ def dec_reclassify_high_var_comps( # predefine all outputs that should be logged outputs = { "decision_node_idx": selector.current_node_idx, - "used_metrics": set(["variance explained"]), - "used_cross_comp_metrics": set(["varex_upper_p"]), + "used_metrics": {"variance explained"}, + "used_cross_comp_metrics": {"varex_upper_p"}, "node_label": None, "n_true": None, "n_false": None, @@ -1273,7 +1273,7 @@ def calc_varex_thresh( "node_label": None, varex_name: None, "num_highest_var_comps": num_highest_var_comps, - "used_metrics": set(["variance explained"]), + "used_metrics": {"variance explained"}, } if ( isinstance(percentile_thresh, (int, float)) diff --git a/tedana/selection/selection_utils.py b/tedana/selection/selection_utils.py index 882496746..5dbee3d31 100644 --- a/tedana/selection/selection_utils.py +++ b/tedana/selection/selection_utils.py @@ -268,7 +268,7 @@ def 
comptable_classification_changer( for idx in changeidx: tmpstr = selector.component_table.loc[idx, "classification_tags"] if tmpstr == "" or isinstance(tmpstr, float): - tmpset = set([tag_if]) + tmpset = {tag_if} else: tmpset = set(tmpstr.split(",")) tmpset.update([tag_if]) @@ -633,11 +633,11 @@ def kappa_elbow_kundu(component_table, n_echos, comps2use=None): kappa_nonsig_elbow = getelbow(kappas_nonsig, return_val=True) kappa_elbow = np.min((kappa_nonsig_elbow, kappa_allcomps_elbow)) - LGR.info(("Calculating kappa elbow based on min of all and nonsig components.")) + LGR.info("Calculating kappa elbow based on min of all and nonsig components.") else: kappa_elbow = kappa_allcomps_elbow kappa_nonsig_elbow = None - LGR.info(("Calculating kappa elbow based on all components.")) + LGR.info("Calculating kappa elbow based on all components.") # Calculating varex_upper_p # Upper limit for variance explained is median across components with high diff --git a/tedana/tests/test_combine.py b/tedana/tests/test_combine.py index 65e757a9a..99c067495 100644 --- a/tedana/tests/test_combine.py +++ b/tedana/tests/test_combine.py @@ -39,7 +39,7 @@ def test_make_optcom(): n_voxels, n_echos, n_trs = 20, 3, 10 n_mask = 5 data = np.random.random((n_voxels, n_echos, n_trs)) - mask = np.zeros((n_voxels)).astype(bool) + mask = np.zeros(n_voxels).astype(bool) mask[:n_mask] = True tes = np.array([10, 20, 30]) # E @@ -49,7 +49,7 @@ def test_make_optcom(): assert comb.shape == (n_voxels, n_trs) # Voxel-wise T2* estimates - t2s = np.random.random((n_voxels)) + t2s = np.random.random(n_voxels) comb = combine.make_optcom(data, tes, mask, t2s=t2s, combmode="t2s") assert comb.shape == (n_voxels, n_trs) diff --git a/tedana/tests/test_component_selector.py b/tedana/tests/test_component_selector.py index 32277ceb6..0d27aa26f 100644 --- a/tedana/tests/test_component_selector.py +++ b/tedana/tests/test_component_selector.py @@ -300,8 +300,8 @@ def test_are_only_necessary_metrics_used_warning(): selector = component_selector.ComponentSelector("minimal", sample_comptable()) # warning when an element of necessary_metrics was not in used_metrics - selector.tree["used_metrics"] = set(["A", "B", "C"]) - selector.necessary_metrics = set(["B", "C", "D"]) + selector.tree["used_metrics"] = {"A", "B", "C"} + selector.necessary_metrics = {"B", "C", "D"} selector.are_only_necessary_metrics_used() diff --git a/tedana/tests/test_decay.py b/tedana/tests/test_decay.py index 4d11d8e90..2ca8093ab 100644 --- a/tedana/tests/test_decay.py +++ b/tedana/tests/test_decay.py @@ -67,7 +67,7 @@ def test__apply_t2s_floor(): n_voxels, n_echos, n_trs = 100, 5, 25 echo_times = np.array([2, 23, 54, 75, 96]) me_data = np.random.random((n_voxels, n_echos, n_trs)) - t2s = np.random.random((n_voxels)) * 1000 + t2s = np.random.random(n_voxels) * 1000 t2s[t2s < 1] = 1 # Crop at 1 ms to be safe t2s[0] = 0.001 @@ -100,7 +100,7 @@ def test_smoke_fit_decay(): n_echos = 5 n_times = 20 data = np.random.random((n_samples, n_echos, n_times)) - tes = np.random.random((n_echos)).tolist() + tes = np.random.random(n_echos).tolist() mask = np.ones(n_samples, dtype=int) mask[n_samples // 2 :] = 0 adaptive_mask = np.random.randint(2, n_echos, size=(n_samples)) * mask @@ -126,7 +126,7 @@ def test_smoke_fit_decay_curvefit(): n_echos = 5 n_times = 20 data = np.random.random((n_samples, n_echos, n_times)) - tes = np.random.random((n_echos)).tolist() + tes = np.random.random(n_echos).tolist() mask = np.ones(n_samples, dtype=int) mask[n_samples // 2 :] = 0 adaptive_mask = 
np.random.randint(2, n_echos, size=(n_samples)) * mask @@ -150,7 +150,7 @@ def test_smoke_fit_decay_ts(): n_echos = 5 n_times = 20 data = np.random.random((n_samples, n_echos, n_times)) - tes = np.random.random((n_echos)).tolist() + tes = np.random.random(n_echos).tolist() mask = np.ones(n_samples, dtype=int) mask[n_samples // 2 :] = 0 adaptive_mask = np.random.randint(2, n_echos, size=(n_samples)) * mask @@ -176,7 +176,7 @@ def test_smoke_fit_decay_curvefit_ts(): n_echos = 5 n_times = 20 data = np.random.random((n_samples, n_echos, n_times)) - tes = np.random.random((n_echos)).tolist() + tes = np.random.random(n_echos).tolist() mask = np.ones(n_samples, dtype=int) mask[n_samples // 2 :] = 0 adaptive_mask = np.random.randint(2, n_echos, size=(n_samples)) * mask diff --git a/tedana/tests/test_integration.py b/tedana/tests/test_integration.py index 426901436..59695a939 100644 --- a/tedana/tests/test_integration.py +++ b/tedana/tests/test_integration.py @@ -57,7 +57,7 @@ def check_integration_outputs(fname, outpath, n_logs=1): found_files.remove(log) # Compares remaining files with those expected - with open(fname, "r") as f: + with open(fname) as f: expected_files = f.read().splitlines() expected_files = [os.path.normpath(path) for path in expected_files] @@ -679,6 +679,6 @@ def test_integration_t2smap(skip_integration): ] # Compares remaining files with those expected - with open(fname, "r") as f: + with open(fname) as f: expected_files = f.read().splitlines() assert sorted(expected_files) == sorted(found_files) diff --git a/tedana/tests/test_io.py b/tedana/tests/test_io.py index 9bdbd27e9..0594f8022 100644 --- a/tedana/tests/test_io.py +++ b/tedana/tests/test_io.py @@ -110,8 +110,8 @@ def test_smoke_split_ts(): # creating the component table with component as random floats, # a "metric," and random classification - component = np.random.random((n_components)) - metric = np.random.random((n_components)) + component = np.random.random(n_components) + metric = np.random.random(n_components) classification = np.random.choice(["accepted", "rejected", "ignored"], n_components) df_data = np.column_stack((component, metric, classification)) comptable = pd.DataFrame(df_data, columns=["component", "metric", "classification"]) @@ -137,8 +137,8 @@ def test_smoke_write_split_ts(): # creating the component table with component as random floats, # a "metric," and random classification io_generator = me.OutputGenerator(ref_img) - component = np.random.random((n_components)) - metric = np.random.random((n_components)) + component = np.random.random(n_components) + metric = np.random.random(n_components) classification = np.random.choice(["accepted", "rejected", "ignored"], n_components) df_data = np.column_stack((component, metric, classification)) comptable = pd.DataFrame(df_data, columns=["component", "metric", "classification"]) @@ -161,7 +161,7 @@ def test_smoke_filewrite(): in both bids and orig formats. 
""" n_samples, _, _ = 64350, 10, 6 - data_1d = np.random.random((n_samples)) + data_1d = np.random.random(n_samples) ref_img = os.path.join(data_dir, "mask.nii.gz") io_generator = me.OutputGenerator(ref_img) @@ -280,7 +280,7 @@ def test_custom_encoder(): assert np.array_equal(test_data["data"], decoded["data"]) # set should become list - test_data = {"data": set(["cat", "dog", "fish"])} + test_data = {"data": {"cat", "dog", "fish"}} encoded = json.dumps(test_data, cls=me.CustomEncoder) decoded = json.loads(encoded) assert list(test_data["data"]) == decoded["data"] diff --git a/tedana/tests/test_selection_nodes.py b/tedana/tests/test_selection_nodes.py index 8044d5bd8..a2816992f 100644 --- a/tedana/tests/test_selection_nodes.py +++ b/tedana/tests/test_selection_nodes.py @@ -615,7 +615,7 @@ def test_calc_median_smoke(): median_label="varex", only_used_metrics=True, ) - assert len(used_metrics - set(["variance explained"])) == 0 + assert len(used_metrics - {"variance explained"}) == 0 # Standard call to this function. selector = selection_nodes.calc_median( @@ -777,7 +777,7 @@ def test_dec_reclassify_high_var_comps(): decide_comps, only_used_metrics=True, ) - assert len(used_metrics - set(["variance explained"])) == 0 + assert len(used_metrics - {"variance explained"}) == 0 # Raises an error since varex_upper_p not in cross_component_metrics # & there are components in decide_comps @@ -838,7 +838,7 @@ def test_calc_varex_thresh_smoke(): used_metrics = selection_nodes.calc_varex_thresh( selector, decide_comps, thresh_label="upper", percentile_thresh=90, only_used_metrics=True ) - assert len(used_metrics - set(["variance explained"])) == 0 + assert len(used_metrics - {"variance explained"}) == 0 # Standard call to this function. selector = selection_nodes.calc_varex_thresh( diff --git a/tedana/tests/test_stats.py b/tedana/tests/test_stats.py index c891c33af..d08096cfb 100644 --- a/tedana/tests/test_stats.py +++ b/tedana/tests/test_stats.py @@ -12,14 +12,14 @@ def test_break_computefeats2(): n_samples, n_vols, n_comps = 10000, 100, 50 data = np.empty((n_samples, n_vols)) mmix = np.empty((n_vols, n_comps)) - mask = np.empty((n_samples)) + mask = np.empty(n_samples) - data = np.empty((n_samples)) + data = np.empty(n_samples) with pytest.raises(ValueError): computefeats2(data, mmix, mask, normalize=True) data = np.empty((n_samples, n_vols)) - mmix = np.empty((n_vols)) + mmix = np.empty(n_vols) with pytest.raises(ValueError): computefeats2(data, mmix, mask, normalize=True) @@ -28,11 +28,11 @@ def test_break_computefeats2(): with pytest.raises(ValueError): computefeats2(data, mmix, mask, normalize=True) - mask = np.empty((n_samples + 1)) + mask = np.empty(n_samples + 1) with pytest.raises(ValueError): computefeats2(data, mmix, mask, normalize=True) data.shape[1] != mmix.shape[0] - mask = np.empty((n_samples)) + mask = np.empty(n_samples) mmix = np.empty((n_vols + 1, n_comps)) with pytest.raises(ValueError): computefeats2(data, mmix, mask, normalize=True) @@ -85,14 +85,14 @@ def test_break_get_coeffs(): n_samples, n_echos, n_vols, n_comps = 10000, 5, 100, 50 data = np.empty((n_samples, n_vols)) x = np.empty((n_vols, n_comps)) - mask = np.empty((n_samples)) + mask = np.empty(n_samples) - data = np.empty((n_samples)) + data = np.empty(n_samples) with pytest.raises(ValueError): get_coeffs(data, x, mask, add_const=False) data = np.empty((n_samples, n_vols)) - x = np.empty((n_vols)) + x = np.empty(n_vols) with pytest.raises(ValueError): get_coeffs(data, x, mask, add_const=False) diff --git 
a/tedana/tests/test_utils.py b/tedana/tests/test_utils.py index 4039f28ad..45f1b103d 100644 --- a/tedana/tests/test_utils.py +++ b/tedana/tests/test_utils.py @@ -147,7 +147,7 @@ def test_smoke_unmask(): Note: unmask could take in 1D or 2D or 3D arrays. """ - data_1d = np.random.random((100)) + data_1d = np.random.random(100) data_2d = np.random.random((100, 5)) data_3d = np.random.random((100, 5, 20)) mask = np.random.randint(2, size=100) @@ -164,8 +164,8 @@ def test_smoke_dice(): Note: two arrays must be in the same length. """ - arr1 = np.random.random((100)) - arr2 = np.random.random((100)) + arr1 = np.random.random(100) + arr2 = np.random.random(100) assert utils.dice(arr1, arr2) is not None @@ -185,7 +185,7 @@ def test_smoke_get_spectrum(): Ensure that get_spectrum returns reasonable objects with random inputs in the correct format. """ - data = np.random.random((100)) + data = np.random.random(100) tr = random.random() spectrum, freqs = utils.get_spectrum(data, tr) diff --git a/tedana/workflows/ica_reclassify.py b/tedana/workflows/ica_reclassify.py index e88a63fbc..997874975 100644 --- a/tedana/workflows/ica_reclassify.py +++ b/tedana/workflows/ica_reclassify.py @@ -518,7 +518,7 @@ def ica_reclassify_workflow( } io_generator.save_file(derivative_metadata, "data description json") - with open(repname, "r") as fo: + with open(repname) as fo: report = [line.rstrip() for line in fo.readlines()] report = " ".join(report) with open(repname, "w") as fo: diff --git a/tedana/workflows/tedana.py b/tedana/workflows/tedana.py index df59b871e..a5b3575fb 100644 --- a/tedana/workflows/tedana.py +++ b/tedana/workflows/tedana.py @@ -525,7 +525,7 @@ def tedana_workflow( # check if TR is 0 img_t_r = io_generator.reference_img.header.get_zooms()[-1] if img_t_r == 0: - raise IOError( + raise OSError( "Dataset has a TR of 0. This indicates incorrect" " header information. To correct this, we recommend" " using this snippet:" @@ -543,7 +543,7 @@ def tedana_workflow( shutil.copyfile(mixm, mixing_name) shutil.copyfile(mixm, op.join(io_generator.out_dir, op.basename(mixm))) elif mixm is not None: - raise IOError("Argument 'mixm' must be an existing file.") + raise OSError("Argument 'mixm' must be an existing file.") if t2smap is not None and op.isfile(t2smap): t2smap_file = io_generator.get_name("t2star img") @@ -552,7 +552,7 @@ def tedana_workflow( if t2smap != t2smap_file: shutil.copyfile(t2smap, t2smap_file) elif t2smap is not None: - raise IOError("Argument 't2smap' must be an existing file.") + raise OSError("Argument 't2smap' must be an existing file.") RepLGR.info( "TE-dependence analysis was performed on input data using the tedana workflow " @@ -864,7 +864,7 @@ def tedana_workflow( "\\citep{dice1945measures,sorensen1948method}." ) - with open(repname, "r") as fo: + with open(repname) as fo: report = [line.rstrip() for line in fo.readlines()] report = " ".join(report)
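
Note on the mechanical changes: aside from the new CI jobs and packaging metadata, this patch is the output of pyupgrade --py38-plus, which drops Python 2-era idioms that are redundant once the minimum supported version is 3.8: open(path, "r") loses the default "r" mode, set([...]) becomes a set literal, zero-argument super() replaces super(Class, self), raise IOError becomes raise OSError (IOError has been an alias of OSError since Python 3.3), and extraneous parentheses such as LGR.info(("...")) or np.random.random((100)) are removed. The snippet below is a minimal runnable sketch condensing those rewrite patterns; CustomEncoder mirrors tedana.io.CustomEncoder, while the temp-file plumbing around it is illustrative only and not part of the patch.

    import json
    import tempfile


    class CustomEncoder(json.JSONEncoder):
        """Mirror of tedana.io.CustomEncoder: serialize sets as lists."""

        def default(self, obj):
            if isinstance(obj, set):
                return list(obj)
            # pyupgrade: super(CustomEncoder, self).default(obj) -> super().default(obj)
            return super().default(obj)


    # pyupgrade: set(["cat", "dog", "fish"]) -> {"cat", "dog", "fish"} (set literal)
    test_data = {"data": {"cat", "dog", "fish"}}

    with tempfile.NamedTemporaryFile("w", suffix=".json", delete=False) as tmp:
        json.dump(test_data, tmp, cls=CustomEncoder)

    # pyupgrade: open(path, "r") -> open(path), because "r" is the default mode
    with open(tmp.name) as f:
        decoded = json.load(f)
    assert sorted(test_data["data"]) == sorted(decoded["data"])

    # pyupgrade: raise IOError(...) -> raise OSError(...)
    try:
        raise OSError("example")
    except OSError:
        pass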