diff --git a/.github/workflows/ci.yaml b/.github/workflows/ci.yaml index 754576d3..71b5c629 100644 --- a/.github/workflows/ci.yaml +++ b/.github/workflows/ci.yaml @@ -64,11 +64,15 @@ jobs: # exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide - name: Test with unittest + env: + HED_GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} run: | - HED_GITHUB_TOKEN=${{ secrets.HED_GITHUB_TOKEN }} coverage run -m unittest + coverage run -m unittest - name: Run spec_test coverage - run: HED_GITHUB_TOKEN=${{ secrets.HED_GITHUB_TOKEN }} coverage run --append -m unittest spec_tests/test_errors.py + env: + HED_GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + run: coverage run --append -m unittest spec_tests/test_errors.py continue-on-error: true - name: Archive code coverage results diff --git a/hed/models/base_input.py b/hed/models/base_input.py index 58bb94c9..b3b8f848 100644 --- a/hed/models/base_input.py +++ b/hed/models/base_input.py @@ -135,6 +135,7 @@ def _indexed_dict_from_onsets(onsets): return indexed_dict + # This would need to store the index list -> So it can optionally apply to other columns on request @staticmethod def _filter_by_index_list(original_series, indexed_dict): new_series = pd.Series(["n/a"] * len(original_series), dtype=str) @@ -264,7 +265,7 @@ def to_csv(self, file=None): - Cannot open the indicated file """ dataframe = self._dataframe - csv_string_if_filename_none = dataframe.to_csv(file, '\t', index=False, header=self._has_column_names) + csv_string_if_filename_none = dataframe.to_csv(file, sep='\t', index=False, header=self._has_column_names) return csv_string_if_filename_none @property diff --git a/hed/tools/analysis/key_map.py b/hed/tools/analysis/key_map.py index 09d7f318..525caa0b 100644 --- a/hed/tools/analysis/key_map.py +++ b/hed/tools/analysis/key_map.py @@ -190,7 +190,9 @@ def _update(self, base_df): next_pos += pos_update if row_list: df = pd.DataFrame(row_list) - self.col_map = pd.concat([self.col_map, df], axis=0, 
ignore_index=True) + # Ignore empty col_map to suppress warning + col_map = self.col_map if not self.col_map.empty else None + self.col_map = pd.concat([col_map, df], axis=0, ignore_index=True) def _handle_update(self, row, row_list, next_pos): """ Update the dictionary and counts of the number of times this combination of key columns appears. diff --git a/hed/tools/bids/bids_dataset.py b/hed/tools/bids/bids_dataset.py index a5c47510..fa58af8f 100644 --- a/hed/tools/bids/bids_dataset.py +++ b/hed/tools/bids/bids_dataset.py @@ -8,9 +8,6 @@ from hed.tools.bids.bids_file_group import BidsFileGroup -LIBRARY_URL_BASE = "https://raw.githubusercontent.com/hed-standard/hed-schemas/main/library_schemas/" - - class BidsDataset: """ A BIDS dataset representation primarily focused on HED evaluation. diff --git a/hed/tools/remodeling/dispatcher.py b/hed/tools/remodeling/dispatcher.py index 039b05f2..b9fc1282 100644 --- a/hed/tools/remodeling/dispatcher.py +++ b/hed/tools/remodeling/dispatcher.py @@ -11,6 +11,12 @@ from hed.tools.remodeling.operations.valid_operations import valid_operations from hed.tools.util.io_util import clean_filename, extract_suffix_path, get_timestamp +# This isn't supported in all versions of pandas +try: + pd.set_option('future.no_silent_downcasting', True) +except pd.errors.OptionError: + pass + class Dispatcher: """ Controller for applying operations to tabular files and saving the results. """ @@ -193,7 +199,10 @@ def prep_data(df): df (DataFrame) - The DataFrame to be processed. """ - return df.replace('n/a', np.NaN) + result = df.replace('n/a', np.NaN) + # Uncomment the next line if this behavior was actually needed, but I don't think it is. 
+ # result = result.infer_objects(copy=False) + return result @staticmethod def post_proc_data(df): diff --git a/tests/tools/bids/test_bids_dataset.py b/tests/tools/bids/test_bids_dataset.py index ae0ceff6..d0aed2a6 100644 --- a/tests/tools/bids/test_bids_dataset.py +++ b/tests/tools/bids/test_bids_dataset.py @@ -87,15 +87,7 @@ def test_validator_types(self): self.assertFalse(issues, "BidsDataset with participants and events validates") def test_with_schema_group(self): - base_version = '8.2.0' - library1_url = "https://raw.githubusercontent.com/hed-standard/hed-schemas/main/" + \ - "library_schemas/score/hedxml/HED_score_1.0.0.xml" - library2_url = "https://raw.githubusercontent.com/hed-standard/hed-schemas/main/" + \ - "library_schemas/testlib/hedxml/HED_testlib_1.0.2.xml" - schema_list = [load_schema_version(xml_version=base_version), - load_schema(library1_url, schema_namespace="sc"), - load_schema(library2_url, schema_namespace="test")] - x = HedSchemaGroup(schema_list) + x = load_schema_version(["8.2.0", "sc:score_1.0.0", "test:testlib_1.0.2"]) bids = BidsDataset(self.library_path, schema=x, tabular_types=["participants"] ) self.assertIsInstance(bids, BidsDataset, "BidsDataset with libraries should create a valid object from valid dataset") diff --git a/tests/tools/bids/test_bids_file_group.py b/tests/tools/bids/test_bids_file_group.py index d1a66dc0..27b5ff41 100644 --- a/tests/tools/bids/test_bids_file_group.py +++ b/tests/tools/bids/test_bids_file_group.py @@ -1,6 +1,6 @@ import os import unittest -from hed.schema.hed_schema_io import load_schema +from hed.schema.hed_schema_io import load_schema_version from hed.tools.analysis.tabular_summary import TabularSummary from hed.tools.bids.bids_file_group import BidsFileGroup @@ -31,8 +31,7 @@ def test_constructor(self): def test_validator(self): events = BidsFileGroup(self.root_path) - hed = 'https://raw.githubusercontent.com/hed-standard/hed-schemas/main/standard_schema/hedxml/HED8.0.0.xml' - hed_schema = 
load_schema(hed) + hed_schema = load_schema_version("8.0.0") validation_issues = events.validate_datafiles(hed_schema, check_for_warnings=False) self.assertFalse(validation_issues, "BidsFileGroup should have no validation errors") validation_issues = events.validate_datafiles(hed_schema, check_for_warnings=True) diff --git a/tests/tools/util/test_data_util.py b/tests/tools/util/test_data_util.py index 9777a537..f77185d4 100644 --- a/tests/tools/util/test_data_util.py +++ b/tests/tools/util/test_data_util.py @@ -69,7 +69,7 @@ def test_get_new_dataframe(self): self.assertIsInstance(df_new1, DataFrame) self.assertEqual(len(df_new1), 87, "get_new_dataframe should return correct number of rows") self.assertEqual(len(df_new1.columns), 4, "get_new_dataframe should return correct number of rows") - df_new.iloc[0]['type'] = 'Pear' + df_new.loc[0, 'type'] = 'Pear' self.assertNotEqual(df_new.iloc[0]['type'], df_new1.iloc[0]['type'], "get_new_dataframe returns a new dataframe")