diff --git a/.github/workflows/testing.yml b/.github/workflows/testing.yml
index 2ffe28df8..f5b0e6c49 100644
--- a/.github/workflows/testing.yml
+++ b/.github/workflows/testing.yml
@@ -61,7 +61,7 @@ jobs:
 
       - if: steps.cache.outputs.cache-hit != 'true'
         name: Create and activate environment
-        run: mamba env update -n nwb-guide -f ${{ matrix.label }}
+        run: mamba env update --name nwb-guide --file ${{ matrix.label }}
 
       - name: Use Node.js 20
         uses: actions/setup-node@v4
diff --git a/guide_testing_suite.yml b/guide_testing_suite.yml
index d10b00844..2579194a0 100644
--- a/guide_testing_suite.yml
+++ b/guide_testing_suite.yml
@@ -44,3 +44,7 @@ pipelines:
       folder_path: ephy_testing_data/cellexplorer/dataset_4/Peter_MS22_180629_110319_concat_stubbed
     CellExplorerSortingInterface:
       file_path: ephy_testing_data/cellexplorer/dataset_4/Peter_MS22_180629_110319_concat_stubbed/Peter_MS22_180629_110319_concat_stubbed.spikes.cellinfo.mat
+
+  IntanSingleFileRecording:
+    IntanRecordingInterface:
+      file_path: ephy_testing_data/intan/intan_rhd_test_1.rhd
diff --git a/nwb-guide.spec b/nwb-guide.spec
index eed83761b..6330e8072 100644
--- a/nwb-guide.spec
+++ b/nwb-guide.spec
@@ -42,6 +42,10 @@ tmp_ret = collect_all('sklearn')
 datas += tmp_ret[0]; binaries += tmp_ret[1]; hiddenimports += tmp_ret[2]
 tmp_ret = collect_all('ci_info')
 datas += tmp_ret[0]; binaries += tmp_ret[1]; hiddenimports += tmp_ret[2]
+tmp_ret = collect_all('tifffile')
+datas += tmp_ret[0]; binaries += tmp_ret[1]; hiddenimports += tmp_ret[2]
+tmp_ret = collect_all('dlc2nwb')
+datas += tmp_ret[0]; binaries += tmp_ret[1]; hiddenimports += tmp_ret[2]
 
 block_cipher = None
 
diff --git a/pyflask/manageNeuroconv/manage_neuroconv.py b/pyflask/manageNeuroconv/manage_neuroconv.py
index a4f2c9db5..738ddafe3 100644
--- a/pyflask/manageNeuroconv/manage_neuroconv.py
+++ b/pyflask/manageNeuroconv/manage_neuroconv.py
@@ -1384,7 +1384,6 @@ def update_recording_properties_from_table_as_json(
 
     recording_extractor = recording_interface.recording_extractor
     channel_ids = recording_extractor.get_channel_ids()
-    stream_prefix = channel_ids[0].split("#")[0]  # TODO: see if this generalized across formats
 
     # TODO: uncomment when neuroconv supports contact vectors (probe interface)
     # property_names = recording_extractor.get_property_keys()
@@ -1394,7 +1393,7 @@
     for entry_index, entry in enumerate(electrode_table_json):
         electrode_properties = dict(entry)  # copy
-        channel_name = electrode_properties.pop("channel_name", None)
+        # channel_name = electrode_properties.pop("channel_name", None)
 
         for property_name, property_value in electrode_properties.items():
             if property_name not in electrode_column_data_types:  # Skip data with missing column information
                 continue
@@ -1403,13 +1402,10 @@
                 # property_index = contact_vector_property_names.index(property_name)
                 # modified_contact_vector[entry_index][property_index] = property_value
             else:
-                ids = (
-                    [stream_prefix + "#" + channel_name] if channel_name else []
-                )  # Correct for minimal metadata (e.g. CellExplorer)
                 recording_extractor.set_property(
                     key=property_name,
                     values=np.array([property_value], dtype=electrode_column_data_types[property_name]),
-                    ids=ids,
+                    ids=[channel_ids[entry_index]],  # Assume rows match indices of channel list
                 )
 
     # TODO: uncomment when neuroconv supports contact vectors (probe interface)