From a4cea8ec365bcd938df0aecbee6e893ac3143ac8 Mon Sep 17 00:00:00 2001 From: Cody Baker Date: Thu, 6 Jun 2024 17:34:26 -0400 Subject: [PATCH 01/22] debug --- src/pyflask/manageNeuroconv/manage_neuroconv.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/pyflask/manageNeuroconv/manage_neuroconv.py b/src/pyflask/manageNeuroconv/manage_neuroconv.py index 16ad23404..073164535 100644 --- a/src/pyflask/manageNeuroconv/manage_neuroconv.py +++ b/src/pyflask/manageNeuroconv/manage_neuroconv.py @@ -940,7 +940,7 @@ def update_conversion_progress(message): ) # Add GUIDE watermark - package_json_file_path = resource_path("../package.json" if is_packaged() else "../package.json") + package_json_file_path = resource_path("package.json" if is_packaged() else "../package.json") with open(file=package_json_file_path) as fp: package_json = json.load(fp=fp) app_version = package_json["version"] From 2ff7e2436405257dd389b88502b2b409b35367a2 Mon Sep 17 00:00:00 2001 From: Cody Baker <51133164+CodyCBakerPhD@users.noreply.github.com> Date: Thu, 6 Jun 2024 19:52:09 -0400 Subject: [PATCH 02/22] bump neuroconv --- environments/environment-Linux.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/environments/environment-Linux.yml b/environments/environment-Linux.yml index ea486f110..7501bce56 100644 --- a/environments/environment-Linux.yml +++ b/environments/environment-Linux.yml @@ -15,7 +15,7 @@ dependencies: - flask == 2.3.2 - flask-cors == 4.0.0 - flask_restx == 1.1.0 - - neuroconv @ git+https://github.com/catalystneuro/neuroconv.git@main#neuroconv[full] + - neuroconv >= 0.4.10 - dandi >= 0.60.0 - pytest == 7.4.0 - pytest-cov == 4.1.0 From d1ae793ba20d08b9d789757b2a062d627fb237f1 Mon Sep 17 00:00:00 2001 From: Cody Baker <51133164+CodyCBakerPhD@users.noreply.github.com> Date: Thu, 6 Jun 2024 19:52:22 -0400 Subject: [PATCH 03/22] bump neuroconv --- environments/environment-MAC-apple-silicon.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) 
diff --git a/environments/environment-MAC-apple-silicon.yml b/environments/environment-MAC-apple-silicon.yml index 84e29d4f3..744e6d75d 100644 --- a/environments/environment-MAC-apple-silicon.yml +++ b/environments/environment-MAC-apple-silicon.yml @@ -21,7 +21,7 @@ dependencies: - flask == 2.3.2 - flask-cors == 4.0.0 - flask_restx == 1.1.0 - - neuroconv @ git+https://github.com/catalystneuro/neuroconv.git@main#neuroconv[full] + - neuroconv >= 0.4.10 - dandi >= 0.60.0 - pytest == 7.4.0 - pytest-cov == 4.1.0 From 37bb7fc14c0e55c406bed63505e05ecc33ebbff7 Mon Sep 17 00:00:00 2001 From: Cody Baker <51133164+CodyCBakerPhD@users.noreply.github.com> Date: Thu, 6 Jun 2024 19:52:35 -0400 Subject: [PATCH 04/22] bump neuroconv --- environments/environment-MAC-intel.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/environments/environment-MAC-intel.yml b/environments/environment-MAC-intel.yml index 3cb937767..c36c1caf7 100644 --- a/environments/environment-MAC-intel.yml +++ b/environments/environment-MAC-intel.yml @@ -18,7 +18,7 @@ dependencies: - flask == 2.3.2 - flask-cors == 4.0.0 - flask_restx == 1.1.0 - - neuroconv @ git+https://github.com/catalystneuro/neuroconv.git@main#neuroconv[full] + - neuroconv >= 0.4.10 - dandi >= 0.60.0 - pytest == 7.4.0 - pytest-cov == 4.1.0 From 95891074a8697934405d4d09a312894b082c380b Mon Sep 17 00:00:00 2001 From: Cody Baker <51133164+CodyCBakerPhD@users.noreply.github.com> Date: Thu, 6 Jun 2024 19:52:48 -0400 Subject: [PATCH 05/22] version bump --- environments/environment-Windows.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/environments/environment-Windows.yml b/environments/environment-Windows.yml index 1cb6f2e23..609238340 100644 --- a/environments/environment-Windows.yml +++ b/environments/environment-Windows.yml @@ -18,7 +18,7 @@ dependencies: - flask == 2.3.2 - flask-cors === 3.0.10 - flask_restx == 1.1.0 - - neuroconv @ 
git+https://github.com/catalystneuro/neuroconv.git@main#neuroconv[full] + - neuroconv >= 0.4.10 - dandi >= 0.60.0 - pytest == 7.2.2 - pytest-cov == 4.1.0 From dabe670fa950506ceac68708dcea3b3b5db7cf08 Mon Sep 17 00:00:00 2001 From: rly Date: Fri, 7 Jun 2024 10:14:26 -0700 Subject: [PATCH 06/22] Add import spikeinterface to spec file --- ephy_testing_data | 1 + nwb-guide.spec | 2 ++ 2 files changed, 3 insertions(+) create mode 160000 ephy_testing_data diff --git a/ephy_testing_data b/ephy_testing_data new file mode 160000 index 000000000..e8719e0eb --- /dev/null +++ b/ephy_testing_data @@ -0,0 +1 @@ +Subproject commit e8719e0eb3c1494d0a032e603ee5c1f0921de881 diff --git a/nwb-guide.spec b/nwb-guide.spec index d4868ee6d..fd19a2420 100644 --- a/nwb-guide.spec +++ b/nwb-guide.spec @@ -48,6 +48,8 @@ tmp_ret = collect_all('dlc2nwb') datas += tmp_ret[0]; binaries += tmp_ret[1]; hiddenimports += tmp_ret[2] tmp_ret = collect_all('ndx-pose') datas += tmp_ret[0]; binaries += tmp_ret[1]; hiddenimports += tmp_ret[2] +tmp_ret = collect_all('spikeinterface') +datas += tmp_ret[0]; binaries += tmp_ret[1]; hiddenimports += tmp_ret[2] block_cipher = None From da3b3f2a0f16134454c22e3f3fba21bb8f7c7cf8 Mon Sep 17 00:00:00 2001 From: rly Date: Fri, 7 Jun 2024 10:59:38 -0700 Subject: [PATCH 07/22] Use neuroconv[full] in env --- environments/environment-Linux.yml | 2 +- environments/environment-MAC-apple-silicon.yml | 2 +- environments/environment-MAC-intel.yml | 2 +- environments/environment-Windows.yml | 2 +- nwb-guide.spec | 2 -- 5 files changed, 4 insertions(+), 6 deletions(-) diff --git a/environments/environment-Linux.yml b/environments/environment-Linux.yml index 7501bce56..60af256ac 100644 --- a/environments/environment-Linux.yml +++ b/environments/environment-Linux.yml @@ -15,7 +15,7 @@ dependencies: - flask == 2.3.2 - flask-cors == 4.0.0 - flask_restx == 1.1.0 - - neuroconv >= 0.4.10 + - neuroconv[full] >= 0.4.10 - dandi >= 0.60.0 - pytest == 7.4.0 - pytest-cov == 4.1.0 diff --git 
a/environments/environment-MAC-apple-silicon.yml b/environments/environment-MAC-apple-silicon.yml index 744e6d75d..b8ef28700 100644 --- a/environments/environment-MAC-apple-silicon.yml +++ b/environments/environment-MAC-apple-silicon.yml @@ -21,7 +21,7 @@ dependencies: - flask == 2.3.2 - flask-cors == 4.0.0 - flask_restx == 1.1.0 - - neuroconv >= 0.4.10 + - neuroconv[full] >= 0.4.10 - dandi >= 0.60.0 - pytest == 7.4.0 - pytest-cov == 4.1.0 diff --git a/environments/environment-MAC-intel.yml b/environments/environment-MAC-intel.yml index c36c1caf7..cdba2b573 100644 --- a/environments/environment-MAC-intel.yml +++ b/environments/environment-MAC-intel.yml @@ -18,7 +18,7 @@ dependencies: - flask == 2.3.2 - flask-cors == 4.0.0 - flask_restx == 1.1.0 - - neuroconv >= 0.4.10 + - neuroconv[full] >= 0.4.10 - dandi >= 0.60.0 - pytest == 7.4.0 - pytest-cov == 4.1.0 diff --git a/environments/environment-Windows.yml b/environments/environment-Windows.yml index 609238340..0ae0bfaf0 100644 --- a/environments/environment-Windows.yml +++ b/environments/environment-Windows.yml @@ -18,7 +18,7 @@ dependencies: - flask == 2.3.2 - flask-cors === 3.0.10 - flask_restx == 1.1.0 - - neuroconv >= 0.4.10 + - neuroconv[full] >= 0.4.10 - dandi >= 0.60.0 - pytest == 7.2.2 - pytest-cov == 4.1.0 diff --git a/nwb-guide.spec b/nwb-guide.spec index fd19a2420..d4868ee6d 100644 --- a/nwb-guide.spec +++ b/nwb-guide.spec @@ -48,8 +48,6 @@ tmp_ret = collect_all('dlc2nwb') datas += tmp_ret[0]; binaries += tmp_ret[1]; hiddenimports += tmp_ret[2] tmp_ret = collect_all('ndx-pose') datas += tmp_ret[0]; binaries += tmp_ret[1]; hiddenimports += tmp_ret[2] -tmp_ret = collect_all('spikeinterface') -datas += tmp_ret[0]; binaries += tmp_ret[1]; hiddenimports += tmp_ret[2] block_cipher = None From 739f272d6db0c0a2f6729d3dc7b00dfc29c1d0bd Mon Sep 17 00:00:00 2001 From: Ryan Ly Date: Fri, 7 Jun 2024 12:13:11 -0700 Subject: [PATCH 08/22] Pass conda env path --- .github/workflows/build_and_deploy_mac.yml | 2 +- 1 file 
changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build_and_deploy_mac.yml b/.github/workflows/build_and_deploy_mac.yml index e31ace8be..6366cf6b3 100644 --- a/.github/workflows/build_and_deploy_mac.yml +++ b/.github/workflows/build_and_deploy_mac.yml @@ -40,7 +40,7 @@ jobs: run: npm install --verbose - name: Remove bad sonpy file (might make Spike2 format unusable on Mac - should exclude from selection) - run: rm -f /usr/local/miniconda/envs/nwb-guide/lib/python3.9/site-packages/sonpy/linux/sonpy.so + run: rm -f "$(conda info --base)/envs/envs/nwb-guide/lib/python3.9/site-packages/sonpy/linux/sonpy.so" - uses: apple-actions/import-codesign-certs@v2 with: From 9cc5a9262e745b68a53385b56b1b01ffe2a6846a Mon Sep 17 00:00:00 2001 From: Ryan Ly Date: Fri, 7 Jun 2024 15:46:25 -0700 Subject: [PATCH 09/22] Fix typo --- .github/workflows/build_and_deploy_mac.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build_and_deploy_mac.yml b/.github/workflows/build_and_deploy_mac.yml index 6366cf6b3..484652872 100644 --- a/.github/workflows/build_and_deploy_mac.yml +++ b/.github/workflows/build_and_deploy_mac.yml @@ -40,7 +40,7 @@ jobs: run: npm install --verbose - name: Remove bad sonpy file (might make Spike2 format unusable on Mac - should exclude from selection) - run: rm -f "$(conda info --base)/envs/envs/nwb-guide/lib/python3.9/site-packages/sonpy/linux/sonpy.so" + run: rm -f "$(conda info --base)/envs/nwb-guide/lib/python3.9/site-packages/sonpy/linux/sonpy.so" - uses: apple-actions/import-codesign-certs@v2 with: From c126e880f453cf9fb9ed3f0c4ba3773b69344bb3 Mon Sep 17 00:00:00 2001 From: Ryan Ly Date: Fri, 7 Jun 2024 15:47:28 -0700 Subject: [PATCH 10/22] Use better alias --- .github/workflows/build_and_deploy_mac.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build_and_deploy_mac.yml b/.github/workflows/build_and_deploy_mac.yml index 484652872..50e21044b 100644 --- 
a/.github/workflows/build_and_deploy_mac.yml +++ b/.github/workflows/build_and_deploy_mac.yml @@ -40,7 +40,7 @@ jobs: run: npm install --verbose - name: Remove bad sonpy file (might make Spike2 format unusable on Mac - should exclude from selection) - run: rm -f "$(conda info --base)/envs/nwb-guide/lib/python3.9/site-packages/sonpy/linux/sonpy.so" + run: rm -f "$CONDA_PREFIX/lib/python3.9/site-packages/sonpy/linux/sonpy.so" - uses: apple-actions/import-codesign-certs@v2 with: From c495d3fbf1e23da5b24a7ac35caf3dd54a829c39 Mon Sep 17 00:00:00 2001 From: Ryan Ly Date: Fri, 7 Jun 2024 16:48:09 -0700 Subject: [PATCH 11/22] Use macos-13 instead of macos-latest --- .github/workflows/build_and_deploy_mac.yml | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/.github/workflows/build_and_deploy_mac.yml b/.github/workflows/build_and_deploy_mac.yml index 50e21044b..77166984a 100644 --- a/.github/workflows/build_and_deploy_mac.yml +++ b/.github/workflows/build_and_deploy_mac.yml @@ -1,13 +1,15 @@ name: Mac Release run-name: ${{ github.actor }} is building a MAC release for NWB GUIDE -# NOTE: even though the runner is an arm64 mac, both x64 and arm64 releases will be made +# NOTE: even though the runner is an x64 mac, both x64 and arm64 releases will be made on: workflow_dispatch: jobs: deploy-on-mac: - runs-on: macos-latest + runs-on: macos-13 + # not using macos-latest because that runner is an arm64 mac and there are issues + # building and deploying with sonpy (Spike2RecordingInterface) on arm64 mac defaults: run: From 18c6020d2b116e650da1fe4b982380992c7e51a8 Mon Sep 17 00:00:00 2001 From: Ryan Ly Date: Fri, 7 Jun 2024 18:50:12 -0700 Subject: [PATCH 12/22] Update build_and_deploy_mac.yml --- .github/workflows/build_and_deploy_mac.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.github/workflows/build_and_deploy_mac.yml b/.github/workflows/build_and_deploy_mac.yml index 77166984a..a0991c835 100644 --- a/.github/workflows/build_and_deploy_mac.yml 
+++ b/.github/workflows/build_and_deploy_mac.yml @@ -57,4 +57,7 @@ jobs: teamId: ${{ secrets.APPLE_TEAM_ID }} appleId: ${{ secrets.APPLE_ID }} appleIdPassword: ${{ secrets.APPLE_PASSWORD }} + # make build process extra verbose in case of failure + DEBUG: electron-builder + DEBUG_DMG: true run: npm run deploy:mac From 1b6b65a725591f7a0438f9b6e194afe4358d3db2 Mon Sep 17 00:00:00 2001 From: Ryan Ly Date: Fri, 7 Jun 2024 22:20:12 -0700 Subject: [PATCH 13/22] Fix errors with hidden import --- nwb-guide.spec | 1 - 1 file changed, 1 deletion(-) diff --git a/nwb-guide.spec b/nwb-guide.spec index d4868ee6d..9777f6913 100644 --- a/nwb-guide.spec +++ b/nwb-guide.spec @@ -16,7 +16,6 @@ hiddenimports = [ 'email_validator', *collect_submodules('scipy.special.cython_special'), *collect_submodules('scipy.special._cdflib'), - *os.path.join(os.path.dirname(scipy.__file__), '.libs') ] datas += collect_data_files('jsonschema_specifications') From 3114407ecedcfa869878530dc9c6c37aa0773f84 Mon Sep 17 00:00:00 2001 From: Ryan Ly Date: Fri, 7 Jun 2024 22:21:02 -0700 Subject: [PATCH 14/22] Comment out debug mode for build on mac --- .github/workflows/build_and_deploy_mac.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/build_and_deploy_mac.yml b/.github/workflows/build_and_deploy_mac.yml index a0991c835..84d571ebc 100644 --- a/.github/workflows/build_and_deploy_mac.yml +++ b/.github/workflows/build_and_deploy_mac.yml @@ -57,7 +57,7 @@ jobs: teamId: ${{ secrets.APPLE_TEAM_ID }} appleId: ${{ secrets.APPLE_ID }} appleIdPassword: ${{ secrets.APPLE_PASSWORD }} - # make build process extra verbose in case of failure - DEBUG: electron-builder - DEBUG_DMG: true + # uncomment below to make build process extra verbose in case of failure + # DEBUG: electron-builder + # DEBUG_DMG: true run: npm run deploy:mac From 527ad37981a3b9874dc4d433c9cd25d3c7147eb7 Mon Sep 17 00:00:00 2001 From: Ryan Ly Date: Sat, 8 Jun 2024 00:39:07 -0700 Subject: [PATCH 15/22] 
Testing mac build --- .github/workflows/build_and_deploy_mac.yml | 2 +- environments/environment-MAC-apple-silicon.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/build_and_deploy_mac.yml b/.github/workflows/build_and_deploy_mac.yml index 84d571ebc..42bb45ac8 100644 --- a/.github/workflows/build_and_deploy_mac.yml +++ b/.github/workflows/build_and_deploy_mac.yml @@ -7,7 +7,7 @@ on: jobs: deploy-on-mac: - runs-on: macos-13 + runs-on: macos-latest # not using macos-latest because that runner is an arm64 mac and there are issues # building and deploying with sonpy (Spike2RecordingInterface) on arm64 mac diff --git a/environments/environment-MAC-apple-silicon.yml b/environments/environment-MAC-apple-silicon.yml index b8ef28700..84e29d4f3 100644 --- a/environments/environment-MAC-apple-silicon.yml +++ b/environments/environment-MAC-apple-silicon.yml @@ -21,7 +21,7 @@ dependencies: - flask == 2.3.2 - flask-cors == 4.0.0 - flask_restx == 1.1.0 - - neuroconv[full] >= 0.4.10 + - neuroconv @ git+https://github.com/catalystneuro/neuroconv.git@main#neuroconv[full] - dandi >= 0.60.0 - pytest == 7.4.0 - pytest-cov == 4.1.0 From 9aa0382ef5049cdd216494a865b2624680f1b8cf Mon Sep 17 00:00:00 2001 From: rly Date: Sat, 8 Jun 2024 01:51:06 -0700 Subject: [PATCH 16/22] Deploy on x64 mac, install neuroconv from github on arm64 mac --- .github/workflows/build_and_deploy_mac.yml | 10 ++++++---- .github/workflows/testing_flask_build_and_dist.yml | 2 +- environments/environment-MAC-apple-silicon.yml | 2 ++ 3 files changed, 9 insertions(+), 5 deletions(-) diff --git a/.github/workflows/build_and_deploy_mac.yml b/.github/workflows/build_and_deploy_mac.yml index 42bb45ac8..d53b6ec34 100644 --- a/.github/workflows/build_and_deploy_mac.yml +++ b/.github/workflows/build_and_deploy_mac.yml @@ -7,9 +7,11 @@ on: jobs: deploy-on-mac: - runs-on: macos-latest - # not using macos-latest because that runner is an arm64 mac and there are issues - # building and 
deploying with sonpy (Spike2RecordingInterface) on arm64 mac + runs-on: macos-13 + # NOTE: macos-latest is an arm64 mac, and the dependency sonpy (Spike2RecordingInterface) has a .so file that + # works only on mac x64. This causes issues building and deploying on mac arm64. So we use macos-13 (x64) + # to build and deploy both the x64 and arm64 versions of the app. + # NOTE: if changing this to macos-latest, make sure to use the apple-silicon conda environment. defaults: run: @@ -31,7 +33,7 @@ jobs: use-mamba: true - name: Create and activate environment - run: mamba env update --name nwb-guide --file environments/environment-MAC-apple-silicon.yml + run: mamba env update --name nwb-guide --file environments/environment-MAC-intel.yml - name: Use Node.js 20 uses: actions/setup-node@v4 diff --git a/.github/workflows/testing_flask_build_and_dist.yml b/.github/workflows/testing_flask_build_and_dist.yml index 8b3c1dc88..6c477d5e4 100644 --- a/.github/workflows/testing_flask_build_and_dist.yml +++ b/.github/workflows/testing_flask_build_and_dist.yml @@ -82,7 +82,7 @@ jobs: # Fix for macos build - remove bad sonpy file - if: matrix.os == 'macos-latest' || matrix.os == 'macos-13' - run: rm -f /Users/runner/miniconda3/envs/nwb-guide/lib/python3.9/site-packages/sonpy/linux/sonpy.so + run: rm -f "$CONDA_PREFIX/lib/python3.9/site-packages/sonpy/linux/sonpy.so" - name: Build PyFlask distribution run: npm run build:flask diff --git a/environments/environment-MAC-apple-silicon.yml b/environments/environment-MAC-apple-silicon.yml index 84e29d4f3..39daf9452 100644 --- a/environments/environment-MAC-apple-silicon.yml +++ b/environments/environment-MAC-apple-silicon.yml @@ -21,6 +21,8 @@ dependencies: - flask == 2.3.2 - flask-cors == 4.0.0 - flask_restx == 1.1.0 + # NOTE: the neuroconv wheel on pypi includes sonpy which is not compatible with arm64, so build and install + # neuroconv from github, which will remove the sonpy dependency when building from mac arm64 - neuroconv @ 
git+https://github.com/catalystneuro/neuroconv.git@main#neuroconv[full] - dandi >= 0.60.0 - pytest == 7.4.0 From 7f3ed45ad5c959ea9a44bb47f07fb2ef5bd646dc Mon Sep 17 00:00:00 2001 From: rly Date: Sat, 8 Jun 2024 01:59:09 -0700 Subject: [PATCH 17/22] Remove accidental submodule --- ephy_testing_data | 1 - 1 file changed, 1 deletion(-) delete mode 160000 ephy_testing_data diff --git a/ephy_testing_data b/ephy_testing_data deleted file mode 160000 index e8719e0eb..000000000 --- a/ephy_testing_data +++ /dev/null @@ -1 +0,0 @@ -Subproject commit e8719e0eb3c1494d0a032e603ee5c1f0921de881 From adb3805b378d01f3e9efeed63e0d275b46772ce0 Mon Sep 17 00:00:00 2001 From: Ryan Ly Date: Sat, 8 Jun 2024 09:32:35 -0700 Subject: [PATCH 18/22] Update environments/environment-Linux.yml Co-authored-by: Cody Baker <51133164+CodyCBakerPhD@users.noreply.github.com> --- environments/environment-Linux.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/environments/environment-Linux.yml b/environments/environment-Linux.yml index 60af256ac..ea486f110 100644 --- a/environments/environment-Linux.yml +++ b/environments/environment-Linux.yml @@ -15,7 +15,7 @@ dependencies: - flask == 2.3.2 - flask-cors == 4.0.0 - flask_restx == 1.1.0 - - neuroconv[full] >= 0.4.10 + - neuroconv @ git+https://github.com/catalystneuro/neuroconv.git@main#neuroconv[full] - dandi >= 0.60.0 - pytest == 7.4.0 - pytest-cov == 4.1.0 From 870e9d96e8fe75b163c94d35c0302838238aed68 Mon Sep 17 00:00:00 2001 From: Ryan Ly Date: Sat, 8 Jun 2024 09:32:42 -0700 Subject: [PATCH 19/22] Update environments/environment-MAC-intel.yml Co-authored-by: Cody Baker <51133164+CodyCBakerPhD@users.noreply.github.com> --- environments/environment-MAC-intel.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/environments/environment-MAC-intel.yml b/environments/environment-MAC-intel.yml index cdba2b573..3cb937767 100644 --- a/environments/environment-MAC-intel.yml +++ b/environments/environment-MAC-intel.yml @@ 
-18,7 +18,7 @@ dependencies: - flask == 2.3.2 - flask-cors == 4.0.0 - flask_restx == 1.1.0 - - neuroconv[full] >= 0.4.10 + - neuroconv @ git+https://github.com/catalystneuro/neuroconv.git@main#neuroconv[full] - dandi >= 0.60.0 - pytest == 7.4.0 - pytest-cov == 4.1.0 From eb876b302336e432998483d9ca858cc375b6e729 Mon Sep 17 00:00:00 2001 From: Ryan Ly Date: Sat, 8 Jun 2024 09:32:46 -0700 Subject: [PATCH 20/22] Update environments/environment-Windows.yml Co-authored-by: Cody Baker <51133164+CodyCBakerPhD@users.noreply.github.com> --- environments/environment-Windows.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/environments/environment-Windows.yml b/environments/environment-Windows.yml index 0ae0bfaf0..1cb6f2e23 100644 --- a/environments/environment-Windows.yml +++ b/environments/environment-Windows.yml @@ -18,7 +18,7 @@ dependencies: - flask == 2.3.2 - flask-cors === 3.0.10 - flask_restx == 1.1.0 - - neuroconv[full] >= 0.4.10 + - neuroconv @ git+https://github.com/catalystneuro/neuroconv.git@main#neuroconv[full] - dandi >= 0.60.0 - pytest == 7.2.2 - pytest-cov == 4.1.0 From 960dccfdff40f3092a3ec95807fcdfcf1326548a Mon Sep 17 00:00:00 2001 From: rly Date: Sat, 8 Jun 2024 11:40:29 -0700 Subject: [PATCH 21/22] Add tzdata resources to pyinstaller --- nwb-guide.spec | 2 ++ 1 file changed, 2 insertions(+) diff --git a/nwb-guide.spec b/nwb-guide.spec index 9777f6913..f995ddae2 100644 --- a/nwb-guide.spec +++ b/nwb-guide.spec @@ -47,6 +47,8 @@ tmp_ret = collect_all('dlc2nwb') datas += tmp_ret[0]; binaries += tmp_ret[1]; hiddenimports += tmp_ret[2] tmp_ret = collect_all('ndx-pose') datas += tmp_ret[0]; binaries += tmp_ret[1]; hiddenimports += tmp_ret[2] +tmp_ret = collect_all('tzdata') +datas += tmp_ret[0]; binaries += tmp_ret[1]; hiddenimports += tmp_ret[2] block_cipher = None From d7f4edb11ae4cddc157e8ddf7c65011f84fa60cf Mon Sep 17 00:00:00 2001 From: rly Date: Mon, 10 Jun 2024 00:39:55 -0700 Subject: [PATCH 22/22] Suggest improvements to tutorial 
text --- docs/tutorials/dataset.rst | 12 ++-- docs/tutorials/multiple_sessions.rst | 37 ++++++----- docs/tutorials/single_session.rst | 92 ++++++++++++++++------------ 3 files changed, 81 insertions(+), 60 deletions(-) diff --git a/docs/tutorials/dataset.rst b/docs/tutorials/dataset.rst index b3a42a6a6..091f22684 100644 --- a/docs/tutorials/dataset.rst +++ b/docs/tutorials/dataset.rst @@ -1,8 +1,8 @@ Example Dataset Generation ========================== -Our tutorials focus on converting extracellular electrophysiology data in the SpikeGLX and Phy formats. -To get you started as quickly as possible, we’ve created a way to generate this Neuropixel-like dataset at the click of a button! +The NWB GUIDE tutorials focus on converting extracellular electrophysiology data in the SpikeGLX and Phy formats. +To get started as quickly as possible, you can use NWB GUIDE to generate a Neuropixels-like dataset at the click of a button! .. note:: The **SpikeGLX** data format stores electrophysiology recordings. @@ -17,7 +17,9 @@ Navigate to the **Settings** page using the button at the bottom of the main sid Press the Generate button on the Settings page to create the dataset. -The generated data will populate in the ``~/NWB_GUIDE/test_data`` directory, where ``~`` is the home directory of your system. This includes both a ``single_session_data`` and ``multi_session_dataset`` folder to accompany the following tutorials. +The dataset will be generated in a new ``~/NWB_GUIDE/test_data`` directory, where ``~`` is the `home directory `_ of your system. This includes both a ``single_session_data`` and ``multi_session_dataset`` folder to use in the following tutorials. + +The folder structure of the generated dataset is as follows: .. code-block:: bash @@ -52,6 +54,4 @@ The generated data will populate in the ``~/NWB_GUIDE/test_data`` directory, whe │ │ └── mouse2_Session2/ │ │ ... - - -Now you’re ready to start your first conversion using the NWB GUIDE! 
+Now you're ready to start your first conversion using the NWB GUIDE! diff --git a/docs/tutorials/multiple_sessions.rst b/docs/tutorials/multiple_sessions.rst index 5d798c49d..511454aa7 100644 --- a/docs/tutorials/multiple_sessions.rst +++ b/docs/tutorials/multiple_sessions.rst @@ -1,7 +1,7 @@ Managing Multiple Sessions ========================== -Now, let’s say that you’ve already run some of your experiments and now you want to convert them all at the same time. This is where a multi-session workflow will come in handy. +Now, let's imagine that you've already run multiple sessions of an experiment and now you want to convert them all to NWB at the same time. This is where a multi-session workflow will be useful. Begin a new conversion on the **Convert** page and provide a name for your pipeline. @@ -12,19 +12,23 @@ Update the Workflow page to indicate that you'll: #. Run on multiple sessions #. Locate the source data programmatically -#. Specify your dataset location ``~/NWB_GUIDE/test-data/multi_session_dataset``, where **~** is the home directory of your system. +#. Specify your dataset location ``~/NWB_GUIDE/test-data/multi_session_dataset``, where ``~`` is the home directory of your system. #. Skip dataset publication. +Leave the rest of the settings as they are. + .. figure:: ../assets/tutorials/multiple/workflow-page.png :align: center :alt: Workflow page with multiple sessions and locate data selected Data Formats ------------ + As before, specify **SpikeGLX Recording** and **Phy Sorting** as the data formats for this conversion. Locate Data ----------- + This page helps you automatically identify source data for multiple subjects / sessions as long as your files are organized consistently. .. 
figure:: ../assets/tutorials/multiple/pathexpansion-page.png @@ -34,33 +38,33 @@ This page helps you automatically identify source data for multiple subjects / s File locations are specified as **format strings** that define source data paths of each selected data format. .. note:: - Format strings are one component of NeuroConv's **path expansion language**, which has some nifty features for manually specifying complex paths. Complete documentation of the path expansion feature of NeuroConv can be found :path-expansion-guide:`here <>`. + Format strings are one component of NeuroConv's **path expansion language**, which has nifty features for manually specifying complex paths. Complete documentation of the path expansion feature can be found :path-expansion-guide:`here <>`. -While you don’t have to specify format strings for all of the pipeline’s data formats, we’re going to find all of our data here for this tutorial. You'll always be able to confirm or manually select the final paths on the Source Data page later in the workflow. +While you don't have to specify format strings for all of the pipeline's data formats, we're going to find all of our data here for this tutorial. You'll always be able to confirm or manually select the final paths on the Source Data page later in the workflow. Format strings are specified using two components: the **base directory**, which is the directory to search in, and the **format string path**, where the source data is within that directory. The base directory has been pre-populated based on your selection on the Workflow page. -To avoid specifying the format string path by hand, we can take advantage of **Autocomplete**. Click the **Autocomplete** button to open a pop-up form that will derive the format string from a single example path. +To avoid specifying the format string path by hand, click the **Autocomplete** button to open a pop-up form that will derive the format string from a single example path. .. 
figure:: ../assets/tutorials/multiple/pathexpansion-autocomplete-open.png :align: center :alt: Autocomplete modal on path expansion page -Provide an example source data path (for example, the ``multi_session_dataset/mouse1/mouse1_Session2/mouse1_Session2_phy`` file for Phy), followed by the Subject (``mouse1``) and Session ID (``Session1``) for this particular path. +Provide a source data path for Phy by either dragging and dropping the folder ``multi_session_dataset/mouse1/mouse1_Session2/mouse1_Session2_phy`` into the **Example Folder** box or clicking the box and selecting a folder. Then enter the Subject ID (``mouse1``) and Session ID (``Session1``) for this particular path. .. figure:: ../assets/tutorials/multiple/pathexpansion-autocomplete-filled.png :align: center :alt: Autocomplete modal completed -When you submit this form, you’ll notice that the Format String Path input has been auto-filled with a pattern for all the sessions. +When you submit this form, you'll notice that the Format String Path input has been auto-filled with a pattern for all of the sessions, and a list of all of the source data found is shown in the gray box. Confirm that this list contains all four Phy folders. .. figure:: ../assets/tutorials/multiple/pathexpansion-autocomplete-submitted.png :align: center :alt: Path expansion page with autocompleted format string -Repeat this process for SpikeGLX, where ``multi_session_dataset/mouse1/mouse1_Session2/mouse1_Session2_g0/mouse1_Session2_g0_imec0/mouse1_Session1_g0_t0.imec0.lf.bin`` will be the example source data path. +Repeat this process for SpikeGLX, where ``multi_session_dataset/mouse1/mouse1_Session2/mouse1_Session2_g0/mouse1_Session2_g0_imec0/mouse1_Session1_g0_t0.imec0.ap.bin`` will be the example source data path. .. 
figure:: ../assets/tutorials/multiple/pathexpansion-completed.png :align: center @@ -70,15 +74,16 @@ Advance to the next page when you have entered the data locations for both forma Subject Metadata ---------------- -On this page you’ll edit subject-level metadata across all related sessions. Unlike the previous few pages, you’ll notice that -Sex and Species both have gray asterisks next to their name; this means they are **loose requirements**, which aren’t currently required + +On this page, you can edit subject-level metadata that is the same for all sessions. Unlike the previous few pages, you'll notice that +Sex and Species both have gray asterisks next to their name; this means they are **loose requirements**, which aren't currently required but could later block progress if left unspecified. .. figure:: ../assets/tutorials/multiple/subject-page.png :align: center :alt: Blank subject table -In this case, we have two subjects with two sessions each. Let’s say that each of their sessions happened close enough in time that they can be identified using the same **age** entry: ``P29W`` for ``mouse1`` and ``P30W`` for ``mouse2``. +In this case, we have two subjects with two sessions each. Let's say that each of their sessions happened close enough in time that they can be identified using the same **age** entry: ``P29W`` for ``mouse1`` and ``P30W`` for ``mouse2``. We should also indicate the ``sex`` of each subject since this is a requirement for `uploading to the DANDI Archive `_. @@ -90,16 +95,18 @@ Advance to the next page when you have entered subject metadata for all subjects Source Data Information ----------------------- -Because we used the Locate Data page to programmatically identify our source data, this page should mostly be complete. You can use this opportunity to verify that the identified paths appear as expected for each session. 
+ +Because we used the Locate Data page to programmatically identify our source data, this page should mostly be complete. Verify that the identified paths appear as expected for each session by clicking the "Phy Sorting" header to expand the section for Phy data and examining the "Folder Path" value. Do the same for the SpikeGLX data. .. figure:: ../assets/tutorials/multiple/sourcedata-page.png :align: center :alt: Complete source data forms -One notable difference between this and the single-session workflow, however, is that the next few pages will allow you to toggle between sessions using the **session manager** sidebar on the left. +One notable difference between this and the single-session workflow is that the next few pages will allow you to toggle between sessions using the **session manager** sidebar on the left. Try this out. Under "Sessions", click "sub-mouse2" and "ses-Session1" to locate the source data for a different session from this subject. Session Metadata ---------------- + Aside from the session manager, the file metadata page in the multi-session workflow is nearly identical to the single-session version. .. figure:: ../assets/tutorials/multiple/metadata-nwbfile.png @@ -108,7 +115,7 @@ Aside from the session manager, the file metadata page in the multi-session work A complete General Metadata form -Acting as default metadata, the information supplied on the subject metadata page has pre-filled the Subject metadata for each session. +The information supplied on the Subject Metadata page has been used to fill in the Subject metadata for each session. .. figure:: ../assets/tutorials/multiple/metadata-subject-complete.png :align: center @@ -116,7 +123,7 @@ Acting as default metadata, the information supplied on the subject metadata pag A complete Subject metadata form -You'll notice that there's an **Edit Default Metadata** button at the top of the page. 
This feature allows you to specify a single default value for each property that is expected to be the same across all sessions. **Use this button to fill in general metadata for your sessions**, which will save you time and effort while ensuring your files still follow Best Practices. +You'll notice that there's an **Edit Default Metadata** button at the top of the page. This feature allows you to specify a single default value for each property that is expected to be the same across all sessions. **Use this button to fill in general metadata for your sessions**, such as the Institution, which will save you time and effort while ensuring your files still follow Best Practices. Finish the rest of the workflow as you would for a single session by completing a full conversion after you review the preview files with the NWB Inspector and Neurosift. diff --git a/docs/tutorials/single_session.rst b/docs/tutorials/single_session.rst index c1931803b..06f69e9ec 100644 --- a/docs/tutorials/single_session.rst +++ b/docs/tutorials/single_session.rst @@ -1,9 +1,9 @@ Converting a Single Session =========================== -Let's imagine you've just completed an experimental session and you’d like to convert your data to NWB right away. +Let's imagine you've just completed an experimental session and you'd like to convert your data to NWB right away. -Upon launching the GUIDE, you'll begin on the **Convert** page. If you’re opening the application for the first time, there should be no pipelines listed on this page. +Upon launching the GUIDE, you'll begin on the **Convert** page. If you're opening the application for the first time, there should be no pipelines listed on this page. .. figure:: ../assets/tutorials/home-page.png :align: center @@ -17,49 +17,53 @@ Project Structure Project Setup ^^^^^^^^^^^^^ -The Project Setup page will have you define two pieces of information about your pipeline: the **name** and, optionally, the **output location** for your NWB files. 
We will not be specifying an output location in this tutorial—so your NWB files will be saved to the default location. +To begin, set the **name** for this tutorial pipeline to "Single Session Workflow". The red asterisk next to the "Name" field indicates that this is a required field. -You’ll notice that the name property has a red asterisk next to it, which identifies it as a required property. +You can also set the **output location** for your NWB files, but for this tutorial, leave it as the default location. -.. figure:: ../assets/tutorials/single/info-page.png +.. figure:: ../assets/tutorials/single/valid-name.png :align: center - :alt: Project Setup page with no name (invalid) + :alt: Project Setup page with valid name +Press "Next" to continue. -After specifying a unique project name, the colored background and error message will disappear, allowing you to advance to the next page. +Pipeline Workflow +^^^^^^^^^^^^^^^^^ -.. figure:: ../assets/tutorials/single/valid-name.png - :align: center - :alt: Project Setup page with valid name +On this page, you'll specify the type of **workflow** you'd like to follow for this conversion pipeline. -Workflow Configuration -^^^^^^^^^^^^^^^^^^^^^^ -On this page, you’ll specify the type of **workflow** you’d like to follow for this conversion pipeline. +First, you can set the time zone for the data. If the data was collected in a different time zone than your local time zone, you can search for and select that time zone. For this tutorial, leave it as the default time zone. -Since this is a single-session workflow, you’ll need to specify a **Subject ID** and **Session ID** to identify the data you’ll be converting. +For the next question "Will this pipeline be run on multiple sessions?", keep the "No" button selected. + +For a single-session workflow, you'll need to specify a **Subject ID** and **Session ID** to identify the data you'll be converting. Enter "sub1" for the Subject ID and "ses1" for the Session ID. .. 
figure:: ../assets/tutorials/single/workflow-page.png :align: center :alt: Workflow page -Additionally, we’ll turn off the option to upload to the DANDI Archive and approach this in a later tutorial. +For this tutorial, turn off the option to publish the data to the DANDI Archive. This will be covered in a later tutorial. + +For the last question "Would you like to customize low-level data storage options?", keep the "No" button selected. + +Press "Next" to continue. Data Formats ^^^^^^^^^^^^ -Next, you’ll specify the data formats you’re working with on the Data Formats page. The GUIDE supports 40+ total neurophysiology formats. A full registry of available formats is available :doc:`here `. + +Next, you'll specify the data formats you're working with. The GUIDE supports 40+ neurophysiology formats. A full registry of available formats is available :doc:`here `. .. figure:: ../assets/tutorials/single/formats-page.png :align: center - :alt: Date Formats page + :alt: Data Formats page -The tutorial we're working with uses the SpikeGLX and Phy formats, a common output for Neuropixels recordings and subsequent spike sorting. To specify that your pipeline will handle these files, you’ll press the “Add Format” button. +This tutorial uses the SpikeGLX format, a common output for Neuropixels recordings, and the Phy format, a common output of spike sorting and curation. To specify that your pipeline will handle these files, press the “Add Format” button. .. figure:: ../assets/tutorials/single/format-options.png :align: center :alt: Format pop-up on the Data Formats page -Then, select the relevant formats—in this case, **SpikeGLX Recording** and **Phy Sorting**—from the pop-up list. Use the search bar to filter for the format you need. - +Then, select the relevant formats—in this case, **SpikeGLX Recording** (not SpikeGLX Converter) and **Phy Sorting**—from the pop-up list. Use the search bar to filter for the format you need. .. 
figure:: ../assets/tutorials/single/search-behavior.png :align: center @@ -67,12 +71,11 @@ Then, select the relevant formats—in this case, **SpikeGLX Recording** and **P The selected formats will then display above the button. - .. figure:: ../assets/tutorials/single/interface-added.png :align: center :alt: Data Formats page with SpikeGLX Recording added to the list -Advance to the next page when you have **SpikeGLX Recording** and **Phy Sorting** selected. +Press "Next" after you have selected **SpikeGLX Recording** and **Phy Sorting**. .. figure:: ../assets/tutorials/single/all-interfaces-added.png :align: center @@ -83,31 +86,39 @@ Data Entry Source Data Information ^^^^^^^^^^^^^^^^^^^^^^^ + On this page, specify the **phy** folder and **.ap.bin** (SpikeGLX) file so that the GUIDE can find this source data to complete the conversion. -As discussed in the :doc:`Dataset Generation ` tutorial, these can be found in the ``~/NWB_GUIDE/test-data/single_session_data`` directory, where **~** is the home directory of your system. +As discussed in the :doc:`Dataset Generation ` tutorial, these can be found in the ``~/NWB_GUIDE/test-data/single_session_data`` directory, where ``~`` is the home directory of your system. If you just generated the dataset, this folder may still be open in your file navigator. -Within each data format accordion, you'll find a file selector that will accept relevant source data. You can either click this to navigate to your files or drag-and-drop into the GUIDE from your file navigator. +Click the **Phy Sorting** header to expand the section. Under "Folder Path", you can either drag-and-drop the **phy** folder into the box from your file navigator or click the box to navigate to and select the **phy** folder. .. figure:: ../assets/tutorials/single/sourcedata-page-specified.png :align: center :alt: Source Data page with source locations specified -Advance to the next page to extract metadata from the source data. 
+Next, click the **SpikeGLX Recording** header to expand the section. Under "File Path", you can either click the box to navigate to the **.ap.bin** file or drag-and-drop the **.ap.bin** file into the box from your file navigator. The **.ap.bin** file is located in the ``~/NWB_GUIDE/test-data/single_session_data/spikeglx/Session1_g0/Session1_g0_imec0`` folder. + +Press "Next" to extract metadata from these source data files and folders. Session Metadata ^^^^^^^^^^^^^^^^ -The file metadata page is a great opportunity to add rich annotations to the file, which will be read by anyone reusing your data in the future! -The Session Start Time in the **General Metadata** section is already specified because this field was automatically extracted from the SpikeGLX source data. +The file metadata page is a great opportunity to add rich annotations to the NWB file, which will be read by anyone reusing your data in the future! + +Click the **General Metadata** header to expand the section. + +The Session Start Time is already specified because this field was automatically extracted from the SpikeGLX source data. .. figure:: ../assets/tutorials/single/metadata-nwbfile.png :align: center :alt: Metadata page with invalid Subject information -While the **General Metadata** section is complete, take some time to fill out additional information such as the **Institutional Info** box and the **Experimenter** field. +The **General Metadata** header is underlined yellow because all required fields have been set, but some recommended fields are missing values, such as **Institution** and **Experiment Description**. These fields are not required, but they can be useful for future users of the data. + +Take a minute to fill out some of these fields, such as the fields in the **Institutional Info** box and the **Experimenter** field. -We also need to add the **Subject** information—as noted by the red accents around that item. 
Let’s say that our subject is a male mouse with an age of P25W, which represents 25 weeks old. +The **Subject** header is underlined red, indicating that required fields are missing values. Click the **Subject** header to expand the section. The subject's **sex**, **species**, and **age** are missing. Select "Male" for **sex**, "Mus musculus - House mouse" for **species**, and "P25W", which represents 25 weeks old, for **age**. .. figure:: ../assets/tutorials/single/metadata-subject-complete.png :align: center @@ -115,15 +126,13 @@ We also need to add the **Subject** information—as noted by the red accents ar The status of the Subject information will update in real-time as you fill out the form. - -This dataset will also have **Ecephys** metadata extracted from the SpikeGLX source data, though we aren't interested in modifying this information at the moment. +Click the **Ecephys** header to expand the section. Ecephys is short-hand for "extracellular electrophysiology". This section contains metadata about the probes and electrodes used. For the test SpikeGLX data, these metadata have been extracted from the SpikeGLX source data. You do not need to modify it in this tutorial. .. figure:: ../assets/tutorials/single/metadata-ecephys.png :align: center :alt: Ecephys metadata extracted from the SpikeGLX source data - -Let's leave this as-is and advance to the next page. This will trigger the conversion of your source data into a preview NWB file. +Press "Next" to trigger the conversion of a small part of your source data into a preview NWB file. File Conversion --------------- @@ -131,19 +140,22 @@ File Conversion Inspector Report ^^^^^^^^^^^^^^^^ -The Inspector Report page allows you to validate the preview file against the latest Best Practices and make suggestions to improve the content or representations. +This page shows the output of the NWB Inspector tool, which validated your preview NWB file against the latest NWB Best Practices. 
Red boxes represent errors, and yellow boxes represent best practice warnings that could be ignored. .. figure:: ../assets/tutorials/single/inspect-page.png :align: center :alt: NWB Inspector report -Advance to the next page when you are satisfied with the Inspector Report. +When you are satisfied with the Inspector Report, press "Next". Conversion Preview ^^^^^^^^^^^^^^^^^^ -On the Conversion Preview, Neurosift allows you to explore the structure of the NWB file and ensure the packaged data matches your expectations. -In particular, take a look at the lefthand metadata table and check that the information provided on the previous pages is present in the NWB file. +This page uses the Neurosift tool to allow you to explore the structure of your NWB file so that you can ensure the packaged data matches your expectations. + +In particular, take a look at the lefthand metadata table and check that the information you provided on the previous pages is present in the NWB file. + +Expand the yellow "acquisition" section and select "ElectricalSeriesAP" to view a plot of the test SpikeGLX data. .. figure:: ../assets/tutorials/single/preview-page.png :align: center @@ -151,12 +163,14 @@ In particular, take a look at the lefthand metadata table and check that the inf Neurosift can be useful for many other exploration tasks—but this will not be covered in this tutorial. -Advancing from this page will trigger the full conversion of your data to the NWB format, a process that may take some time depending on the dataset size. +The NWB file shown here is just a preview NWB file that was created using only a small part of the source data. Press "Run Conversion" to trigger the full conversion of your data to the NWB format. This conversion may take some time depending on the dataset size. Conversion Review ^^^^^^^^^^^^^^^^^ -Congratulations on finishing your first conversion of neurophysiology files using the NWB GUIDE!
+Congratulations on finishing your first conversion of neurophysiology data to NWB using the NWB GUIDE! Click the file name ``sub-sub1_ses-ses1.nwb`` to view the location of the NWB file in your file navigator. + +If you had other data to add to the NWB file that are in formats not supported by NWB GUIDE, you can use PyNWB (Python) or MatNWB (MATLAB) to open the NWB file and add the data programmatically. See the documentation links at the bottom of the "Conversion Review" page for tutorials and more information. .. figure:: ../assets/tutorials/single/conversion-results-page.png :align: center