Skip to content

Commit

Permalink
Merge pull request datajoint#71 from kabilar/main
Browse files Browse the repository at this point in the history
Add `black` code formatting
  • Loading branch information
CBroz1 authored Jul 7, 2022
2 parents 2dcde24 + 54e2dc6 commit 020ce9a
Show file tree
Hide file tree
Showing 25 changed files with 1,419 additions and 808 deletions.
3 changes: 2 additions & 1 deletion notebooks/00-data-download-optional.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -27,6 +27,7 @@
"metadata": {},
"outputs": [],
"source": [
"%%bash\n",
"pip install git+https://github.com/datajoint/djarchive-client.git"
]
},
Expand Down Expand Up @@ -183,7 +184,7 @@
],
"metadata": {
"jupytext": {
"formats": "ipynb,py"
"formats": "ipynb,py_scripts//py"
},
"kernelspec": {
"display_name": "ephys_workflow_runner",
Expand Down
20 changes: 14 additions & 6 deletions notebooks/py_scripts/00-data-download-optional.py
Original file line number Diff line number Diff line change
@@ -1,12 +1,12 @@
# ---
# jupyter:
# jupytext:
# formats: ipynb,py
# formats: ipynb,py_scripts//py
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.5'
# jupytext_version: 1.13.7
# jupytext_version: 1.14.0
# kernelspec:
# display_name: ephys_workflow_runner
# language: python
Expand All @@ -19,12 +19,15 @@

# The example dataset was hosted on djarchive, an AWS storage. We provide a client package to download the data, [djarchive-client](https://github.com/datajoint/djarchive-client), which can be installed with pip:

pip install git+https://github.com/datajoint/djarchive-client.git
# + language="bash"
# pip install git+https://github.com/datajoint/djarchive-client.git
# -

# ## Download ephys test datasets using `djarchive-client`

import os
import djarchive_client

client = djarchive_client.client()

# To browse the datasets that are available in djarchive:
Expand All @@ -37,17 +40,22 @@

# To download the dataset, let's prepare a root directory, for example in `/tmp`:

os.mkdir('/tmp/test_data')
os.mkdir("/tmp/test_data")

# Get the dataset revision with the current version of the workflow:

from workflow_array_ephys import version
revision = version.__version__.replace('.', '_')

revision = version.__version__.replace(".", "_")
revision

# Then run download for a given set and the revision:

client.download('workflow-array-ephys-test-set', target_directory='/tmp/test_data', revision=revision)
client.download(
"workflow-array-ephys-test-set",
target_directory="/tmp/test_data",
revision=revision,
)

# ## Directory organization
# After downloading, the directory will be organized as follows:
Expand Down
26 changes: 15 additions & 11 deletions notebooks/py_scripts/01-configure.py
Original file line number Diff line number Diff line change
Expand Up @@ -26,20 +26,24 @@
# -

import os

# change to the upper level folder
if os.path.basename(os.getcwd())=='notebooks': os.chdir('..')
assert os.path.basename(os.getcwd())=='workflow-array-ephys', ("Please move to the "
+ "workflow directory")
if os.path.basename(os.getcwd()) == "notebooks":
os.chdir("..")
assert os.path.basename(os.getcwd()) == "workflow-array-ephys", (
"Please move to the " + "workflow directory"
)
import datajoint as dj

# ## Setup - Credentials
#
# Now let's set up the host, user and password in the `dj.config` global variable

import getpass
dj.config['database.host'] = '{YOUR_HOST}'
dj.config['database.user'] = '{YOUR_USERNAME}'
dj.config['database.password'] = getpass.getpass() # enter the password securely

dj.config["database.host"] = "{YOUR_HOST}"
dj.config["database.user"] = "{YOUR_USERNAME}"
dj.config["database.password"] = getpass.getpass()  # enter the password securely

# You should be able to connect to the database at this stage.

Expand All @@ -55,11 +59,11 @@
#
# The prefix can be configured as follows in `dj.config`:

dj.config['custom'] = {'database.prefix': 'neuro_'}
dj.config["custom"] = {"database.prefix": "neuro_"}

# ### Root directories for raw/processed data
#
# `ephys_root_data_dir` field indicates the root directory for
# `ephys_root_data_dir` field indicates the root directory for
# + The **ephys raw data** from SpikeGLX or OpenEphys, including `*{.ap,lf}.{bin,meta}`
# + The **clustering results** from kilosort2 (e.g. `spike_{times,clusters}.npy`
#
Expand All @@ -77,9 +81,9 @@
# ```

# If there is only one root path.
dj.config['custom']['ephys_root_data_dir'] = '/tmp/test_data'
dj.config["custom"]["ephys_root_data_dir"] = "/tmp/test_data"
# If there are multiple possible root paths:
dj.config['custom']['ephys_root_data_dir'] = ['/tmp/test_data1', '/tmp/test_data2']
dj.config["custom"]["ephys_root_data_dir"] = ["/tmp/test_data1", "/tmp/test_data2"]

dj.config

Expand All @@ -92,7 +96,7 @@
#
# `element-array-ephys` offers 3 different schemas: `acute`, `chronic`, and `no-curation`. For more information about each, please visit the [electrophysiology description page](https://elements.datajoint.org/description/array_ephys/). This decision should be made before first activating the schema. Note: only `no-curation` is supported for export to NWB directly from the Element.

dj.config['custom']['ephys_mode']='no-curation' # or acute or chronic
dj.config["custom"]["ephys_mode"] = "no-curation" # or acute or chronic

# ## Save configuration
#
Expand Down
26 changes: 14 additions & 12 deletions notebooks/py_scripts/02-workflow-structure-optional.py
Original file line number Diff line number Diff line change
Expand Up @@ -24,7 +24,9 @@
# To load the local configuration, we will change the directory to the package root.

import os
if os.path.basename(os.getcwd())=='notebooks': os.chdir('..')

if os.path.basename(os.getcwd()) == "notebooks":
os.chdir("..")

# ## Schemas and tables

Expand Down Expand Up @@ -57,24 +59,24 @@
# + `dj.list_schemas()`: list all schemas a user could access.
dj.list_schemas()

# + `dj.Diagram()`: plot tables and dependencies.
# + `dj.Diagram()`: plot tables and dependencies.

# + `dj.Diagram()`: plot tables and dependencies
# plot diagram for all tables in a schema
dj.Diagram(ephys)
# -

# **Table tiers**:
# **Table tiers**:
#
# Manual table: green box, manually inserted table, expect new entries daily, e.g. Subject, ProbeInsertion.
# Lookup table: gray box, pre inserted table, commonly used for general facts or parameters. e.g. Strain, ClusteringMethod, ClusteringParamSet.
# Imported table: blue oval, auto-processing table, the processing depends on the importing of external files. e.g. process of Clustering requires output files from kilosort2.
# Computed table: red circle, auto-processing table, the processing does not depend on files external to the database, commonly used for
# Manual table: green box, manually inserted table, expect new entries daily, e.g. Subject, ProbeInsertion.
# Lookup table: gray box, pre inserted table, commonly used for general facts or parameters. e.g. Strain, ClusteringMethod, ClusteringParamSet.
# Imported table: blue oval, auto-processing table, the processing depends on the importing of external files. e.g. process of Clustering requires output files from kilosort2.
# Computed table: red circle, auto-processing table, the processing does not depend on files external to the database, commonly used for
# Part table: plain text, as an appendix to the master table, all the part entries of a given master entry represent an intact set of the master entry. e.g. Unit of a CuratedClustering.
#
# **Dependencies**:
# **Dependencies**:
#
# One-to-one primary: thick solid line, share the exact same primary key, meaning the child table inherits all the primary key fields from the parent table as its own primary key.
# One-to-one primary: thick solid line, share the exact same primary key, meaning the child table inherits all the primary key fields from the parent table as its own primary key.
# One-to-many primary: thin solid line, inherit the primary key from the parent table, but have additional field(s) as part of the primary key as well
# Secondary dependency: dashed line, the child table inherits the primary key fields from parent table as its own secondary attribute.

Expand All @@ -95,7 +97,7 @@
# + `heading`: [markdown]
# # + `describe()`: show table definition with foreign key references.
# -
ephys.EphysRecording.describe();
ephys.EphysRecording.describe()

# + `heading`: show attribute definitions regardless of foreign key references

Expand All @@ -115,14 +117,14 @@
dj.Diagram(subject)

# + [subject](https://github.com/datajoint/element-animal): contains the basic information of subject, including Strain, Line, Subject, Zygosity, and SubjectDeath information.
subject.Subject.describe();
subject.Subject.describe()

# + [`session`](https://github.com/datajoint/element-session): General information of experimental sessions.

dj.Diagram(session)

# + [session](https://github.com/datajoint/element-session): experimental session information
session.Session.describe();
session.Session.describe()

# + [`ephys`](https://github.com/datajoint/element-array-ephys): Neuropixel based probe and ephys information

Expand Down
Loading

0 comments on commit 020ce9a

Please sign in to comment.