diff --git a/.gitignore b/.gitignore
index 00cd680..8dae850 100644
--- a/.gitignore
+++ b/.gitignore
@@ -26,3 +26,5 @@ yarn-debug.log*
yarn-error.log*
yarn.lock
node_modules
+
+.vercel
diff --git a/package.json b/package.json
index 36aae81..ac002b5 100644
--- a/package.json
+++ b/package.json
@@ -16,6 +16,8 @@
"@types/react-dom": "^17.0.0",
"@types/react-rangeslider": "^2.2.3",
"@types/react-router-dom": "^5.1.7",
+ "@types/react-router-hash-link": "^2.4.0",
+ "@types/react-scrollspy": "^3.3.3",
"d3": "6.7.0",
"enzyme": "^3.11.0",
"enzyme-adapter-react-16": "^1.15.6",
@@ -25,7 +27,9 @@
"react-markdown": "^6.0.2",
"react-rangeslider": "^2.2.0",
"react-router-dom": "^5.2.0",
+ "react-router-hash-link": "^2.4.3",
"react-scripts": "4.0.3",
+ "react-scrollspy": "^3.4.3",
"rehype-katex": "^5.0.0",
"remark-gfm": "^1.0.0",
"remark-math": "^4.0.0",
diff --git a/src/App.tsx b/src/App.tsx
index 994c043..b490273 100644
--- a/src/App.tsx
+++ b/src/App.tsx
@@ -1,58 +1,84 @@
+import React from "react"
+import { Card, Container } from "react-bootstrap"
import { BrowserRouter } from "react-router-dom"
import "./App.css"
-import ConfigPanelWrapper from "./components/ConfigPanelWrapper"
+import Algorithms, { AlgorithmEntry } from "./components/Algorithms/Algorithms"
+// import ConfigPanelWrapper from "./components/ConfigPanelWrapper"
import ExpandingHeatmapTable from "./components/ExpandingHeatmapTable"
-import MetricsDescription from "./components/MetricsDescription/MetricsDescription"
+import Preloader from "./components/Shared/Preloader"
+// import MetricsDescription from "./components/MetricsDescription/MetricsDescription"
import "./index.css"
+import algosJson from "./sampleData/algos.json"
import {
- expandingHeatmapTableSampleHeader as header,
- expandingHeatmapTableSampleRows as tableRows
+ expandingHeatmapTableSampleHeader as header,
+ expandingHeatmapTableSampleRows as tableRows
} from "./sampleData/expandingHeatmapTableTestData"
+
// import { basicConfig } from "./sampleData/HeatmapConfigTestData"
function App() {
// const columnConfig = {...basicConfig, useColumnFormat: true}
// const cpuConfig = {...basicConfig, format: 'cpu' as FormatType, showCPU: true }
return (
-        <div className="App">
-            <ExpandingHeatmapTable
-                header={header}
-                rows={tableRows}
-                onCellSelected={(cell) => {
-                    alert(`Selected cell ${cell.id}`)
-                }}
-            />
-            <h3>Whole Thing</h3>
-            {/* <div>
-                <h3>Row configuration</h3>
-                <ConfigPanelWrapper config={columnConfig} />
-            </div> */}
-            <p>learn react or the default test will be sad at you</p>
-            <MetricsDescription />
-        </div>
+        <BrowserRouter>
+            <Container>
+                <Card>
+                    <Card.Body>
+                        <ExpandingHeatmapTable
+                            header={header}
+                            rows={tableRows}
+                            onCellSelected={(cell) => {
+                                alert(`Selected cell ${cell.id}`)
+                            }}
+                        />
+                    </Card.Body>
+                </Card>
+                <p>learn react or the default test will be sad at you</p>
+                {/* <ConfigPanelWrapper config={columnConfig} /> */}
+                <Card>
+                    <Card.Body>
+                        <h3>This is what a fetch failure should look like:</h3>
+                        <Preloader fetchFailure />
+                    </Card.Body>
+                </Card>
+                <Card>
+                    <Card.Body>
+                        <h3>This is the Algorithms section:</h3>
+                        <Algorithms algorithms={algosJson.algorithms as unknown as AlgorithmEntry[]} />
+                    </Card.Body>
+                </Card>
+                {/* <div>
+                    <h3>Row configuration</h3>
+                    <ConfigPanelWrapper config={columnConfig} />
+                </div> */}
+                {/* <MetricsDescription /> */}
+            </Container>
+        </BrowserRouter>
)
}
diff --git a/src/components/Algorithms/AlgorithmCard.tsx b/src/components/Algorithms/AlgorithmCard.tsx
new file mode 100644
index 0000000..14f3a75
--- /dev/null
+++ b/src/components/Algorithms/AlgorithmCard.tsx
@@ -0,0 +1,75 @@
+// Replaces former ListCard
+
+import { FunctionComponent } from "react"
+import { Col } from "react-bootstrap"
+import ReactMarkdown from 'react-markdown'
+import { isEmpty } from "../util"
+import { AlgorithmEntry } from "./Algorithms"
+
+const parseDescription = (rawMarkdown: string) => {
+    // Assume markdown is a single string, with a name or something, followed
+    // by a heading ## Description, followed by some more markdown,
+    // ending in a ## References section we don't care about.
+    // Return only the part between the ## Description header and the start of the
+    // ## References header.
+    // This is brittle but works well enough for now.
+    const description = rawMarkdown.split("Description")[1]
+    const before_references = description.split("## References")[0]
+    return before_references
+}
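+// e.g. parseDescription("# X\n\n## Description\n\nSorter details.\n\n## References\n...")
+//   returns "\n\nSorter details.\n\n"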
+
+const AlgorithmCard: FunctionComponent<AlgorithmEntry> = (Props: AlgorithmEntry) => {
+    return (
+        <Col md={12} id={Props.label}>
+            {isEmpty(Props) ? //TODO: is this still needed/possible?
+                <div>...</div>
+                :
+                <div>
+                    <h3>
+                        <a href={Props.website} target="_blank" rel="noopener noreferrer">
+                            {Props.label}
+                        </a>
+                    </h3>
+                    {/* Probably don't need the following check */}
+                    {Props.dockerfile ? (
+                        <a href={Props.dockerfile} target="_blank" rel="noopener noreferrer">
+                            View Docker
+                        </a>
+                    ) : (
+                        <span>
+                            {Props.env_name} {/*SHOULD BE UNREACHABLE?*/}
+                        </span>
+                    )}
+                    <p>By {Props.authors}</p>
+                    <ReactMarkdown>
+                        {parseDescription(Props.markdown)}
+                    </ReactMarkdown>
+                </div>
+            }
+        </Col>
+    )
+}
+
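+// Rendered from ExtantAlgorithms as <AlgorithmCard key={item.label} {...item} />,
+// one card per entry in the algorithms list.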
+export default AlgorithmCard
diff --git a/src/components/Algorithms/AlgorithmOverview.md b/src/components/Algorithms/AlgorithmOverview.md
new file mode 100644
index 0000000..1b77e34
--- /dev/null
+++ b/src/components/Algorithms/AlgorithmOverview.md
@@ -0,0 +1,20 @@
+Generally speaking, a spike sorting algorithm takes in an unfiltered
+multi-channel timeseries (a.k.a. a recording) and a dictionary of algorithm
+parameters, and outputs a list of firing times with associated integer
+unit labels. This page lists the spike sorting codes we run, as well as
+some that have yet to be incorporated. Most of the codes were developed
+at other institutions; two of them are in-house.
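+
+As an illustrative sketch only (the actual wrappers are Python functions,
+linked below), that input/output contract could be written as:
+
+```typescript
+// Hypothetical types for illustration; these names are not part of any real API.
+interface SortingInput {
+    recording: number[][]                 // multi-channel timeseries, [channel][sample]
+    parameters: { [name: string]: any }   // sorter-specific parameter dictionary
+}
+interface SortingOutput {
+    firingTimes: number[]   // when each detected spike occurred
+    unitLabels: number[]    // integer unit label for each firing time
+}
+```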
+
+SpikeForest runs each algorithm through a Python wrapper; links to these
+may be found in the "Wrapper" link for each entry. For the non-MATLAB
+sorters, we use Singularity containers (similar to Docker containers)
+to ensure a reproducible compute environment. In those cases,
+links to the Dockerfiles (environment prescriptions) are provided. We
+almost always use the wrappers' default parameters, but some may
+be overridden in the
+[analysis configuration files](https://github.com/flatironinstitute/spikeforest/tree/master/working/main_analysis).
+
+Wrappers were created in collaboration with the
+[SpikeInterface](https://github.com/SpikeInterface/)
+project. The goal is ultimately to merge these with the corresponding wrappers
+in SpikeInterface/SpikeToolkit.
diff --git a/src/components/Algorithms/Algorithms.tsx b/src/components/Algorithms/Algorithms.tsx
new file mode 100644
index 0000000..b2d1837
--- /dev/null
+++ b/src/components/Algorithms/Algorithms.tsx
@@ -0,0 +1,354 @@
+import React, { FunctionComponent, useEffect, useState } from "react"
+import { Card, Col, Container, Row } from "react-bootstrap"
+import ReactMarkdown from "react-markdown"
+import Preloader from "../Shared/Preloader"
+import Sidebar from "../Shared/Sidebar"
+import { toTitleCase } from "../util"
+import AlgorithmCard from "./AlgorithmCard"
+import overviewMd from './AlgorithmOverview.md'
+
+// TODO: URL type for dockerfile, wrapper, website, markdown_link?
+export interface AlgorithmEntry {
+    authors: string,
+    dockerfile: string,
+    env_name?: string,
+    // MORE TBD--TODO: Is it a good idea to list these explicitly? How likely is the set to expand?
+    label: "IRONCLUST" | "KILOSORT" | "TRIDESCLOUS" | "HERDING_SPIKES_2" | "JRCLUST" |
+        "SPYKING_CIRCUS" | "KILOSORT2" | "KLUSTA" | "Waveclus" | "YASS" | "MOUNTAINSORT4",
+    markdown: string,
+    markdown_link: string,
+    website: string, // link to the GitHub repo for the sorter
+    wrapper: string, // wrapper function in SpikeForest for the sorter
+    wrapper_link?: string // populated with a link to the SF GitHub for the wrapper
+}
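+
+// For reference, a (truncated) entry from src/sampleData/algos.json that this
+// interface is meant to describe:
+// {
+//     "label": "IRONCLUST",
+//     "authors": "James J. Jun",
+//     "dockerfile": "https://github.com/flatironinstitute/spikeforest2/blob/master/spikeforest2/sorters/ironclust/container/Dockerfile",
+//     "wrapper": "https://github.com/flatironinstitute/spikeforest2/blob/master/spikeforest2/sorters/ironclust/_ironclust.py",
+//     "website": "https://github.com/jamesjun/ironclust",
+//     "markdown_link": "https://github.com/flatironinstitute/spikeforest/blob/master/spikeforest/spikeforestsorters/descriptions/alg_ironclust.md",
+//     "markdown": "...\n## Description\n...\n## References"
+// }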
+
+interface AlgorithmsPageProps {
+ algorithms: AlgorithmEntry[],
+ overviewMd?: string
+}
+
+const sortAlgorithmEntries = (entries: AlgorithmEntry[]): AlgorithmEntry[] => {
+    const sorted = entries.sort((a, b) => {
+        // entries with wrappers sort before those without
+        if (a.wrapper && !b.wrapper) return -1
+        if (!a.wrapper && b.wrapper) return 1
+        // otherwise sort by label, compared case-insensitively
+        const aLabel = a.label.toUpperCase()
+        const bLabel = b.label.toUpperCase()
+        return aLabel < bLabel ? -1 : aLabel > bLabel ? 1 : 0
+    })
+    return sorted
+}
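+// e.g. entries labeled KILOSORT and IRONCLUST (both with wrappers) sort as
+// [IRONCLUST, KILOSORT]; any entry without a wrapper sorts after both.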
+
+
+// TODO: handleClick handler??
+const Algorithms: FunctionComponent<AlgorithmsPageProps> = (Props: AlgorithmsPageProps) => {
+ const [md, setMd] = useState('')
+    useEffect(() => {
+        fetch(overviewMd)
+            .then((res) => res.text())
+            .then((text) => setMd(text))
+    }, []) // run once on mount so we don't refetch on every render
+    return (
+        <div>
+            {Props.algorithms.length === 0 || md === '' ? (
+                <EmptyAlgorithms {...Props} />
+            ) : (
+                <ExtantAlgorithms algorithms={Props.algorithms} overviewMd={md} />
+            )}
+        </div>
+    )
+}
+
+const EmptyAlgorithms: FunctionComponent<AlgorithmsPageProps> = (Props: AlgorithmsPageProps) => {
+    return (
+        <Preloader fetchFailure={Props.algorithms.length === 0} />
+    )
+}
+
+const ExtantAlgorithms: FunctionComponent<AlgorithmsPageProps> = (Props: AlgorithmsPageProps) => {
+    const sidebarTitle = "Algorithms In Use"
+    const sidebarItems = Props.algorithms.map(item => ({
+        name: toTitleCase(item.label.replace(/_/g, " ").toLowerCase()),
+        value: item.label
+    }))
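+    // e.g. yields { name: "Herding Spikes 2", value: "HERDING_SPIKES_2" }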
+
+    return (
+        <Container>
+            <Row>
+                <Col md={3}>
+                    <Sidebar
+                        listTitle={sidebarTitle}
+                        listItems={sidebarItems}
+                    />
+                </Col>
+                <Col md={9}>
+                    <Card id="overview">
+                        <Card.Body>
+                            {/*TODO: This doesn't set rel="noopener noreferrer" properly.
+                            There is a remark extension https://github.com/remarkjs/remark-external-links
+                            that ought to do it, but it doesn't look like it's been fully supported yet,
+                            and the usage is non-obvious anyway. Leaving the linkTarget setting for now,
+                            but we might just tell people to right-click if they want a new window.
+                            */}
+                            <ReactMarkdown linkTarget="_blank">
+                                {Props.overviewMd || ''}
+                            </ReactMarkdown>
+                        </Card.Body>
+                    </Card>
+                    <Row>
+                        {Props.algorithms.map(item => (
+                            <AlgorithmCard key={item.label} {...item} />
+                        ))}
+                    </Row>
+                </Col>
+            </Row>
+        </Container>
+    )
+}
+
+
+// class Algorithms extends Component {
+// constructor(props: any) {
+// super(props);
+// this.state = {
+// rows: []
+// };
+// // this.handleClick = this.basename.bind(this); // QUERY: Why this??
+// }
+
+// // I don't think we need these? QUERY
+// componentDidMount() {
+// if (this.props.algorithms && this.props.algorithms.length) {
+// this.filterActives();
+// }
+// }
+
+// componentDidUpdate(prevProps) {
+// if (this.props.algorithms !== prevProps.algorithms) {
+// this.filterActives();
+// }
+// }
+
+// basename(path) {
+// return path.split("/").reverse()[0];
+// }
+
+  // parseDescription(markdown) {
+  //   let par1 = markdown.split("Description")[1];
+  //   let useable = par1.split("## References")[0];
+  //   return useable;
+  // }
+
+  // sortRows(rows) {
+  //   let sorted = rows.sort((a, b) => {
+  //     if (a.wrapper && !b.wrapper) return -1;
+  //     if (!a.wrapper && b.wrapper) return 1;
+  //     let textA = a.raw_label.toUpperCase();
+  //     let textB = b.raw_label.toUpperCase();
+  //     return textA < textB ? -1 : textA > textB ? 1 : 0;
+  //   });
+  //   return sorted;
+  // }
+
+  // Curiously, it's named 'filter' but it never applies any filtering.
+  // QUERY: THIS
+  // filterActives() {
+  //   let rows = this.props.algorithms.map(alg => {
+  //     let row = {
+  //       raw_label: alg.label,
+  //       label: alg.label,
+  //       processor_name: alg.processor_name,
+  //       authors: alg.authors,
+  //       notes: alg.notes,
+  //       environment: "",
+  //       wrapper: "",
+  //       markdown: "",
+  //       markdown_link: "",
+  //       website: "/",
+  //       wrapper_link: "/",
+  //       env_name: "tbd",
+  //       env_link: "/"
+  //     };
+  //     if (alg.dockerfile) { // always true
+  //       row.environment = `${this.basename(alg.dockerfile)}`;
+  //       row.env_name = "Docker";
+  //       row.env_link = alg.dockerfile;
+  //     } else if (alg.environment) { // thus can't happen
+  //       row.environment = `${alg.environment}`;
+  //       row.env_name = alg.environment;
+  //     }
+  //     if (alg.wrapper) {
+  //       // row.wrapper is never used
+  //       row.wrapper = `${this.basename(
+  //         alg.wrapper
+  //       )}`;
+  //       row.wrapper_link = alg.wrapper;
+  //     }
+  //     if (alg.markdown_link) {
+  //       row.markdown_link = `${this.basename(alg.markdown_link)}`;
+  //     }
+  //     if (alg.markdown) {
+  //       row.markdown = this.parseDescription(alg.markdown);
+  //     }
+  //     if (alg.website) {
+  //       // NOTHING EVER ACTUALLY USES THIS
+  //       row.label = `${alg.label}`;
+  //       row.website = alg.website;
+  //     }
+  //     return row;
+  //   });
+  //   let sorted = this.sortRows(rows);
+  //   this.setState({ rows: sorted });
+  // }
+
+// render() {
+//   // let loading = isEmpty(this.state.rows);
+//   // let listCards;
+//   // if (this.state.rows) {
+//   //   listCards = this.state.rows.map((row, index) => (
+//   //     <ListCard key={index} {...row} />
+//   //   ));
+//   // }
+//   // let sidebarItems = this.state.rows.map(row => ({
+//   //   name: toTitleCase(row.raw_label.replace(/_/g, " ").toLowerCase()),
+//   //   value: row.raw_label
+//   // }));
+//   // sidebarItems.unshift({ name: "Overview", value: "overview" });
+//   return (
+//     <div>foo</div>
+//   )
+//   // The commented-out markup that followed rendered a Preloader while
+//   // loading, then a Sidebar beside an "overview" card and {listCards};
+//   // the overview text now lives in AlgorithmOverview.md and the layout
+//   // in ExtantAlgorithms above.
+// }
+// }
+
+export default Algorithms
diff --git a/src/components/MetricsDescription/MetricsDescription.md b/src/components/MetricsDescription/MetricsDescription.md
index 7c07d5d..e8b9185 100644
--- a/src/components/MetricsDescription/MetricsDescription.md
+++ b/src/components/MetricsDescription/MetricsDescription.md
@@ -124,4 +124,4 @@ create rapid decay in impulse response in the time domain.
\[1\] James Jaeyoon Jun, Catalin Mitelut, Chongxi Lai, Sergey Gratiy, Costas Anastassiou
and Timothy D Harris,
*[Real-time spike sorting platform for high-density extracellular probes with ground truth validation and drift correction](https://www.biorxiv.org/content/10.1101/101030v2)*.
-bioRxiv 101030 (2017), doi: [https://doi.org/10.1101/101030](https://doi.org/10.1101/101030).
\ No newline at end of file
+bioRxiv 101030 (2017), doi: [https://doi.org/10.1101/101030](https://doi.org/10.1101/101030).
diff --git a/src/components/MetricsDescription/MetricsDescription.tsx b/src/components/MetricsDescription/MetricsDescription.tsx
index 1dd0aed..181e052 100644
--- a/src/components/MetricsDescription/MetricsDescription.tsx
+++ b/src/components/MetricsDescription/MetricsDescription.tsx
@@ -8,10 +8,7 @@ const MetricsDescription = () => {
useEffect(() => {
fetch(mdPath)
.then((res) => res.text())
- .then((text) => {
- console.log(`Got text ${text}`)
- setMd(text)
- })
+ .then((text) => setMd(text))
})
return(
diff --git a/src/components/Shared/Preloader.tsx b/src/components/Shared/Preloader.tsx
new file mode 100644
index 0000000..bae674c
--- /dev/null
+++ b/src/components/Shared/Preloader.tsx
@@ -0,0 +1,41 @@
+import React, { FunctionComponent } from "react";
+import { Container } from "react-bootstrap";
+import logo from "./logo-animation.gif";
+import "./preloader.css";
+
+interface PreloaderProps {
+ message?: string,
+ fetchFailure?: boolean
+}
+
+const Preloader: FunctionComponent<PreloaderProps> = (Props: PreloaderProps) => {
+    const copy = Props.message
+        ? Props.message
+        : Props.fetchFailure
+            ? "Unable to fetch data. Please refresh the page and try again."
+            : "Loading"
+    const subcopy = (!Props.message && Props.fetchFailure)
+        ? (
+            <small>
+                If you see this message repeatedly, please check your
+                internet connection, as you may have insufficient
+                bandwidth to load this site.
+            </small>
+        ) : ( // QUERY: Why this?
+            <><span>.</span><span>.</span><span>.</span></>
+        )
+    return (
+        <Container className="preloader-card">
+            <div className="preloader">
+                <h2 className="preloader__header">
+                    {copy}
+                    {subcopy}
+                </h2>
+                <img src={logo} className="preloader__image" alt="SpikeForest logo animation" />
+            </div>
+        </Container>
+    )
+}
+
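+// Usage (as in App.tsx): <Preloader /> renders the animated "Loading" state,
+// while <Preloader fetchFailure /> renders the fetch-failure message.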
+export default Preloader
\ No newline at end of file
diff --git a/src/components/Shared/Sidebar.tsx b/src/components/Shared/Sidebar.tsx
new file mode 100644
index 0000000..946ad5e
--- /dev/null
+++ b/src/components/Shared/Sidebar.tsx
@@ -0,0 +1,41 @@
+import React, { FunctionComponent } from "react"
+import { HashLink } from "react-router-hash-link"
+import Scrollspy from "react-scrollspy"
+
+interface SidebarProps {
+ listTitle: string,
+ listItems: { name: string, value: string }[]
+}
+
+const Sidebar: FunctionComponent<SidebarProps> = (Props: SidebarProps) => {
+ const scrollToTop = () => {
+ window.scrollTo(0, 0);
+ }
+
+ const path = window.location.pathname + "#"
+
+    return (
+        <div className="sidebar">
+            <h4 onClick={scrollToTop}>
+                {Props.listTitle}
+            </h4>
+            <Scrollspy
+                items={Props.listItems.map(item => item.value)}
+                currentClassName="sidebar-selected"
+                className="sidebar-list"
+            >
+                {Props.listItems.map((item, index) => (
+                    <li key={index}>
+                        <HashLink smooth to={`${path}${item.value}`}>
+                            {item.name}
+                        </HashLink>
+                    </li>
+                ))}
+            </Scrollspy>
+        </div>
+    )
+}
+
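+// Example (as used by ExtantAlgorithms): listItems like
+// [{ name: "Ironclust", value: "IRONCLUST" }] produce HashLinks to "#IRONCLUST",
+// and Scrollspy highlights the item whose target element is in view.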
+export default Sidebar
diff --git a/src/components/Shared/logo-animation.gif b/src/components/Shared/logo-animation.gif
new file mode 100644
index 0000000..9a17e51
Binary files /dev/null and b/src/components/Shared/logo-animation.gif differ
diff --git a/src/components/Shared/preloader.css b/src/components/Shared/preloader.css
new file mode 100644
index 0000000..11a46fc
--- /dev/null
+++ b/src/components/Shared/preloader.css
@@ -0,0 +1,60 @@
+/* Preloader
+–––––––––––––––––––––––––––––––––––––––––––––––––– */
+
+.preloader {
+ display: flex;
+ flex-direction: column;
+ align-items: center;
+ justify-content: center;
+ font-size: calc(10px + 2vmin);
+ padding: 5.2rem 1rem;
+ }
+ .preloader__image {
+ /* animation: logo-spin infinite 15s linear; */
+ height: 10vmin;
+ }
+ @keyframes logo-spin {
+ from {
+ transform: rotate(0deg);
+ }
+ to {
+ transform: rotate(360deg);
+ }
+ }
+ .preloader__header {
+ text-align: center;
+ margin: 0 auto;
+ margin-bottom: 3rem;
+ font-size: 3rem;
+ font-weight: 300;
+ font-family: var(--sans);
+ }
+
+ .preloader__header span {
+ animation-name: blink;
+ animation-duration: 1.4s;
+ animation-iteration-count: infinite;
+ animation-fill-mode: both;
+ }
+ .preloader__header span:nth-child(2) {
+ animation-delay: 0.2s;
+ }
+ .preloader__header span:nth-child(3) {
+ animation-delay: 0.4s;
+ }
+ @keyframes blink {
+ 0% {
+ opacity: 0.2;
+ }
+ 20% {
+ opacity: 1;
+ }
+ 100% {
+ opacity: 0.2;
+ }
+ }
+
+ .preloader-card {
+ min-height: 80vh;
+ }
+
\ No newline at end of file
diff --git a/src/components/util.ts b/src/components/util.ts
index 734e4c1..f0917b5 100644
--- a/src/components/util.ts
+++ b/src/components/util.ts
@@ -5,3 +5,20 @@ export const toTitleCase = (text: string) => {
return t.charAt(0).toUpperCase() + t.substr(1).toLowerCase()
})
}
+
+// NB this is a variation
+export const isEmpty = (obj: any): boolean => Object.entries(obj).length === 0
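+// e.g. isEmpty({}) === true; isEmpty({ label: "YASS" }) === false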
+
+// Original version:
+// export const isEmpty = (obj: any): boolean => {
+//     for (const key in obj) {
+//         if (obj.hasOwnProperty(key)) return false
+//     }
+//     return true
+// }
+
+// export const target_new_window = {
+//     link: (props: any) => {
+//         return <a href={props.href} target="_blank" rel="noopener noreferrer">{props.children}</a>
+//     }
+// }
\ No newline at end of file
diff --git a/src/sampleData/algos.json b/src/sampleData/algos.json
new file mode 100644
index 0000000..2122024
--- /dev/null
+++ b/src/sampleData/algos.json
@@ -0,0 +1,130 @@
+{
+ "algorithms": [
+ {
+ "_id": "5e795ef5d4f81c62f9a1a7b8",
+ "label": "IRONCLUST",
+ "dockerfile": "https://github.com/flatironinstitute/spikeforest2/blob/master/spikeforest2/sorters/ironclust/container/Dockerfile",
+ "environment": "MATLAB",
+ "wrapper": "https://github.com/flatironinstitute/spikeforest2/blob/master/spikeforest2/sorters/ironclust/_ironclust.py",
+ "website": "https://github.com/jamesjun/ironclust",
+ "authors": "James J. Jun",
+ "markdown_link": "https://github.com/flatironinstitute/spikeforest/blob/master/spikeforest/spikeforestsorters/descriptions/alg_ironclust.md",
+ "markdown": "_\n# IRONCLUST\n\n## Description\n\nSpike sorting software developed at Flatiron Institute, based on JRCLUST (Janelia Rocket Clust).\n\n## Installation notes\n\n**Prerequisites:**\n\n* MATLAB with the following\n - Statistics and Machine Learning Toolbox\n - Parallel Computing Toolbox\n\n## References",
+ "__v": 0
+ },
+ {
+ "_id": "5e795ef5d4f81c62f9a1a7b7",
+ "label": "KILOSORT",
+ "dockerfile": "https://github.com/flatironinstitute/spikeforest2/blob/master/spikeforest2/sorters/kilosort/container/Dockerfile",
+ "environment": "MATLAB",
+ "wrapper": "https://github.com/flatironinstitute/spikeforest2/blob/master/spikeforest2/sorters/kilosort/_kilosort.py",
+ "website": "https://github.com/cortex-lab/KiloSort",
+ "authors": "Marius Pachitariu",
+ "markdown_link": "https://github.com/flatironinstitute/spikeforest/blob/master/spikeforest/spikeforestsorters/descriptions/alg_kilosort.md",
+ "markdown": "_\n# KILOSORT\n\n## Description\n\n*From the KiloSort website*: Fast spike sorting for hundreds of channels. Implements an integrated template matching framework for detecting and clustering spikes from multi-channel electrophysiological recordings.\n\n## References\n\n[1] Pachitariu, Marius, et al. \"Fast and accurate spike sorting of high-channel count probes with KiloSort.\" Advances in Neural Information Processing Systems. 2016.\n\n[2] Pachitariu, Marius, et al. \"Kilosort: realtime spike-sorting for extracellular electrophysiology with hundreds of channels.\" BioRxiv (2016): 061481.",
+ "__v": 0
+ },
+ {
+ "_id": "5e795ef5d4f81c62f9a1a7b5",
+ "label": "TRIDESCLOUS",
+ "dockerfile": "https://github.com/flatironinstitute/spikeforest2/blob/master/spikeforest2/sorters/tridesclous/container/Dockerfile",
+ "wrapper": "https://github.com/flatironinstitute/spikeforest2/blob/master/spikeforest2/sorters/tridesclous/_tridesclous.py",
+ "website": "https://github.com/tridesclous/tridesclous",
+ "authors": "Samuel Garcia and Christophe Pouzat",
+ "markdown_link": "https://github.com/flatironinstitute/spikeforest/blob/master/spikeforest/spikeforestsorters/descriptions/alg_tridesclous.md",
+ "markdown": "_\n# TRIDESCLOUS\n\n## Description\n\nFrom the Tridesclous website: The primary goal of tridesclous is to provide a toolkit to teach good practices in spike sorting techniques.\nThis tools is now mature and can be used for experimental data.\n\n## References",
+ "__v": 0
+ },
+ {
+ "_id": "5e795ef5d4f81c62f9a1a7b9",
+ "label": "HERDING_SPIKES_2",
+ "dockerfile": "https://github.com/flatironinstitute/spikeforest2/blob/master/spikeforest2/sorters/herdingspikes/container/Dockerfile",
+ "wrapper": "https://github.com/flatironinstitute/spikeforest2/blob/master/spikeforest2/sorters/herdingspikes2/_herdingspikes2.py",
+ "website": "https://github.com/mhhennig/HS2",
+ "authors": "Matthias Hennig, Jano Horvath, Cole Hurwitz, Oliver Muthmann, Albert Puente Encinas, Martino Sorbaro, Cesar Juarez Ramirez, and Raimon Wintzer",
+ "markdown_link": "https://github.com/flatironinstitute/spikeforest/blob/master/spikeforest/spikeforestsorters/descriptions/alg_herding_spikes_2.md",
+ "markdown": "# HERDING_SPIKES_2\n\n## Description\n\n*From the HerdingSpikes2 website*: This software provides functionality for high density electrophysiology. Detection, localisation and clustering of spike data from dense multielectrode arrays.\n\n## References\n[1] J.-O. Muthmann, H. Amin, E. Sernagor, A. Maccione, D. Panas, L. Berdondini, U.S. Bhalla, M.H. Hennig MH (2015). [Spike detection for large neural populations using high density multielectrode arrays](http://journal.frontiersin.org/article/10.3389/fninf.2015.00028/abstract). Front. Neuroinform. 9:28. doi: 10.3389/fninf.2015.00028.\n\n[2] G. Hilgen, M. Sorbaro, S. Pirmoradian, J.-O. Muthmann, I. Kepiro, S. Ullo, C. Juarez Ramirez, A. Puente Encinas, A. Maccione, L. Berdondini, V. Murino, D. Sona, F. Cella Zanacchi, E. Sernagor, M.H. Hennig (2016). [Unsupervised spike sorting for large scale, high density multielectrode arrays.](http://www.cell.com/cell-reports/fulltext/S2211-1247(17)30236-X) Cell Reports 18, 2521–2532. bioRxiv: .",
+ "__v": 0
+ },
+ {
+ "_id": "5e795ef5d4f81c62f9a1a7b6",
+ "label": "JRCLUST",
+ "dockerfile": "https://github.com/flatironinstitute/spikeforest2/blob/master/spikeforest2/sorters/jrclust/container/Dockerfile",
+ "environment": "MATLAB",
+ "wrapper": "https://github.com/flatironinstitute/spikeforest2/blob/master/spikeforest2/sorters/jrclust/_jrclust.py",
+ "website": "https://github.com/JaneliaSciComp/JRCLUST",
+ "authors": "James Jun and Alan Liddell",
+ "markdown_link": "https://github.com/flatironinstitute/spikeforest/blob/master/spikeforest/spikeforestsorters/descriptions/alg_jrclust.md",
+ "markdown": "# JRCLUST\n\n## Description\n\nFrom the website: JRCLUST is a scalable and customizable package for spike sorting on high-density silicon probes. It is written in MATLAB and CUDA. JRCLUST was originally developed by James Jun and is currently maintained by Vidrio Technologies.\n\n## References\n[1] Jun, JJ et al., Real-time spike sorting platform for high-density extracellular probes with ground-truth validation and drift correction. bioRxiv, 101030; doi: https://doi.org/10.1101/101030",
+ "__v": 0
+ },
+ {
+ "_id": "5e795ef5d4f81c62f9a1a7b2",
+ "label": "SPYKING_CIRCUS",
+ "dockerfile": "https://github.com/flatironinstitute/spikeforest2/blob/master/spikeforest2/sorters/spykingcircus/container/Dockerfile",
+ "wrapper": "https://github.com/flatironinstitute/spikeforest2/blob/master/spikeforest2/sorters/spykingcircus/spykingcircus.py",
+ "website": "https://spyking-circus.readthedocs.io/en/latest/",
+ "authors": "Pierre Yger and Olivier Marre",
+ "markdown_link": "https://github.com/flatironinstitute/spikeforest/blob/master/spikeforest/spikeforestsorters/descriptions/alg_spyking_circus.md",
+ "markdown": "_\n# SPYKING_CIRCUS\n\n## Description\n\nFrom the SpyKING CIRCUS website: The SpyKING CIRCUS is a massively parallel code to perform semi automatic spike sorting on large extra-cellular recordings. Using a smart clustering and a greedy template matching approach, the code can solve the problem of overlapping spikes, and has been tested both for in vitro and in vivo data, from tens of channels to up to 4225 channels.\n\n## References\n\nYger P., Spampinato, G.L.B, Esposito E., Lefebvre B., Deny S., Gardella C., Stimberg M., Jetter F., Zeck G. Picaud S., Duebel J., Marre O., A spike sorting toolbox for up to thousands of electrodes validated with ground truth recordings in vitro and in vivo, eLife 2018;7:e34518",
+ "__v": 0
+ },
+ {
+ "_id": "5e795ef5d4f81c62f9a1a7b0",
+ "label": "KILOSORT2",
+ "dockerfile": "https://github.com/flatironinstitute/spikeforest2/blob/master/spikeforest2/sorters/kilosort2/container/Dockerfile",
+ "environment": "MATLAB",
+ "wrapper": "https://github.com/flatironinstitute/spikeforest2/blob/master/spikeforest2/sorters/kilosort2/_kilosort2.py",
+ "website": "https://github.com/MouseLand/Kilosort2",
+ "authors": "Marius Pachitariu",
+ "markdown_link": "https://github.com/flatironinstitute/spikeforest/blob/master/spikeforest/spikeforestsorters/descriptions/alg_kilosort2.md",
+ "markdown": "_\n# KILOSORT2\n\n## Description\n\n*From the KiloSort2 website*: Kilosort2: automated spike sorting with drift tracking and template matching on GPUs\n\nA Matlab package for spike sorting electrophysiological data up to 1024 channels. In many cases, and especially for Neuropixels probes, the automated output of Kilosort2 requires minimal manual curation.\n\n## Installation notes\n\nRequires CUDA toolkit to be installed\n\n## References",
+ "__v": 0
+ },
+ {
+ "_id": "5e795ef5d4f81c62f9a1a7b1",
+ "label": "KLUSTA",
+ "dockerfile": "https://github.com/flatironinstitute/spikeforest2/blob/master/spikeforest2/sorters/klusta/container/Dockerfile",
+ "wrapper": "https://github.com/flatironinstitute/spikeforest2/blob/master/spikeforest2/sorters/klusta/_klusta.py",
+ "website": "https://github.com/kwikteam/klusta",
+ "authors": "Cyrille Rossant, Shabnam Kadir, Dan Goodman, Max Hunter, and Kenneth Harris",
+ "markdown_link": "https://github.com/flatironinstitute/spikeforest/blob/master/spikeforest/spikeforestsorters/descriptions/alg_klusta.md",
+ "markdown": "_\n# KLUSTA\n\n## Description\n*From the Klusta website*: Klusta: automatic spike sorting up to 64 channel. \n\n[**klusta**]\n(https://github.com/kwikteam/klusta) is an open source package for automatic spike sorting of multielectrode neurophysiological recordings made with probes containing up to a few dozens of sites.\n\n**klusta** implements the following features:\n\n* **Kwik**: An HDF5-based file format that stores the results of a spike sorting session.\n* **Spike detection** (also known as SpikeDetekt): an algorithm designed for probes containing tens of channels, based on a flood-fill algorithm in the adjacency graph formed by the recording sites in the probe.\n* **Automatic clustering** (also known as Masked KlustaKwik): an automatic clustering algorithm designed for high-dimensional structured datasets.\n\n\n\n## References\n[1] Rossant, Cyrille, et al. (2016). Spike sorting for large, dense electrode arrays. Nature Neuroscience, 19, 634–641.\n[2] Kadir, S.N., Goodman, D.F. & Harris, K.D. (2014). High-dimensional cluster analysis with the masked EM algorithm. Neural Comput. 26, 2379–2394.",
+ "__v": 0
+ },
+ {
+ "_id": "5e795ef5d4f81c62f9a1a7b3",
+ "label": "Waveclus",
+ "dockerfile": "https://github.com/flatironinstitute/spikeforest2/blob/master/spikeforest2/sorters/waveclus/container/Dockerfile",
+ "wrapper": "https://github.com/flatironinstitute/spikeforest2/blob/master/spikeforest2/sorters/waveclus/_waveclus.py",
+ "website": "https://github.com/csn-le/wave_clus/wiki",
+ "authors": "F. J. Chaure, H. G. Rey and R. Quian Quiroga",
+ "markdown_link": "https://github.com/flatironinstitute/spikeforest/blob/master/spikeforest/spikeforestsorters/descriptions/alg_waveclus.md",
+ "markdown": "_\n# Waveclus\n\n## Description\n\nWave_clus is a fast and unsupervised algorithm for spike detection and sorting that runs under Windows, Mac or Linux operating systems.\n\nTo install, download this repository into a folder. In MATLAB (R2009b or higher) go to Set Path and add the directory wave_clus with subfolders to the MATLAB path.\n\n\n## References\n\n[1] A novel and fully automatic spike sorting implementation with variable number of features. F. J. Chaure, H. G. Rey and R. Quian Quiroga. Journal of Neurophysiology; 2018. https://doi.org/10.1152/jn.00339.2018\n\n[2] Quian Quiroga R, Nadasdy Z, Ben-Shaul Y. Unsupervised spike detection and sorting with wavelets and superparamagnetic clustering. Neural Comput 16: 1661–1687, 2004. doi:10.1162/089976604774201631.",
+ "__v": 0
+ },
+ {
+ "_id": "5e795ef5d4f81c62f9a1a7ba",
+ "label": "YASS",
+ "dockerfile": "https://github.com/flatironinstitute/spikeforest/blob/master/spikeforest/spikesorters/yass/container/Dockerfile",
+ "environment": "Python",
+ "wrapper": "https://github.com/flatironinstitute/spikeforest/blob/master/spikeforest/spikesorters/yass/yass.py",
+ "website": "https://yass.readthedocs.io/en/latest/",
+ "authors": "Peter Lee, Eduardo Blancas, Nishchal Dethe, Shenghao Wu, Hooshmand Shokri, Calvin Tong, Catalin Mitelut, Liam Paninski",
+ "markdown_link": "https://github.com/flatironinstitute/spikeforest/blob/master/spikeforest/spikeforestsorters/descriptions/alg_yass.md",
+ "markdown": "_\n# YASS\n\n## Description\n\nFrom the YASS bioRxiv paper [2]: This manuscript describes an efficient, reliable pipeline for spike sorting on dense multi-electrode arrays (MEAs), where neural signals appear across many electrodes and spike sorting currently represents a major computational bottleneck. We present several new techniques that make dense MEA spike sorting more robust and scalable. Our pipeline is based on an efficient multi-stage “triage-then-cluster-then-pursuit” approach that initially extracts only clean, high-quality waveforms from the electrophysiological time series by temporarily skipping noisy or “collided” events (representing two neurons firing synchronously). This is accomplished by developing a neural network detection method followed by efficient outlier triaging. The clean waveforms are then used to infer the set of neural spike waveform templates through nonparametric Bayesian clustering. Our clustering approach adapts a “coreset” approach for data reduction and uses efficient inference methods in a Dirichlet process mixture model framework to dramatically improve the scalability and reliability of the entire pipeline. The “triaged” waveforms are then finally recovered with matching-pursuit deconvolution techniques. The proposed methods improve on the state-of-the-art in terms of accuracy and stability on both real and biophysically-realistic simulated MEA data. Furthermore, the proposed pipeline is efficient, learning templates and clustering much faster than real-time for a ≃ 500-electrode dataset, using primarily a single CPU core.\n\nNote: The YASS algorithm is not currently being tested on the SpikeForest site while the authors are working on releasing a newer version.\n\n## References\n\n[1] Lee, Jin Hyung, et al. \"Yass: Yet another spike sorter.\" Advances in Neural Information Processing Systems. 2017.\n\n[2] Paninski, Liam, and John Cunningham. \"Neural data science: accelerating the experiment-analysis-theory cycle in large-scale neuroscience.\" bioRxiv (2017): 196949.",
+ "__v": 0
+ },
+ {
+ "_id": "5e795ef5d4f81c62f9a1a7b4",
+ "label": "MOUNTAINSORT4",
+ "dockerfile": "https://github.com/flatironinstitute/spikeforest2/blob/master/spikeforest2/sorters/mountainsort4/container/Dockerfile",
+ "wrapper": "https://github.com/flatironinstitute/spikeforest2/blob/master/spikeforest2/sorters/mountainsort4/_mountainsort4.py",
+ "website": "https://github.com/flatironinstitute/mountainsort_examples/blob/master/README.md",
+ "authors": "Jeremy Magland, Alex Barnett, Jason Chung, Loren Frank, and Leslie Greengard",
+ "markdown_link": "https://github.com/flatironinstitute/spikeforest/blob/master/spikeforest/spikeforestsorters/descriptions/alg_mountainsort4.md",
+ "markdown": "_\n# MOUNTAINSORT4\n\n## Description\n\nMountainSort is spike sorting algorithm that uses a novel density-based clustering method called ISO-SPLIT.\n\n## References\n\nChung, J. E., Magland, J. F., Barnett, A. H., Tolosa, V. M., Tooker, A. C., Lee, K. Y., ... & Greengard, L. F. (2017). A fully automated approach to spike sorting. Neuron, 95(6), 1381-1394.",
+ "__v": 0
+ }
+ ]
+}