diff --git a/package.json b/package.json index ac002b5..e9abf18 100644 --- a/package.json +++ b/package.json @@ -15,6 +15,7 @@ "@types/react-bootstrap": "^0.32.25", "@types/react-dom": "^17.0.0", "@types/react-rangeslider": "^2.2.3", + "@types/react-router-bootstrap": "^0.24.5", "@types/react-router-dom": "^5.1.7", "@types/react-router-hash-link": "^2.4.0", "@types/react-scrollspy": "^3.3.3", @@ -25,7 +26,9 @@ "react-bootstrap": "^1.5.2", "react-dom": "^17.0.1", "react-markdown": "^6.0.2", + "react-modal-image": "^2.5.0", "react-rangeslider": "^2.2.0", + "react-router-bootstrap": "^0.25.0", "react-router-dom": "^5.2.0", "react-router-hash-link": "^2.4.3", "react-scripts": "4.0.3", diff --git a/src/App.tsx b/src/App.tsx index b490273..6d20cb3 100644 --- a/src/App.tsx +++ b/src/App.tsx @@ -2,9 +2,12 @@ import React from "react" import { Card, Container } from "react-bootstrap" import { BrowserRouter } from "react-router-dom" import "./App.css" +import About from "./components/About/About" import Algorithms, { AlgorithmEntry } from "./components/Algorithms/Algorithms" // import ConfigPanelWrapper from "./components/ConfigPanelWrapper" import ExpandingHeatmapTable from "./components/ExpandingHeatmapTable" +import Footer from './components/Footer/Footer' +import Header from './components/Header/Header' import Preloader from "./components/Shared/Preloader" // import MetricsDescription from "./components/MetricsDescription/MetricsDescription" import "./index.css" @@ -14,53 +17,53 @@ import { expandingHeatmapTableSampleRows as tableRows } from "./sampleData/expandingHeatmapTableTestData" + // import { basicConfig } from "./sampleData/HeatmapConfigTestData" function App() { // const columnConfig = {...basicConfig, useColumnFormat: true} // const cpuConfig = {...basicConfig, format: 'cpu' as FormatType, showCPU: true } return ( - +
+
- - { - alert(`Selected cell ${cell.id}`) - }} - /> - + { + alert(`Selected cell ${cell.id}`) + }} + />
learn react or the default test will be sad at you
-
{/*


*/} - -
-

This is what a fetch failure should look like:

-
- - - - - - - -
+
+ +
+
+

This is what a fetch failure should look like:

+
+ + + + + + +
-
-

This is the Algorithms section:

- - +
+
+

This is the Algorithms section:

+ {/*

Row configuration

@@ -77,8 +80,9 @@ function App() {
*/} {/* */} +
- +
 )
 }

diff --git a/src/components/About/About.tsx b/src/components/About/About.tsx
new file mode 100644
index 0000000..5527d2d
--- /dev/null
+++ b/src/components/About/About.tsx
@@ -0,0 +1,49 @@
+import { FunctionComponent, useEffect, useState } from "react"
+import { Container, Row } from "react-bootstrap"
+import ReactMarkdown from 'react-markdown'
+import { Copy, PageCopy, PageSidebar, parseMarkdownToContentCards, Section } from '../Shared/MarkdownHandling'
+import aboutMd from './AboutContent.md'
+import AboutGraphicalAbstracts from "./AboutGraphicalAbstracts"
+
+const About = () => {
+    const [copyMd, setCopyMd] = useState("")
+    useEffect(() => {
+        fetch(aboutMd)
+            .then((res) => res.text())
+            .then((text) => setCopyMd(text))
+    }, [])  // empty dependency list: fetch the markdown once on mount, not on every render
+
+    const content: Copy = parseMarkdownToContentCards(copyMd)
+    // This is hacky, ought to find a better way to handle it in future
+    if (content.Sections[0]) content.Sections[0].Flavor = "special"
+
+    return(
+
+ + + + + + +
+ ) +} + +const CardHook = (Props: Section) => { + return ( + Props.Flavor === 'special' + ? + : undefined + ) +} + +const OverviewCardWithImages: FunctionComponent
= (Props: Section) => { + return ( + + + + + ) +} + +export default About; diff --git a/src/components/About/AboutContent.md b/src/components/About/AboutContent.md new file mode 100644 index 0000000..7ddc4e4 --- /dev/null +++ b/src/components/About/AboutContent.md @@ -0,0 +1,290 @@ +# About SpikeForest + +## Overview + +Extracellular electrical recording is a popular +and affordable method to measure the +simultaneous spiking activity of a large neural +population. The key computational extraction of +distinct neuronal units and firing times is +known as spike sorting. However, there is a +growing number of automated spike sorting codes, +and much uncertainty and folklore about their +accuracy in various experimental conditions. +Several papers report comparisons on a +case-by-case basis, but there is a lack of +standardized measures and validation data. +Furthermore, there is a potential for bias, such +as sub-optimal tuning of competing algorithms, +and a focus on one brain region or probe type. +Without a fair and transparent comparison, +genuine progress in the field remains difficult. + +Aiming to address this need, SpikeForest is a +reproducible, continuously updating platform +which benchmarks the performance of spike +sorting codes across a large curated database of +electrophysiological recordings with ground +truth. It consists of this website for +presenting our up-to-date findings, a +[Python package](https://github.com/flatironinstitute/spikeforest2) +which contains the tools for running the +SpikeForest analysis, and an expanding +collection of electrophysiology recordings with +ground-truth spiking information. + +We host a variety of experimental paired ground +truth recordings from the community and also +many in silico synthetic recordings. Each sorter +is run on all recordings and the resulting +accuracies for the ground truth units are +updated on a daily basis as needed. Approximate +CPU/GPU run times are also reported. + +Visitors may browse all datasets, algorithms, +sorting results, and comparisons, and inspect +the source code used to generate these data. Use +the links on the navbar to learn about +[recordings](/recordings), +[algorithms](/algorithms), and +[metric definitions](/metrics). + +## Citation + +For a comprehensive discussion of SpikeForest, +please consult the following eLife paper: + +Magland, J., Jun, J. J., Lovero, E., Morley, A. J., +Hurwitz, C. L., Buccino, A. P., Garcia, S., Barnett, +A. H. (2020). SpikeForest, reproducible web-facing +ground-truth validation of automated neural spike +sorters. *eLife, 9.* [doi:10.7554/elife.55167](https://elifesciences.org/articles/55167) + +## Running locally (Reproducing) + +* For information on reproducing the results reported on the webpage or running +the SpikeForest-wrapped sorters on your own data, see +the [SpikeForest2 Python package](https://github.com/flatironinstitute/spikeforest2). + +## Feedback + +There are several ways to provide feedback, report +problems, or contact us with questions: + +* You may email us using the [contact form](/contact). +* If you prefer a more graphical/interactive way to +label problems or questions on a particular page, +click on the **Contact Us** tab in the +bottom-right corner of any page. 
+
+## Credits
+
+SpikeForest is a project of the
+[Flatiron Institute](https://flatironinstitute.org)
+involving the
+[Center for Computational Mathematics (CCM)](https://www.simonsfoundation.org/flatiron/center-for-computational-mathematics/)
+and the
+[Scientific Computing Core (SCC)](https://www.simonsfoundation.org/flatiron/scientific-computing-core/).
+
+* Jeremy Magland, CCM (chief infrastructure
+developer, backend analysis framework)
+* James Jun, CCM (infrastructure, testing, recording
+preparation, and algorithm integration)
+* Elizabeth Lovero, SCC (site design, web development, visualizations)
+* Leslie Greengard, CCM (concept, planning)
+* Alex Barnett, CCM (concept, planning, site design, testing)
+
+&nbsp;
+
+Other key contributors to the project include:
+
+* Alex Morley - Mozilla Fellow, MRC Brain Network
+Dynamics Unit, University of Oxford, UK
+(infrastructure, testing)
+* Witold Wysota - Warsaw, Poland (infrastructure, testing)
+
+&nbsp;
+
+File format conversions and some computations make
+use of [SpikeInterface](https://github.com/SpikeInterface/).
+Over time we will expand our integration with this
+project. It is under development by the following
+individuals:
+
+* Cole Hurwitz - The Institute for Adaptive and
+Neural Computation (ANC), University of Edinburgh,
+Edinburgh, Scotland
+* Alessio Paolo Buccino - Center for Integrative
+Neuroplasticity (CINPLA), Department of
+Biosciences, Physics, and Informatics, University
+of Oslo, Oslo, Norway
+* Matthias Hennig - The Institute for Adaptive and
+Neural Computation (ANC), University of Edinburgh,
+Edinburgh, Scotland
+* Samuel Garcia - Centre de Recherche en
+Neuroscience de Lyon (CRNL), Lyon, France
+* Jeremy Magland - Center for Computational
+Mathematics, Flatiron Institute, New York, NY
+
+&nbsp;
+
+We are grateful to many collaborators at other
+institutions for their vital help and supply of
+recordings:
+
+* Frank Lab, UCSF
+  * Jason Chung (UCSF)
+  * Loren Frank (UCSF)
+* Allen Institute for Brain Science
+  * Catalin Mitelut (Columbia)
+  * Sergey Gratiy (AIBS)
+  * Costas Anastassiou (AIBS)
+* Buzsaki Lab (NYU)
+  * Dan English (Virginia Tech)
+  * Anton Sirota (LMU Munich)
+  * György Buzsáki (NYU)
+* Kampff Lab, UCL
+  * André Marques-Smith (UCL)
+  * Joana P. Neto (UCL)
+  * Adam R. Kampff (UCL)
+* Boyden Lab, MIT
+  * Ed Boyden (MIT)
+  * Brian D. Allen (MIT)
+  * Caroline Moore-Kochlacs (MIT)
+* Institut de la Vision, CNRS
+  * Pierre Yger (CNRS)
+  * Giulia LB Spampinato (CNRS)
+  * Olivier Marre (CNRS)
+* HHMI - Janelia Research Campus (hybrid drift simulation)
+  * Jennifer Colonell
+  * Marius Pachitariu
+
+## References
+
+[1] F. J. Chaure, H. G. Rey, and R. Quian Quiroga. A
+novel and fully automatic spike-sorting
+implementation with variable number of
+features. Journal of Neurophysiology,
+120(4): 1859–1871, 2018.
+[10.1152/jn.00339.2018](https://doi.org/10.1152/jn.00339.2018)
+
+[2] J. E. Chung, J. F. Magland, A. H. Barnett, et
+al. A fully automated approach to
+spike sorting. Neuron, 95(6): 1381–1394, 2017.
+[10.1016/j.neuron.2017.08.030](https://doi.org/10.1016/j.neuron.2017.08.030)
+
+[3] S. Garcia and C. Pouzat.
+[Tridesclous](https://github.com/tridesclous/tridesclous).
+https://github.com/tridesclous/tridesclous.
+
+[4] G. Hilgen, M. Sorbaro, S. Pirmoradian, J.-O.
+Muthmann, I. E. Kepiro, S. Ullo, C. J. Ramirez, A. P.
+Encinas, A. Maccione, L. Berdondini, et al.
+Unsupervised spike sorting for large-scale,
+high-density multielectrode arrays. Cell Reports,
+18(10): 2521–2532, 2017.
+[10.1016/j.celrep.2017.02.038](https://doi.org/10.1016/j.celrep.2017.02.038)
+
+[5] J. J. Jun, C. Mitelut, C. Lai, S. Gratiy, C.
+Anastassiou, and T. D. Harris. Real-time spike
+sorting platform for high-density extracellular
+probes with ground-truth validation and drift
+correction. bioRxiv, page 101030, 2017.
+[10.1101/101030](https://doi.org/10.1101/101030)
+
+[6] J. J. Jun, N. A. Steinmetz, J. H. Siegle, D. J.
+Denman, M. Bauza, B. Barbarits, A. K. Lee, C. A.
+Anastassiou, A. Andrei, Ç. Aydın, et al. Fully
+integrated silicon probes for high-density recording
+of neural activity. Nature, 551(7679): 232–236, 2017.
+[10.1038/nature24636](https://doi.org/10.1038/nature24636)
+
+[7] M. Pachitariu, N. A. Steinmetz, and J. Colonell.
+[Kilosort2](https://github.com/MouseLand/Kilosort2).
+https://github.com/MouseLand/Kilosort2.
+
+[8] R. Q. Quiroga, Z. Nadasdy, and Y. Ben-Shaul.
+Unsupervised spike detection and sorting with
+wavelets and superparamagnetic clustering. Neural
+Computation, 16(8): 1661–1687, 2004.
+[10.1162/089976604774201631](https://doi.org/10.1162/089976604774201631)
+
+[9] P. Yger, G. L. Spampinato, E. Esposito, B.
+Lefebvre, S. Deny, C. Gardella, M. Stimberg, F.
+Jetter, G. Zeck, S. Picaud, et al. A spike sorting
+toolbox for up to thousands of electrodes validated
+with ground truth recordings in vitro and in
+vivo. eLife, 7: e34518, 2018.
+[10.7554/eLife.34518](https://doi.org/10.7554/eLife.34518)
+
+[10] M. J. Zaki and W. Meira Jr. *Data mining and
+analysis: fundamental concepts and algorithms.*
+Cambridge University Press, New York, NY, 2014.
+[ISBN: 978-0521766333](https://dataminingbook.info/first_edition/)
+
+## Background and related projects (Background)
+
+This project is the fruition of a long-term goal
+within the spike sorting effort of CCM, starting in
+2014 at what was then SCDA (the Simons Center for
+Data Analysis). The original spike sorting effort
+comprised Jeremy Magland, Alex Barnett, and Leslie
+Greengard, and collaborators in Loren Frank's lab.
+
+Design principles were outlined in our
+[white paper](https://github.com/flatironinstitute/spikesortercomparison)
+of May 2018. This was inspired in part by a
+community discussion on validation at the Janelia
+spike sorting workshop of 3/22/18.
+
+We were influenced (in terms of concept, features,
+and site design) by many neuroscience and
+non-neuroscience algorithm validation websites,
+including:
+
+* [ClustEval website](https://clusteval.sdu.dk/)
+for comparing clustering algorithms, including
+parameter optimizations. Their publication is:
+Wiwie, C., Röttger, R. & Baumbach, J. "Comparing
+the performance of biomedical clustering methods,"
+Nature Methods (2015).
+* [NeuroFinder](http://neurofinder.codeneuro.org/)
+by Jeremy Freeman, for calcium imaging spatial
+neuron extraction comparison.
+* [SpikeFinder](http://spikefinder.codeneuro.org), by P. Berens,
+for extracting spikes from calcium
+imaging fluorescence curves.
+* [G-Node](http://spike.g-node.org). A now-defunct 2011-2012 project
+where the user
+uploads sorted data, which is compared against a
+hidden ground truth sorting and optionally
+published.
+* [cortexlab](http://phy.cortexlab.net/data/sortingComparison), by
+N. Steinmetz. Comparison of several algorithms on hybrid data.
+* [Spikesortingtest](http://www.spikesortingtest.com) by C. Mitelut.
+* [SpikeSortingSoftware](http://simonster.github.io/SpikeSortingSoftware)
+An older list of spike sorting codes and their features.
+
+## Future plans
+
+We have many future plans, and welcome your
+suggestions. These include:
+
+1. Expanding the set of test recordings.
+1. Parameter settings: more explicit control of
+parameters for sorting algorithms, with possible
+optimization.
+1. Hybrid recordings: incorporate a new recording
+class to complement the current *in vivo*,
+*ex vivo*, and simulated recordings.
+1. Expansion of SNR to other surrogate quality
+metrics: display noise overlap, isolation, etc., as
+used in MountainView.
+1. Stability-based quality metrics: run sorters
+multiple times to measure stability, as in our
+work "Validation of neural spike sorting
+algorithms without ground-truth information," A.
+H. Barnett, J. F. Magland, and L. Greengard, J.
+Neurosci. Meth., 264, 65–77 (2016)
+[doi: 10.1016/j.jneumeth.2016.02.022](https://doi.org/10.1016/j.jneumeth.2016.02.022).
+1. Further integration with SpikeInterface.
diff --git a/src/components/About/AboutGraphicalAbstracts.tsx b/src/components/About/AboutGraphicalAbstracts.tsx
new file mode 100644
index 0000000..d070063
--- /dev/null
+++ b/src/components/About/AboutGraphicalAbstracts.tsx
@@ -0,0 +1,37 @@
+import React, { FunctionComponent } from "react"
+import { Col, Row } from "react-bootstrap"
+import ModalImage from "react-modal-image"
+import long from "./SpikeForest_Long.jpg"
+import short from "./SpikeForest_Short.jpg"
+
+const AboutGraphicalAbstracts: FunctionComponent = () => {
+    return (
+

+ Click to expand graphical abstracts +

+ + {" "} + + {" "} + + + + + + + + ) +} + +export default AboutGraphicalAbstracts \ No newline at end of file diff --git a/src/components/About/SpikeForest_Long.jpg b/src/components/About/SpikeForest_Long.jpg new file mode 100644 index 0000000..acda4c9 Binary files /dev/null and b/src/components/About/SpikeForest_Long.jpg differ diff --git a/src/components/About/SpikeForest_Long.pdf b/src/components/About/SpikeForest_Long.pdf new file mode 100644 index 0000000..c8841ca Binary files /dev/null and b/src/components/About/SpikeForest_Long.pdf differ diff --git a/src/components/About/SpikeForest_Short.jpg b/src/components/About/SpikeForest_Short.jpg new file mode 100644 index 0000000..a679d9a Binary files /dev/null and b/src/components/About/SpikeForest_Short.jpg differ diff --git a/src/components/About/SpikeForest_Short.pdf b/src/components/About/SpikeForest_Short.pdf new file mode 100644 index 0000000..80c0207 Binary files /dev/null and b/src/components/About/SpikeForest_Short.pdf differ diff --git a/src/components/Algorithms/Algorithms.tsx b/src/components/Algorithms/Algorithms.tsx index b2d1837..38be563 100644 --- a/src/components/Algorithms/Algorithms.tsx +++ b/src/components/Algorithms/Algorithms.tsx @@ -86,7 +86,7 @@ const ExtantAlgorithms: FunctionComponent = (Props: Algorit return ( - + = (Props: Algorit ) } - -// class Algorithms extends Component { -// constructor(props: any) { -// super(props); -// this.state = { -// rows: [] -// }; -// // this.handleClick = this.basename.bind(this); // QUERY: Why this?? -// } - -// // I don't think we need these? QUERY -// componentDidMount() { -// if (this.props.algorithms && this.props.algorithms.length) { -// this.filterActives(); -// } -// } - -// componentDidUpdate(prevProps) { -// if (this.props.algorithms !== prevProps.algorithms) { -// this.filterActives(); -// } -// } - -// basename(path) { -// return path.split("/").reverse()[0]; -// } - - // parseDescription(markdown) { - // let par1 = markdown.split("Description")[1]; - // let useable = par1.split("## References")[0]; - // return useable; - // } - - // sortRows(rows) { - // let sorted = rows.sort((a, b) => { - // if (a.wrapper && !b.wrapper) return -1; - // if (!a.wrapper && b.wrapper) return 1; - // let textA = a.raw_label.toUpperCase(); - // let textB = b.raw_label.toUpperCase(); - // return textA < textB ? -1 : textA > textB ? 1 : 0; - // }); - // return sorted; - // } - - // Curiously, it's named 'filter' but it never applies any filtering. 
- // QUERY: THIS - // filterActives() { - // let rows = this.props.algorithms.map(alg => { - // let row = { - // raw_label: alg.label, - // label: alg.label, - // processor_name: alg.processor_name, - // authors: alg.authors, - // notes: alg.notes, - // environment: "", - // wrapper: "", - // markdown: "", - // markdown_link: "", - // website: "/", - // wrapper_link: "/", - // env_name: "tbd", - // env_link: "/" - // }; - // if (alg.dockerfile) { // always true - // row.environment = `${this.basename(alg.dockerfile)}`; - // row.env_name = "Docker"; - // row.env_link = alg.dockerfile; - // } else if (alg.environment) { // thus can't happen - // row.environment = `${alg.environment}`; - // row.env_name = alg.environment; - // } - // if (alg.wrapper) { - // // row.wrapper is never used - // row.wrapper = `${this.basename( - // alg.wrapper - // )}`; - // row.wrapper_link = alg.wrapper; - // } - // if (alg.markdown_link) { - // row.markdown_link = `${this.basename(alg.markdown_link)}`; - // } - // if (alg.markdown) { - // row.markdown = this.parseDescription(alg.markdown); - // } - // if (alg.website) { - // // NOTHING EVER ACTUALLY USES THIS - // row.label = `${alg.label}`; - // row.website = alg.website; - // } - // return row; - // }); - // let sorted = this.sortRows(rows); - // this.setState({ rows: sorted }); - // } - -// render() { -// // let loading = isEmpty(this.state.rows); -// // let listCards; -// // if (this.state.rows) { -// // listCards = this.state.rows.map((row, index) => ( -// // -// // )); -// // } -// // let sidebarItems = this.state.rows.map(row => ({ -// // name: toTitleCase(row.raw_label.replace(/_/g, " ").toLowerCase()), -// // value: row.raw_label -// // })); -// // sidebarItems.unshift({ name: "Overview", value: "overview" }); -// return ( -//
foo
) -// {/* {loading ? ( -// -// -// -// -// -// -// -// ) : ( -// -// -// -// -// -// -// -// -// -//
-//

Algorithms

-//
-// -//
-//
-// -// -//
-//
-//
-//

-// Overview -//

-//
-//
-//
-//

-// {" "} -// Generally speaking, a spike sorting algorithm -// takes in an unfiltered multi-channel timeseries -// (aka, recording) and a dictionary of algorithm -// parameters and outputs a list of firing times and -// associated integer unit labels. This page lists -// the spike sorting codes we run, as well as some -// that have yet to be incorporated. Most of the -// codes were developed at other institutions; two of -// them are in-house. -//

-//

-// {" "} -// SpikeForest uses Python wrappers to implement the -// algorithms. Links to those may be found in the -// "Wrapper" links above. For the non-MATLAB sorters, -// we use singularity containers (similar to docker -// containers) in order to ensure a reproducible -// compute environment. In those cases, links to the -// docker files (environment presciptions) are -// provided. We almost always use the default -// parameters of the wrappers, but some may be -// overriden in the{" "} -// -// analysis configuration files -// -// . -//

-//

-// Wrappers were created in collaboration with the{" "} -// -// SpikeInterface -// {" "} -// project. The goal is to ultimately merge these -// with the corresponding wrappers in -// SpikeInterface/SpikeToolkit. -//

-//
-//
-//
-// -//
-// -// {listCards} -// -// -// -// -// -// )} -//
-// ) */} -// } -// } - export default Algorithms diff --git a/src/components/Footer/Footer.tsx b/src/components/Footer/Footer.tsx new file mode 100644 index 0000000..c3fc163 --- /dev/null +++ b/src/components/Footer/Footer.tsx @@ -0,0 +1,74 @@ +import React from 'react' +import { Link } from 'react-router-dom' +import './footer.css' +import github from './github-white.svg' + + +const Footer = () => { + return ( +
+

Spike-front client updated on October 16, 2020 (3:38 PM)

+
+

+ SpikeForest is a website and open source computing framework for evaluating and + comparing spike sorting algorithms for neurophysiology data analysis. It is a project of + the Center for Computational Mathematics at the Flatiron Institute. +

+
+ + +
+ +
+
+ ); +} + +export default Footer diff --git a/src/components/Footer/footer.css b/src/components/Footer/footer.css new file mode 100644 index 0000000..d8f09a6 --- /dev/null +++ b/src/components/Footer/footer.css @@ -0,0 +1,76 @@ +.footer { + padding: 2.5rem 0; + text-align: center; + background: #222; + color: #fff; + border-top: 0.05rem solid #e5e5e5; + font-size: 1.6rem; +} + +.footer > p.updated { + color: #282828; +} + +.footer p:last-child { + margin-bottom: 0; +} + +.footer__description { + line-height: 1.5; + text-align: center; + padding: 0 1rem; + margin: 2rem auto; + max-width: 50rem; + /* color: rgb(42, 42, 42); */ + background: #222; + color: #fff; +} + +.footer__interwebs { + display: flex; + flex-wrap: wrap; + width: 100%; + justify-content: center; + max-width: 50rem; + margin: 2rem auto; + padding: 1rem; +} + +.footer__interwebs a { + border: none; + margin-right: 0.5em; +} + +.footer__links { + display: flex; + justify-content: center; + margin-top: 1rem; + flex-wrap: wrap; +} + +.footer__links li { + margin: 0.25rem 0.5rem; + list-style-type: none; + font-size: 1.5rem; + line-height: 1.5; +} + +.footer__links li a { + text-decoration: none; + border-bottom: 1px solid var(--fi-blue); + color: #fff; +} + +.footer__location { + width: 100%; + text-align: center; + font-weight: 500; + text-transform: uppercase; + letter-spacing: 0.05em; + color: #fff; + font-size: 1.4rem; +} + +.footer__emoji { + padding-left: 9px; +} diff --git a/src/components/Footer/github-white.svg b/src/components/Footer/github-white.svg new file mode 100644 index 0000000..30f035c --- /dev/null +++ b/src/components/Footer/github-white.svg @@ -0,0 +1,11 @@ + + + + github + Created with Sketch. + + + + + + \ No newline at end of file diff --git a/src/components/Footer/github.svg b/src/components/Footer/github.svg new file mode 100644 index 0000000..d8e050b --- /dev/null +++ b/src/components/Footer/github.svg @@ -0,0 +1,11 @@ + + + + github + Created with Sketch. + + + + + + \ No newline at end of file diff --git a/src/components/Header/Header.tsx b/src/components/Header/Header.tsx new file mode 100644 index 0000000..29f7656 --- /dev/null +++ b/src/components/Header/Header.tsx @@ -0,0 +1,79 @@ +import React from "react"; +import { Nav, Navbar } from "react-bootstrap"; +import { LinkContainer } from "react-router-bootstrap"; +import logo from "./logo-no-icon.svg"; + + +// const debounce = (fn: Function, ms: number = 1000) => { +// let timer: ReturnType +// return function (this: any, ...args: any[]) { +// clearTimeout(timer) +// timer = setTimeout(() => fn.apply(this, args), ms) +// } +// } + +const Header = () => { + // NOTE: Code equivalent to this existed in the old spikeforest. + // However, it doesn't seem to have actually done anything... + // const [dimensions, setDimensions] = useState({ + // // height: window.innerHeight, + // // width: window.innerWidth + // height: 1500, + // width: 341 + // }) + // useEffect(() => { + // const debouncedUpdateDimensions = debounce(() => { + // // console.log(`Dims were ${JSON.stringify(dimensions)}`) + // const newDim = window.innerWidth < 500 + // ? { width: 450, height: 102 } + // : { + // width: window.innerWidth - 100, + // height: Math.round((window.innerWidth - 100) / 4.4) + // } + // setDimensions(newDim) + // // console.log(`Dims now ${JSON.stringify(newDim)}`) + // }, 500) + // window.addEventListener('resize', debouncedUpdateDimensions) + + // return () => { window.removeEventListener('resize', debouncedUpdateDimensions) } + // }) + + return ( +
+ +
+ ) +} + +export default Header; diff --git a/src/components/Header/logo-no-icon.svg b/src/components/Header/logo-no-icon.svg new file mode 100644 index 0000000..60bd58c --- /dev/null +++ b/src/components/Header/logo-no-icon.svg @@ -0,0 +1,17 @@ + + + Logo Copy + Created with Sketch. + + + + + + + SpikeForest + + + + diff --git a/src/components/MetricsDescription/MetricsDescription.tsx b/src/components/MetricsDescription/MetricsDescription.tsx index 181e052..6f25e1f 100644 --- a/src/components/MetricsDescription/MetricsDescription.tsx +++ b/src/components/MetricsDescription/MetricsDescription.tsx @@ -1,5 +1,5 @@ import { useEffect, useState } from 'react' -import MarkdownRenderKatex from './MarkdownRenderKatex' +import MarkdownRenderKatex from '../Shared/MarkdownRenderKatex' import mdPath from './MetricsDescription.md' diff --git a/src/components/Shared/MarkdownHandling.tsx b/src/components/Shared/MarkdownHandling.tsx new file mode 100644 index 0000000..12b20e0 --- /dev/null +++ b/src/components/Shared/MarkdownHandling.tsx @@ -0,0 +1,163 @@ +import { Fragment, FunctionComponent } from 'react' +import { Col, Container, Row } from 'react-bootstrap' +import ReactMarkdown from 'react-markdown' +import Sidebar from './Sidebar' + +export type Copy = { + Title: string // overall heading for the page content + Sections: Section[] // set of section cards +} + +// TODO: Not super happy about this 'flavor' thing... +export type Section = { + Header: string // heading for the section + Content: string // markdown content + Label: string // How the section should appear in the sidebar ("Recording Types") + Tag: string // text ID to use for sidebar identification ("recordingtypes") + // distinguish Cards, ListCards, others that need special treatment + Flavor?: 'static' | 'list' | 'preloader' | 'special' +} + +// The "ContentHook" provides the capacity to apply modifications or special handling +// for some Sections in the page copy. A canonical example is the About page, where it +// is used to insert a specific card for the "Overview" section that includes lightboxed images. +// This hook takes a Section object and should return any desired custom card content. +// If "undefined" is returned, the default is to just run the Section content field through +// ReactMarkdown. +// This should be passed through to the PageCopy function, but only gets called at the +// individual Section/Card layout level. +export type ContentHook = (Section: Section) => JSX.Element | undefined + +// "AdditionalContent" is any custom content that should appear after the static cards +// (derived from the input Markdown). Canonical example is the Recordings page, where +// we have a variable list of Study Sets (each derived from live-queried API data) +// that comes after the static (Markdown) preface. +// This is usually expected to be page content that derives from a non-Markdown source; +// if you just want to render the Markdown-based cards differently, use a ContentHook +// function that can render the appropriate changes from the Section object (perhaps +// by looking at the Flavor field, for example). 
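+// A hypothetical usage sketch of the helpers defined below (illustrative only; the sample
+// markdown string and the pass-through hook are assumptions, not code used elsewhere in the app):
+//
+//   const copy = parseMarkdownToContentCards("# My Page\n\n## Overview (Home)\n\nSome text.")
+//   // copy.Title === "My Page"; copy.Sections[0] has Header "Overview", Label "Home",
+//   // Tag "overview", Flavor "static", and Content holding the remaining markdown body.
+//
+//   // Rendering with a hook that always returns undefined makes every card fall back to
+//   // plain ReactMarkdown, and no AdditionalContent is appended after the cards:
+//   return <PageCopy Copy={copy} ContentHook={() => undefined} />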
+export type CopyHook = { Copy: Copy, ContentHook: ContentHook, AdditionalContent?: JSX.Element } + +export type SectionHook = { Section: Section, ContentHook: ContentHook } + +export const getCanonicalSectionTag = (raw: string): string => { + return raw.toLowerCase().replace(/\s+/g, '') +} + +export const parseMarkdownToContentCards = (rawMd: string): Copy => { + // Expect input to be well-formatted markdown with a single top-level + // header (# xxxxx) and several sections consisting of a second-level + // header (## yyyy) on their own line, followed by chunks of well-formed + // markdown only. + let copy: Copy = { Title: 'Parse error', Sections: [] } + + // Regex matches a line that begins with '# ' and captures the rest + // of its content, then skips any whitespace and returns the balance + // of the markdown as the next capture group. So match[1] has the + // intended overall title and match[2] has the rest of the file. + const titleParser = /^# (.+)$\s*([\S\s]*)/m + const match = rawMd.match(titleParser) + if (!match || match.length < 3) return copy // match failed or empty file--abort + + // Splits the 'rest of file'. Regex matches all second-level header lines ('## ...') + // and any following whitespace, while capturing the H2 text to use as the section header. + // The first element (in the markdown, anything between the H1 line and the + // first H2 line) *should* be empty--we're definitely going to ignore it! + let sections = match[2].split(/^## (.+)$/m) + + // if match failed, there wasn't at least one section, or there's an odd number of + // 'good' sections + if (!sections || sections.length < 3 || (sections.length - 1) % 2 !== 0) return copy + sections.shift() + // Now we have a list of pairs of header and content. Let's pair them up: + let pairs = sections + .map((element, i, sections) => { + return [element, (i+1 < sections.length) ? sections[i+1] : null] + }) + .filter((element, i) => !(i%2)) + // pairs is now an array of two-element string arrays, where the first element is + // the header and the second element is the (Markdown) section content. + + // So finally let's build some Sections. We'll use any parenthesized content + // in the section header as the sidebar label; if none, we'll use the exact header. + // The corresponding item ID for the tag system should be the sidebar label, + // normalized to lowercase and with spaces removed. + pairs.forEach((pair) => { + if (pair.length > 2 || !pair[0] || !pair[1]){ + console.log(`Error in pair: ${pair}. Skipping...`) + return + } + let header = (pair[0] ?? '').replace(/\s+$/g, '') + let m = (header).match(/^(.+)\s+\((.*)\)$/) + copy.Sections.push({ + Header: m ? m[1] : pair[0], + Content: pair[1], + Label: m ? m[2] : pair[0], + Tag: getCanonicalSectionTag(m ? m[1] : pair[0]), + Flavor: 'static' + }) + }) + copy.Title = match[1] + + return copy +} + +export const PageSidebar: FunctionComponent = (Props: Copy) => { + const sidebarItems = Props.Sections.map( + (section) => { + return { + name: section.Label, + value: section.Tag + } + } + ) + return ( + + + + ) +} + +export const PageCopy: FunctionComponent = (Props: CopyHook) => { + return ( + + + + +
+

{Props.Copy.Title}

+
+ +
+ { Props.Copy.Sections.map(item => PageCard({Section: item, ContentHook: Props.ContentHook})) } + { Props.AdditionalContent } +
+ + ) +} + +const PageCard: FunctionComponent = (Props: SectionHook) => { + const customElement = Props.ContentHook(Props.Section) ?? + return ( + +
+ + +
+
+
+

+ {Props.Section.Header} +

+
+
+
+ {customElement} +
+
+
+ +
+ + ) +} diff --git a/src/components/MetricsDescription/MarkdownRenderKatex.tsx b/src/components/Shared/MarkdownRenderKatex.tsx similarity index 100% rename from src/components/MetricsDescription/MarkdownRenderKatex.tsx rename to src/components/Shared/MarkdownRenderKatex.tsx diff --git a/src/components/util.ts b/src/components/util.ts index f0917b5..ca933ed 100644 --- a/src/components/util.ts +++ b/src/components/util.ts @@ -21,4 +21,4 @@ export const isEmpty = (obj: any): boolean => Object.entries(obj).length === 0 // link: (props: any) => { // return `${props.children}` // } -// } \ No newline at end of file +// } diff --git a/src/index.css b/src/index.css index 9599cf2..c3b3204 100644 --- a/src/index.css +++ b/src/index.css @@ -1,3 +1,4 @@ +@import url('https://cdn.jsdelivr.net/npm/bootstrap@5.0.2/dist/css/bootstrap.min.css'); /* Base Styles –––––––––––––––––––––––––––––––––––––––––––––––––– */ /* NOTE diff --git a/src/typings.d.ts b/src/typings.d.ts index c84a7b8..67efa71 100644 --- a/src/typings.d.ts +++ b/src/typings.d.ts @@ -3,4 +3,6 @@ declare module '*.md' { export default content } +declare module 'react-modal-image' + declare module 'react-katex' \ No newline at end of file
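A possible follow-up sketch: the bare declare module 'react-modal-image' entry above leaves the ModalImage component untyped. A more specific ambient declaration could look like the following; the prop names are assumptions drawn from the react-modal-image README and should be checked against the installed version before use.

// Hypothetical, tighter typing for react-modal-image (prop names assumed, not verified here)
declare module 'react-modal-image' {
  import { ComponentType } from 'react'

  export interface ModalImageProps {
    small: string          // thumbnail rendered inline
    large?: string         // full-size image shown in the lightbox
    alt?: string
    hideDownload?: boolean
    hideZoom?: boolean
    className?: string
  }

  const ModalImage: ComponentType<ModalImageProps>
  export default ModalImage
}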