diff --git a/FAQ.html b/FAQ.html
index b32ad8fe..08ea3f26 100644
--- a/FAQ.html
+++ b/FAQ.html
@@ -18,7 +18,7 @@
-
+
diff --git a/_autosummary/one.alf.cache.html b/_autosummary/one.alf.cache.html
index d58967ec..f64035ae 100644
--- a/_autosummary/one.alf.cache.html
+++ b/_autosummary/one.alf.cache.html
@@ -18,7 +18,7 @@
-
+
@@ -193,7 +193,7 @@
cache_dir (str, pathlib.Path) –
cache_dir (str, pathlib.Path)
tables (dict[str, pandas.DataFrame], optional) – A dict with keys (‘sessions’, ‘datasets’), containing the cache tables as DataFrames.
remove_empty_sessions (bool) – Attempt to remove session folders that are empty and not in the sessions table.
dry (bool) – If true, do not remove anything.
query_type (str) – Query cache (‘local’) or Alyx database (‘remote’)
download_only (bool) – When true the data are downloaded and the file path is returned.
-**kwargs – Additional filters for datasets, including namespace and timescale. For full list see the one.alf.spec.describe function.
+kwargs – Additional filters for datasets, including namespace and timescale. For full list see the one.alf.spec.describe function.
cache_dir (pathlib.Path, str) – A path to the ALF data directory.
silent ((False) bool) – When True will prompt for cache_dir, if cache_dir is None, and overwrite cache if any. When False will use cwd for cache_dir, if cache_dir is None, and use existing cache.
-**kwargs – Optional arguments to pass to one.alf.cache.make_parquet_db.
+kwargs – Optional arguments to pass to one.alf.cache.make_parquet_db.
base_url (str) – An Alyx database URL. If None, the current default database is used.
-**kwargs – Optional arguments to pass to one.params.setup.
+kwargs – Optional arguments to pass to one.params.setup.
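For orientation, a hedged sketch of the two setup entry points these docstrings describe, matching the One.setup and OneAlyx.setup docstrings shown later in this diff; the cache path and database URL are hypothetical:

```python
from one.api import One, OneAlyx

# Local mode (One.setup): build a parquet cache from an ALF directory.
# Extra keyword arguments are passed through to one.alf.cache.make_parquet_db.
One.setup(cache_dir='/data/alf')

# Remote mode (OneAlyx.setup): configure credentials for an Alyx database.
# Extra keyword arguments are passed through to one.params.setup.
OneAlyx.setup(base_url='https://alyx.example.org')
```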
return jsonable.read(fil)
if fil.suffix == '.npy':
return _ensure_flat(np.load(file=fil, allow_pickle=True))
+ if fil.suffix == '.npz':
+ arr = np.load(file=fil)
+ # If single array with the default name ('arr_0') return individual array
+ return arr['arr_0'] if set(arr.files) == {'arr_0'} else arr
if fil.suffix == '.pqt':
return parquet.load(fil)[0]
if fil.suffix == '.ssv':
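As a hedged illustration of the new `.npz` branch above (file names hypothetical): NumPy stores unnamed arrays under the default keys 'arr_0', 'arr_1', …, which is exactly what the single-array unpacking keys on.

```python
import numpy as np

np.savez_compressed('single.npz', np.random.rand(5))                     # one unnamed array
np.savez_compressed('multi.npz', np.random.rand(5), np.random.rand(5))   # two unnamed arrays

single = np.load('single.npz')
assert set(single.files) == {'arr_0'}          # branch above returns single['arr_0']

multi = np.load('multi.npz')
assert set(multi.files) == {'arr_0', 'arr_1'}  # branch above returns the NpzFile itself
```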
diff --git a/_modules/one/alf/spec.html b/_modules/one/alf/spec.html
index 45559f82..97ff0068 100644
--- a/_modules/one/alf/spec.html
+++ b/_modules/one/alf/spec.html
@@ -17,7 +17,7 @@
-
+
diff --git a/_modules/one/api.html b/_modules/one/api.html
index d69d0f64..b94a18c3 100644
--- a/_modules/one/api.html
+++ b/_modules/one/api.html
@@ -17,7 +17,7 @@
-
+
@@ -1515,7 +1515,7 @@ Source code for one.api
Query cache ('local') or Alyx database ('remote')
download_only : bool
When true the data are downloaded and the file path is returned.
- **kwargs
+ kwargs
Additional filters for datasets, including namespace and timescale. For full list
see the one.alf.spec.describe function.
@@ -1552,18 +1552,19 @@ Source code for one.api
if len(datasets) == 0:
raise alferr.ALFObjectNotFound(object or '')
parts = [alfiles.rel_path_parts(x) for x in datasets.rel_path]
- unique_objects = set(x[3] or '' for x in parts)
# For those that don't exist, download them
offline = None if query_type == 'auto' else self.mode == 'local'
files = self._check_filesystem(datasets, offline=offline)
- files = [x for x in files if x]
- if not files:
+ if not any(files):
raise alferr.ALFObjectNotFound(f'ALF collection "{collection}" not found on disk')
+ # Remove missing items
+ files, parts = zip(*[(x, y) for x, y in zip(files, parts) if x])
if download_only:
return files
+ unique_objects = set(x[3] or '' for x in parts)
kwargs.update(wildcards=self.wildcards)
collection = {
obj: alfio.load_object([x for x, y in zip(files, parts) if y[3] == obj], **kwargs)
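The added filtering line uses a zip/unzip idiom to drop datasets missing from disk while keeping `files` and `parts` aligned, which is why `unique_objects` is now computed only after the filter. A minimal sketch with hypothetical values:

```python
# Hypothetical values: file paths aligned with their parsed path parts
files = ['spikes.times.npy', None, 'spikes.clusters.npy']   # None = missing on disk
parts = [('', '', '', 'spikes'), ('', '', '', 'trials'), ('', '', '', 'spikes')]

# Drop pairs whose file is falsy, then unzip back into two aligned tuples
files, parts = zip(*[(x, y) for x, y in zip(files, parts) if x])
assert files == ('spikes.times.npy', 'spikes.clusters.npy')
assert set(x[3] for x in parts) == {'spikes'}   # unique_objects, computed after filtering
```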
@@ -1585,7 +1586,7 @@ Source code for one.api
silent : (False) bool
When True will prompt for cache_dir, if cache_dir is None, and overwrite cache if any.
When False will use cwd for cache_dir, if cache_dir is None, and use existing cache.
- **kwargs
+ kwargs
Optional arguments to pass to one.alf.cache.make_parquet_db.
Returns
@@ -2698,7 +2699,7 @@ Source code for one.api
----------
base_url : str
An Alyx database URL. If None, the current default database is used.
- **kwargs
+ kwargs
Optional arguments to pass to one.params.setup.
Returns
@@ -3008,7 +3009,7 @@ Source code for one.api
Parameters
----------
- **kwargs
+ kwargs
See one.params.setup.
Returns
diff --git a/_modules/one/converters.html b/_modules/one/converters.html
index e86b5cea..6f021d4a 100644
--- a/_modules/one/converters.html
+++ b/_modules/one/converters.html
@@ -17,7 +17,7 @@
-
+
diff --git a/_modules/one/params.html b/_modules/one/params.html
index f35be959..3adb4c46 100644
--- a/_modules/one/params.html
+++ b/_modules/one/params.html
@@ -17,7 +17,7 @@
-
+
diff --git a/_modules/one/registration.html b/_modules/one/registration.html
index d8029072..917c4bdb 100644
--- a/_modules/one/registration.html
+++ b/_modules/one/registration.html
@@ -17,7 +17,7 @@
-
+
@@ -208,6 +208,9 @@ Source code for one.registration
self.one = one
if not one:
self.one = ONE(cache_rest=None)
+ elif one.alyx.cache_mode == 'GET':
+ _logger.warning('AlyxClient REST cache active; '
+ 'this may cause issues with registration.')
self.dtypes = list(map(Bunch, self.one.alyx.rest('dataset-types', 'list')))
self.registration_patterns = [
dt['filename_pattern'] for dt in self.dtypes if dt['filename_pattern']]
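A hedged sketch of what this warning implies for callers: when no instance is passed, the client constructs ONE with REST caching disabled (as on the default branch above), so passing your own instance configured the same way should avoid stale cache responses during registration. The construction below assumes the signatures shown in this diff:

```python
from one.api import ONE
from one.registration import RegistrationClient

one = ONE(cache_rest=None)             # REST cache off, as in the default branch above
client = RegistrationClient(one=one)   # should not trigger the cache-mode warning
```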
diff --git a/_modules/one/remote/aws.html b/_modules/one/remote/aws.html
index 84bcecdf..35b16890 100644
--- a/_modules/one/remote/aws.html
+++ b/_modules/one/remote/aws.html
@@ -17,7 +17,7 @@
-
+
diff --git a/_modules/one/remote/base.html b/_modules/one/remote/base.html
index d6e3311d..d1b43b86 100644
--- a/_modules/one/remote/base.html
+++ b/_modules/one/remote/base.html
@@ -17,7 +17,7 @@
-
+
diff --git a/_modules/one/remote/globus.html b/_modules/one/remote/globus.html
index 5f02301e..fcf441c4 100644
--- a/_modules/one/remote/globus.html
+++ b/_modules/one/remote/globus.html
@@ -17,7 +17,7 @@
-
+
diff --git a/_modules/one/tests/alf/test_alf_files.html b/_modules/one/tests/alf/test_alf_files.html
index c4912d25..5d509623 100644
--- a/_modules/one/tests/alf/test_alf_files.html
+++ b/_modules/one/tests/alf/test_alf_files.html
@@ -17,7 +17,7 @@
-
+
diff --git a/_modules/one/tests/alf/test_alf_io.html b/_modules/one/tests/alf/test_alf_io.html
index ee337735..4f22317d 100644
--- a/_modules/one/tests/alf/test_alf_io.html
+++ b/_modules/one/tests/alf/test_alf_io.html
@@ -17,7 +17,7 @@
-
+
@@ -704,7 +704,11 @@ Source code for one.tests.alf.test_alf_io
yaml.dump({'a': [1, 2, 3], 'b': [4, 5, 6]}, f)
self.xyz = Path(self.tmpdir.name) / 'foo.baz.xyz'
with open(self.xyz, 'wb') as f:
- f.write(b'\x00\x00')
+ f.write(b'\x00\x00')
+ self.npz1 = Path(self.tmpdir.name) / 'foo.baz.npz'
+ np.savez_compressed(self.npz1, np.random.rand(5))
+ self.npz2 = Path(self.tmpdir.name) / 'foo.bar.npz'
+ np.savez_compressed(self.npz2, np.random.rand(5), np.random.rand(5))
@@ -737,7 +741,14 @@ Source code for one.tests.alf.test_alf_io
self.assertIsNone(alfio.load_file_content(None))
# Load YAML file
loaded = alfio.load_file_content(str(self.yaml))
- self.assertCountEqual(loaded.keys(), ['a', 'b'])
+ self.assertCountEqual(loaded.keys(), ['a', 'b'])
+ # Load npz file
+ loaded = alfio.load_file_content(str(self.npz1))
+ self.assertIsInstance(loaded, np.ndarray, 'failed to unpack')
+ self.assertEqual(loaded.shape, (5,))
+ loaded = alfio.load_file_content(str(self.npz2))
+ self.assertIsInstance(loaded, np.lib.npyio.NpzFile, 'failed to return npz array')
+ self.assertEqual(loaded['arr_0'].shape, (5,))
diff --git a/_modules/one/tests/alf/test_alf_spec.html b/_modules/one/tests/alf/test_alf_spec.html
index 6d85bb7b..f703c045 100644
--- a/_modules/one/tests/alf/test_alf_spec.html
+++ b/_modules/one/tests/alf/test_alf_spec.html
@@ -17,7 +17,7 @@
-
+
diff --git a/_modules/one/tests/alf/test_cache.html b/_modules/one/tests/alf/test_cache.html
index e88717bf..23650d23 100644
--- a/_modules/one/tests/alf/test_cache.html
+++ b/_modules/one/tests/alf/test_cache.html
@@ -17,7 +17,7 @@
-
+
diff --git a/_modules/one/tests/remote/test_aws.html b/_modules/one/tests/remote/test_aws.html
index 210e7153..4b09e950 100644
--- a/_modules/one/tests/remote/test_aws.html
+++ b/_modules/one/tests/remote/test_aws.html
@@ -17,7 +17,7 @@
-
+
diff --git a/_modules/one/tests/remote/test_base.html b/_modules/one/tests/remote/test_base.html
index ef855516..e1e30428 100644
--- a/_modules/one/tests/remote/test_base.html
+++ b/_modules/one/tests/remote/test_base.html
@@ -17,7 +17,7 @@
-
+
diff --git a/_modules/one/tests/remote/test_globus.html b/_modules/one/tests/remote/test_globus.html
index 1312b554..91b7d6ba 100644
--- a/_modules/one/tests/remote/test_globus.html
+++ b/_modules/one/tests/remote/test_globus.html
@@ -17,7 +17,7 @@
-
+
diff --git a/_modules/one/tests/test_alyxclient.html b/_modules/one/tests/test_alyxclient.html
index f2e1e65f..1ed3fde0 100644
--- a/_modules/one/tests/test_alyxclient.html
+++ b/_modules/one/tests/test_alyxclient.html
@@ -17,7 +17,7 @@
-
+
diff --git a/_modules/one/tests/test_alyxrest.html b/_modules/one/tests/test_alyxrest.html
index f6e8c677..b29e15d5 100644
--- a/_modules/one/tests/test_alyxrest.html
+++ b/_modules/one/tests/test_alyxrest.html
@@ -17,7 +17,7 @@
-
+
@@ -404,6 +404,7 @@ Source code for one.tests.test_alyxrest
self.addCleanup(self.alyx.rest, 'insertions', 'delete', id=probe_insertion['id'])
trajectory = self.alyx.rest('trajectories', 'create', data={
'probe_insertion': probe_insertion['id'],
+ 'chronic_insertion': None,
'x': 1500,
'y': -2000,
'z': 0,
diff --git a/_modules/one/tests/test_converters.html b/_modules/one/tests/test_converters.html
index 0aba271a..775fc321 100644
--- a/_modules/one/tests/test_converters.html
+++ b/_modules/one/tests/test_converters.html
@@ -17,7 +17,7 @@
-
+
diff --git a/_modules/one/tests/test_one.html b/_modules/one/tests/test_one.html
index 61f6bf9f..4fa22f6e 100644
--- a/_modules/one/tests/test_one.html
+++ b/_modules/one/tests/test_one.html
@@ -17,7 +17,7 @@
-
+
@@ -1580,7 +1580,7 @@ Source code for one.tests.test_one
self.one._cache['datasets'] = self.one._cache['datasets'].iloc[0:0].copy()
dsets = self.one.list_datasets(self.eid, details=True, query_type='remote')
- self.assertEqual(171, len(dsets)) # this may change after a BWM release or patch
+ self.assertEqual(183, len(dsets)) # this may change after a BWM release or patch
self.assertEqual(1, dsets.index.nlevels, 'details data frame should be without eid index')
# Test missing eid
@@ -1597,12 +1597,12 @@ Source code for one.tests.test_one
# Test details=False, with eid
dsets = self.one.list_datasets(self.eid, details=False, query_type='remote')
self.assertIsInstance(dsets, list)
- self.assertEqual(171, len(dsets)) # this may change after a BWM release or patch
+ self.assertEqual(183, len(dsets)) # this may change after a BWM release or patch
# Test with other filters
dsets = self.one.list_datasets(self.eid, collection='*probe*', filename='*channels*',
details=False, query_type='remote')
- self.assertEqual(20, len(dsets))
+ self.assertEqual(24, len(dsets))
self.assertTrue(all(x in y for x in ('probe', 'channels') for y in dsets))
with self.assertWarns(Warning):
diff --git a/_modules/one/tests/test_params.html b/_modules/one/tests/test_params.html
index 56bd4a0d..82439bf4 100644
--- a/_modules/one/tests/test_params.html
+++ b/_modules/one/tests/test_params.html
@@ -17,7 +17,7 @@
-
+
diff --git a/_modules/one/tests/test_registration.html b/_modules/one/tests/test_registration.html
index c418d302..a0166704 100644
--- a/_modules/one/tests/test_registration.html
+++ b/_modules/one/tests/test_registration.html
@@ -17,7 +17,7 @@
-
+
diff --git a/_modules/one/tests/util.html b/_modules/one/tests/util.html
index 0e172215..3b4e2f7d 100644
--- a/_modules/one/tests/util.html
+++ b/_modules/one/tests/util.html
@@ -17,7 +17,7 @@
-
+
diff --git a/_modules/one/util.html b/_modules/one/util.html
index af848071..c9fae638 100644
--- a/_modules/one/util.html
+++ b/_modules/one/util.html
@@ -17,7 +17,7 @@
-
+
diff --git a/_modules/one/webclient.html b/_modules/one/webclient.html
index 14a7bead..b5502e9f 100644
--- a/_modules/one/webclient.html
+++ b/_modules/one/webclient.html
@@ -17,7 +17,7 @@
-
+
diff --git a/_sources/notebooks/one_load/one_load.ipynb.txt b/_sources/notebooks/one_load/one_load.ipynb.txt
index f1ad5fdf..9ba340dc 100644
--- a/_sources/notebooks/one_load/one_load.ipynb.txt
+++ b/_sources/notebooks/one_load/one_load.ipynb.txt
@@ -286,7 +286,7 @@
"revision will be returned. The revisions are ordered lexicographically.\n",
"\n",
"```python\n",
- "probe1_spikes = one.load_dataset(eid, 'trials.intervals.npy', revision='2021-03-15a')\n",
+ "intervals = one.load_dataset(eid, 'trials.intervals.npy', revision='2021-03-15a')\n",
"```\n",
"\n",
"## Download only\n",
@@ -662,8 +662,7 @@
" filepath = one.load_dataset(eid '_ibl_trials.intervals.npy', download_only=True)\n",
" spike_times = one.load_dataset(eid 'spikes.times.npy', collection='alf/probe01')\n",
" old_spikes = one.load_dataset(eid, 'spikes.times.npy',\n",
- " collection='alf/probe01', revision='2020-08-31')\n",
- "\n"
+ " collection='alf/probe01', revision='2020-08-31')\n"
]
}
],
@@ -733,4 +732,4 @@
},
"nbformat": 4,
"nbformat_minor": 0
-}
\ No newline at end of file
+}
diff --git a/_static/basic.css b/_static/basic.css
index 30fee9d0..f316efcb 100644
--- a/_static/basic.css
+++ b/_static/basic.css
@@ -4,7 +4,7 @@
*
* Sphinx stylesheet -- basic theme.
*
- * :copyright: Copyright 2007-2023 by the Sphinx team, see AUTHORS.
+ * :copyright: Copyright 2007-2024 by the Sphinx team, see AUTHORS.
* :license: BSD, see LICENSE for details.
*
*/
diff --git a/_static/doctools.js b/_static/doctools.js
index d06a71d7..4d67807d 100644
--- a/_static/doctools.js
+++ b/_static/doctools.js
@@ -4,7 +4,7 @@
*
* Base JavaScript utilities for all Sphinx HTML documentation.
*
- * :copyright: Copyright 2007-2023 by the Sphinx team, see AUTHORS.
+ * :copyright: Copyright 2007-2024 by the Sphinx team, see AUTHORS.
* :license: BSD, see LICENSE for details.
*
*/
diff --git a/_static/language_data.js b/_static/language_data.js
index 250f5665..367b8ed8 100644
--- a/_static/language_data.js
+++ b/_static/language_data.js
@@ -5,7 +5,7 @@
* This script contains the language-specific data used by searchtools.js,
* namely the list of stopwords, stemmer, scorer and splitter.
*
- * :copyright: Copyright 2007-2023 by the Sphinx team, see AUTHORS.
+ * :copyright: Copyright 2007-2024 by the Sphinx team, see AUTHORS.
* :license: BSD, see LICENSE for details.
*
*/
@@ -13,7 +13,7 @@
var stopwords = ["a", "and", "are", "as", "at", "be", "but", "by", "for", "if", "in", "into", "is", "it", "near", "no", "not", "of", "on", "or", "such", "that", "the", "their", "then", "there", "these", "they", "this", "to", "was", "will", "with"];
-/* Non-minified version is copied as a separate JS file, is available */
+/* Non-minified version is copied as a separate JS file, if available */
/**
* Porter Stemmer
diff --git a/_static/searchtools.js b/_static/searchtools.js
index 7918c3fa..92da3f8b 100644
--- a/_static/searchtools.js
+++ b/_static/searchtools.js
@@ -4,7 +4,7 @@
*
* Sphinx JavaScript utilities for the full-text search.
*
- * :copyright: Copyright 2007-2023 by the Sphinx team, see AUTHORS.
+ * :copyright: Copyright 2007-2024 by the Sphinx team, see AUTHORS.
* :license: BSD, see LICENSE for details.
*
*/
@@ -99,7 +99,7 @@ const _displayItem = (item, searchTerms, highlightTerms) => {
.then((data) => {
if (data)
listItem.appendChild(
- Search.makeSearchSummary(data, searchTerms)
+ Search.makeSearchSummary(data, searchTerms, anchor)
);
// highlight search terms in the summary
if (SPHINX_HIGHLIGHT_ENABLED) // set in sphinx_highlight.js
@@ -116,8 +116,8 @@ const _finishSearch = (resultCount) => {
);
else
Search.status.innerText = _(
- `Search finished, found ${resultCount} page(s) matching the search query.`
- );
+ "Search finished, found ${resultCount} page(s) matching the search query."
+ ).replace('${resultCount}', resultCount);
};
const _displayNextItem = (
results,
@@ -137,6 +137,22 @@ const _displayNextItem = (
// search finished, update title and status message
else _finishSearch(resultCount);
};
+// Helper function used by query() to order search results.
+// Each input is an array of [docname, title, anchor, descr, score, filename].
+// Order the results by score (in opposite order of appearance, since the
+// `_displayNextItem` function uses pop() to retrieve items) and then alphabetically.
+const _orderResultsByScoreThenName = (a, b) => {
+ const leftScore = a[4];
+ const rightScore = b[4];
+ if (leftScore === rightScore) {
+ // same score: sort alphabetically
+ const leftTitle = a[1].toLowerCase();
+ const rightTitle = b[1].toLowerCase();
+ if (leftTitle === rightTitle) return 0;
+ return leftTitle > rightTitle ? -1 : 1; // inverted is intentional
+ }
+ return leftScore > rightScore ? 1 : -1;
+};
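For readers following the ordering logic, a Python analogue of this comparator (titles and scores are hypothetical): two stable sorts reproduce "ascending score, ties broken reverse-alphabetically", so popping from the end of the list yields the best score first, with ties in alphabetical order.

```python
results = [('Beta', 5), ('Alpha', 5), ('Gamma', 9)]     # (title, score), hypothetical
results.sort(key=lambda r: r[0].lower(), reverse=True)  # ties: reverse-alphabetical
results.sort(key=lambda r: r[1])                        # primary: ascending score
# Popping from the end yields Gamma (top score), then Alpha and Beta alphabetically
assert [r[0] for r in results] == ['Beta', 'Alpha', 'Gamma']
```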
/**
* Default splitQuery function. Can be overridden in ``sphinx.search`` with a
@@ -160,13 +176,26 @@ const Search = {
_queued_query: null,
_pulse_status: -1,
- htmlToText: (htmlString) => {
+ htmlToText: (htmlString, anchor) => {
const htmlElement = new DOMParser().parseFromString(htmlString, 'text/html');
- htmlElement.querySelectorAll(".headerlink").forEach((el) => { el.remove() });
+ for (const removalQuery of [".headerlinks", "script", "style"]) {
+ htmlElement.querySelectorAll(removalQuery).forEach((el) => { el.remove() });
+ }
+ if (anchor) {
+ const anchorContent = htmlElement.querySelector(`[role="main"] ${anchor}`);
+ if (anchorContent) return anchorContent.textContent;
+
+ console.warn(
+ `Anchored content block not found. Sphinx search tries to obtain it via DOM query '[role=main] ${anchor}'. Check your theme or template.`
+ );
+ }
+
+ // if anchor not specified or not found, fall back to main content
const docContent = htmlElement.querySelector('[role="main"]');
- if (docContent !== undefined) return docContent.textContent;
+ if (docContent) return docContent.textContent;
+
console.warn(
- "Content block not found. Sphinx search tries to obtain it via '[role=main]'. Could you check your theme or template."
+ "Content block not found. Sphinx search tries to obtain it via DOM query '[role=main]'. Check your theme or template."
);
return "";
},
@@ -239,16 +268,7 @@ const Search = {
else Search.deferQuery(query);
},
- /**
- * execute search (requires search index to be loaded)
- */
- query: (query) => {
- const filenames = Search._index.filenames;
- const docNames = Search._index.docnames;
- const titles = Search._index.titles;
- const allTitles = Search._index.alltitles;
- const indexEntries = Search._index.indexentries;
-
+ _parseQuery: (query) => {
// stem the search terms and add them to the correct list
const stemmer = new Stemmer();
const searchTerms = new Set();
@@ -284,16 +304,32 @@ const Search = {
// console.info("required: ", [...searchTerms]);
// console.info("excluded: ", [...excludedTerms]);
- // array of [docname, title, anchor, descr, score, filename]
- let results = [];
+ return [query, searchTerms, excludedTerms, highlightTerms, objectTerms];
+ },
+
+ /**
+ * execute search (requires search index to be loaded)
+ */
+ _performSearch: (query, searchTerms, excludedTerms, highlightTerms, objectTerms) => {
+ const filenames = Search._index.filenames;
+ const docNames = Search._index.docnames;
+ const titles = Search._index.titles;
+ const allTitles = Search._index.alltitles;
+ const indexEntries = Search._index.indexentries;
+
+ // Collect multiple result groups to be sorted separately and then ordered.
+ // Each is an array of [docname, title, anchor, descr, score, filename].
+ const normalResults = [];
+ const nonMainIndexResults = [];
+
_removeChildren(document.getElementById("search-progress"));
- const queryLower = query.toLowerCase();
+ const queryLower = query.toLowerCase().trim();
for (const [title, foundTitles] of Object.entries(allTitles)) {
- if (title.toLowerCase().includes(queryLower) && (queryLower.length >= title.length/2)) {
+ if (title.toLowerCase().trim().includes(queryLower) && (queryLower.length >= title.length/2)) {
for (const [file, id] of foundTitles) {
let score = Math.round(100 * queryLower.length / title.length)
- results.push([
+ normalResults.push([
docNames[file],
titles[file] !== title ? `${titles[file]} > ${title}` : title,
id !== null ? "#" + id : "",
@@ -308,46 +344,47 @@ const Search = {
// search for explicit entries in index directives
for (const [entry, foundEntries] of Object.entries(indexEntries)) {
if (entry.includes(queryLower) && (queryLower.length >= entry.length/2)) {
- for (const [file, id] of foundEntries) {
- let score = Math.round(100 * queryLower.length / entry.length)
- results.push([
+ for (const [file, id, isMain] of foundEntries) {
+ const score = Math.round(100 * queryLower.length / entry.length);
+ const result = [
docNames[file],
titles[file],
id ? "#" + id : "",
null,
score,
filenames[file],
- ]);
+ ];
+ if (isMain) {
+ normalResults.push(result);
+ } else {
+ nonMainIndexResults.push(result);
+ }
}
}
}
// lookup as object
objectTerms.forEach((term) =>
- results.push(...Search.performObjectSearch(term, objectTerms))
+ normalResults.push(...Search.performObjectSearch(term, objectTerms))
);
// lookup as search terms in fulltext
- results.push(...Search.performTermsSearch(searchTerms, excludedTerms));
+ normalResults.push(...Search.performTermsSearch(searchTerms, excludedTerms));
// let the scorer override scores with a custom scoring function
- if (Scorer.score) results.forEach((item) => (item[4] = Scorer.score(item)));
-
- // now sort the results by score (in opposite order of appearance, since the
- // display function below uses pop() to retrieve items) and then
- // alphabetically
- results.sort((a, b) => {
- const leftScore = a[4];
- const rightScore = b[4];
- if (leftScore === rightScore) {
- // same score: sort alphabetically
- const leftTitle = a[1].toLowerCase();
- const rightTitle = b[1].toLowerCase();
- if (leftTitle === rightTitle) return 0;
- return leftTitle > rightTitle ? -1 : 1; // inverted is intentional
- }
- return leftScore > rightScore ? 1 : -1;
- });
+ if (Scorer.score) {
+ normalResults.forEach((item) => (item[4] = Scorer.score(item)));
+ nonMainIndexResults.forEach((item) => (item[4] = Scorer.score(item)));
+ }
+
+ // Sort each group of results by score and then alphabetically by name.
+ normalResults.sort(_orderResultsByScoreThenName);
+ nonMainIndexResults.sort(_orderResultsByScoreThenName);
+
+ // Combine the result groups in (reverse) order.
+ // Non-main index entries are typically arbitrary cross-references,
+ // so display them after other results.
+ let results = [...nonMainIndexResults, ...normalResults];
// remove duplicate search results
// note the reversing of results, so that in the case of duplicates, the highest-scoring entry is kept
@@ -361,7 +398,12 @@ const Search = {
return acc;
}, []);
- results = results.reverse();
+ return results.reverse();
+ },
+
+ query: (query) => {
+ const [searchQuery, searchTerms, excludedTerms, highlightTerms, objectTerms] = Search._parseQuery(query);
+ const results = Search._performSearch(searchQuery, searchTerms, excludedTerms, highlightTerms, objectTerms);
// for debugging
//Search.lastresults = results.slice(); // a copy
@@ -466,14 +508,18 @@ const Search = {
// add support for partial matches
if (word.length > 2) {
const escapedWord = _escapeRegExp(word);
- Object.keys(terms).forEach((term) => {
- if (term.match(escapedWord) && !terms[word])
- arr.push({ files: terms[term], score: Scorer.partialTerm });
- });
- Object.keys(titleTerms).forEach((term) => {
- if (term.match(escapedWord) && !titleTerms[word])
- arr.push({ files: titleTerms[word], score: Scorer.partialTitle });
- });
+ if (!terms.hasOwnProperty(word)) {
+ Object.keys(terms).forEach((term) => {
+ if (term.match(escapedWord))
+ arr.push({ files: terms[term], score: Scorer.partialTerm });
+ });
+ }
+ if (!titleTerms.hasOwnProperty(word)) {
+ Object.keys(titleTerms).forEach((term) => {
+ if (term.match(escapedWord))
+ arr.push({ files: titleTerms[term], score: Scorer.partialTitle });
+ });
+ }
}
// no match but word was a required one
@@ -496,9 +542,8 @@ const Search = {
// create the mapping
files.forEach((file) => {
- if (fileMap.has(file) && fileMap.get(file).indexOf(word) === -1)
- fileMap.get(file).push(word);
- else fileMap.set(file, [word]);
+ if (!fileMap.has(file)) fileMap.set(file, [word]);
+ else if (fileMap.get(file).indexOf(word) === -1) fileMap.get(file).push(word);
});
});
@@ -549,8 +594,8 @@ const Search = {
* search summary for a given text. keywords is a list
* of stemmed words.
*/
- makeSearchSummary: (htmlText, keywords) => {
- const text = Search.htmlToText(htmlText);
+ makeSearchSummary: (htmlText, keywords, anchor) => {
+ const text = Search.htmlToText(htmlText, anchor);
if (text === "") return null;
const textLower = text.toLowerCase();
diff --git a/alf_intro.html b/alf_intro.html
index 811ba46f..93558748 100644
--- a/alf_intro.html
+++ b/alf_intro.html
@@ -18,7 +18,7 @@
-
+
diff --git a/api_reference.html b/api_reference.html
index 5c0cda42..4d382d5c 100644
--- a/api_reference.html
+++ b/api_reference.html
@@ -18,7 +18,7 @@
-
+
diff --git a/contributing.html b/contributing.html
index 3e927e59..cac9b737 100644
--- a/contributing.html
+++ b/contributing.html
@@ -18,7 +18,7 @@
-
+
diff --git a/genindex.html b/genindex.html
index 216bc1b8..29624d05 100644
--- a/genindex.html
+++ b/genindex.html
@@ -17,7 +17,7 @@
-
+
diff --git a/index.html b/index.html
index c55d879f..1ada276b 100644
--- a/index.html
+++ b/index.html
@@ -18,11 +18,13 @@
-
+
+
+
diff --git a/notebooks/alyx_files.html b/notebooks/alyx_files.html
index b0ccecc3..9e160091 100644
--- a/notebooks/alyx_files.html
+++ b/notebooks/alyx_files.html
@@ -19,7 +19,7 @@
-
+
diff --git a/notebooks/data_sharing.html b/notebooks/data_sharing.html
index f057cdb5..7c0c41f8 100644
--- a/notebooks/data_sharing.html
+++ b/notebooks/data_sharing.html
@@ -19,7 +19,7 @@
-
+
diff --git a/notebooks/datasets_and_types.html b/notebooks/datasets_and_types.html
index e5fefd5f..e7f2f3b5 100644
--- a/notebooks/datasets_and_types.html
+++ b/notebooks/datasets_and_types.html
@@ -19,7 +19,7 @@
-
+
diff --git a/notebooks/experiment_ids.html b/notebooks/experiment_ids.html
index a0bd578e..bc5199de 100644
--- a/notebooks/experiment_ids.html
+++ b/notebooks/experiment_ids.html
@@ -19,7 +19,7 @@
-
+
diff --git a/notebooks/one_advanced/one_advanced.html b/notebooks/one_advanced/one_advanced.html
index a2e3df3e..276916e5 100644
--- a/notebooks/one_advanced/one_advanced.html
+++ b/notebooks/one_advanced/one_advanced.html
@@ -19,7 +19,7 @@
-
+
diff --git a/notebooks/one_list/one_list.html b/notebooks/one_list/one_list.html
index 5b4736f5..3a8b7102 100644
--- a/notebooks/one_list/one_list.html
+++ b/notebooks/one_list/one_list.html
@@ -19,7 +19,7 @@
-
+
diff --git a/notebooks/one_load/one_load.html b/notebooks/one_load/one_load.html
index 64a1d91c..0c008400 100644
--- a/notebooks/one_load/one_load.html
+++ b/notebooks/one_load/one_load.html
@@ -19,7 +19,7 @@
-
+
@@ -269,7 +269,7 @@ Collections
Revisions
Revisions provide an optional way to organize data by version. The version label is arbitrary; however, the folder name must start and end with pound signs and is typically an ISO date, e.g. “#2021-01-01#”. Unlike collections, if a specified revision is not found, the previous revision will be returned. The revisions are ordered lexicographically, as illustrated after this section.
-
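A short usage sketch of the revision behaviour described above, reusing the call shown earlier in this diff; `eid` and the revision label are placeholders:

```python
# Load the dataset as it existed under revision '2021-03-15a'; if no
# '#2021-03-15a#' folder exists, the latest earlier revision is loaded instead.
intervals = one.load_dataset(eid, 'trials.intervals.npy', revision='2021-03-15a')
```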