diff --git a/.buildinfo b/.buildinfo
index 848089c..b292330 100644
--- a/.buildinfo
+++ b/.buildinfo
@@ -1,4 +1,4 @@
 # Sphinx build info version 1
 # This file hashes the configuration used when building these files. When it is not found, a full rebuild will be done.
-config: 51f2cc419d3bb38499a014514559aa0c
+config: e81c7af78bdc45d0158326e2c90823f7
 tags: 645f666f9bcd5a90fca523b33c5a78b7
diff --git a/_modules/sharrow/dataset.html b/_modules/sharrow/dataset.html
--- a/_modules/sharrow/dataset.html
+++ b/_modules/sharrow/dataset.html
@@ -315,22 +338,20 @@ Source code for sharrow.dataset
from __future__ import annotations
@@ -386,7 +409,7 @@ Source code for sharrow.dataset
[docs]def construct(source):
"""
- A generic constructor for creating Datasets from various similar objects.
+ Create Datasets from various similar objects.
Parameters
----------
@@ -418,7 +441,7 @@ Source code for sharrow.dataset
sparse: bool = False,
preserve_cat: bool = True,
) -> Dataset:
- """Convert a pandas.DataFrame into an xarray.Dataset
+ """Convert a pandas.DataFrame into an xarray.Dataset.
Each column will be converted into an independent variable in the
Dataset. If the dataframe's index is a MultiIndex, it will be expanded
@@ -436,6 +459,13 @@ Source code for sharrow.dataset
If true, create sparse arrays instead of dense numpy arrays. This
can potentially save a large amount of memory if the DataFrame has
a MultiIndex. Requires the sparse package (sparse.pydata.org).
+ preserve_cat : bool, default True
+ If true, preserve encoding of categorical columns. Xarray lacks an
+ official implementation of a categorical datatype, so sharrow's
+ dictionary-based digital encoding is applied instead. Note that in
+ native xarray usage, the resulting variable will look like integer
+ values instead of the category values. The `dataset.cat` accessor
+ can be used to interact with the categorical data.
Returns
-------
@@ -446,7 +476,6 @@ Source code for sharrow.dataset
xarray.DataArray.from_series
pandas.DataFrame.to_xarray
"""
-
# this is much faster than the default xarray version when not
# using a MultiIndex.
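
The new `preserve_cat` behavior is easiest to see end to end. A minimal sketch, using only calls that the encoding walkthrough later in this diff demonstrates:

    import numpy as np
    import pandas as pd
    import sharrow as sh

    df = pd.DataFrame({"income": [20_000, 45_000, 90_000]})
    df["income_grp"] = pd.cut(
        df.income, bins=[-np.inf, 30_000, 60_000, np.inf], labels=["Low", "Mid", "High"]
    )
    ds = sh.dataset.construct(df)
    # the categorical column is stored as small integers plus a dictionary
    assert ds["income_grp"].dtype == "int8"
    assert set(ds["income_grp"].digital_encoding.keys()) == {"dictionary", "ordered"}
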
@@ -515,7 +544,7 @@ Source code for sharrow.dataset
index=None,
):
"""
- Convert a pyarrow.Table into an xarray.Dataset
+ Convert a pyarrow.Table into an xarray.Dataset.
Parameters
----------
@@ -620,7 +649,6 @@ Source code for sharrow.dataset
-------
Dataset
"""
-
# handle both larch.OMX and openmatrix.open_file versions
if "lar" in type(omx).__module__:
omx_data = omx.data
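
For orientation, a hedged sketch of how this branch is reached, assuming this excerpt is sharrow's module-level `from_omx` constructor and that a local `skims.omx` file exists (the file name is illustrative):

    import openmatrix as omx
    import sharrow as sh

    f = omx.open_file("skims.omx")  # openmatrix handle, not larch.OMX
    skims = sh.dataset.from_omx(f)  # "lar" not in the module name -> openmatrix branch
    f.close()
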
@@ -995,9 +1023,7 @@ Source code for sharrow.dataset
@xr.register_dataset_accessor("single_dim")
class _SingleDim:
- """
- Convenience accessor for single-dimension datasets.
- """
+ """Convenience accessor for single-dimension datasets."""
__slots__ = ("dataset", "dim_name")
@@ -1139,9 +1165,7 @@ Source code for sharrow.dataset
@xr.register_dataarray_accessor("single_dim")
class _SingleDimArray:
- """
- Convenience accessor for single-dimension datasets.
- """
+ """Convenience accessor for single-dimension datasets."""
__slots__ = ("dataarray", "dim_name")
@@ -1494,7 +1518,7 @@ Source code for sharrow.dataset
from .relationships import sparse_array_type
def to_numpy(var):
- """Coerces wrapped data to numpy and returns a numpy.ndarray"""
+ """Coerces wrapped data to numpy and returns a numpy.ndarray."""
data = var.data
if hasattr(data, "chunks"):
data = data.compute()
@@ -1518,7 +1542,7 @@ Source code for sharrow.dataset
@register_dataset_method
def select_and_rename(self, name_dict=None, **names):
"""
- Select and rename variables from this Dataset
+ Select and rename variables from this Dataset.
Parameters
----------
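
A brief usage sketch; the mapping direction is an assumption (current name to new name, matching `xarray.Dataset.rename`), since the parameter table is cut off in this excerpt:

    # assumed semantics: keep only the listed variables, renaming {current: new}
    smaller = skims.select_and_rename({"DIST": "distance"})
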
diff --git a/_modules/sharrow/flows.html b/_modules/sharrow/flows.html
index 275afde..f8bd6a0 100644
--- a/_modules/sharrow/flows.html
+++ b/_modules/sharrow/flows.html
@@ -3,12 +3,12 @@
- sharrow.flows — v2.6.0
+ sharrow.flows — v2.7.0
Source code for sharrow.flows
import ast
@@ -1329,7 +1352,7 @@ Source code for sharrow.flows
bool_wrapping=False,
):
"""
- Initialize up to the flow_hash
+ Initialize up to the flow_hash.
See main docstring for arguments.
"""
@@ -1357,7 +1380,9 @@ Source code for sharrow.flows
all_raw_names |= attribute_pairs.get(self.tree.root_node_name, set())
all_raw_names |= subscript_pairs.get(self.tree.root_node_name, set())
- dimensions_ordered = presorted(self.tree.dims, self.dim_order, self.dim_exclude)
+ dimensions_ordered = presorted(
+ self.tree.sizes, self.dim_order, self.dim_exclude
+ )
index_slots = {i: n for n, i in enumerate(dimensions_ordered)}
self.arg_name_positions = index_slots
self.arg_names = dimensions_ordered
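
These `dims` → `sizes` swaps recur throughout this file. Recent xarray releases warn that `Dataset.dims` will eventually behave like a set of dimension names, while `Dataset.sizes` remains the name-to-length mapping that `presorted` iterates over, so the switch silences the deprecation without changing behavior:

    # .sizes is unambiguously the {name: length} mapping
    for name, length in dataset.sizes.items():  # e.g. ("otaz", 25), ("dtaz", 25)
        print(name, length)
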
@@ -1381,7 +1406,7 @@ Source code for sharrow.flows
self._used_aux_vars.append(aux_var)
subspace_names = set()
- for (k, _) in self.tree.subspaces_iter():
+ for k, _ in self.tree.subspaces_iter():
subspace_names.add(k)
for k in self.tree.subspace_fallbacks:
subspace_names.add(k)
@@ -1390,7 +1415,7 @@ Source code for sharrow.flows
)
self._optional_get_tokens = []
if optional_get_tokens:
- for (_spacename, _varname) in optional_get_tokens:
+ for _spacename, _varname in optional_get_tokens:
found = False
if (
_spacename in self.tree.subspaces
@@ -1509,7 +1534,7 @@ Source code for sharrow.flows
return {
i: n
for n, i in enumerate(
- presorted(self.tree.dims, self.dim_order, self.dim_exclude)
+ presorted(self.tree.sizes, self.dim_order, self.dim_exclude)
)
}
@@ -1526,7 +1551,7 @@ Source code for sharrow.flows
index_slots = {
i: n
for n, i in enumerate(
- presorted(self.tree.dims, self.dim_order, self.dim_exclude)
+ presorted(self.tree.sizes, self.dim_order, self.dim_exclude)
)
}
self.arg_name_positions = index_slots
@@ -1821,6 +1846,7 @@ Source code for sharrow.flows
with_root_node_name=None,
):
"""
+ Second step in initialization, only used if the flow is not cached.
Parameters
----------
@@ -1842,7 +1868,6 @@ Source code for sharrow.flows
be sure to avoid name conflicts with other flows in the same
directory.
"""
-
if self._hashing_level <= 1:
func_code, all_name_tokens = self.init_sub_funcs(
defs,
@@ -1972,7 +1997,6 @@ Source code for sharrow.flows
with rewrite(
os.path.join(self.cache_dir, self.name, "__init__.py"), "wt"
) as f_code:
-
f_code.write(
textwrap.dedent(
f"""
@@ -2026,13 +2050,17 @@ Source code for sharrow.flows
f_code.write("\n\n# machinery code\n\n")
if self.tree.relationships_are_digitized:
+ if with_root_node_name is None:
+ with_root_node_name = self.tree.root_node_name
if with_root_node_name is None:
with_root_node_name = self.tree.root_node_name
root_dims = list(
presorted(
- self.tree._graph.nodes[with_root_node_name]["dataset"].dims,
+ self.tree._graph.nodes[with_root_node_name][
+ "dataset"
+ ].sizes,
self.dim_order,
self.dim_exclude,
)
@@ -2110,7 +2138,6 @@ Source code for sharrow.flows
raise ValueError(f"invalid n_root_dims {n_root_dims}")
else:
-
raise RuntimeError("digitization is now required")
f_code.write(blacken(textwrap.dedent(line_template)))
@@ -2245,7 +2272,7 @@ Source code for sharrow.flows
# raise the inner key error which is more helpful
context = getattr(err, "__context__", None)
if context:
- raise context
+ raise context from None
else:
raise err
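
The added `from None` suppresses implicit exception chaining, so the traceback shows only the inner, more informative error rather than a "During handling of the above exception, another exception occurred" cascade. The pattern in isolation (the failing call is hypothetical):

    try:
        run_compiled_flow()          # hypothetical call whose inner KeyError is the real story
    except ValueError as err:
        context = getattr(err, "__context__", None)
        if context:
            raise context from None  # re-raise the inner error, hiding the chain
        else:
            raise err
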
@@ -2355,11 +2382,11 @@ Source code for sharrow.flows
kwargs["mask"] = mask
if self.with_root_node_name is None:
- tree_root_dims = rg.root_dataset.dims
+ tree_root_dims = rg.root_dataset.sizes
else:
tree_root_dims = rg._graph.nodes[self.with_root_node_name][
"dataset"
- ].dims
+ ].sizes
argshape = [
tree_root_dims[i]
for i in presorted(tree_root_dims, self.dim_order, self.dim_exclude)
@@ -2573,12 +2600,12 @@ Source code for sharrow.flows
if self.with_root_node_name is None:
use_dims = list(
- presorted(source.root_dataset.dims, self.dim_order, self.dim_exclude)
+ presorted(source.root_dataset.sizes, self.dim_order, self.dim_exclude)
)
else:
use_dims = list(
presorted(
- source._graph.nodes[self.with_root_node_name]["dataset"].dims,
+ source._graph.nodes[self.with_root_node_name]["dataset"].sizes,
self.dim_order,
self.dim_exclude,
)
@@ -2748,7 +2775,6 @@ Source code for sharrow.flows
{k: result[:, n] for n, k in enumerate(self._raw_functions.keys())}
)
elif as_dataarray:
-
if result_squeeze:
result = squeeze(result, result_squeeze)
result_p = squeeze(result_p, result_squeeze)
@@ -3076,8 +3102,7 @@ Source code for sharrow.flows
self._raw_functions[name] = (None, None, set(), [])
def _spill(self, all_name_tokens=None):
- cmds = [self.tree._spill(all_name_tokens)]
- cmds.append("\n")
+ cmds = ["\n"]
cmds.append(f"output_name_positions = {self.output_name_positions!r}")
cmds.append(f"function_names = {self.function_names!r}")
return "\n".join(cmds)
@@ -3156,7 +3181,7 @@ Source code for sharrow.flows
selected_args = tuple(general_mapping[k] for k in named_args)
len_self_raw_functions = len(self._raw_functions)
- tree_root_dims = source.root_dataset.dims
+ tree_root_dims = source.root_dataset.sizes
argshape = tuple(
tree_root_dims[i]
for i in presorted(tree_root_dims, self.dim_order, self.dim_exclude)
diff --git a/_modules/sharrow/relationships.html b/_modules/sharrow/relationships.html
index b6a0ade..25680cf 100644
--- a/_modules/sharrow/relationships.html
+++ b/_modules/sharrow/relationships.html
@@ -3,12 +3,12 @@
- sharrow.relationships — v2.6.0
+ sharrow.relationships — v2.7.0
Source code for sharrow.relationships
import ast
@@ -461,7 +484,7 @@ Source code for sharrow.relationships
def _dataarray_to_numpy(self) -> np.ndarray:
- """Coerces wrapped data to numpy and returns a numpy.ndarray"""
+ """Coerces wrapped data to numpy and returns a numpy.ndarray."""
data = self.data
if isinstance(data, dask_array_type):
data = data.compute()
@@ -472,9 +495,7 @@ Source code for sharrow.relationships
[docs]class Relationship:
- """
- Defines a linkage between datasets in a `DataTree`.
- """
+ """Defines a linkage between datasets in a `DataTree`."""
def __init__(
self,
@@ -850,7 +871,7 @@ Source code for sharrow.relationships
return Relationship(parent_data=parent, child_data=child, **attrs)
def list_relationships(self) -> list[Relationship]:
- """list : List all relationships defined in this tree."""
+ """List : List all relationships defined in this tree."""
result = []
for e in self._graph.edges:
result.append(self._get_relationship(e))
@@ -884,7 +905,6 @@ Source code for sharrow.relationships
self.digitize_relationships(inplace=True)
def add_items(self, items):
-
from collections.abc import Mapping, Sequence
if isinstance(items, Sequence):
@@ -1014,7 +1034,6 @@ Source code for sharrow.relationships
just_node_name=False,
dim_names_from_top=False,
):
-
if isinstance(item, (list, tuple)):
from .dataset import Dataset
@@ -1097,7 +1116,7 @@ Source code for sharrow.relationships
# path_indexing = self._graph.edges[path[-1]].get('indexing')
t1 = None
# intermediate nodes on path
- for (e, e_next) in zip(path[:-1], path[1:]):
+ for e, e_next in zip(path[:-1], path[1:]):
r = self._get_relationship(e)
r_next = self._get_relationship(e_next)
if t1 is None:
@@ -1213,7 +1232,7 @@ Source code for sharrow.relationships
def contains_subspace(self, key) -> bool:
"""
- Is this named Dataset in this tree's subspaces
+ Is this named Dataset in this tree's subspaces.
Parameters
----------
@@ -1227,7 +1246,7 @@ Source code for sharrow.relationships
def get_subspace(self, key, default_empty=False) -> xr.Dataset:
"""
- Access named Dataset from this tree's subspaces
+ Access named Dataset from this tree's subspaces.
Parameters
----------
@@ -1263,9 +1282,7 @@ Source code for sharrow.relationships
@property
def dims(self):
- """
- Mapping from dimension names to lengths across all dataset nodes.
- """
+ """Mapping from dimension names to lengths across all dataset nodes."""
dims = {}
for _k, v in self.subspaces_iter():
for name, length in v.dims.items():
@@ -1278,6 +1295,8 @@ Source code for sharrow.relationships
dims[name] = length
return xr.core.utils.Frozen(dims)
+ sizes = dims # alternate name
+
def dims_detail(self):
"""
Report on the names and sizes of dimensions in all Dataset nodes.
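
The new `sizes` alias mirrors xarray, so the `presorted(self.tree.sizes, ...)` calls in flows work the same whether handed a `DataTree` or a plain `Dataset`. A sketch, assuming a `tree` built as in the walkthroughs below:

    assert dict(tree.sizes) == dict(tree.dims)  # same Frozen mapping under both names
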
@@ -1312,7 +1331,6 @@ Source code for sharrow.relationships
Returns self if dropping inplace, otherwise returns a copy
with dimensions dropped.
"""
-
if isinstance(dims, str):
dims = [dims]
if inplace:
@@ -1346,7 +1364,7 @@ Source code for sharrow.relationships
while boot_queue:
b = boot_queue.pop()
booted.add(b)
- for (up, dn, _n) in obj._graph.edges.keys():
+ for up, dn, _n in obj._graph.edges.keys():
if up == b:
boot_queue.add(dn)
@@ -1564,21 +1582,6 @@ Source code for sharrow.relationships
with_root_node_name=with_root_node_name,
)
- def _spill(self, all_name_tokens=()):
- """
- Write backup code for sharrow-lite.
-
- Parameters
- ----------
- all_name_tokens
-
- Returns
- -------
-
- """
- cmds = []
- return "\n".join(cmds)
-
def get_named_array(self, mangled_name):
if mangled_name[:2] != "__":
raise KeyError(mangled_name)
@@ -1618,7 +1621,6 @@ Source code for sharrow.relationships
DataTree or None
Only returns a copy if not digitizing in-place.
"""
-
if inplace:
obj = self
else:
@@ -1692,7 +1694,7 @@ Source code for sharrow.relationships
@property
def relationships_are_digitized(self):
- """bool : Whether all relationships are digital (by position)."""
+ """Bool : Whether all relationships are digital (by position)."""
for e in self._graph.edges:
r = self._get_relationship(e)
if r.indexing != "position":
@@ -1702,7 +1704,6 @@ Source code for sharrow.relationships
def _arg_tokenizer(
self, spacename, spacearray, spacearrayname, exclude_dims=None, blends=None
):
-
if blends is None:
blends = {}
diff --git a/_sources/walkthrough/encoding.ipynb b/_sources/walkthrough/encoding.ipynb
index 3976978..cb9b7a7 100644
--- a/_sources/walkthrough/encoding.ipynb
+++ b/_sources/walkthrough/encoding.ipynb
@@ -23,7 +23,8 @@
"source": [
"# HIDDEN\n",
"import warnings\n",
- "warnings.filterwarnings(\"ignore\", category=DeprecationWarning) "
+ "\n",
+ "warnings.filterwarnings(\"ignore\", category=DeprecationWarning)"
]
},
{
@@ -38,9 +39,9 @@
"import numpy as np\n",
"import pandas as pd\n",
"import xarray as xr\n",
- "from io import StringIO\n",
"\n",
"import sharrow as sh\n",
+ "\n",
"sh.__version__"
]
},
@@ -57,6 +58,7 @@
"source": [
"# check versions\n",
"import packaging\n",
+ "\n",
"assert packaging.version.parse(xr.__version__) >= packaging.version.parse(\"0.20.2\")"
]
},
@@ -146,7 +148,7 @@
"metadata": {},
"outputs": [],
"source": [
- "from sharrow.digital_encoding import array_encode, array_decode"
+ "from sharrow.digital_encoding import array_decode, array_encode"
]
},
{
@@ -165,7 +167,7 @@
"metadata": {},
"outputs": [],
"source": [
- "skims.DIST.values[:2,:3]"
+ "skims.DIST.values[:2, :3]"
]
},
{
@@ -210,7 +212,7 @@
"outputs": [],
"source": [
"distance_encoded = array_encode(skims.DIST, scale=0.01, offset=0)\n",
- "distance_encoded.values[:2,:3]"
+ "distance_encoded.values[:2, :3]"
]
},
{
@@ -227,10 +229,14 @@
"# TEST encoding\n",
"assert distance_encoded.dtype == np.int16\n",
"np.testing.assert_array_equal(\n",
- " distance_encoded.values[:2,:3],\n",
- " np.array([[12, 24, 44], [37, 14, 28]], dtype=np.int16)\n",
+ " distance_encoded.values[:2, :3],\n",
+ " np.array([[12, 24, 44], [37, 14, 28]], dtype=np.int16),\n",
")\n",
- "assert distance_encoded.attrs['digital_encoding'] == {'scale': 0.01, 'offset': 0, 'missing_value': None}"
+ "assert distance_encoded.attrs[\"digital_encoding\"] == {\n",
+ " \"scale\": 0.01,\n",
+ " \"offset\": 0,\n",
+ " \"missing_value\": None,\n",
+ "}"
]
},
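
The arithmetic behind those numbers is simply value ≈ encoded * scale + offset, so 0.12 is stored as the int16 value 12. A round-trip sketch with `array_decode` (imported above), assuming it reads the encoding from the array's attrs as its pairing with `array_encode` suggests; the tolerance is half the scale, the worst-case rounding error:

    decoded = array_decode(distance_encoded)
    np.testing.assert_allclose(
        decoded.values[:2, :3], skims.DIST.values[:2, :3], atol=0.005
    )
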
{
@@ -249,9 +255,7 @@
"metadata": {},
"outputs": [],
"source": [
- "skims_encoded = skims.assign(\n",
- " {'DIST': array_encode(skims.DIST, scale=0.01, offset=0)}\n",
- ")"
+ "skims_encoded = skims.assign({\"DIST\": array_encode(skims.DIST, scale=0.01, offset=0)})"
]
},
{
@@ -271,7 +275,9 @@
"metadata": {},
"outputs": [],
"source": [
- "skims_encoded = skims_encoded.digital_encoding.set(['DISTWALK', 'DISTBIKE'], scale=0.01, offset=0)"
+ "skims_encoded = skims_encoded.digital_encoding.set(\n",
+ " [\"DISTWALK\", \"DISTBIKE\"], scale=0.01, offset=0\n",
+ ")"
]
},
{
@@ -305,9 +311,9 @@
"source": [
"# TEST\n",
"assert skims_encoded.digital_encoding.info() == {\n",
- " 'DIST': {'scale': 0.01, 'offset': 0, 'missing_value': None},\n",
- " 'DISTBIKE': {'scale': 0.01, 'offset': 0, 'missing_value': None},\n",
- " 'DISTWALK': {'scale': 0.01, 'offset': 0, 'missing_value': None},\n",
+ " \"DIST\": {\"scale\": 0.01, \"offset\": 0, \"missing_value\": None},\n",
+ " \"DISTBIKE\": {\"scale\": 0.01, \"offset\": 0, \"missing_value\": None},\n",
+ " \"DISTWALK\": {\"scale\": 0.01, \"offset\": 0, \"missing_value\": None},\n",
"}"
]
},
@@ -330,16 +336,16 @@
"metadata": {},
"outputs": [],
"source": [
- "pairs = pd.DataFrame({'orig': [0,0,0,1,1,1], 'dest': [0,1,2,0,1,2]})\n",
+ "pairs = pd.DataFrame({\"orig\": [0, 0, 0, 1, 1, 1], \"dest\": [0, 1, 2, 0, 1, 2]})\n",
"tree = sh.DataTree(\n",
- " base=pairs, \n",
- " skims=skims.drop_dims('time_period'), \n",
+ " base=pairs,\n",
+ " skims=skims.drop_dims(\"time_period\"),\n",
" relationships=(\n",
" \"base.orig -> skims.otaz\",\n",
" \"base.dest -> skims.dtaz\",\n",
" ),\n",
")\n",
- "flow = tree.setup_flow({'d1': 'DIST', 'd2': 'DIST**2'})\n",
+ "flow = tree.setup_flow({\"d1\": \"DIST\", \"d2\": \"DIST**2\"})\n",
"arr = flow.load()\n",
"arr"
]
@@ -361,14 +367,14 @@
"outputs": [],
"source": [
"tree_enc = sh.DataTree(\n",
- " base=pairs, \n",
- " skims=skims_encoded.drop_dims('time_period'), \n",
+ " base=pairs,\n",
+ " skims=skims_encoded.drop_dims(\"time_period\"),\n",
" relationships=(\n",
" \"base.orig -> skims.otaz\",\n",
" \"base.dest -> skims.dtaz\",\n",
" ),\n",
")\n",
- "flow_enc = tree_enc.setup_flow({'d1': 'DIST', 'd2': 'DIST**2'})\n",
+ "flow_enc = tree_enc.setup_flow({\"d1\": \"DIST\", \"d2\": \"DIST**2\"})\n",
"arr_enc = flow_enc.load()\n",
"arr_enc"
]
@@ -440,7 +446,7 @@
"metadata": {},
"outputs": [],
"source": [
- "skims.WLK_LOC_WLK_FAR.values[:2,:3,:]"
+ "skims.WLK_LOC_WLK_FAR.values[:2, :3, :]"
]
},
{
@@ -460,7 +466,7 @@
"outputs": [],
"source": [
"wlwfare_enc = array_encode(skims.WLK_LOC_WLK_FAR, bitwidth=8, by_dict=True)\n",
- "wlwfare_enc.values[:2,:3,:]"
+ "wlwfare_enc.values[:2, :3, :]"
]
},
{
@@ -470,7 +476,7 @@
"metadata": {},
"outputs": [],
"source": [
- "wlwfare_enc.attrs['digital_encoding']['dictionary']"
+ "wlwfare_enc.attrs[\"digital_encoding\"][\"dictionary\"]"
]
},
{
@@ -487,18 +493,18 @@
"# TEST encoding\n",
"assert wlwfare_enc.dtype == np.uint8\n",
"np.testing.assert_array_equal(\n",
- " wlwfare_enc.values[:2,:3,:],\n",
- " np.array([[[0, 0, 0, 0, 0],\n",
- " [1, 2, 2, 1, 2],\n",
- " [1, 2, 2, 1, 2]],\n",
- "\n",
- " [[1, 1, 2, 2, 1],\n",
- " [0, 0, 0, 0, 0],\n",
- " [1, 2, 2, 1, 2]]], dtype=np.uint8)\n",
+ " wlwfare_enc.values[:2, :3, :],\n",
+ " np.array(\n",
+ " [\n",
+ " [[0, 0, 0, 0, 0], [1, 2, 2, 1, 2], [1, 2, 2, 1, 2]],\n",
+ " [[1, 1, 2, 2, 1], [0, 0, 0, 0, 0], [1, 2, 2, 1, 2]],\n",
+ " ],\n",
+ " dtype=np.uint8,\n",
+ " ),\n",
")\n",
"np.testing.assert_array_equal(\n",
- " wlwfare_enc.attrs['digital_encoding']['dictionary'],\n",
- " np.array([ 0., 152., 474., 626.], dtype=np.float32)\n",
+ " wlwfare_enc.attrs[\"digital_encoding\"][\"dictionary\"],\n",
+ " np.array([0.0, 152.0, 474.0, 626.0], dtype=np.float32),\n",
")"
]
},
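
Because `by_dict=True` stores indices into the dictionary, decoding is ordinary fancy indexing; a sketch grounded in the values just tested:

    lookup = wlwfare_enc.attrs["digital_encoding"]["dictionary"]
    fares = lookup[wlwfare_enc.values]  # uint8 index -> float32 fare
    assert fares[0, 1, 0] == 152.0      # index 1 in the dictionary above
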
@@ -561,12 +567,14 @@
"outputs": [],
"source": [
"skims1 = skims.digital_encoding.set(\n",
- " ['WLK_LOC_WLK_FAR', \n",
- " 'WLK_EXP_WLK_FAR', \n",
- " 'WLK_HVY_WLK_FAR', \n",
- " 'DRV_LOC_WLK_FAR',\n",
- " 'DRV_HVY_WLK_FAR',\n",
- " 'DRV_EXP_WLK_FAR'],\n",
+ " [\n",
+ " \"WLK_LOC_WLK_FAR\",\n",
+ " \"WLK_EXP_WLK_FAR\",\n",
+ " \"WLK_HVY_WLK_FAR\",\n",
+ " \"DRV_LOC_WLK_FAR\",\n",
+ " \"DRV_HVY_WLK_FAR\",\n",
+ " \"DRV_EXP_WLK_FAR\",\n",
+ " ],\n",
" joint_dict=True,\n",
")"
]
@@ -591,8 +599,7 @@
"outputs": [],
"source": [
"skims1 = skims1.digital_encoding.set(\n",
- " ['DISTBIKE', \n",
- " 'DISTWALK'],\n",
+ " [\"DISTBIKE\", \"DISTWALK\"],\n",
" joint_dict=\"jointWB\",\n",
")"
]
@@ -638,9 +645,9 @@
"outputs": [],
"source": [
"tree1 = sh.DataTree(\n",
- " base=pairs, \n",
- " skims=skims1, \n",
- " rskims=skims1, \n",
+ " base=pairs,\n",
+ " skims=skims1,\n",
+ " rskims=skims1,\n",
" relationships=(\n",
" \"base.orig -> skims.otaz\",\n",
" \"base.dest -> skims.dtaz\",\n",
@@ -648,15 +655,18 @@
" \"base.dest -> rskims.otaz\",\n",
" ),\n",
")\n",
- "flow1 = tree1.setup_flow({\n",
- " 'd1': 'skims[\"WLK_LOC_WLK_FAR\", \"AM\"]', \n",
- " 'd2': 'skims[\"WLK_LOC_WLK_FAR\", \"AM\"]**2',\n",
- " 'w1': 'skims.DISTWALK',\n",
- " 'w2': 'skims.reverse(\"DISTWALK\")',\n",
- " 'w3': 'rskims.DISTWALK',\n",
- " 'x1': 'skims.DIST',\n",
- " 'x2': 'skims.reverse(\"DIST\")',\n",
- "}, hashing_level=2)\n",
+ "flow1 = tree1.setup_flow(\n",
+ " {\n",
+ " \"d1\": 'skims[\"WLK_LOC_WLK_FAR\", \"AM\"]',\n",
+ " \"d2\": 'skims[\"WLK_LOC_WLK_FAR\", \"AM\"]**2',\n",
+ " \"w1\": \"skims.DISTWALK\",\n",
+ " \"w2\": 'skims.reverse(\"DISTWALK\")',\n",
+ " \"w3\": \"rskims.DISTWALK\",\n",
+ " \"x1\": \"skims.DIST\",\n",
+ " \"x2\": 'skims.reverse(\"DIST\")',\n",
+ " },\n",
+ " hashing_level=2,\n",
+ ")\n",
"arr1 = flow1.load_dataframe()\n",
"arr1"
]
@@ -673,13 +683,72 @@
"outputs": [],
"source": [
"# TEST\n",
- "assert (arr1 == np.array([[ 0.00000e+00, 0.00000e+00, 1.20000e-01, 1.20000e-01, 1.20000e-01, 1.20000e-01, 1.20000e-01],\n",
- " [ 4.74000e+02, 2.24676e+05, 2.40000e-01, 3.70000e-01, 3.70000e-01, 2.40000e-01, 3.70000e-01],\n",
- " [ 4.74000e+02, 2.24676e+05, 4.40000e-01, 5.70000e-01, 5.70000e-01, 4.40000e-01, 5.70000e-01],\n",
- " [ 1.52000e+02, 2.31040e+04, 3.70000e-01, 2.40000e-01, 2.40000e-01, 3.70000e-01, 2.40000e-01],\n",
- " [ 0.00000e+00, 0.00000e+00, 1.40000e-01, 1.40000e-01, 1.40000e-01, 1.40000e-01, 1.40000e-01],\n",
- " [ 4.74000e+02, 2.24676e+05, 2.80000e-01, 2.80000e-01, 2.80000e-01, 2.80000e-01, 2.80000e-01]],\n",
- " dtype=np.float32)).all().all()"
+ "assert (\n",
+ " (\n",
+ " arr1\n",
+ " == np.array(\n",
+ " [\n",
+ " [\n",
+ " 0.00000e00,\n",
+ " 0.00000e00,\n",
+ " 1.20000e-01,\n",
+ " 1.20000e-01,\n",
+ " 1.20000e-01,\n",
+ " 1.20000e-01,\n",
+ " 1.20000e-01,\n",
+ " ],\n",
+ " [\n",
+ " 4.74000e02,\n",
+ " 2.24676e05,\n",
+ " 2.40000e-01,\n",
+ " 3.70000e-01,\n",
+ " 3.70000e-01,\n",
+ " 2.40000e-01,\n",
+ " 3.70000e-01,\n",
+ " ],\n",
+ " [\n",
+ " 4.74000e02,\n",
+ " 2.24676e05,\n",
+ " 4.40000e-01,\n",
+ " 5.70000e-01,\n",
+ " 5.70000e-01,\n",
+ " 4.40000e-01,\n",
+ " 5.70000e-01,\n",
+ " ],\n",
+ " [\n",
+ " 1.52000e02,\n",
+ " 2.31040e04,\n",
+ " 3.70000e-01,\n",
+ " 2.40000e-01,\n",
+ " 2.40000e-01,\n",
+ " 3.70000e-01,\n",
+ " 2.40000e-01,\n",
+ " ],\n",
+ " [\n",
+ " 0.00000e00,\n",
+ " 0.00000e00,\n",
+ " 1.40000e-01,\n",
+ " 1.40000e-01,\n",
+ " 1.40000e-01,\n",
+ " 1.40000e-01,\n",
+ " 1.40000e-01,\n",
+ " ],\n",
+ " [\n",
+ " 4.74000e02,\n",
+ " 2.24676e05,\n",
+ " 2.80000e-01,\n",
+ " 2.80000e-01,\n",
+ " 2.80000e-01,\n",
+ " 2.80000e-01,\n",
+ " 2.80000e-01,\n",
+ " ],\n",
+ " ],\n",
+ " dtype=np.float32,\n",
+ " )\n",
+ " )\n",
+ " .all()\n",
+ " .all()\n",
+ ")"
]
},
{
@@ -694,11 +763,13 @@
"outputs": [],
"source": [
"# TEST\n",
- "assert skims1.digital_encoding.baggage(['WLK_LOC_WLK_FAR']) == {'joined_0_offsets'}\n",
- "assert (skims1.iat(\n",
- " otaz=[0,1,2], dtaz=[0,0,0], time_period=[1,1,1],\n",
- " _name='WLK_LOC_WLK_FAR'\n",
- ").to_series() == [0,152,474]).all()"
+ "assert skims1.digital_encoding.baggage([\"WLK_LOC_WLK_FAR\"]) == {\"joined_0_offsets\"}\n",
+ "assert (\n",
+ " skims1.iat(\n",
+ " otaz=[0, 1, 2], dtaz=[0, 0, 0], time_period=[1, 1, 1], _name=\"WLK_LOC_WLK_FAR\"\n",
+ " ).to_series()\n",
+ " == [0, 152, 474]\n",
+ ").all()"
]
},
{
@@ -728,8 +799,10 @@
"outputs": [],
"source": [
"hh = sh.example_data.get_households()\n",
- "hh[\"income_grp\"] = pd.cut(hh.income, bins=[-np.inf,30000,60000,np.inf], labels=['Low', \"Mid\", \"High\"])\n",
- "hh = hh[[\"income\",\"income_grp\"]]\n",
+ "hh[\"income_grp\"] = pd.cut(\n",
+ " hh.income, bins=[-np.inf, 30000, 60000, np.inf], labels=[\"Low\", \"Mid\", \"High\"]\n",
+ ")\n",
+ "hh = hh[[\"income\", \"income_grp\"]]\n",
"hh.head()"
]
},
@@ -762,7 +835,7 @@
},
"outputs": [],
"source": [
- "hh_dataset = sh.dataset.construct(hh[[\"income\",\"income_grp\"]])\n",
+ "hh_dataset = sh.dataset.construct(hh[[\"income\", \"income_grp\"]])\n",
"hh_dataset"
]
},
@@ -803,9 +876,12 @@
"source": [
"# TESTING\n",
"assert hh_dataset[\"income_grp\"].dtype == \"int8\"\n",
- "assert hh_dataset[\"income_grp\"].digital_encoding.keys() == {'dictionary', 'ordered'}\n",
- "assert all(hh_dataset[\"income_grp\"].digital_encoding['dictionary'] == np.array(['Low', 'Mid', 'High'], dtype='= packaging.version.parse(\"0.20.2\")"
]
},
@@ -84,7 +87,7 @@
"source": [
"# TEST households content\n",
"assert len(households) == 5000\n",
- "assert \"income\" in households \n",
+ "assert \"income\" in households\n",
"assert households.index.name == \"HHID\""
]
},
@@ -112,7 +115,7 @@
"source": [
"assert len(persons) == 8212\n",
"assert \"household_id\" in persons\n",
- "assert persons.index.name == 'PERID'"
+ "assert persons.index.name == \"PERID\""
]
},
{
@@ -178,13 +181,17 @@
"source": [
"def random_tours(n_tours=100_000, seed=42):\n",
" rng = np.random.default_rng(seed)\n",
- " n_zones = skims.dims['dtaz']\n",
- " return pd.DataFrame({\n",
- " 'PERID': rng.choice(persons.index, size=n_tours),\n",
- " 'dest_taz_idx': rng.choice(n_zones, size=n_tours),\n",
- " 'out_time_period': rng.choice(skims.time_period, size=n_tours),\n",
- " 'in_time_period': rng.choice(skims.time_period, size=n_tours),\n",
- " }).rename_axis(\"TOURIDX\")\n",
+ " n_zones = skims.dims[\"dtaz\"]\n",
+ " return pd.DataFrame(\n",
+ " {\n",
+ " \"PERID\": rng.choice(persons.index, size=n_tours),\n",
+ " \"dest_taz_idx\": rng.choice(n_zones, size=n_tours),\n",
+ " \"out_time_period\": rng.choice(skims.time_period, size=n_tours),\n",
+ " \"in_time_period\": rng.choice(skims.time_period, size=n_tours),\n",
+ " }\n",
+ " ).rename_axis(\"TOURIDX\")\n",
+ "\n",
+ "\n",
"tours = random_tours()\n",
"tours.head()"
]
@@ -269,7 +276,7 @@
"metadata": {},
"outputs": [],
"source": [
- "spec = pd.read_csv(StringIO(mini_spec), index_col='Label')\n",
+ "spec = pd.read_csv(StringIO(mini_spec), index_col=\"Label\")\n",
"spec"
]
},
@@ -286,7 +293,7 @@
"source": [
"# TEST check spec\n",
"assert spec.index.name == \"Label\"\n",
- "assert all(spec.columns == ['Expression', 'DRIVE', 'WALK', 'TRANSIT'])"
+ "assert all(spec.columns == [\"Expression\", \"DRIVE\", \"WALK\", \"TRANSIT\"])"
]
},
{
@@ -309,7 +316,7 @@
"metadata": {},
"outputs": [],
"source": [
- "income_breakpoints = nb.typed.Dict.empty(nb.types.int32,nb.types.int32)\n",
+ "income_breakpoints = nb.typed.Dict.empty(nb.types.int32, nb.types.int32)\n",
"income_breakpoints[0] = 15000\n",
"income_breakpoints[1] = 30000\n",
"income_breakpoints[2] = 60000\n",
@@ -331,12 +338,12 @@
" \"tour.in_time_period @ dot_skims.time_period\",\n",
" ),\n",
" extra_vars={\n",
- " 'shortwait': 3,\n",
- " 'one': 1,\n",
+ " \"shortwait\": 3,\n",
+ " \"one\": 1,\n",
" },\n",
" aux_vars={\n",
- " 'short_i_wait_mult': 0.75,\n",
- " 'income_breakpoints': income_breakpoints,\n",
+ " \"short_i_wait_mult\": 0.75,\n",
+ " \"income_breakpoints\": income_breakpoints,\n",
" },\n",
")"
]
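
Both `extra_vars` (baked in at compile time) and `aux_vars` (supplied at run time) feed the jitted flow, so `aux_vars` values must be numba-compatible: scalars, numpy arrays, or typed containers like the `nb.typed.Dict` built above; a plain Python dict cannot cross into nopython code. They can also be swapped later without recompiling, as the replacement test further down shows (values here are illustrative):

    tree_a = tree.replace_datasets(tour=tours)
    tree_a.aux_vars["income_breakpoints"] = np.asarray([15000, 30000, 60000])
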
@@ -414,9 +421,9 @@
"outputs": [],
"source": [
"# TEST\n",
- "from pytest import approx\n",
- "assert flow.tree.aux_vars['short_i_wait_mult'] == 0.75\n",
- "assert flow.tree.aux_vars['income_breakpoints'][2] == 60000"
+ "\n",
+ "assert flow.tree.aux_vars[\"short_i_wait_mult\"] == 0.75\n",
+ "assert flow.tree.aux_vars[\"income_breakpoints\"][2] == 60000"
]
},
{
@@ -443,16 +450,21 @@
"# TEST utility data\n",
"assert flow.check_cache_misses(fresh=False)\n",
"actual = flow.load()\n",
- "expected = np.array([[ 9.4 , 16.9572 , 4.5 , 0. , 1. ],\n",
- " [ 9.32 , 14.3628 , 4.5 , 1. , 1. ],\n",
- " [ 7.62 , 11.0129 , 4.5 , 1. , 1. ],\n",
- " [ 4.25 , 7.6692 , 2.50065 , 0. , 1. ],\n",
- " [ 6.16 , 8.2186 , 3.387825, 0. , 1. ],\n",
- " [ 4.86 , 4.9288 , 4.5 , 0. , 1. ],\n",
- " [ 1.07 , 0. , 0. , 0. , 1. ],\n",
- " [ 8.52 , 11.615499, 3.260325, 0. , 1. ],\n",
- " [ 11.74 , 16.2798 , 3.440325, 0. , 1. ],\n",
- " [ 10.48 , 13.3974 , 3.942825, 0. , 1. ]], dtype=np.float32)\n",
+ "expected = np.array(\n",
+ " [\n",
+ " [9.4, 16.9572, 4.5, 0.0, 1.0],\n",
+ " [9.32, 14.3628, 4.5, 1.0, 1.0],\n",
+ " [7.62, 11.0129, 4.5, 1.0, 1.0],\n",
+ " [4.25, 7.6692, 2.50065, 0.0, 1.0],\n",
+ " [6.16, 8.2186, 3.387825, 0.0, 1.0],\n",
+ " [4.86, 4.9288, 4.5, 0.0, 1.0],\n",
+ " [1.07, 0.0, 0.0, 0.0, 1.0],\n",
+ " [8.52, 11.615499, 3.260325, 0.0, 1.0],\n",
+ " [11.74, 16.2798, 3.440325, 0.0, 1.0],\n",
+ " [10.48, 13.3974, 3.942825, 0.0, 1.0],\n",
+ " ],\n",
+ " dtype=np.float32,\n",
+ ")\n",
"\n",
"np.testing.assert_array_almost_equal(actual[:5], expected[:5])\n",
"np.testing.assert_array_almost_equal(actual[-5:], expected[-5:])\n",
@@ -491,8 +503,11 @@
"# TEST compile flags\n",
"flow.load(compile_watch=False)\n",
"import pytest\n",
+ "\n",
"with pytest.raises(AttributeError):\n",
- " flow.compiled_recently # attribute does not exist if compile_watch flag is off"
+ " compiled_recently = (\n",
+ " flow.compiled_recently\n",
+ " ) # attribute does not exist if compile_watch flag is off"
]
},
{
@@ -554,8 +569,9 @@
"source": [
"# TEST\n",
"from pytest import approx\n",
- "assert tree_2.aux_vars['short_i_wait_mult'] == 0.75\n",
- "assert tree_2.aux_vars['income_breakpoints'][2] == approx(60000)"
+ "\n",
+ "assert tree_2.aux_vars[\"short_i_wait_mult\"] == 0.75\n",
+ "assert tree_2.aux_vars[\"income_breakpoints\"][2] == approx(60000)"
]
},
{
@@ -581,18 +597,23 @@
"source": [
"# TEST that aux_vars also work with arrays\n",
"tree_a = tree_2.replace_datasets(tour=tours)\n",
- "tree_a.aux_vars['income_breakpoints'] = np.asarray([1,2,60000])\n",
+ "tree_a.aux_vars[\"income_breakpoints\"] = np.asarray([1, 2, 60000])\n",
"actual = flow.load(tree_a)\n",
- "expected = np.array([[ 9.4 , 16.9572 , 4.5 , 0. , 1. ],\n",
- " [ 9.32 , 14.3628 , 4.5 , 1. , 1. ],\n",
- " [ 7.62 , 11.0129 , 4.5 , 1. , 1. ],\n",
- " [ 4.25 , 7.6692 , 2.50065 , 0. , 1. ],\n",
- " [ 6.16 , 8.2186 , 3.387825, 0. , 1. ],\n",
- " [ 4.86 , 4.9288 , 4.5 , 0. , 1. ],\n",
- " [ 1.07 , 0. , 0. , 0. , 1. ],\n",
- " [ 8.52 , 11.615499, 3.260325, 0. , 1. ],\n",
- " [ 11.74 , 16.2798 , 3.440325, 0. , 1. ],\n",
- " [ 10.48 , 13.3974 , 3.942825, 0. , 1. ]], dtype=np.float32)\n",
+ "expected = np.array(\n",
+ " [\n",
+ " [9.4, 16.9572, 4.5, 0.0, 1.0],\n",
+ " [9.32, 14.3628, 4.5, 1.0, 1.0],\n",
+ " [7.62, 11.0129, 4.5, 1.0, 1.0],\n",
+ " [4.25, 7.6692, 2.50065, 0.0, 1.0],\n",
+ " [6.16, 8.2186, 3.387825, 0.0, 1.0],\n",
+ " [4.86, 4.9288, 4.5, 0.0, 1.0],\n",
+ " [1.07, 0.0, 0.0, 0.0, 1.0],\n",
+ " [8.52, 11.615499, 3.260325, 0.0, 1.0],\n",
+ " [11.74, 16.2798, 3.440325, 0.0, 1.0],\n",
+ " [10.48, 13.3974, 3.942825, 0.0, 1.0],\n",
+ " ],\n",
+ " dtype=np.float32,\n",
+ ")\n",
"\n",
"np.testing.assert_array_almost_equal(actual[:5], expected[:5])\n",
"np.testing.assert_array_almost_equal(actual[-5:], expected[-5:])\n",
@@ -633,15 +654,20 @@
"# TEST df\n",
"assert len(df) == len(tours)\n",
"pd.testing.assert_index_equal(\n",
- " df.columns, \n",
- " pd.Index(['Drive Time', 'Transit IVT', 'Transit Wait Time', 'Income', 'Constant']),\n",
+ " df.columns,\n",
+ " pd.Index([\"Drive Time\", \"Transit IVT\", \"Transit Wait Time\", \"Income\", \"Constant\"]),\n",
")\n",
- "expected_df_head = pd.read_csv(StringIO(''',Drive Time,Transit IVT,Transit Wait Time,Income,Constant\n",
+ "expected_df_head = pd.read_csv(\n",
+ " StringIO(\n",
+ " \"\"\",Drive Time,Transit IVT,Transit Wait Time,Income,Constant\n",
"0,9.4,16.9572,4.5,0.0,1.0\n",
"1,9.32,14.3628,4.5,1.0,1.0\n",
"2,7.62,11.0129,4.5,1.0,1.0\n",
"3,4.25,7.6692,2.50065,0.0,1.0\n",
- "4,6.16,8.2186,3.387825,0.0,1.0'''), index_col=0).astype(np.float32)\n",
+ "4,6.16,8.2186,3.387825,0.0,1.0\"\"\"\n",
+ " ),\n",
+ " index_col=0,\n",
+ ").astype(np.float32)\n",
"pd.testing.assert_frame_equal(df.head(), expected_df_head)"
]
},
@@ -667,7 +693,7 @@
"outputs": [],
"source": [
"x = flow.load()\n",
- "b = spec.iloc[:,1:].fillna(0).astype(np.float32).values\n",
+ "b = spec.iloc[:, 1:].fillna(0).astype(np.float32).values\n",
"np.dot(x, b)"
]
},
@@ -688,7 +714,17 @@
"metadata": {},
"outputs": [],
"source": [
- "%time u = flow.dot(b)\n",
+ "%time flow.dot(b)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "5776822fb0889df",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "u = flow.dot(b)\n",
"u"
]
},
@@ -763,8 +799,7 @@
"outputs": [],
"source": [
"B = xr.DataArray(\n",
- " spec.iloc[:,1:].fillna(0).astype(np.float32), \n",
- " dims=('expressions','modes')\n",
+ " spec.iloc[:, 1:].fillna(0).astype(np.float32), dims=(\"expressions\", \"modes\")\n",
")\n",
"flow.dot_dataarray(B, source=tree_2)"
]
@@ -804,6 +839,16 @@
"was computed for each chosen alternative. "
]
},
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "d54d71021951470b",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "choices, choice_probs = flow.logit_draws(b, draws)"
+ ]
+ },
{
"cell_type": "code",
"execution_count": null,
@@ -825,6 +870,16 @@
"milliseconds more time than just computing the utilities."
]
},
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "eec9ebd14ff646eb",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "choices2, choice_probs2 = flow.logit_draws(b, draws, source=tree_2)"
+ ]
+ },
{
"cell_type": "code",
"execution_count": null,
@@ -884,9 +939,9 @@
"source": [
"# TEST mnl choices\n",
"uz = np.exp(flow.dot(b))\n",
- "uz = uz / uz.sum(1)[:,None]\n",
+ "uz = uz / uz.sum(1)[:, None]\n",
"np.testing.assert_array_almost_equal(\n",
- " uz[range(uz.shape[0]),choices.ravel()],\n",
+ " uz[range(uz.shape[0]), choices.ravel()],\n",
" choice_probs.ravel(),\n",
")"
]
@@ -946,12 +1001,12 @@
"\"\"\"\n",
"\n",
"import yaml\n",
+ "\n",
"from sharrow.nested_logit import construct_nesting_tree\n",
"\n",
- "nesting_settings = yaml.safe_load(nesting_settings)['NESTS']\n",
+ "nesting_settings = yaml.safe_load(nesting_settings)[\"NESTS\"]\n",
"nest_tree = construct_nesting_tree(\n",
- " alternatives=spec.columns[1:],\n",
- " nesting_settings=nesting_settings\n",
+ " alternatives=spec.columns[1:], nesting_settings=nesting_settings\n",
")"
]
},
@@ -985,7 +1040,9 @@
"metadata": {},
"outputs": [],
"source": [
- "nesting = nest_tree.as_arrays(trim=True, parameter_dict={'coef_nest_motor': 0.5, 'coef_nest_root': 1.0})"
+ "nesting = nest_tree.as_arrays(\n",
+ " trim=True, parameter_dict={\"coef_nest_motor\": 0.5, \"coef_nest_root\": 1.0}\n",
+ ")"
]
},
{
@@ -1051,8 +1108,11 @@
"source": [
"# TEST devolve NL to MNL\n",
"choices_nl_1, choice_probs_nl_1 = flow.logit_draws(\n",
- " b, draws, \n",
- " nesting=nest_tree.as_arrays(trim=True, parameter_dict={'coef_nest_motor': 1.0, 'coef_nest_root': 1.0}),\n",
+ " b,\n",
+ " draws,\n",
+ " nesting=nest_tree.as_arrays(\n",
+ " trim=True, parameter_dict={\"coef_nest_motor\": 1.0, \"coef_nest_root\": 1.0}\n",
+ " ),\n",
")\n",
"assert (choices_nl_1 == choices).all()\n",
"assert choice_probs == approx(choice_probs_nl_1)"
@@ -1087,23 +1147,28 @@
},
"outputs": [],
"source": [
- "# TEST \n",
- "_ch, _pr, _pc, _ls = flow.logit_draws(b, draws, source=tree_2, nesting=nesting, logsums=1)\n",
+ "# TEST\n",
+ "_ch, _pr, _pc, _ls = flow.logit_draws(\n",
+ " b, draws, source=tree_2, nesting=nesting, logsums=1\n",
+ ")\n",
"assert _ch is None\n",
"assert _pr is None\n",
"assert _pc is None\n",
"assert _ls.size == 100000\n",
"np.testing.assert_array_almost_equal(\n",
- " _ls[:5],\n",
- " [ 0.532791, 0.490935, 0.557529, 0.556371, 0.54812 ]\n",
+ " _ls[:5], [0.532791, 0.490935, 0.557529, 0.556371, 0.54812]\n",
")\n",
"np.testing.assert_array_almost_equal(\n",
- " _ls[-5:],\n",
- " [ 0.452682, 0.465422, 0.554312, 0.525064, 0.515226 ]\n",
+ " _ls[-5:], [0.452682, 0.465422, 0.554312, 0.525064, 0.515226]\n",
")\n",
"\n",
"_ch, _pr, _pc, _ls = flow.logit_draws(\n",
- " b, draws, source=tree_2, nesting=nesting, logsums=1, as_dataarray=True,\n",
+ " b,\n",
+ " draws,\n",
+ " source=tree_2,\n",
+ " nesting=nesting,\n",
+ " logsums=1,\n",
+ " as_dataarray=True,\n",
")\n",
"assert _ch is None\n",
"assert _pr is None\n",
@@ -1127,7 +1192,9 @@
"# TEST masking\n",
"masker = np.zeros(draws.shape, dtype=np.int8)\n",
"masker[::2] = 1\n",
- "_ch_m, _pr_m, _pc_m, _ls_m = flow.logit_draws(b, draws, source=tree_2, nesting=nesting, logsums=1, mask=masker)\n",
+ "_ch_m, _pr_m, _pc_m, _ls_m = flow.logit_draws(\n",
+ " b, draws, source=tree_2, nesting=nesting, logsums=1, mask=masker\n",
+ ")\n",
"\n",
"assert _ls_m == approx(np.where(masker, _ls, 0))\n",
"assert (_ch_m, _pr_m, _pc_m) == (None, None, None)"
@@ -1166,37 +1233,31 @@
},
"outputs": [],
"source": [
- "# TEST \n",
- "_ch, _pr, _pc, _ls = flow.logit_draws(b, draws, source=tree_2, nesting=nesting, logsums=2)\n",
+ "# TEST\n",
+ "_ch, _pr, _pc, _ls = flow.logit_draws(\n",
+ " b, draws, source=tree_2, nesting=nesting, logsums=2\n",
+ ")\n",
"assert _ch.size == 100000\n",
"assert _pr.size == 100000\n",
"assert _pc is None\n",
"assert _ls.size == 100000\n",
+ "np.testing.assert_array_almost_equal(_ch[:5], [1, 2, 1, 1, 1])\n",
+ "np.testing.assert_array_almost_equal(_ch[-5:], [0, 1, 0, 1, 0])\n",
"np.testing.assert_array_almost_equal(\n",
- " _ch[:5],\n",
- " [ 1, 2, 1, 1, 1 ]\n",
- ")\n",
- "np.testing.assert_array_almost_equal(\n",
- " _ch[-5:],\n",
- " [ 0, 1, 0, 1, 0 ]\n",
+ " _pr[:5], [0.393454, 0.16956, 0.38384, 0.384285, 0.387469]\n",
")\n",
"np.testing.assert_array_almost_equal(\n",
- " _pr[:5],\n",
- " [ 0.393454, 0.16956 , 0.38384 , 0.384285, 0.387469 ]\n",
+ " _pr[-5:], [0.503606, 0.420874, 0.478898, 0.396506, 0.468742]\n",
")\n",
"np.testing.assert_array_almost_equal(\n",
- " _pr[-5:],\n",
- " [ 0.503606, 0.420874, 0.478898, 0.396506, 0.468742 ]\n",
+ " _ls[:5], [0.532791, 0.490935, 0.557529, 0.556371, 0.54812]\n",
")\n",
"np.testing.assert_array_almost_equal(\n",
- " _ls[:5],\n",
- " [ 0.532791, 0.490935, 0.557529, 0.556371, 0.54812 ]\n",
+ " _ls[-5:], [0.452682, 0.465422, 0.554312, 0.525064, 0.515226]\n",
")\n",
- "np.testing.assert_array_almost_equal(\n",
- " _ls[-5:],\n",
- " [ 0.452682, 0.465422, 0.554312, 0.525064, 0.515226 ]\n",
+ "_ch, _pr, _pc, _ls = flow.logit_draws(\n",
+ " b, draws, source=tree_2, nesting=nesting, logsums=2, as_dataarray=True\n",
")\n",
- "_ch, _pr, _pc, _ls = flow.logit_draws(b, draws, source=tree_2, nesting=nesting, logsums=2, as_dataarray=True)\n",
"assert _ch.size == 100000\n",
"assert _ch.dims == (\"TOURIDX\",)\n",
"assert _ch.shape == (100000,)\n",
@@ -1221,23 +1282,33 @@
"source": [
"# TEST\n",
"draws_many = np.random.default_rng(42).random(size=(tree.shape[0], 5))\n",
- "_ch, _pr, _pc, _ls = flow.logit_draws(b, draws_many, source=tree_2, nesting=nesting, logsums=2, as_dataarray=True)\n",
- "assert _ch.dims == ('TOURIDX', 'DRAW')\n",
+ "_ch, _pr, _pc, _ls = flow.logit_draws(\n",
+ " b, draws_many, source=tree_2, nesting=nesting, logsums=2, as_dataarray=True\n",
+ ")\n",
+ "assert _ch.dims == (\"TOURIDX\", \"DRAW\")\n",
"assert _ch.shape == (100000, 5)\n",
- "assert _pr.dims == ('TOURIDX', 'DRAW')\n",
+ "assert _pr.dims == (\"TOURIDX\", \"DRAW\")\n",
"assert _pr.shape == (100000, 5)\n",
- "assert _ls.dims == ('TOURIDX', )\n",
- "assert _ls.shape == (100000, )\n",
+ "assert _ls.dims == (\"TOURIDX\",)\n",
+ "assert _ls.shape == (100000,)\n",
"assert _pc is None\n",
"\n",
- "_ch, _pr, _pc, _ls = flow.logit_draws(b, draws_many, source=tree_2, nesting=nesting, logsums=2, as_dataarray=True, pick_counted=True)\n",
- "assert _ch.dims == ('TOURIDX', 'DRAW')\n",
+ "_ch, _pr, _pc, _ls = flow.logit_draws(\n",
+ " b,\n",
+ " draws_many,\n",
+ " source=tree_2,\n",
+ " nesting=nesting,\n",
+ " logsums=2,\n",
+ " as_dataarray=True,\n",
+ " pick_counted=True,\n",
+ ")\n",
+ "assert _ch.dims == (\"TOURIDX\", \"DRAW\")\n",
"assert _ch.shape == (100000, 5)\n",
- "assert _pr.dims == ('TOURIDX', 'DRAW')\n",
+ "assert _pr.dims == (\"TOURIDX\", \"DRAW\")\n",
"assert _pr.shape == (100000, 5)\n",
- "assert _ls.dims == ('TOURIDX', )\n",
- "assert _ls.shape == (100000, )\n",
- "assert _pc.dims == ('TOURIDX', 'DRAW')\n",
+ "assert _ls.dims == (\"TOURIDX\",)\n",
+ "assert _ls.shape == (100000,)\n",
+ "assert _pc.dims == (\"TOURIDX\", \"DRAW\")\n",
"assert _pc.shape == (100000, 5)"
]
},
@@ -1257,7 +1328,14 @@
"masker[::3] = 1\n",
"\n",
"_ch_m, _pr_m, _pc_m, _ls_m = flow.logit_draws(\n",
- " b, draws_many, source=tree_2, nesting=nesting, logsums=2, as_dataarray=True, mask=masker, pick_counted=True\n",
+ " b,\n",
+ " draws_many,\n",
+ " source=tree_2,\n",
+ " nesting=nesting,\n",
+ " logsums=2,\n",
+ " as_dataarray=True,\n",
+ " mask=masker,\n",
+ " pick_counted=True,\n",
")\n",
"\n",
"assert (_ch_m.values == (np.where(np.expand_dims(masker, -1), _ch, -1))).all()\n",
@@ -1289,8 +1367,10 @@
"metadata": {},
"outputs": [],
"source": [
- "tour_by_dest = tree.subspaces['tour']\n",
- "tour_by_dest = tour_by_dest.assign_coords({'CAND_DEST': xr.DataArray(np.arange(25), dims='CAND_DEST')})\n",
+ "tour_by_dest = tree.subspaces[\"tour\"]\n",
+ "tour_by_dest = tour_by_dest.assign_coords(\n",
+ " {\"CAND_DEST\": xr.DataArray(np.arange(25), dims=\"CAND_DEST\")}\n",
+ ")\n",
"tour_by_dest"
]
},
@@ -1326,14 +1406,14 @@
" \"tour.in_time_period @ dot_skims.time_period\",\n",
" ),\n",
" extra_vars={\n",
- " 'shortwait': 3,\n",
- " 'one': 1,\n",
+ " \"shortwait\": 3,\n",
+ " \"one\": 1,\n",
" },\n",
" aux_vars={\n",
- " 'short_i_wait_mult': 0.75,\n",
- " 'income_breakpoints': income_breakpoints,\n",
+ " \"short_i_wait_mult\": 0.75,\n",
+ " \"income_breakpoints\": income_breakpoints,\n",
" },\n",
- " dim_order=('TOURIDX', 'CAND_DEST')\n",
+ " dim_order=(\"TOURIDX\", \"CAND_DEST\"),\n",
")\n",
"wide_flow = wide_tree.setup_flow(spec.Expression)"
]
@@ -1345,7 +1425,7 @@
"metadata": {},
"outputs": [],
"source": [
- "%time wide_logsums = wide_flow.logit_draws(b, logsums=1, compile_watch=\"simple\")[-1]"
+ "wide_logsums = wide_flow.logit_draws(b, logsums=1, compile_watch=\"simple\")[-1]"
]
},
{
@@ -1372,20 +1452,30 @@
"source": [
"# TEST\n",
"np.testing.assert_array_almost_equal(\n",
- " wide_logsums[:5,:5],\n",
- " np.array([[ 0.759222, 0.75862 , 0.744936, 0.758251, 0.737007],\n",
- " [ 0.671698, 0.671504, 0.663015, 0.661482, 0.667133],\n",
- " [ 0.670188, 0.678498, 0.687647, 0.691152, 0.715783],\n",
- " [ 0.760743, 0.769123, 0.763733, 0.784487, 0.802356],\n",
- " [ 0.73474 , 0.743051, 0.751439, 0.754731, 0.778121]], dtype=np.float32)\n",
+ " wide_logsums[:5, :5],\n",
+ " np.array(\n",
+ " [\n",
+ " [0.759222, 0.75862, 0.744936, 0.758251, 0.737007],\n",
+ " [0.671698, 0.671504, 0.663015, 0.661482, 0.667133],\n",
+ " [0.670188, 0.678498, 0.687647, 0.691152, 0.715783],\n",
+ " [0.760743, 0.769123, 0.763733, 0.784487, 0.802356],\n",
+ " [0.73474, 0.743051, 0.751439, 0.754731, 0.778121],\n",
+ " ],\n",
+ " dtype=np.float32,\n",
+ " ),\n",
")\n",
"np.testing.assert_array_almost_equal(\n",
- " wide_logsums[-5:,-5:],\n",
- " np.array([[ 0.719523, 0.755152, 0.739368, 0.762664, 0.764388],\n",
- " [ 0.740303, 0.678783, 0.649964, 0.694407, 0.681555],\n",
- " [ 0.758865, 0.663663, 0.637266, 0.673351, 0.65875 ],\n",
- " [ 0.765125, 0.706478, 0.676878, 0.717814, 0.713912],\n",
- " [ 0.73348 , 0.683626, 0.647698, 0.69146 , 0.673006]], dtype=np.float32)\n",
+ " wide_logsums[-5:, -5:],\n",
+ " np.array(\n",
+ " [\n",
+ " [0.719523, 0.755152, 0.739368, 0.762664, 0.764388],\n",
+ " [0.740303, 0.678783, 0.649964, 0.694407, 0.681555],\n",
+ " [0.758865, 0.663663, 0.637266, 0.673351, 0.65875],\n",
+ " [0.765125, 0.706478, 0.676878, 0.717814, 0.713912],\n",
+ " [0.73348, 0.683626, 0.647698, 0.69146, 0.673006],\n",
+ " ],\n",
+ " dtype=np.float32,\n",
+ " ),\n",
")"
]
},
@@ -1402,8 +1492,8 @@
"source": [
"# TEST\n",
"np.testing.assert_array_almost_equal(\n",
- " wide_logsums[np.arange(len(tours)), tours['dest_taz_idx'].to_numpy()],\n",
- " flow.logit_draws(b, logsums=1)[-1]\n",
+ " wide_logsums[np.arange(len(tours)), tours[\"dest_taz_idx\"].to_numpy()],\n",
+ " flow.logit_draws(b, logsums=1)[-1],\n",
")"
]
},
@@ -1419,7 +1509,9 @@
"outputs": [],
"source": [
"# TEST\n",
- "wide_logsums_ = wide_flow.logit_draws(b, logsums=1, compile_watch=True, as_dataarray=True)[-1]\n",
+ "wide_logsums_ = wide_flow.logit_draws(\n",
+ " b, logsums=1, compile_watch=True, as_dataarray=True\n",
+ ")[-1]\n",
"assert wide_logsums_.dims == (\"TOURIDX\", \"CAND_DEST\")\n",
"assert wide_logsums_.shape == (100000, 25)"
]
@@ -1460,7 +1552,9 @@
"source": [
"# TEST\n",
"wide_draws = np.random.default_rng(42).random(size=wide_tree.shape + (2,))\n",
- "wide_logsums_plus = wide_flow.logit_draws(b, logsums=2, compile_watch=True, as_dataarray=True, draws=wide_draws)\n",
+ "wide_logsums_plus = wide_flow.logit_draws(\n",
+ " b, logsums=2, compile_watch=True, as_dataarray=True, draws=wide_draws\n",
+ ")\n",
"assert wide_logsums_plus[0].dims == (\"TOURIDX\", \"CAND_DEST\", \"DRAW\")\n",
"assert wide_logsums_plus[0].shape == (100000, 25, 2)\n",
"assert wide_logsums_plus[3].dims == (\"TOURIDX\", \"CAND_DEST\")\n",
@@ -1490,8 +1584,12 @@
"assert wide_logsums_mask[3].dims == (\"TOURIDX\", \"CAND_DEST\")\n",
"assert wide_logsums_mask[3].shape == (100000, 25)\n",
"\n",
- "assert (wide_logsums_plus[0].where(np.expand_dims(mask, -1), -1) == wide_logsums_mask[0]).all()\n",
- "assert (wide_logsums_plus[1].where(np.expand_dims(mask, -1), 0) == wide_logsums_mask[1]).all()\n",
+ "assert (\n",
+ " wide_logsums_plus[0].where(np.expand_dims(mask, -1), -1) == wide_logsums_mask[0]\n",
+ ").all()\n",
+ "assert (\n",
+ " wide_logsums_plus[1].where(np.expand_dims(mask, -1), 0) == wide_logsums_mask[1]\n",
+ ").all()\n",
"assert (wide_logsums_plus[3].where(mask, 0) == wide_logsums_mask[3]).all()"
]
},
@@ -1507,17 +1605,30 @@
"outputs": [],
"source": [
"# TEST masking performance\n",
- "import timeit, warnings\n",
+ "import timeit\n",
+ "import warnings\n",
+ "\n",
"with warnings.catch_warnings():\n",
" warnings.simplefilter(\"error\")\n",
- " masked_time = timeit.timeit(lambda: wide_flow.logit_draws(\n",
- " b, logsums=2, compile_watch=True, as_dataarray=True, draws=wide_draws, mask=mask\n",
- " ), number=1)\n",
- " raw_time = timeit.timeit(lambda: wide_flow.logit_draws(\n",
- " b, logsums=2, compile_watch=True, as_dataarray=True, draws=wide_draws\n",
- " ), number=1)\n",
+ " masked_time = timeit.timeit(\n",
+ " lambda: wide_flow.logit_draws(\n",
+ " b,\n",
+ " logsums=2,\n",
+ " compile_watch=True,\n",
+ " as_dataarray=True,\n",
+ " draws=wide_draws,\n",
+ " mask=mask,\n",
+ " ),\n",
+ " number=1,\n",
+ " )\n",
+ " raw_time = timeit.timeit(\n",
+ " lambda: wide_flow.logit_draws(\n",
+ " b, logsums=2, compile_watch=True, as_dataarray=True, draws=wide_draws\n",
+ " ),\n",
+ " number=1,\n",
+ " )\n",
"assert masked_time * 2 < raw_time # generous buffer, should be nearly 7 times faster\n",
- "assert len(wide_flow.cache_misses['_imnl_plus1d']) == 3"
+ "assert len(wide_flow.cache_misses[\"_imnl_plus1d\"]) == 3"
]
}
],
diff --git a/_sources/walkthrough/sparse.ipynb b/_sources/walkthrough/sparse.ipynb
index b011add..2601301 100644
--- a/_sources/walkthrough/sparse.ipynb
+++ b/_sources/walkthrough/sparse.ipynb
@@ -17,7 +17,7 @@
"source": [
"import numpy as np\n",
"import pandas as pd\n",
- "import xarray as xr\n",
+ "\n",
"import sharrow as sh"
]
},
@@ -106,10 +106,10 @@
"outputs": [],
"source": [
"skims.redirection.set(\n",
- " maz_taz, \n",
- " map_to='otaz', \n",
+ " maz_taz,\n",
+ " map_to=\"otaz\",\n",
" name=\"omaz\",\n",
- " map_also={'dtaz': \"dmaz\"}, \n",
+ " map_also={\"dtaz\": \"dmaz\"},\n",
")"
]
},
@@ -141,9 +141,9 @@
"outputs": [],
"source": [
"skims.redirection.sparse_blender(\n",
- " 'DISTWALK', \n",
- " maz_to_maz_walk.OMAZ, \n",
- " maz_to_maz_walk.DMAZ, \n",
+ " \"DISTWALK\",\n",
+ " maz_to_maz_walk.OMAZ,\n",
+ " maz_to_maz_walk.DMAZ,\n",
" maz_to_maz_walk.DISTWALK,\n",
" max_blend_distance=1.0,\n",
" index=maz_taz.index,\n",
@@ -170,10 +170,12 @@
"metadata": {},
"outputs": [],
"source": [
- "trips = pd.DataFrame({\n",
- " 'orig_maz': [100, 100, 100, 200, 200],\n",
- " 'dest_maz': [100, 101, 103, 201, 202],\n",
- "})\n",
+ "trips = pd.DataFrame(\n",
+ " {\n",
+ " \"orig_maz\": [100, 100, 100, 200, 200],\n",
+ " \"dest_maz\": [100, 101, 103, 201, 202],\n",
+ " }\n",
+ ")\n",
"trips"
]
},
@@ -199,7 +201,7 @@
" relationships=(\n",
" \"base.orig_maz @ skims.omaz\",\n",
" \"base.dest_maz @ skims.dmaz\",\n",
- " )\n",
+ " ),\n",
")"
]
},
@@ -218,9 +220,12 @@
"metadata": {},
"outputs": [],
"source": [
- "flow = tree.setup_flow({\n",
- " 'plain_distance': 'DISTWALK',\n",
- "}, boundscheck=True)"
+ "flow = tree.setup_flow(\n",
+ " {\n",
+ " \"plain_distance\": \"DISTWALK\",\n",
+ " },\n",
+ " boundscheck=True,\n",
+ ")"
]
},
{
@@ -256,15 +261,20 @@
"source": [
"# TEST\n",
"from pytest import approx\n",
+ "\n",
"sparse_dat = np.array([0.01, 0.2, np.nan, 3.2, np.nan])\n",
- "dense_dat = np.array([0.12,0.12,0.12,0.17,0.17])\n",
- "def blend(s,d, max_s):\n",
+ "dense_dat = np.array([0.12, 0.12, 0.12, 0.17, 0.17])\n",
+ "\n",
+ "\n",
+ "def blend(s, d, max_s):\n",
" out = np.zeros_like(d)\n",
- " ratio = s/max_s\n",
- " out = d*ratio + s*(1-ratio)\n",
- " out = np.where(s>max_s, d, out)\n",
+ " ratio = s / max_s\n",
+ " out = d * ratio + s * (1 - ratio)\n",
+ " out = np.where(s > max_s, d, out)\n",
" out = np.where(np.isnan(s), d, out)\n",
" return out\n",
+ "\n",
+ "\n",
"assert blend(sparse_dat, dense_dat, 1.0) == approx(flow.load().ravel())"
]
},
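
Concretely, for the second trip above (sparse walk distance 0.2, dense skim 0.12, max blend distance 1.0) the blend reproduces the 0.184 seen in `flow.load()`:

    s, d, max_s = 0.2, 0.12, 1.0
    ratio = s / max_s                   # 0.2
    out = d * ratio + s * (1 - ratio)   # 0.12*0.2 + 0.2*0.8 = 0.184
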
@@ -283,11 +293,13 @@
"metadata": {},
"outputs": [],
"source": [
- "flow2 = tree.setup_flow({\n",
- " 'plain_distance': 'DISTWALK',\n",
- " 'clip_distance': 'DISTWALK.clip(upper=0.15)',\n",
- " 'square_distance': 'DISTWALK**2',\n",
- "})"
+ "flow2 = tree.setup_flow(\n",
+ " {\n",
+ " \"plain_distance\": \"DISTWALK\",\n",
+ " \"clip_distance\": \"DISTWALK.clip(upper=0.15)\",\n",
+ " \"square_distance\": \"DISTWALK**2\",\n",
+ " }\n",
+ ")"
]
},
{
@@ -312,12 +324,17 @@
"outputs": [],
"source": [
"# TEST\n",
- "assert flow2.load_dataframe().values == approx(np.array([\n",
- " [ 1.1100e-02, 1.1100e-02, 1.2321e-04],\n",
- " [ 1.8400e-01, 1.5000e-01, 3.3856e-02],\n",
- " [ 1.2000e-01, 1.2000e-01, 1.4400e-02],\n",
- " [ 1.7000e-01, 1.5000e-01, 2.8900e-02],\n",
- " [ 1.7000e-01, 1.5000e-01, 2.8900e-02]], dtype=np.float32)\n",
+ "assert flow2.load_dataframe().values == approx(\n",
+ " np.array(\n",
+ " [\n",
+ " [1.1100e-02, 1.1100e-02, 1.2321e-04],\n",
+ " [1.8400e-01, 1.5000e-01, 3.3856e-02],\n",
+ " [1.2000e-01, 1.2000e-01, 1.4400e-02],\n",
+ " [1.7000e-01, 1.5000e-01, 2.8900e-02],\n",
+ " [1.7000e-01, 1.5000e-01, 2.8900e-02],\n",
+ " ],\n",
+ " dtype=np.float32,\n",
+ " )\n",
")"
]
},
@@ -348,7 +365,7 @@
"skims.at(\n",
" omaz=trips.orig_maz,\n",
" dmaz=trips.dest_maz,\n",
- " _names=['DIST', 'DISTWALK'],\n",
+ " _names=[\"DIST\", \"DISTWALK\"],\n",
")"
]
},
@@ -367,24 +384,26 @@
"out = skims.at(\n",
" omaz=trips.orig_maz,\n",
" dmaz=trips.dest_maz,\n",
- " _names=['DIST', 'DISTWALK'], _load=True,\n",
+ " _names=[\"DIST\", \"DISTWALK\"],\n",
+ " _load=True,\n",
")\n",
"np.testing.assert_array_almost_equal(\n",
- " out['DIST'].to_numpy(), \n",
- " np.array([0.12, 0.12, 0.12, 0.17, 0.17], dtype=np.float32)\n",
+ " out[\"DIST\"].to_numpy(), np.array([0.12, 0.12, 0.12, 0.17, 0.17], dtype=np.float32)\n",
")\n",
"np.testing.assert_array_almost_equal(\n",
- " out['DISTWALK'].to_numpy(), \n",
- " np.array([0.0111, 0.184, 0.12, 0.17, 0.17], dtype=np.float32)\n",
+ " out[\"DISTWALK\"].to_numpy(),\n",
+ " np.array([0.0111, 0.184, 0.12, 0.17, 0.17], dtype=np.float32),\n",
")\n",
"\n",
"from pytest import raises\n",
+ "\n",
"with raises(NotImplementedError):\n",
" skims.at(\n",
" omaz=trips.orig_maz,\n",
" dmaz=trips.dest_maz,\n",
- " time_period=['AM', 'AM', 'AM', 'AM', 'AM'],\n",
- " _names=['DIST', 'DISTWALK', 'SOV_TIME'], _load=True,\n",
+ " time_period=[\"AM\", \"AM\", \"AM\", \"AM\", \"AM\"],\n",
+ " _names=[\"DIST\", \"DISTWALK\", \"SOV_TIME\"],\n",
+ " _load=True,\n",
" )"
]
},
@@ -396,9 +415,9 @@
"outputs": [],
"source": [
"skims.iat(\n",
- " omaz=[ 0, 0, 0, 100, 100],\n",
- " dmaz=[ 0, 1, 3, 101, 102],\n",
- " _names=['DIST', 'DISTWALK'],\n",
+ " omaz=[0, 0, 0, 100, 100],\n",
+ " dmaz=[0, 1, 3, 101, 102],\n",
+ " _names=[\"DIST\", \"DISTWALK\"],\n",
")"
]
},
@@ -415,18 +434,18 @@
"source": [
"# TEST\n",
"out = skims.iat(\n",
- " omaz=[ 0, 0, 0, 100, 100],\n",
- " dmaz=[ 0, 1, 3, 101, 102],\n",
- " _names=['DIST', 'DISTWALK'], _load=True,\n",
+ " omaz=[0, 0, 0, 100, 100],\n",
+ " dmaz=[0, 1, 3, 101, 102],\n",
+ " _names=[\"DIST\", \"DISTWALK\"],\n",
+ " _load=True,\n",
")\n",
"np.testing.assert_array_almost_equal(\n",
- " out['DIST'].to_numpy(), \n",
- " np.array([0.12, 0.12, 0.12, 0.17, 0.17], dtype=np.float32)\n",
+ " out[\"DIST\"].to_numpy(), np.array([0.12, 0.12, 0.12, 0.17, 0.17], dtype=np.float32)\n",
")\n",
"np.testing.assert_array_almost_equal(\n",
- " out['DISTWALK'].to_numpy(), \n",
- " np.array([0.0111, 0.184, 0.12, 0.17, 0.17], dtype=np.float32)\n",
- ")\n"
+ " out[\"DISTWALK\"].to_numpy(),\n",
+ " np.array([0.0111, 0.184, 0.12, 0.17, 0.17], dtype=np.float32),\n",
+ ")"
]
},
{
@@ -446,9 +465,10 @@
"outputs": [],
"source": [
"skims.at(\n",
- " otaz=[1,1,1,16,16],\n",
- " dtaz=[1,1,1,16,16],\n",
- " _names=['DIST', 'DISTWALK'], _load=True,\n",
+ " otaz=[1, 1, 1, 16, 16],\n",
+ " dtaz=[1, 1, 1, 16, 16],\n",
+ " _names=[\"DIST\", \"DISTWALK\"],\n",
+ " _load=True,\n",
")"
]
},
@@ -460,9 +480,9 @@
"outputs": [],
"source": [
"skims.at(\n",
- " otaz=[1,1,1,16,16],\n",
- " dtaz=[1,1,1,16,16],\n",
- " _name='DISTWALK',\n",
+ " otaz=[1, 1, 1, 16, 16],\n",
+ " dtaz=[1, 1, 1, 16, 16],\n",
+ " _name=\"DISTWALK\",\n",
")"
]
},
@@ -478,44 +498,47 @@
"outputs": [],
"source": [
"# TEST\n",
- "import sys\n",
- "if sys.version_info > (3,8):\n",
- " import secrets\n",
- " token = \"skims-with-sparse\" + secrets.token_hex(5)\n",
- " readback0 = skims.shm.to_shared_memory(token)\n",
- " assert readback0.attrs == skims.attrs\n",
- " readback = sh.Dataset.shm.from_shared_memory(token)\n",
- " assert readback.attrs == skims.attrs\n",
- " \n",
- " out = readback.iat(\n",
- " omaz=[ 0, 0, 0, 100, 100],\n",
- " dmaz=[ 0, 1, 3, 101, 102],\n",
- " _names=['DIST', 'DISTWALK'], _load=True,\n",
- " )\n",
- " np.testing.assert_array_almost_equal(\n",
- " out['DIST'].to_numpy(), \n",
- " np.array([0.12, 0.12, 0.12, 0.17, 0.17], dtype=np.float32)\n",
- " )\n",
- " np.testing.assert_array_almost_equal(\n",
- " out['DISTWALK'].to_numpy(), \n",
- " np.array([0.0111, 0.184, 0.12, 0.17, 0.17], dtype=np.float32)\n",
- " )\n",
+ "import secrets\n",
"\n",
- " out = readback.at(\n",
- " omaz=trips.orig_maz,\n",
- " dmaz=trips.dest_maz,\n",
- " _names=['DIST', 'DISTWALK'], _load=True,\n",
- " )\n",
- " np.testing.assert_array_almost_equal(\n",
- " out['DIST'].to_numpy(), \n",
- " np.array([0.12, 0.12, 0.12, 0.17, 0.17], dtype=np.float32)\n",
- " )\n",
- " np.testing.assert_array_almost_equal(\n",
- " out['DISTWALK'].to_numpy(), \n",
- " np.array([0.0111, 0.184, 0.12, 0.17, 0.17], dtype=np.float32)\n",
- " )\n",
- " \n",
- " assert readback.redirection.blenders == {'DISTWALK': {'max_blend_distance': 1.0, 'blend_distance_name': None}}\n"
+ "token = \"skims-with-sparse\" + secrets.token_hex(5)\n",
+ "readback0 = skims.shm.to_shared_memory(token)\n",
+ "assert readback0.attrs == skims.attrs\n",
+ "readback = sh.Dataset.shm.from_shared_memory(token)\n",
+ "assert readback.attrs == skims.attrs\n",
+ "\n",
+ "out = readback.iat(\n",
+ " omaz=[0, 0, 0, 100, 100],\n",
+ " dmaz=[0, 1, 3, 101, 102],\n",
+ " _names=[\"DIST\", \"DISTWALK\"],\n",
+ " _load=True,\n",
+ ")\n",
+ "np.testing.assert_array_almost_equal(\n",
+ " out[\"DIST\"].to_numpy(),\n",
+ " np.array([0.12, 0.12, 0.12, 0.17, 0.17], dtype=np.float32),\n",
+ ")\n",
+ "np.testing.assert_array_almost_equal(\n",
+ " out[\"DISTWALK\"].to_numpy(),\n",
+ " np.array([0.0111, 0.184, 0.12, 0.17, 0.17], dtype=np.float32),\n",
+ ")\n",
+ "\n",
+ "out = readback.at(\n",
+ " omaz=trips.orig_maz,\n",
+ " dmaz=trips.dest_maz,\n",
+ " _names=[\"DIST\", \"DISTWALK\"],\n",
+ " _load=True,\n",
+ ")\n",
+ "np.testing.assert_array_almost_equal(\n",
+ " out[\"DIST\"].to_numpy(),\n",
+ " np.array([0.12, 0.12, 0.12, 0.17, 0.17], dtype=np.float32),\n",
+ ")\n",
+ "np.testing.assert_array_almost_equal(\n",
+ " out[\"DISTWALK\"].to_numpy(),\n",
+ " np.array([0.0111, 0.184, 0.12, 0.17, 0.17], dtype=np.float32),\n",
+ ")\n",
+ "\n",
+ "assert readback.redirection.blenders == {\n",
+ " \"DISTWALK\": {\"max_blend_distance\": 1.0, \"blend_distance_name\": None}\n",
+ "}"
]
},
{
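(The long test cell above is a shared-memory round trip. Stripped of the assertions, the API it exercises is small; a sketch, reusing the skims Dataset built earlier in this notebook:)

import secrets

import sharrow as sh

# A random suffix keeps concurrent runs from colliding on the same
# shared-memory block name.
token = "skims-demo-" + secrets.token_hex(5)

# Publish the dataset into shared memory; the returned object is backed by
# the shared block, and attrs (including sparse blender settings) carry over.
shared = skims.shm.to_shared_memory(token)

# Any process that knows the token can reattach without reloading from disk.
readback = sh.Dataset.shm.from_shared_memory(token)
assert readback.attrs == skims.attrs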
@@ -530,7 +553,9 @@
"outputs": [],
"source": [
"# TEST\n",
- "assert skims.redirection.blenders == {'DISTWALK': {'max_blend_distance': 1.0, 'blend_distance_name': None}}"
+ "assert skims.redirection.blenders == {\n",
+ " \"DISTWALK\": {\"max_blend_distance\": 1.0, \"blend_distance_name\": None}\n",
+ "}"
]
},
{
@@ -546,24 +571,28 @@
"source": [
"# TEST\n",
"# reverse skims in sparse\n",
- "flow3 = tree.setup_flow({\n",
- " 'plain_distance': 'DISTWALK',\n",
- " 'reverse_distance': 'skims.reverse(\"DISTWALK\")',\n",
- "})\n",
+ "flow3 = tree.setup_flow(\n",
+ " {\n",
+ " \"plain_distance\": \"DISTWALK\",\n",
+ " \"reverse_distance\": 'skims.reverse(\"DISTWALK\")',\n",
+ " }\n",
+ ")\n",
"\n",
- "assert flow3.load() == approx(np.array([[ 0.0111, 0.0111],\n",
- " [ 0.184 , 0.12 ],\n",
- " [ 0.12 , 0.12 ],\n",
- " [ 0.17 , 0.17 ],\n",
- " [ 0.17 , 0.17 ]], dtype=np.float32))\n",
+ "assert flow3.load() == approx(\n",
+ " np.array(\n",
+ " [[0.0111, 0.0111], [0.184, 0.12], [0.12, 0.12], [0.17, 0.17], [0.17, 0.17]],\n",
+ " dtype=np.float32,\n",
+ " )\n",
+ ")\n",
"\n",
"z = skims.iat(\n",
- " omaz=[ 0, 1, 3, 101, 102],\n",
- " dmaz=[ 0, 0, 0, 100, 100],\n",
- " _names=['DIST', 'DISTWALK'], _load=True,\n",
+ " omaz=[0, 1, 3, 101, 102],\n",
+ " dmaz=[0, 0, 0, 100, 100],\n",
+ " _names=[\"DIST\", \"DISTWALK\"],\n",
+ " _load=True,\n",
")\n",
- "assert z['DISTWALK'].data == approx(np.array([ 0.0111, 0.12 , 0.12 , 0.17 , 0.17 ]))\n",
- "assert z['DIST'].data == approx(np.array([ 0.12, 0.12 , 0.12 , 0.17 , 0.17 ]))"
+ "assert z[\"DISTWALK\"].data == approx(np.array([0.0111, 0.12, 0.12, 0.17, 0.17]))\n",
+ "assert z[\"DIST\"].data == approx(np.array([0.12, 0.12, 0.12, 0.17, 0.17]))"
]
}
],
diff --git a/_sources/walkthrough/two-dim.ipynb b/_sources/walkthrough/two-dim.ipynb
index ac627c5..0eef0f7 100644
--- a/_sources/walkthrough/two-dim.ipynb
+++ b/_sources/walkthrough/two-dim.ipynb
@@ -19,10 +19,10 @@
"outputs": [],
"source": [
"import numpy as np\n",
- "import pandas as pd\n",
"import xarray as xr\n",
"\n",
"import sharrow as sh\n",
+ "\n",
"sh.__version__"
]
},
@@ -39,6 +39,7 @@
"source": [
"# TEST check versions\n",
"import packaging\n",
+ "\n",
"assert packaging.version.parse(xr.__version__) >= packaging.version.parse(\"0.20.2\")"
]
},
@@ -83,7 +84,7 @@
"source": [
"# test households content\n",
"assert len(households) == 5000\n",
- "assert \"income\" in households \n",
+ "assert \"income\" in households\n",
"assert households.index.name == \"HHID\""
]
},
@@ -111,7 +112,7 @@
"source": [
"assert len(persons) == 8212\n",
"assert \"household_id\" in persons\n",
- "assert persons.index.name == 'PERID'"
+ "assert persons.index.name == \"PERID\""
]
},
{
@@ -180,7 +181,7 @@
"metadata": {},
"outputs": [],
"source": [
- "workers = persons.query(\"pemploy in [1,2]\").rename_axis(index='WORKERID')\n",
+ "workers = persons.query(\"pemploy in [1,2]\").rename_axis(index=\"WORKERID\")\n",
"workers"
]
},
@@ -215,8 +216,8 @@
"metadata": {},
"outputs": [],
"source": [
- "skims_am = skims.sel(time_period='AM')\n",
- "skims_pm = skims.sel(time_period='PM')"
+ "skims_am = skims.sel(time_period=\"AM\")\n",
+ "skims_pm = skims.sel(time_period=\"PM\")"
]
},
{
@@ -246,7 +247,7 @@
"outputs": [],
"source": [
"base = sh.dataset.from_named_objects(\n",
- " workers.index, \n",
+ " workers.index,\n",
" landuse.index,\n",
")"
]
@@ -279,7 +280,7 @@
"metadata": {},
"outputs": [],
"source": [
- "tree = sh.DataTree(base=base, dim_order=('WORKERID', 'TAZ'))"
+ "tree = sh.DataTree(base=base, dim_order=(\"WORKERID\", \"TAZ\"))"
]
},
{
@@ -294,7 +295,7 @@
"outputs": [],
"source": [
"# TEST tree_dest attributes\n",
- "assert tree.dim_order == ('WORKERID', 'TAZ')\n",
+ "assert tree.dim_order == (\"WORKERID\", \"TAZ\")\n",
"assert tree.shape == (4361, 25)"
]
},
@@ -317,7 +318,7 @@
"metadata": {},
"outputs": [],
"source": [
- "tree.add_dataset('person', persons, \"base.WORKERID @ person.PERID\")"
+ "tree.add_dataset(\"person\", persons, \"base.WORKERID @ person.PERID\")"
]
},
{
@@ -337,8 +338,8 @@
"metadata": {},
"outputs": [],
"source": [
- "tree.add_dataset('landuse', landuse, \"base.TAZ @ landuse.TAZ\")\n",
- "tree.add_dataset('hh', households, \"person.household_id @ hh.HHID\")"
+ "tree.add_dataset(\"landuse\", landuse, \"base.TAZ @ landuse.TAZ\")\n",
+ "tree.add_dataset(\"hh\", households, \"person.household_id @ hh.HHID\")"
]
},
{
@@ -360,17 +361,17 @@
"outputs": [],
"source": [
"tree.add_dataset(\n",
- " 'odskims', \n",
- " skims_am, \n",
+ " \"odskims\",\n",
+ " skims_am,\n",
" relationships=(\n",
- " \"hh.TAZ @ odskims.otaz\", \n",
+ " \"hh.TAZ @ odskims.otaz\",\n",
" \"base.TAZ @ odskims.dtaz\",\n",
" ),\n",
")\n",
"\n",
"tree.add_dataset(\n",
- " 'doskims', \n",
- " skims_pm, \n",
+ " \"doskims\",\n",
+ " skims_pm,\n",
" relationships=(\n",
" \"base.TAZ @ doskims.otaz\",\n",
" \"hh.TAZ @ doskims.dtaz\",\n",
@@ -399,10 +400,10 @@
"outputs": [],
"source": [
"definition = {\n",
- " 'round_trip_dist': 'odskims.DIST + doskims.DIST',\n",
- " 'round_trip_dist_first_mile': 'clip(odskims.DIST, 0, 1) + clip(doskims.DIST, 0, 1)',\n",
- " 'round_trip_dist_addl_miles': 'clip(odskims.DIST-1, 0, None) + clip(doskims.DIST-1, 0, None)',\n",
- " 'size_term': 'log(TOTPOP + 0.5*EMPRES)',\n",
+ " \"round_trip_dist\": \"odskims.DIST + doskims.DIST\",\n",
+ " \"round_trip_dist_first_mile\": \"clip(odskims.DIST, 0, 1) + clip(doskims.DIST, 0, 1)\",\n",
+ " \"round_trip_dist_addl_miles\": \"clip(odskims.DIST-1, 0, None) + clip(doskims.DIST-1, 0, None)\",\n",
+ " \"size_term\": \"log(TOTPOP + 0.5*EMPRES)\",\n",
"}\n",
"\n",
"flow = tree.setup_flow(definition)"
@@ -440,37 +441,46 @@
"source": [
"# TEST\n",
"assert arr.shape == (4361, 25, 4)\n",
- "expected = np.array([\n",
- " [[ 0.61 , 0.61 , 0. , 4.610157],\n",
- " [ 0.28 , 0.28 , 0. , 5.681878],\n",
- " [ 0.56 , 0.56 , 0. , 6.368187],\n",
- " [ 0.53 , 0.53 , 0. , 5.741399],\n",
- " [ 1.23 , 1.23 , 0. , 7.17549 ]],\n",
- "\n",
- " [[ 1.19 , 1.19 , 0. , 4.610157],\n",
- " [ 1.49 , 1.49 , 0. , 5.681878],\n",
- " [ 1.88 , 1.85 , 0.03 , 6.368187],\n",
- " [ 1.36 , 1.36 , 0. , 5.741399],\n",
- " [ 1.93 , 1.93 , 0. , 7.17549 ]],\n",
- "\n",
- " [[ 1.19 , 1.19 , 0. , 4.610157],\n",
- " [ 1.49 , 1.49 , 0. , 5.681878],\n",
- " [ 1.88 , 1.85 , 0.03 , 6.368187],\n",
- " [ 1.36 , 1.36 , 0. , 5.741399],\n",
- " [ 1.93 , 1.93 , 0. , 7.17549 ]],\n",
- "\n",
- " [[ 0.24 , 0.24 , 0. , 4.610157],\n",
- " [ 0.61 , 0.61 , 0. , 5.681878],\n",
- " [ 1.01 , 1.01 , 0. , 6.368187],\n",
- " [ 0.75 , 0.75 , 0. , 5.741399],\n",
- " [ 1.38 , 1.38 , 0. , 7.17549 ]],\n",
- "\n",
- " [[ 0.61 , 0.61 , 0. , 4.610157],\n",
- " [ 0.28 , 0.28 , 0. , 5.681878],\n",
- " [ 0.56 , 0.56 , 0. , 6.368187],\n",
- " [ 0.53 , 0.53 , 0. , 5.741399],\n",
- " [ 1.23 , 1.23 , 0. , 7.17549 ]],\n",
- "], dtype=np.float32)\n",
+ "expected = np.array(\n",
+ " [\n",
+ " [\n",
+ " [0.61, 0.61, 0.0, 4.610157],\n",
+ " [0.28, 0.28, 0.0, 5.681878],\n",
+ " [0.56, 0.56, 0.0, 6.368187],\n",
+ " [0.53, 0.53, 0.0, 5.741399],\n",
+ " [1.23, 1.23, 0.0, 7.17549],\n",
+ " ],\n",
+ " [\n",
+ " [1.19, 1.19, 0.0, 4.610157],\n",
+ " [1.49, 1.49, 0.0, 5.681878],\n",
+ " [1.88, 1.85, 0.03, 6.368187],\n",
+ " [1.36, 1.36, 0.0, 5.741399],\n",
+ " [1.93, 1.93, 0.0, 7.17549],\n",
+ " ],\n",
+ " [\n",
+ " [1.19, 1.19, 0.0, 4.610157],\n",
+ " [1.49, 1.49, 0.0, 5.681878],\n",
+ " [1.88, 1.85, 0.03, 6.368187],\n",
+ " [1.36, 1.36, 0.0, 5.741399],\n",
+ " [1.93, 1.93, 0.0, 7.17549],\n",
+ " ],\n",
+ " [\n",
+ " [0.24, 0.24, 0.0, 4.610157],\n",
+ " [0.61, 0.61, 0.0, 5.681878],\n",
+ " [1.01, 1.01, 0.0, 6.368187],\n",
+ " [0.75, 0.75, 0.0, 5.741399],\n",
+ " [1.38, 1.38, 0.0, 7.17549],\n",
+ " ],\n",
+ " [\n",
+ " [0.61, 0.61, 0.0, 4.610157],\n",
+ " [0.28, 0.28, 0.0, 5.681878],\n",
+ " [0.56, 0.56, 0.0, 6.368187],\n",
+ " [0.53, 0.53, 0.0, 5.741399],\n",
+ " [1.23, 1.23, 0.0, 7.17549],\n",
+ " ],\n",
+ " ],\n",
+ " dtype=np.float32,\n",
+ ")\n",
"\n",
"np.testing.assert_array_almost_equal(arr[:5, :5, :], expected)"
]
@@ -529,10 +539,20 @@
"source": [
"# TEST\n",
"assert isinstance(arr_pretty, xr.DataArray)\n",
- "assert arr_pretty.dims == ('WORKERID', 'TAZ', 'expressions')\n",
+ "assert arr_pretty.dims == (\"WORKERID\", \"TAZ\", \"expressions\")\n",
"assert arr_pretty.shape == (4361, 25, 4)\n",
- "assert all(arr_pretty.expressions == np.array(['round_trip_dist', 'round_trip_dist_first_mile',\n",
- " 'round_trip_dist_addl_miles', 'size_term'], dtype='= 0 &&
+ !jQuery(node.parentNode).hasClass(className) &&
+ !jQuery(node.parentNode).hasClass("nohighlight")) {
+ var span;
+ var isInSVG = jQuery(node).closest("body, svg, foreignObject").is("svg");
+ if (isInSVG) {
+ span = document.createElementNS("http://www.w3.org/2000/svg", "tspan");
+ } else {
+ span = document.createElement("span");
+ span.className = className;
+ }
+ span.appendChild(document.createTextNode(val.substr(pos, text.length)));
+ node.parentNode.insertBefore(span, node.parentNode.insertBefore(
+ document.createTextNode(val.substr(pos + text.length)),
+ node.nextSibling));
+ node.nodeValue = val.substr(0, pos);
+ if (isInSVG) {
+ var rect = document.createElementNS("http://www.w3.org/2000/svg", "rect");
+ var bbox = node.parentElement.getBBox();
+ rect.x.baseVal.value = bbox.x;
+ rect.y.baseVal.value = bbox.y;
+ rect.width.baseVal.value = bbox.width;
+ rect.height.baseVal.value = bbox.height;
+ rect.setAttribute('class', className);
+ addItems.push({
+ "parent": node.parentNode,
+ "target": rect});
+ }
+ }
+ }
+ else if (!jQuery(node).is("button, select, textarea")) {
+ jQuery.each(node.childNodes, function() {
+ highlight(this, addItems);
+ });
+ }
+ }
+ var addItems = [];
+ var result = this.each(function() {
+ highlight(this, addItems);
+ });
+ for (var i = 0; i < addItems.length; ++i) {
+ jQuery(addItems[i].parent).before(addItems[i].target);
+ }
+ return result;
+};
+
+/*
+ * backward compatibility for jQuery.browser
+ * This will be supported until firefox bug is fixed.
+ */
+if (!jQuery.browser) {
+ jQuery.uaMatch = function(ua) {
+ ua = ua.toLowerCase();
+
+ var match = /(chrome)[ \/]([\w.]+)/.exec(ua) ||
+ /(webkit)[ \/]([\w.]+)/.exec(ua) ||
+ /(opera)(?:.*version|)[ \/]([\w.]+)/.exec(ua) ||
+ /(msie) ([\w.]+)/.exec(ua) ||
+ ua.indexOf("compatible") < 0 && /(mozilla)(?:.*? rv:([\w.]+)|)/.exec(ua) ||
+ [];
+
+ return {
+ browser: match[ 1 ] || "",
+ version: match[ 2 ] || "0"
+ };
+ };
+ jQuery.browser = {};
+ jQuery.browser[jQuery.uaMatch(navigator.userAgent).browser] = true;
+}
diff --git a/_static/basic.css b/_static/basic.css
index d54be80..9e364ed 100644
--- a/_static/basic.css
+++ b/_static/basic.css
@@ -222,7 +222,7 @@ table.modindextable td {
/* -- general body styles --------------------------------------------------- */
div.body {
- min-width: 450px;
+ min-width: 360px;
max-width: 800px;
}
@@ -237,16 +237,6 @@ a.headerlink {
visibility: hidden;
}
-a.brackets:before,
-span.brackets > a:before{
- content: "[";
-}
-
-a.brackets:after,
-span.brackets > a:after {
- content: "]";
-}
-
h1:hover > a.headerlink,
h2:hover > a.headerlink,
h3:hover > a.headerlink,
@@ -334,12 +324,16 @@ aside.sidebar {
p.sidebar-title {
font-weight: bold;
}
+nav.contents,
+aside.topic,
div.admonition, div.topic, blockquote {
clear: left;
}
/* -- topics ---------------------------------------------------------------- */
+nav.contents,
+aside.topic,
div.topic {
border: 1px solid #ccc;
@@ -379,6 +373,9 @@ div.body p.centered {
div.sidebar > :last-child,
aside.sidebar > :last-child,
+nav.contents > :last-child,
+aside.topic > :last-child,
+
div.topic > :last-child,
div.admonition > :last-child {
margin-bottom: 0;
@@ -386,6 +383,9 @@ div.admonition > :last-child {
div.sidebar::after,
aside.sidebar::after,
+nav.contents::after,
+aside.topic::after,
+
div.topic::after,
div.admonition::after,
blockquote::after {
@@ -428,10 +428,6 @@ table.docutils td, table.docutils th {
border-bottom: 1px solid #aaa;
}
-table.footnote td, table.footnote th {
- border: 0 !important;
-}
-
th {
text-align: left;
padding-right: 5px;
@@ -615,6 +611,7 @@ ul.simple p {
margin-bottom: 0;
}
+/* Docutils 0.17 and older (footnotes & citations) */
dl.footnote > dt,
dl.citation > dt {
float: left;
@@ -632,6 +629,33 @@ dl.citation > dd:after {
clear: both;
}
+/* Docutils 0.18+ (footnotes & citations) */
+aside.footnote > span,
+div.citation > span {
+ float: left;
+}
+aside.footnote > span:last-of-type,
+div.citation > span:last-of-type {
+ padding-right: 0.5em;
+}
+aside.footnote > p {
+ margin-left: 2em;
+}
+div.citation > p {
+ margin-left: 4em;
+}
+aside.footnote > p:last-of-type,
+div.citation > p:last-of-type {
+ margin-bottom: 0em;
+}
+aside.footnote > p:last-of-type:after,
+div.citation > p:last-of-type:after {
+ content: "";
+ clear: both;
+}
+
+/* Footnotes & citations ends */
+
dl.field-list {
display: grid;
grid-template-columns: fit-content(30%) auto;
diff --git a/_static/doctools.js b/_static/doctools.js
index e1bfd70..c3db08d 100644
--- a/_static/doctools.js
+++ b/_static/doctools.js
@@ -2,357 +2,263 @@
* doctools.js
* ~~~~~~~~~~~
*
- * Sphinx JavaScript utilities for all documentation.
+ * Base JavaScript utilities for all Sphinx HTML documentation.
*
* :copyright: Copyright 2007-2022 by the Sphinx team, see AUTHORS.
* :license: BSD, see LICENSE for details.
*
*/
+"use strict";
-/**
- * select a different prefix for underscore
- */
-$u = _.noConflict();
-
-/**
- * make the code below compatible with browsers without
- * an installed firebug like debugger
-if (!window.console || !console.firebug) {
- var names = ["log", "debug", "info", "warn", "error", "assert", "dir",
- "dirxml", "group", "groupEnd", "time", "timeEnd", "count", "trace",
- "profile", "profileEnd"];
- window.console = {};
- for (var i = 0; i < names.length; ++i)
- window.console[names[i]] = function() {};
-}
- */
-
-/**
- * small helper function to urldecode strings
- *
- * See https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/decodeURIComponent#Decoding_query_parameters_from_a_URL
- */
-jQuery.urldecode = function(x) {
- if (!x) {
- return x
+const _ready = (callback) => {
+ if (document.readyState !== "loading") {
+ callback();
+ } else {
+ document.addEventListener("DOMContentLoaded", callback);
}
- return decodeURIComponent(x.replace(/\+/g, ' '));
};
/**
- * small helper function to urlencode strings
+ * highlight a given string on a node by wrapping it in
+ * span elements with the given class name.
*/
-jQuery.urlencode = encodeURIComponent;
+const _highlight = (node, addItems, text, className) => {
+ if (node.nodeType === Node.TEXT_NODE) {
+ const val = node.nodeValue;
+ const parent = node.parentNode;
+ const pos = val.toLowerCase().indexOf(text);
+ if (
+ pos >= 0 &&
+ !parent.classList.contains(className) &&
+ !parent.classList.contains("nohighlight")
+ ) {
+ let span;
-/**
- * This function returns the parsed url parameters of the
- * current request. Multiple values per key are supported,
- * it will always return arrays of strings for the value parts.
- */
-jQuery.getQueryParameters = function(s) {
- if (typeof s === 'undefined')
- s = document.location.search;
- var parts = s.substr(s.indexOf('?') + 1).split('&');
- var result = {};
- for (var i = 0; i < parts.length; i++) {
- var tmp = parts[i].split('=', 2);
- var key = jQuery.urldecode(tmp[0]);
- var value = jQuery.urldecode(tmp[1]);
- if (key in result)
- result[key].push(value);
- else
- result[key] = [value];
- }
- return result;
-};
+ const closestNode = parent.closest("body, svg, foreignObject");
+ const isInSVG = closestNode && closestNode.matches("svg");
+ if (isInSVG) {
+ span = document.createElementNS("http://www.w3.org/2000/svg", "tspan");
+ } else {
+ span = document.createElement("span");
+ span.classList.add(className);
+ }
-/**
- * highlight a given string on a jquery object by wrapping it in
- * span elements with the given class name.
- */
-jQuery.fn.highlightText = function(text, className) {
- function highlight(node, addItems) {
- if (node.nodeType === 3) {
- var val = node.nodeValue;
- var pos = val.toLowerCase().indexOf(text);
- if (pos >= 0 &&
- !jQuery(node.parentNode).hasClass(className) &&
- !jQuery(node.parentNode).hasClass("nohighlight")) {
- var span;
- var isInSVG = jQuery(node).closest("body, svg, foreignObject").is("svg");
- if (isInSVG) {
- span = document.createElementNS("http://www.w3.org/2000/svg", "tspan");
- } else {
- span = document.createElement("span");
- span.className = className;
- }
- span.appendChild(document.createTextNode(val.substr(pos, text.length)));
- node.parentNode.insertBefore(span, node.parentNode.insertBefore(
+ span.appendChild(document.createTextNode(val.substr(pos, text.length)));
+ parent.insertBefore(
+ span,
+ parent.insertBefore(
document.createTextNode(val.substr(pos + text.length)),
- node.nextSibling));
- node.nodeValue = val.substr(0, pos);
- if (isInSVG) {
- var rect = document.createElementNS("http://www.w3.org/2000/svg", "rect");
- var bbox = node.parentElement.getBBox();
- rect.x.baseVal.value = bbox.x;
- rect.y.baseVal.value = bbox.y;
- rect.width.baseVal.value = bbox.width;
- rect.height.baseVal.value = bbox.height;
- rect.setAttribute('class', className);
- addItems.push({
- "parent": node.parentNode,
- "target": rect});
- }
+ node.nextSibling
+ )
+ );
+ node.nodeValue = val.substr(0, pos);
+
+ if (isInSVG) {
+ const rect = document.createElementNS(
+ "http://www.w3.org/2000/svg",
+ "rect"
+ );
+ const bbox = parent.getBBox();
+ rect.x.baseVal.value = bbox.x;
+ rect.y.baseVal.value = bbox.y;
+ rect.width.baseVal.value = bbox.width;
+ rect.height.baseVal.value = bbox.height;
+ rect.setAttribute("class", className);
+ addItems.push({ parent: parent, target: rect });
}
}
- else if (!jQuery(node).is("button, select, textarea")) {
- jQuery.each(node.childNodes, function() {
- highlight(this, addItems);
- });
- }
+ } else if (node.matches && !node.matches("button, select, textarea")) {
+ node.childNodes.forEach((el) => _highlight(el, addItems, text, className));
}
- var addItems = [];
- var result = this.each(function() {
- highlight(this, addItems);
- });
- for (var i = 0; i < addItems.length; ++i) {
- jQuery(addItems[i].parent).before(addItems[i].target);
- }
- return result;
};
-
-/*
- * backward compatibility for jQuery.browser
- * This will be supported until firefox bug is fixed.
- */
-if (!jQuery.browser) {
- jQuery.uaMatch = function(ua) {
- ua = ua.toLowerCase();
-
- var match = /(chrome)[ \/]([\w.]+)/.exec(ua) ||
- /(webkit)[ \/]([\w.]+)/.exec(ua) ||
- /(opera)(?:.*version|)[ \/]([\w.]+)/.exec(ua) ||
- /(msie) ([\w.]+)/.exec(ua) ||
- ua.indexOf("compatible") < 0 && /(mozilla)(?:.*? rv:([\w.]+)|)/.exec(ua) ||
- [];
-
- return {
- browser: match[ 1 ] || "",
- version: match[ 2 ] || "0"
- };
- };
- jQuery.browser = {};
- jQuery.browser[jQuery.uaMatch(navigator.userAgent).browser] = true;
-}
+const _highlightText = (thisNode, text, className) => {
+ let addItems = [];
+ _highlight(thisNode, addItems, text, className);
+ addItems.forEach((obj) =>
+ obj.parent.insertAdjacentElement("beforebegin", obj.target)
+ );
+};
/**
* Small JavaScript module for the documentation.
*/
-var Documentation = {
-
- init : function() {
- this.fixFirefoxAnchorBug();
- this.highlightSearchWords();
- this.initIndexTable();
- this.initOnKeyListeners();
+const Documentation = {
+ init: () => {
+ Documentation.highlightSearchWords();
+ Documentation.initDomainIndexTable();
+ Documentation.initOnKeyListeners();
},
/**
* i18n support
*/
- TRANSLATIONS : {},
- PLURAL_EXPR : function(n) { return n === 1 ? 0 : 1; },
- LOCALE : 'unknown',
+ TRANSLATIONS: {},
+ PLURAL_EXPR: (n) => (n === 1 ? 0 : 1),
+ LOCALE: "unknown",
// gettext and ngettext don't access this so that the functions
// can safely bound to a different name (_ = Documentation.gettext)
- gettext : function(string) {
- var translated = Documentation.TRANSLATIONS[string];
- if (typeof translated === 'undefined')
- return string;
- return (typeof translated === 'string') ? translated : translated[0];
- },
-
- ngettext : function(singular, plural, n) {
- var translated = Documentation.TRANSLATIONS[singular];
- if (typeof translated === 'undefined')
- return (n == 1) ? singular : plural;
- return translated[Documentation.PLURALEXPR(n)];
- },
-
- addTranslations : function(catalog) {
- for (var key in catalog.messages)
- this.TRANSLATIONS[key] = catalog.messages[key];
- this.PLURAL_EXPR = new Function('n', 'return +(' + catalog.plural_expr + ')');
- this.LOCALE = catalog.locale;
+ gettext: (string) => {
+ const translated = Documentation.TRANSLATIONS[string];
+ switch (typeof translated) {
+ case "undefined":
+ return string; // no translation
+ case "string":
+ return translated; // translation exists
+ default:
+ return translated[0]; // (singular, plural) translation tuple exists
+ }
},
- /**
- * add context elements like header anchor links
- */
- addContextElements : function() {
- $('div[id] > :header:first').each(function() {
- $('<a class="headerlink">\u00B6</a>').
- attr('href', '#' + this.id).
- attr('title', _('Permalink to this headline')).
- appendTo(this);
- });
- $('dt[id]').each(function() {
- $('<a class="headerlink">\u00B6</a>').
- attr('href', '#' + this.id).
- attr('title', _('Permalink to this definition')).
- appendTo(this);
- });
+ ngettext: (singular, plural, n) => {
+ const translated = Documentation.TRANSLATIONS[singular];
+ if (typeof translated !== "undefined")
+ return translated[Documentation.PLURAL_EXPR(n)];
+ return n === 1 ? singular : plural;
},
- /**
- * workaround a firefox stupidity
- * see: https://bugzilla.mozilla.org/show_bug.cgi?id=645075
- */
- fixFirefoxAnchorBug : function() {
- if (document.location.hash && $.browser.mozilla)
- window.setTimeout(function() {
- document.location.href += '';
- }, 10);
+ addTranslations: (catalog) => {
+ Object.assign(Documentation.TRANSLATIONS, catalog.messages);
+ Documentation.PLURAL_EXPR = new Function(
+ "n",
+ `return (${catalog.plural_expr})`
+ );
+ Documentation.LOCALE = catalog.locale;
},
/**
* highlight the search words provided in the url in the text
*/
- highlightSearchWords : function() {
- var params = $.getQueryParameters();
- var terms = (params.highlight) ? params.highlight[0].split(/\s+/) : [];
- if (terms.length) {
- var body = $('div.body');
- if (!body.length) {
- body = $('body');
- }
- window.setTimeout(function() {
- $.each(terms, function() {
- body.highlightText(this.toLowerCase(), 'highlighted');
- });
- }, 10);
- $('<p class="highlight-link"><a href="javascript:Documentation.' +
- 'hideSearchWords()">' + _('Hide Search Matches') + '</a></p>')
- .appendTo($('#searchbox'));
- }
- },
+ highlightSearchWords: () => {
+ const highlight =
+ new URLSearchParams(window.location.search).get("highlight") || "";
+ const terms = highlight.toLowerCase().split(/\s+/).filter(x => x);
+ if (terms.length === 0) return; // nothing to do
- /**
- * init the domain index toggle buttons
- */
- initIndexTable : function() {
- var togglers = $('img.toggler').click(function() {
- var src = $(this).attr('src');
- var idnum = $(this).attr('id').substr(7);
- $('tr.cg-' + idnum).toggle();
- if (src.substr(-9) === 'minus.png')
- $(this).attr('src', src.substr(0, src.length-9) + 'plus.png');
- else
- $(this).attr('src', src.substr(0, src.length-8) + 'minus.png');
- }).css('display', '');
- if (DOCUMENTATION_OPTIONS.COLLAPSE_INDEX) {
- togglers.click();
- }
+ // There should never be more than one element matching "div.body"
+ const divBody = document.querySelectorAll("div.body");
+ const body = divBody.length ? divBody[0] : document.querySelector("body");
+ window.setTimeout(() => {
+ terms.forEach((term) => _highlightText(body, term, "highlighted"));
+ }, 10);
+
+ const searchBox = document.getElementById("searchbox");
+ if (searchBox === null) return;
+ searchBox.appendChild(
+ document
+ .createRange()
+ .createContextualFragment(
+ '<p class="highlight-link">' +
+ '<a href="javascript:Documentation.hideSearchWords()">' +
+ Documentation.gettext("Hide Search Matches") +
+ "</a></p>"
+ )
+ );
},
/**
* helper function to hide the search marks again
*/
- hideSearchWords : function() {
- $('#searchbox .highlight-link').fadeOut(300);
- $('span.highlighted').removeClass('highlighted');
- var url = new URL(window.location);
- url.searchParams.delete('highlight');
- window.history.replaceState({}, '', url);
+ hideSearchWords: () => {
+ document
+ .querySelectorAll("#searchbox .highlight-link")
+ .forEach((el) => el.remove());
+ document
+ .querySelectorAll("span.highlighted")
+ .forEach((el) => el.classList.remove("highlighted"));
+ const url = new URL(window.location);
+ url.searchParams.delete("highlight");
+ window.history.replaceState({}, "", url);
},
- /**
+ /**
* helper function to focus on search bar
*/
- focusSearchBar : function() {
- $('input[name=q]').first().focus();
+ focusSearchBar: () => {
+ document.querySelectorAll("input[name=q]")[0]?.focus();
},
/**
- * make the url absolute
+ * Initialise the domain index toggle buttons
*/
- makeURL : function(relativeURL) {
- return DOCUMENTATION_OPTIONS.URL_ROOT + '/' + relativeURL;
- },
+ initDomainIndexTable: () => {
+ const toggler = (el) => {
+ const idNumber = el.id.substr(7);
+ const toggledRows = document.querySelectorAll(`tr.cg-${idNumber}`);
+ if (el.src.substr(-9) === "minus.png") {
+ el.src = `${el.src.substr(0, el.src.length - 9)}plus.png`;
+ toggledRows.forEach((el) => (el.style.display = "none"));
+ } else {
+ el.src = `${el.src.substr(0, el.src.length - 8)}minus.png`;
+ toggledRows.forEach((el) => (el.style.display = ""));
+ }
+ };
- /**
- * get the current relative url
- */
- getCurrentURL : function() {
- var path = document.location.pathname;
- var parts = path.split(/\//);
- $.each(DOCUMENTATION_OPTIONS.URL_ROOT.split(/\//), function() {
- if (this === '..')
- parts.pop();
- });
- var url = parts.join('/');
- return path.substring(url.lastIndexOf('/') + 1, path.length - 1);
+ const togglerElements = document.querySelectorAll("img.toggler");
+ togglerElements.forEach((el) =>
+ el.addEventListener("click", (event) => toggler(event.currentTarget))
+ );
+ togglerElements.forEach((el) => (el.style.display = ""));
+ if (DOCUMENTATION_OPTIONS.COLLAPSE_INDEX) togglerElements.forEach(toggler);
},
- initOnKeyListeners: function() {
+ initOnKeyListeners: () => {
// only install a listener if it is really needed
- if (!DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS &&
- !DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS)
- return;
+ if (
+ !DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS &&
+ !DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS
+ )
+ return;
- $(document).keydown(function(event) {
- var activeElementType = document.activeElement.tagName;
- // don't navigate when in search box, textarea, dropdown or button
- if (activeElementType !== 'TEXTAREA' && activeElementType !== 'INPUT' && activeElementType !== 'SELECT'
- && activeElementType !== 'BUTTON') {
- if (event.altKey || event.ctrlKey || event.metaKey)
- return;
+ const blacklistedElements = new Set([
+ "TEXTAREA",
+ "INPUT",
+ "SELECT",
+ "BUTTON",
+ ]);
+ document.addEventListener("keydown", (event) => {
+ if (blacklistedElements.has(document.activeElement.tagName)) return; // bail for input elements
+ if (event.altKey || event.ctrlKey || event.metaKey) return; // bail with special keys
- if (!event.shiftKey) {
- switch (event.key) {
- case 'ArrowLeft':
- if (!DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS)
- break;
- var prevHref = $('link[rel="prev"]').prop('href');
- if (prevHref) {
- window.location.href = prevHref;
- return false;
- }
- break;
- case 'ArrowRight':
- if (!DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS)
- break;
- var nextHref = $('link[rel="next"]').prop('href');
- if (nextHref) {
- window.location.href = nextHref;
- return false;
- }
- break;
- case 'Escape':
- if (!DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS)
- break;
- Documentation.hideSearchWords();
- return false;
- }
- }
-
- // some keyboard layouts may need Shift to get /
+ if (!event.shiftKey) {
switch (event.key) {
- case '/':
- if (!DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS)
- break;
- Documentation.focusSearchBar();
- return false;
+ case "ArrowLeft":
+ if (!DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS) break;
+
+ const prevLink = document.querySelector('link[rel="prev"]');
+ if (prevLink && prevLink.href) {
+ window.location.href = prevLink.href;
+ event.preventDefault();
+ }
+ break;
+ case "ArrowRight":
+ if (!DOCUMENTATION_OPTIONS.NAVIGATION_WITH_KEYS) break;
+
+ const nextLink = document.querySelector('link[rel="next"]');
+ if (nextLink && nextLink.href) {
+ window.location.href = nextLink.href;
+ event.preventDefault();
+ }
+ break;
+ case "Escape":
+ if (!DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS) break;
+ Documentation.hideSearchWords();
+ event.preventDefault();
}
}
+
+ // some keyboard layouts may need Shift to get /
+ switch (event.key) {
+ case "/":
+ if (!DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS) break;
+ Documentation.focusSearchBar();
+ event.preventDefault();
+ }
});
- }
+ },
};
// quick alias for translations
-_ = Documentation.gettext;
+const _ = Documentation.gettext;
-$(document).ready(function() {
- Documentation.init();
-});
+_ready(Documentation.init);
diff --git a/_static/documentation_options.js b/_static/documentation_options.js
index 877e3c3..162a6ba 100644
--- a/_static/documentation_options.js
+++ b/_static/documentation_options.js
@@ -1,14 +1,14 @@
var DOCUMENTATION_OPTIONS = {
URL_ROOT: document.getElementById("documentation_options").getAttribute('data-url_root'),
VERSION: '',
- LANGUAGE: 'None',
+ LANGUAGE: 'en',
COLLAPSE_INDEX: false,
BUILDER: 'html',
FILE_SUFFIX: '.html',
LINK_SUFFIX: '.html',
HAS_SOURCE: true,
SOURCELINK_SUFFIX: '',
- NAVIGATION_WITH_KEYS: true,
+ NAVIGATION_WITH_KEYS: false,
SHOW_SEARCH_SUMMARY: true,
- ENABLE_SEARCH_SHORTCUTS: true,
+ ENABLE_SEARCH_SHORTCUTS: false,
};
\ No newline at end of file
diff --git a/_static/jquery-3.5.1.js b/_static/jquery-3.6.0.js
similarity index 98%
rename from _static/jquery-3.5.1.js
rename to _static/jquery-3.6.0.js
index 5093733..fc6c299 100644
--- a/_static/jquery-3.5.1.js
+++ b/_static/jquery-3.6.0.js
@@ -1,15 +1,15 @@
/*!
- * jQuery JavaScript Library v3.5.1
+ * jQuery JavaScript Library v3.6.0
* https://jquery.com/
*
* Includes Sizzle.js
* https://sizzlejs.com/
*
- * Copyright JS Foundation and other contributors
+ * Copyright OpenJS Foundation and other contributors
* Released under the MIT license
* https://jquery.org/license
*
- * Date: 2020-05-04T22:49Z
+ * Date: 2021-03-02T17:08Z
*/
( function( global, factory ) {
@@ -76,12 +76,16 @@ var support = {};
var isFunction = function isFunction( obj ) {
- // Support: Chrome <=57, Firefox <=52
- // In some browsers, typeof returns "function" for HTML