Leaf region will only trigger if the region exists #48

Open · wants to merge 8 commits into main
@@ -130,6 +130,7 @@ def _zero_descendants(id_, cell_type, x_result, deltas, hierarchy_info):
deltas.loc[desc_names, cell_type] = 0.0

L.info("Preparing variables layout ...")
# Create two DataFrames filled with np.nan; we update them below based on the assumptions.
x_result = pd.DataFrame(
{cell_type: np.full(len(id_counts), KEEP) for cell_type in cell_types},
index=id_counts.index, # indexed by integer identifiers
@@ -157,6 +158,7 @@ def _zero_descendants(id_, cell_type, x_result, deltas, hierarchy_info):
region_counts.at[region_name, "gad67+_standard_deviation"], 0.0, atol=stddev_tolerance
) and np.isclose(region_counts.at[region_name, "gad67+"], 0.0, atol=stddev_tolerance):
for cell_type in cell_types:
# (If the GAD67+ count is zero, then every inhibitory neuron subtype count must also be zero.)
_zero_descendants(id_, cell_type, x_result, deltas, hierarchy_info)

# Set the (possibly non-zero) cell count estimates which are given with certainty.
@@ -178,6 +180,18 @@ def _zero_descendants(id_, cell_type, x_result, deltas, hierarchy_info):
# are used as definitive estimates.
x_result.at[id_, cell_type] = id_counts.at[id_, cell_type]
deltas.at[region_name, cell_type] = 0.0
# If the region has leaf regions (i.e., desc_only is not empty),
# and all of its leaf regions are np.nan,
# and the region's cell count is 0,
# then revert the standard deviation to np.inf (the variable is omitted) and,
# since the literature cell count was changed to 0, reset the count to np.nan.
if (
not x_result.loc[desc_only, cell_type].empty and
x_result.loc[desc_only, cell_type].isnull().all() and
x_result.loc[id_, cell_type] == 0
):
deltas.at[region_name, cell_type] = SKIP
x_result.at[id_, cell_type] = np.nan

return x_result, deltas
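
The new block above is the core of this change. Below is a minimal, self-contained sketch of the condition it implements, using a toy two-leaf hierarchy; the sentinel value chosen for `SKIP`, the column name `"pv+"`, and the frame layout are illustrative assumptions, not the module's actual definitions.

```python
import numpy as np
import pandas as pd

SKIP = np.inf  # assumed sentinel: a standard deviation meaning "omit this variable"

# Toy hierarchy: region 1 is the parent of leaf regions 2 and 3.
x_result = pd.DataFrame({"pv+": [0.0, np.nan, np.nan]}, index=[1, 2, 3])
deltas = pd.DataFrame({"pv+": [0.0]}, index=["Parent"])

id_, region_name, cell_type = 1, "Parent", "pv+"
desc_only = [2, 3]  # descendant ids of the region, excluding the region itself

leaves = x_result.loc[desc_only, cell_type]
if not leaves.empty and leaves.isnull().all() and x_result.loc[id_, cell_type] == 0:
    deltas.at[region_name, cell_type] = SKIP  # the literature variance is omitted
    x_result.at[id_, cell_type] = np.nan      # the optimizer estimates the count

print(x_result.at[id_, cell_type])        # nan
print(deltas.at[region_name, cell_type])  # inf
```

As the comments describe it, the intent is to avoid pinning a parent region to a hard zero when none of its leaf regions carries a literature value.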

@@ -481,9 +495,11 @@ def _check_variables_consistency(
AtlasDensitiesError if one of the following assumptions is violated:
- if cell count estimate of a region is known with certainty for a given cell type,
then the cell count of every descendant region is also known with certainty.
-  - a cell count estimate which is given for certain does not
+  - a neuron subtype count estimate which is given for certain does not exceed
+    its total neuron count estimate counterpart.
"""
cell_count_tolerance = 1e-2 # absolute tolerance to rule out round-off errors
# pylint: disable=too-many-nested-blocks
for region_name, id_, id_set in zip(
deltas.index, hierarchy_info.index, hierarchy_info["descendant_id_set"]
):
@@ -492,7 +508,7 @@
for desc_id in id_set:
if np.isnan(x_result.loc[desc_id, cell_type]):
raise AtlasDensitiesError(
f"Cell count estimate of region named '{region_name}' for cell type "
f"Cell count estimate of region '{region_name}' for cell type "
f"{cell_type} was given for certain whereas the cell count of "
f"descendant id {desc_id} is not certain."
)
4 changes: 2 additions & 2 deletions setup.py
@@ -20,9 +20,9 @@
"click>=7.0,<=8.1.3",
"cgal-pybind>=0.1.1",
"joblib>=1.3.0",
"numpy>=1.15.0",
"numpy>=1.15.0,<=1.24.0", # to reduce errors when pandas uses new numpy
"openpyxl>=3.0.3",
"pandas>=1.0.3",
"pandas>=1.0.3,<=2.0.0", # because https://github.com/pandas-dev/pandas/pull/54954 has broken csv parsing
"PyYAML>=5.3.1",
# Since version 1.6.0, scipy.optimize.linprog has fast, new methods for large, sparse problems
# from the HiGHS library. We use the "highs" method in the densities module.
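
The new upper bounds are plain version pins. As an illustrative aside, an environment can be checked against them as follows; the `packaging` dependency is an assumption of this snippet, not of `setup.py`.

```python
import numpy as np
import pandas as pd
from packaging.version import Version  # requires the third-party "packaging" package

assert Version("1.15.0") <= Version(np.__version__) <= Version("1.24.0")
assert Version("1.0.3") <= Version(pd.__version__) <= Version("2.0.0")
```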
2 changes: 1 addition & 1 deletion tests/densities/test_excitatory_inhibitory_splitting.py
@@ -133,7 +133,7 @@ def test_make_excitatory_density():
res = tested.make_excitatory_density(neuron_density, inhibitory_density)

assert res.shape == neuron_density.shape
-  assert np.sum(res.raw) == np.product(neuron_density.shape)
+  assert np.sum(res.raw) == np.prod(neuron_density.shape)

# this would create negative densities; make sure they are clipped to zero
res = tested.make_excitatory_density(inhibitory_density, neuron_density)
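
Context for the one-line fix above: `np.product` was a deprecated alias of `np.prod` (it emits a `DeprecationWarning` from NumPy 1.25 and was removed in NumPy 2.0), so this spelling keeps the test working on newer NumPy releases.

```python
import numpy as np

shape = (2, 3, 4)
assert np.prod(shape) == 24  # preferred spelling; np.product is gone in NumPy 2.0
```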