diff --git a/.github/workflows/pythonapp.yml b/.github/workflows/pythonapp.yml
index 8724685..823f229 100644
--- a/.github/workflows/pythonapp.yml
+++ b/.github/workflows/pythonapp.yml
@@ -2,9 +2,6 @@ name: Python Application
 
 on:
   push:
-    branches:
-      - merger
-      - main
   pull_request:
     branches:
       - main
@@ -13,27 +10,30 @@ jobs:
   lint:
     runs-on: ubuntu-latest
     steps:
-    - uses: actions/checkout@v2
-    - name: Set up Python 3.x
-      uses: actions/setup-python@v1
+    - uses: actions/checkout@v4
+    - name: Set up Python 3.11
+      uses: actions/setup-python@v5
       with:
-        python-version: 3.x
+        python-version: 3.11
     - name: Install dependencies
       run: |
        python3 -m pip install --upgrade pip virtualenv wheel setuptools
     - name: Lint with pycodestyle
       run: |
        python3 -m pip install flake8
-        python3 -m flake8 . --count --ignore=E125,E126,E127,E128,E203,E402,E741,E731,W503,F401,W504,F841 --show-source --statistics --max-line-length=120 --exclude=__pycache__,.tox,.git/,doc/
+        python3 -m flake8 . --count --ignore=E125,E126,E127,E128,E203,E226,E402,E741,E731,W503,F401,W504,F841 --show-source --statistics --max-line-length=120 --exclude=__pycache__,.tox,.git/,doc/
 
   linux:
     runs-on: ubuntu-latest
     steps:
-    - uses: actions/checkout@v2
-    - name: Set up Python 3.x
-      uses: actions/setup-python@v1
+    - uses: actions/checkout@v4
+    - name: Set up Python 3.11
+      uses: actions/setup-python@v5
       with:
-        python-version: 3.x
+        python-version: 3.11
+    - name: Install dependencies
+      run: |
+        python3 -m pip install --upgrade pip virtualenv wheel setuptools
     - name: Make sdist
       run: python3 setup.py sdist --formats=gztar
     - name: Install dependencies
@@ -65,11 +65,11 @@ jobs:
   docs:
     runs-on: ubuntu-latest
     steps:
-    - uses: actions/checkout@v2
-    - name: Set up Python 3.x
-      uses: actions/setup-python@v1
+    - uses: actions/checkout@v4
+    - name: Set up Python 3.11
+      uses: actions/setup-python@v5
       with:
-        python-version: 3.x
+        python-version: 3.11
     - name: Install dependencies
       run: |
        python3 -m pip install --upgrade pip virtualenv wheel setuptools m2r2
diff --git a/sapicore/__init__.py b/sapicore/__init__.py
index 5dfaddd..3cacdf9 100644
--- a/sapicore/__init__.py
+++ b/sapicore/__init__.py
@@ -2,4 +2,4 @@
 -----------
 """
 
-__version__ = "0.3.0"
+__version__ = "0.3.3"
diff --git a/sapicore/data/__init__.py b/sapicore/data/__init__.py
index 29fe2ed..34d67dd 100644
--- a/sapicore/data/__init__.py
+++ b/sapicore/data/__init__.py
@@ -61,10 +61,10 @@ def __init__(self, name: str = "", labels: list | NDArray | Tensor = None, axis:
         self.axis = axis
         self.labels = np.array(labels)
 
-    def __getitem__(self, index: slice) -> Tensor:
+    def __getitem__(self, index: Any) -> Tensor:
         return self.labels[index]
 
-    def __setitem__(self, index: slice, values: Any):
+    def __setitem__(self, index: Any, values: Any):
         self.labels[index] = np.array(values)
 
 
@@ -205,11 +205,11 @@ def __init__(
         # passes silently if not implemented by the user.
         self._standardize()
 
-    def __getitem__(self, index: slice):
+    def __getitem__(self, index: Any):
         """Calls :meth:`access` to slice into the data or access specific file(s), returning the value(s) at `index`."""
         return self.access(index)
 
-    def __setitem__(self, index: slice, values: Tensor):
+    def __setitem__(self, index: Any, values: Tensor):
         """Sets buffer values at the given indices to `values`."""
         self.modify(index, values)
 
@@ -289,7 +289,7 @@ def _standardize(self):
         """
         pass
 
-    def access(self, index: slice, axis: int = None) -> Tensor:
+    def access(self, index: Any, axis: int = None) -> Tensor:
         """Specifies how to access data by mapping indices to actual samples (e.g., from file(s) in `root`).
 
         The default implementation slices into `self.buffer` to accommodate the trivial cases where the user has
@@ -301,7 +301,7 @@ def access(self, index: slice, axis: int = None) -> Tensor:
 
         Parameters
         ----------
-        index: slice
+        index: Any
             Index(es) to slice into.
 
         axis: int, optional
@@ -315,7 +315,7 @@ def access(self, index: slice, axis: int = None) -> Tensor:
 
         """
         return self.buffer.index_select(axis, torch.as_tensor(index)) if axis is not None else self.buffer[index]
 
-    def load(self, indices: slice = None):
+    def load(self, indices: Any = None):
         """Populates the `buffer` tensor buffer and/or `descriptors` attribute table by loading one or more files
         into memory, potentially selecting only `indices`.
@@ -323,7 +323,7 @@ def load(self, indices: slice = None):
 
         Parameters
         ----------
-        indices: slice
+        indices: Any
            Specific indices to include, one for each file.
 
         Returns
@@ -340,7 +340,7 @@
 
         """
         pass
 
-    def modify(self, index: slice, values: Tensor):
+    def modify(self, index: Any, values: Tensor):
         """Set or modify data values at the given indices to `values`.
 
         The default implementation edits the `buffer` field of this :class:`Data` object.
@@ -348,7 +348,7 @@
 
         Parameters
         ----------
-        index: slice
+        index: Any
            Indices to modify.
 
         values: Tensor
@@ -453,13 +453,13 @@ def sample(self, method: Callable, axis: int = 0, **kwargs):
         # trim buffer and labels, returning a new partial dataset without mutating the original.
         return self.trim(index=subset, axis=axis)
 
-    def trim(self, index: slice, axis: int = None):
+    def trim(self, index: Any, axis: int = None):
         """Trims this instance by selecting `indices`, potentially along `axis`, returning a subset of the original
         dataset in terms of both buffer entries and labels/descriptors. Does not mutate the underlying object.
 
         Parameters
         ----------
-        index: slice
+        index: Any
             Index(es) to retain.
 
         axis: int, optional
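
Illustration (not part of the patch): widening `index` from `slice` to `Any` lets anything NumPy/torch-style indexing accepts flow through `__getitem__`/`__setitem__` into `access`/`modify`. A minimal sketch, assuming for brevity that a bare `Data()` can be constructed and its `buffer` assigned directly (both assumptions, not shown in the diff):

```python
import torch

from sapicore.data import Data

# hypothetical setup: construct an empty dataset and attach a buffer by hand.
dataset = Data()
dataset.buffer = torch.arange(12.0).reshape(4, 3)

row = dataset[2]                        # single integer index.
rows = dataset[[0, 3]]                  # list of row indices.
picked = dataset[torch.tensor([0, 1])]  # tensor index.
dataset[0] = torch.zeros(3)             # __setitem__ routes through modify().
```
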
diff --git a/sapicore/data/sampling/__init__.py b/sapicore/data/sampling/__init__.py
index ba91978..895a832 100644
--- a/sapicore/data/sampling/__init__.py
+++ b/sapicore/data/sampling/__init__.py
@@ -56,20 +56,11 @@ def __call__(self, frame: DataFrame, group_keys: str | list[str], n: int | float
         frame["index"] = frame.index
         grouped = frame.groupby(group_keys, group_keys=False)
 
-        if self.stratified:
-            # convert `n` to fraction if need be.
-            if isinstance(n, int):
-                frac = len(frame["index"].tolist()) * n
+        # convert `n` to integer if need be.
+        if isinstance(n, float):
+            n = int(n * len(frame["index"].tolist()))
 
-            # perform stratified sampling of `frac` out of every group.
-            subset = grouped.apply(lambda x: x.sample(frac=frac, replace=self.replace))
-
-        else:
-            # convert `n` to integer if need be.
-            if isinstance(n, float):
-                n = int(n * len(frame["index"].tolist()))
-
-            subset = grouped.apply(lambda x: x.sample(n, replace=self.replace))
+        subset = grouped.apply(lambda x: x.sample(n, replace=self.replace))
 
         return subset["index"].tolist()
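
Illustration (not part of the patch): the sampler now always draws an absolute number of rows per group, converting a float `n` into a count relative to the whole frame first. A standalone sketch of that logic in plain pandas, with a made-up toy frame:

```python
import pandas as pd

frame = pd.DataFrame({"label": ["a"] * 6 + ["b"] * 6, "value": range(12)})
frame["index"] = frame.index

n = 0.25  # 25% of 12 rows -> an absolute count of 3, drawn from every group.
if isinstance(n, float):
    n = int(n * len(frame["index"].tolist()))

grouped = frame.groupby("label", group_keys=False)
subset = grouped.apply(lambda x: x.sample(n, replace=False))
print(subset["index"].tolist())  # six row indices, three per label.
```
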
diff --git a/sapicore/engine/component/__init__.py b/sapicore/engine/component/__init__.py
index 3db0559..bb46b9e 100644
--- a/sapicore/engine/component/__init__.py
+++ b/sapicore/engine/component/__init__.py
@@ -58,6 +58,10 @@ def __init__(self, identifier: str = None, configuration: dict = None, device: s
         self.simulation_step = 0
         self.dt = DT
 
+        # we don't know what attributes derivative component classes might introduce, but we want them initialized.
+        for key, value in kwargs.items():
+            setattr(self, key, value)
+
     def configure(self, configuration: dict[str, Any] = None, log_destination: str = ""):
         """Applies a configuration to this object by adding the keys of `configuration` as instance attributes,
         initializing their values, and updating the `_config_props_` tuple to reflect the new keys.
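
Illustration (not part of the patch): forwarding `**kwargs` to `setattr` means ad hoc parameters reach derivative classes as instance attributes without extra plumbing. A sketch, assuming `Component` (or any concrete subclass) can be instantiated directly with keyword extras:

```python
from sapicore.engine.component import Component

# `cycle_length` and `release_phase` are not declared on Component; they simply become attributes.
comp = Component(identifier="osc", cycle_length=250, release_phase=5)
print(comp.cycle_length, comp.release_phase)  # 250 5
```
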
diff --git a/sapicore/engine/network/__init__.py b/sapicore/engine/network/__init__.py
index 21891d8..51500aa 100644
--- a/sapicore/engine/network/__init__.py
+++ b/sapicore/engine/network/__init__.py
@@ -314,19 +314,6 @@ def add_data_hook(self, data_dir: str, steps: int, *args: Component) -> list:
 
         return hooks
 
-    # To micromanage the forward/backward sweeps, subclass Network and override summation(), forward(), backward().
-    @staticmethod
-    def summation(synaptic_input: list[torch.tensor]) -> torch.tensor:
-        """Adds up inputs from multiple synapse objects onto the same ensemble, given as rows.
-
-        Note
-        ----
-        If your model requires some preprocessing of inputs to the postsynaptic neuron, it can be implemented
-        by overriding this method.
-
-        """
-        return torch.sum(torch.vstack(synaptic_input), dim=0)
-
     def backward(self) -> None:
         """Processes a backward sweep for this network object.
 
@@ -377,9 +364,14 @@ def forward(self, data: torch.tensor) -> dict:
             ensemble_ref = self.graph.nodes[ensemble]["reference"]
 
             if ensemble_ref.identifier not in self.roots:
-                # apply a summation function to synaptic data flowing into this ensemble (torch.sum by default).
+                # apply an aggregation function to synaptic data flowing into this ensemble.
                 if incoming_synapses:
-                    integrated_data = self.summation([synapse.output for synapse in incoming_synapses]).to(self.device)
+                    inputs = [synapse.output for synapse in incoming_synapses]
+                    ids = [synapse.identifier for synapse in incoming_synapses]
+
+                    # aggregation is (micro)managed at the neuron level; torch.sum is used by default.
+                    integrated_data = ensemble_ref.aggregate(inputs, identifiers=ids).to(self.device)
+
                 else:
                     integrated_data = ensemble_ref.input
 
@@ -388,7 +380,8 @@ def forward(self, data: torch.tensor) -> dict:
                 external = [data[self.roots.index(ensemble_ref.identifier)]] if isinstance(data, list) else [data]
                 feedback = [synapse.output for synapse in incoming_synapses]
 
-                integrated_data = self.summation(external + feedback)
+                ids = [f"ext{z}" for z in range(len(external))] + [synapse.identifier for synapse in incoming_synapses]
+                integrated_data = ensemble_ref.aggregate(external + feedback, identifiers=ids)
 
             # forward current ensemble.
             ensemble_ref(integrated_data)
diff --git a/sapicore/engine/neuron/__init__.py b/sapicore/engine/neuron/__init__.py
index 8273865..5c31055 100644
--- a/sapicore/engine/neuron/__init__.py
+++ b/sapicore/engine/neuron/__init__.py
@@ -56,7 +56,7 @@ class Neuron(Component):
 
     Warning
     -------
     When defining `equation` for a custom neuron model, the present value of `voltage` should NOT be added to the
-    right hand side. Do NOT multiply by DT. These operations will be performed as part of the generic Euler forward.
+    right hand side. Do NOT multiply by DT. These operations will be performed within the Integrator.
 
     """
@@ -84,7 +84,7 @@ def __init__(self, equation: Callable = None, integrator: Integrator = None, **k
     def num_units(self):
         """Number of functional units represented by this object.
 
-        Neurons are singletons by coercion, as they are meant to express unit dynamics.
+        Neurons are singletons by coercion, as they are meant to express and encapsulate unit dynamics.
         Derivatives of :class:`~engine.ensemble.Ensemble` can modify this property and duplicate units as necessary.
 
         """
@@ -108,7 +108,7 @@ def forward(self, data: Tensor) -> dict:
         Raises
         ------
         NotImplementedError
-            The forward method must be implemented by each derived class.
+            The forward method must be implemented by derivative classes.
 
         """
         raise NotImplementedError
@@ -138,3 +138,28 @@ def inject(self, current: Tensor):
 
         """
         self.voltage = self.voltage + current
+
+    @staticmethod
+    def aggregate(inputs: list[Tensor], identifiers: list[str] = None) -> Tensor:
+        """Determines how presynaptic inputs from multiple sources should be aggregated.
+
+        By default, neurons sum their inputs. However, many use cases may require more sophistication.
+        Shunting inhibition, for instance, can be expressed with torch.div (or torch.prod, if the source
+        synapse is expected to send the inverse).
+
+        Parameters
+        ----------
+        inputs: list of Tensor
+            Input arriving at this layer, synaptic or external.
+
+        identifiers: list of str, optional
+            Labels by which to micromanage input aggregation. Since some inputs may not be
+            synaptic, users are responsible for passing identifiers in an order matching that of the input tensors.
+
+        Note
+        ----
+        If your model requires identifier-dependent preprocessing of synaptic inputs to this neuron (e.g., a
+        combination of addition and multiplication), it can be implemented by overriding this method.
+
+        """
+        return torch.sum(torch.vstack(inputs), dim=0)
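
Illustration (not part of the patch): input aggregation can now be micromanaged per neuron by overriding `aggregate`. A sketch of shunting inhibition keyed on synapse identifiers; the `"shunt"` naming convention is an assumption made up for this example, not a library convention:

```python
import torch
from torch import Tensor

from sapicore.engine.neuron import Neuron


class ShuntingNeuron(Neuron):
    @staticmethod
    def aggregate(inputs: list[Tensor], identifiers: list[str] = None) -> Tensor:
        # fall back to the default summation when no identifiers are provided.
        stacked = torch.vstack(inputs)
        if not identifiers:
            return torch.sum(stacked, dim=0)

        # split sources into shunting and driving inputs based on their (assumed) identifier suffix.
        shunt = [i for i, name in enumerate(identifiers) if "shunt" in name]
        drive = [i for i in range(len(inputs)) if i not in shunt]

        # divide summed excitatory drive by (1 + summed shunting input).
        return torch.sum(stacked[drive], dim=0) / (1.0 + torch.sum(stacked[shunt], dim=0))
```

Since `Network.forward` now routes incoming synaptic outputs through `ensemble_ref.aggregate(...)`, an ensemble derived from a class like this would pick up the custom rule automatically.
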
diff --git a/sapicore/engine/neuron/analog/__init__.py b/sapicore/engine/neuron/analog/__init__.py
index 4b03353..513efab 100644
--- a/sapicore/engine/neuron/analog/__init__.py
+++ b/sapicore/engine/neuron/analog/__init__.py
@@ -6,7 +6,7 @@
 Analog neurons may perform normalization or provide otherwise transformed input to downstream layers.
 
 """
-from torch import tensor, Tensor
+from torch import Tensor
 
 from sapicore.engine.neuron import Neuron
 
 __all__ = ("AnalogNeuron",)
@@ -31,7 +31,10 @@ def __init__(self, **kwargs):
         super().__init__(**kwargs)
 
     def forward(self, data: Tensor) -> dict:
-        """Adds input `data` to the numeric state stored in the instance attribute tensor `voltage`.
+        """Updates the numeric state stored in the instance attribute tensor `voltage` to `data`.
+
+        These default analog neurons integrate the total input impinging on them on every simulation step.
+
 
         Parameters
         ----------
@@ -50,9 +53,9 @@ def forward(self, data: Tensor) -> dict:
 
         """
         # update internal representation of input current for tensorboard logging purposes.
-        self.input = tensor([data.detach().clone()]) if not data.size() else data.detach().clone()
+        self.input = data
+        self.voltage = data
 
-        self.voltage = self.voltage.add(data)
         self.simulation_step += 1
 
         # return current state(s) of loggable attributes as a dictionary.
diff --git a/sapicore/engine/neuron/spiking/LIF.py b/sapicore/engine/neuron/spiking/LIF.py
index 8ea7f54..34088f4 100644
--- a/sapicore/engine/neuron/spiking/LIF.py
+++ b/sapicore/engine/neuron/spiking/LIF.py
@@ -34,6 +34,12 @@ class LIFNeuron(SpikingNeuron):
     tau_ref: float or Tensor
         Refractory period (e.g., 1.0).
 
+    cycle_length: int, optional
+        Oscillatory cycle period, required to time optional resetting of the refractory period.
+
+    release_phase: int, optional
+        Oscillation phase at which to release all neurons from refractory mode, if required.
+
     References
     ----------
     `LIF Tutorial `_
@@ -110,5 +116,11 @@ def forward(self, data: Tensor) -> dict:
         self.refractory_steps = relu(self.refractory_steps - 1)
         self.simulation_step += 1
 
+        if hasattr(self, "release_phase") and hasattr(self, "cycle_length"):
+            # voltage will start to accumulate at a particular phase, canceling the refractory period across units.
+            if self.simulation_step % self.cycle_length == self.release_phase:
+                self.refractory_steps = torch.zeros_like(self.refractory_steps)
+                self.voltage = self.volt_rest
+
         # return current state(s) of loggable attributes as a dictionary.
         return self.loggable_state()
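
Illustration (not part of the patch): the new optional attributes ride on the `Component` kwargs-to-attribute forwarding added above. A sketch, assuming `LIFNeuron` is constructible with its documented defaults:

```python
from sapicore.engine.neuron.spiking.LIF import LIFNeuron

# every `cycle_length` steps, at phase `release_phase`, refractory counters are zeroed
# and voltage snaps back to rest; omit both kwargs to keep the previous behavior.
lif = LIFNeuron(cycle_length=250, release_phase=5)
print(hasattr(lif, "cycle_length"), hasattr(lif, "release_phase"))  # True True
```
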
diff --git a/sapicore/engine/synapse/STDP.py b/sapicore/engine/synapse/STDP.py
index 66e2dce..bea8d41 100644
--- a/sapicore/engine/synapse/STDP.py
+++ b/sapicore/engine/synapse/STDP.py
@@ -85,6 +85,11 @@ def __init__(
         # in typical cases, should generally be on during training and off during testing.
         self.learning = True
 
+        # keep track of stale spike pairs, so that weights are only updated on new spike events.
+        # alternatively, set `is_discontinuous` to False to update weights on every simulation step.
+        self.is_discontinuous = kwargs.get("is_discontinuous", True)
+        self.stale = torch.ones_like(self.weights).bool()
+
     def update_weights(self) -> Tensor:
         """STDP weight update implementation.
 
@@ -102,27 +107,36 @@ def update_weights(self) -> Tensor:
             -self.dst_last_spiked + self.simulation_step
         )
 
-        # initialize dW matrix--remember the format is dst.num_units by src.num_units.
-        delta_weight = torch.zeros(self.matrix_shape, dtype=torch.float, device=self.device)
-
-        # subtract postsynaptic last spike time stamps from presynaptic.
-        # transpose dst_last_spiked to a column vector, extend column-wise, then add its negative to src_last_spiked.
-        dst_tr = self.dst_last_spiked.reshape(self.dst_last_spiked.shape[0], 1)
-        delta_spike = -dst_tr.repeat(dst_tr.shape[1], self.src_ensemble.num_units) + self.src_last_spiked
+        # ensure action is only taken on pre-post pairs for which one or both units spiked in this cycle.
+        if self.is_discontinuous:
+            self.stale[:] = True
+            self.stale[:, torch.argwhere(torch.as_tensor(self.src_last_spiked == self.simulation_step))] = False
+            self.stale[torch.argwhere(torch.as_tensor(self.dst_last_spiked == self.simulation_step)), :] = False
+        else:
+            self.stale[:] = False
 
-        # spike time differences for the potentiation and depression cases (pre < post, pre > post, respectively).
-        ltp_diffs = (delta_spike < 0.0).int() * delta_spike
-        ltd_diffs = (delta_spike > 0.0).int() * delta_spike
+        # initialize delta weight matrix (destination X source).
+        delta_weight = torch.zeros(self.matrix_shape, dtype=torch.float, device=self.device)
 
-        # add to total delta weight matrix (diffs are in simulation steps, tau are in ms).
-        delta_weight = delta_weight + (delta_spike < 0.0).int() * (
-            self.alpha_plus * torch.exp(ltp_diffs / (self.tau_plus / self.dt))
-        )
-        delta_weight = delta_weight + (delta_spike > 0.0).int() * (
-            -self.alpha_minus * torch.exp(-ltd_diffs / (self.tau_minus / self.dt))
-        )
+        if torch.any(~self.stale) or not self.is_discontinuous:
+            # subtract postsynaptic last spike time stamps from presynaptic.
+            # transpose dst_last_spiked to a column vector, extend column-wise, then subtract from src_last_spiked.
+            dst_tr = self.dst_last_spiked.reshape(self.dst_last_spiked.shape[0], 1)
+            delta_spike = -dst_tr.repeat(dst_tr.shape[1], self.src_ensemble.num_units) + self.src_last_spiked
+
+            # spike time differences for the potentiation and depression cases (pre < post, pre > post, respectively).
+            ltp_diffs = (delta_spike < 0.0).int() * delta_spike
+            ltd_diffs = (delta_spike > 0.0).int() * delta_spike
+
+            # add to total delta weight matrix (diffs are in simulation steps, tau are in ms).
+            delta_weight = delta_weight + ~self.stale * (delta_spike < 0.0).int() * (
+                self.alpha_plus * torch.exp(ltp_diffs / (self.tau_plus / self.dt))
+            )
+            delta_weight = delta_weight + ~self.stale * (delta_spike > 0.0).int() * (
+                -self.alpha_minus * torch.exp(-ltd_diffs / (self.tau_minus / self.dt))
+            )
+            self.weights = self.weights.add(delta_weight)
 
-        self.weights = self.weights.add(delta_weight)
         return delta_weight
 
     def forward(self, data: Tensor) -> dict:
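
Illustration (not part of the patch): a standalone trace of the stale-pair mask that gates the update above. Rows are postsynaptic units, columns presynaptic; shapes and spike times are made up:

```python
import torch

simulation_step = 10
src_last_spiked = torch.tensor([10.0, 7.0, 10.0])  # presynaptic units 0 and 2 spiked this step.
dst_last_spiked = torch.tensor([9.0, 10.0])        # postsynaptic unit 1 spiked this step.

stale = torch.ones(2, 3).bool()
stale[:, torch.argwhere(torch.as_tensor(src_last_spiked == simulation_step))] = False
stale[torch.argwhere(torch.as_tensor(dst_last_spiked == simulation_step)), :] = False

# only pairs where neither side spiked this step stay True (here, row 0 / column 1);
# multiplying by ~stale zeroes their contribution to delta_weight, so they are skipped.
print(stale)
```

Passing `is_discontinuous=False` at construction restores the previous behavior of updating weights on every simulation step.
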
diff --git a/sapicore/engine/synapse/__init__.py b/sapicore/engine/synapse/__init__.py
index 954cf3d..d8e6a82 100644
--- a/sapicore/engine/synapse/__init__.py
+++ b/sapicore/engine/synapse/__init__.py
@@ -321,18 +321,16 @@ def forward(self, data: Tensor) -> dict:
         # enforce weight limits.
         self.weights = torch.clamp(self.weights, self.weight_min, self.weight_max)
 
-        # mask non-connections (repeated on every iteration in case mask was updated during simulation).
-        self.weights = self.weights.multiply_(self.connections)
-
         if self.simple_delays:
-            self.output = torch.matmul(self.weights, self.delayed_data)
+            # mask non-connections (repeated on every iteration in case mask was updated during simulation).
+            self.output = torch.matmul(self.weights * self.connections, self.delayed_data)
 
         else:
             self.delayed_data = self.delayed_data.reshape(self.matrix_shape)
 
             # this loop is unavoidable, as N vector multiplications with different delayed_data are needed.
             for i in range(self.matrix_shape[0]):
-                self.output[i] = torch.matmul(self.weights[i, :], self.delayed_data[i, :])
+                self.output[i] = torch.matmul(self.weights[i, :] * self.connections[i, :], self.delayed_data[i, :])
 
         # advance simulation step and return output dictionary.
         self.simulation_step += 1
diff --git a/sapicore/tests/engine/ensemble/test_ensemble.py b/sapicore/tests/engine/ensemble/test_ensemble.py
index 10974aa..8c5fba1 100644
--- a/sapicore/tests/engine/ensemble/test_ensemble.py
+++ b/sapicore/tests/engine/ensemble/test_ensemble.py
@@ -41,18 +41,6 @@ def test_analog(self, arg_: AnalogEnsemble | OscillatorEnsemble):
         assert torch.all(add_same_shape["voltage"].eq(init_voltage + 1))
         assert torch.all(add_same_shape["voltage"].eq(arg_.voltage))
 
-        # adding a data tensor of a different shape should raise a runtime error if `num_units` > 1.
-        if arg_.num_units > 1:
-            with pytest.raises(RuntimeError):
-                arg_.forward(data=torch.ones((516, 516), device=TEST_DEVICE))
-        else:
-            arg_.forward(data=torch.ones((516, 516), device=TEST_DEVICE))
-
-        # when data tensor not on same device.
-        if torch.cuda.is_available():
-            with pytest.raises(RuntimeError):
-                arg_.forward(data=torch.zeros(1))
-
     @pytest.mark.parametrize(
         "arg_",
         [
diff --git a/sapicore/tests/engine/neuron/test_neuron.py b/sapicore/tests/engine/neuron/test_neuron.py
index ac288f6..7a20034 100644
--- a/sapicore/tests/engine/neuron/test_neuron.py
+++ b/sapicore/tests/engine/neuron/test_neuron.py
@@ -57,23 +57,16 @@ def test_analog(self, arg_: AnalogNeuron):
         # verify behavior of trivial forward method.
         init_voltage = arg_.voltage
 
-        # adding a compatible shape data should increase `voltage` buffer values.
+        # adding a compatible shape data should update `voltage` buffer values.
         add_same_shape = arg_.forward(data=torch.ones_like(init_voltage, device=TEST_DEVICE))
-        assert add_same_shape["voltage"] == init_voltage + 1.0
-        assert add_same_shape["voltage"] == arg_.voltage
 
         # adding a data tensor of a different shape should overwrite content of `voltage` buffer.
         add_diff_len = arg_.forward(
             data=torch.ones((init_voltage.shape[0] + 2, init_voltage.shape[0] + 2), device=TEST_DEVICE)
         )
-        assert torch.all(add_diff_len["voltage"].eq(torch.ones_like(add_same_shape["voltage"]) + 1))
+        assert torch.all(add_diff_len["voltage"].eq(torch.ones_like(add_same_shape["voltage"])))
         assert torch.all(add_diff_len["voltage"].eq(arg_.voltage))
 
-        # when data tensor not on same device.
-        if torch.cuda.is_available():
-            with pytest.raises(RuntimeError):
-                arg_.forward(data=torch.zeros(1, device=torch.device("cpu")))
-
     @pytest.mark.parametrize(
         "arg_",
         [
diff --git a/setup.py b/setup.py
index 266c66b..81ca761 100644
--- a/setup.py
+++ b/setup.py
@@ -15,9 +15,6 @@
     "tac29@cornell.edu",
     classifiers=[
        "Development Status :: 4 - Beta",
-        "Programming Language :: Python :: 3.7",
-        "Programming Language :: Python :: 3.8",
-        "Programming Language :: Python :: 3.9",
        "Programming Language :: Python :: 3.10",
        "Programming Language :: Python :: 3.11",
    ],
@@ -39,6 +36,7 @@
        "alive_progress",
        "tree-config",
        "pytest",
+        "setuptools",
    ],
    extras_require={
        "dev": ["coverage", "flake8", "sphinx<7.0.0", "sphinx-rtd-theme", "m2r2"],