diff --git a/neuralpredictors/layers/encoders/firing_rate.py b/neuralpredictors/layers/encoders/firing_rate.py
index 4a27d5e8..59dc0699 100644
--- a/neuralpredictors/layers/encoders/firing_rate.py
+++ b/neuralpredictors/layers/encoders/firing_rate.py
@@ -39,7 +39,9 @@ def __init__(
         self.offset = elu_offset
 
         if nonlinearity_type != "elu" and not np.isclose(elu_offset, 0.0):
-            warnings.warn("If `nonlinearity_type` is not 'elu', `elu_offset` will be ignored")
+            warnings.warn(
+                "If `nonlinearity_type` is not 'elu', `elu_offset` will be ignored"
+            )
         if nonlinearity_type == "elu":
             self.nonlinearity_fn = nn.ELU()
         elif nonlinearity_type == "identity":
@@ -80,7 +82,7 @@ def forward(
             x = self.modulator[data_key](x, behavior=behavior)
 
         if self.nonlinearity_type == "elu":
-            return self.nonlinearity_fn(x + self.offset) + 1
+            return self.nonlinearity_fn(x) + 1 + self.offset
         else:
             return self.nonlinearity_fn(x)
 
diff --git a/neuralpredictors/measures/zero_inflated_losses.py b/neuralpredictors/measures/zero_inflated_losses.py
index 918d7e5e..9069332a 100644
--- a/neuralpredictors/measures/zero_inflated_losses.py
+++ b/neuralpredictors/measures/zero_inflated_losses.py
@@ -17,8 +17,8 @@ def forward(self, target, output, **kwargs):
         if loc.requires_grad:
             self.multi_clamp(loc, [0.0] * neurons_n, target.max(dim=0)[0])
 
-        zero_mask = target < loc
-        nonzero_mask = target >= loc
+        zero_mask = target <= loc
+        nonzero_mask = target > loc
 
         # spike loss
         spike_logl = torch.log(1 - q) - torch.log(loc)
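
Note (illustrative sketch, not part of the patch; the tensor values below are made up): the old ELU branch applies the offset inside the nonlinearity, while the new one adds it to the already-positive output, so the two only agree where the pre-activations clear ELU's negative regime. The mask change moves targets exactly equal to loc into the "zero" (spike) branch.

    import torch
    import torch.nn as nn

    elu = nn.ELU()
    x = torch.tensor([-2.0, -0.5, 0.0, 1.5])  # toy pre-activations (assumed)
    offset = 1.0                              # stand-in for elu_offset

    old_out = elu(x + offset) + 1   # offset shifts the input of ELU
    new_out = elu(x) + 1 + offset   # offset shifts the output of ELU
    # Both stay strictly positive for offset >= 0, but they differ wherever
    # x + offset still falls in ELU's exponential (negative) region.
    print(old_out, new_out)

    # Mask boundary: a target exactly equal to loc now counts as "zero".
    target = torch.tensor([0.0, 0.05, 0.2])   # toy responses (assumed)
    loc = torch.tensor([0.05, 0.05, 0.05])
    print(target <= loc, target > loc)        # new masks; old code used < and >=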