From 8a786d5a698ba5332426ded6a72fc0244ddfb12f Mon Sep 17 00:00:00 2001 From: Marcel Stimberg Date: Tue, 10 Sep 2024 11:16:46 +0200 Subject: [PATCH] test random functions separately --- brian2/tests/test_GSL.py | 682 ----- brian2/tests/test_base.py | 105 - brian2/tests/test_clocks.py | 66 - brian2/tests/test_codegen.py | 657 ----- brian2/tests/test_codestrings.py | 109 - brian2/tests/test_complex_examples.py | 43 - brian2/tests/test_devices.py | 120 - brian2/tests/test_equations.py | 678 ----- brian2/tests/test_functions.py | 1200 -------- brian2/tests/test_logger.py | 173 -- brian2/tests/test_memory.py | 140 - brian2/tests/test_monitor.py | 727 ----- brian2/tests/test_morphology.py | 1601 ----------- brian2/tests/test_namespaces.py | 163 -- brian2/tests/test_network.py | 1865 ------------ brian2/tests/test_neurongroup.py | 15 +- brian2/tests/test_numpy_codegen.py | 24 - brian2/tests/test_parsing.py | 586 ---- brian2/tests/test_poissongroup.py | 131 - brian2/tests/test_poissoninput.py | 109 - brian2/tests/test_preferences.py | 342 --- brian2/tests/test_refractory.py | 386 --- brian2/tests/test_spatialneuron.py | 996 ------- brian2/tests/test_spikegenerator.py | 455 --- brian2/tests/test_spikequeue.py | 65 - brian2/tests/test_stateupdaters.py | 988 ------- brian2/tests/test_subgroup.py | 926 ------ brian2/tests/test_synapses.py | 3776 ------------------------- brian2/tests/test_thresholder.py | 28 - brian2/tests/test_timedarray.py | 138 - brian2/tests/test_units.py | 1579 ----------- brian2/tests/test_utils.py | 39 - brian2/tests/test_variables.py | 80 - 33 files changed, 11 insertions(+), 18981 deletions(-) delete mode 100644 brian2/tests/test_GSL.py delete mode 100644 brian2/tests/test_base.py delete mode 100644 brian2/tests/test_clocks.py delete mode 100644 brian2/tests/test_codegen.py delete mode 100644 brian2/tests/test_codestrings.py delete mode 100644 brian2/tests/test_complex_examples.py delete mode 100644 brian2/tests/test_devices.py delete mode 100644 brian2/tests/test_equations.py delete mode 100644 brian2/tests/test_functions.py delete mode 100644 brian2/tests/test_logger.py delete mode 100644 brian2/tests/test_memory.py delete mode 100644 brian2/tests/test_monitor.py delete mode 100644 brian2/tests/test_morphology.py delete mode 100644 brian2/tests/test_namespaces.py delete mode 100644 brian2/tests/test_network.py delete mode 100644 brian2/tests/test_numpy_codegen.py delete mode 100644 brian2/tests/test_parsing.py delete mode 100644 brian2/tests/test_poissongroup.py delete mode 100644 brian2/tests/test_poissoninput.py delete mode 100644 brian2/tests/test_preferences.py delete mode 100644 brian2/tests/test_refractory.py delete mode 100644 brian2/tests/test_spatialneuron.py delete mode 100644 brian2/tests/test_spikegenerator.py delete mode 100644 brian2/tests/test_spikequeue.py delete mode 100644 brian2/tests/test_stateupdaters.py delete mode 100644 brian2/tests/test_subgroup.py delete mode 100644 brian2/tests/test_synapses.py delete mode 100644 brian2/tests/test_thresholder.py delete mode 100644 brian2/tests/test_timedarray.py delete mode 100644 brian2/tests/test_units.py delete mode 100644 brian2/tests/test_utils.py delete mode 100644 brian2/tests/test_variables.py diff --git a/brian2/tests/test_GSL.py b/brian2/tests/test_GSL.py deleted file mode 100644 index d8c7e7635..000000000 --- a/brian2/tests/test_GSL.py +++ /dev/null @@ -1,682 +0,0 @@ -import functools - -import pytest - -from brian2 import * -from brian2.codegen.runtime.GSLcython_rt import IntegrationError -from 
brian2.core.preferences import PreferenceError -from brian2.stateupdaters.base import UnsupportedEquationsException -from brian2.tests.utils import exc_isinstance - -max_difference = 0.1 * mV - -pytestmark = pytest.mark.gsl - - -def skip_if_not_implemented(func): - @functools.wraps(func) - def wrapped(): - try: - func() - except (BrianObjectException, NotImplementedError) as exc: - if not ( - isinstance(exc, NotImplementedError) - or isinstance(exc.__cause__, NotImplementedError) - ): - raise - pytest.skip("GSL support for numpy has not been implemented yet") - - return wrapped - - -@pytest.mark.standalone_compatible -@skip_if_not_implemented -def test_GSL_stateupdater_basic(): - # just the adaptive_threshold example: run for exponential_euler and GSL and see - # if results are comparable (same amount of spikes and spikes > 0) - eqs = """ - dv/dt = -v/(10*ms) : volt - dvt/dt = (10*mV-vt)/(15*ms) : volt - """ - reset = """ - v = 0*mV - vt += 3*mV - """ - neurons_conventional = NeuronGroup( - 1, model=eqs, reset=reset, threshold="v>vt", method="exponential_euler" - ) - neurons_GSL = NeuronGroup(1, model=eqs, reset=reset, threshold="v>vt", method="gsl") - neurons_conventional.vt = 10 * mV - neurons_GSL.vt = 10 * mV - # 50 'different' neurons so no neuron spikes more than once per dt - P = SpikeGeneratorGroup(1, [0] * 50, np.arange(50) / 50.0 * 100 * ms) - C_conventional = Synapses(P, neurons_conventional, on_pre="v += 3*mV") - C_GSL = Synapses(P, neurons_GSL, on_pre="v += 3*mV") - C_conventional.connect() - C_GSL.connect() - SM_conventional = SpikeMonitor(neurons_conventional, variables="v") - SM_GSL = SpikeMonitor(neurons_GSL, variables="v") - net = Network( - neurons_conventional, - neurons_GSL, - P, - C_conventional, - C_GSL, - SM_conventional, - SM_GSL, - ) - net.run(100 * ms) - assert ( - SM_conventional.num_spikes > 0 - ), "simulation should produce spiking, but no spikes monitored" - assert SM_conventional.num_spikes == SM_GSL.num_spikes, ( - "GSL_statupdater produced different number of spikes than integration with ", - "exponential euler", - ) - - -@pytest.mark.standalone_compatible -@skip_if_not_implemented -def test_GSL_different_clocks(): - vt = 10 * mV - eqs = "dv/dt = -v/(10*ms) : volt" - neurons = NeuronGroup(1, model=eqs, threshold="v>vt", method="gsl", dt=0.2 * ms) - # for this test just check if it compiles - run(0 * ms) - - -@pytest.mark.standalone_compatible -@skip_if_not_implemented -def test_GSL_default_function(): - # phase_locking example - tau = 20 * ms - n = 100 - b = 1.2 # constant current mean, the modulation varies - freq = 10 * Hz - eqs = """ - dv/dt = (-v + a * sin(2 * pi * freq * t) + b) / tau : 1 - a : 1 - """ - vrand = rand(n) - neurons_conventional = NeuronGroup( - n, model=eqs, threshold="v > 1", reset="v = 0", method="exponential_euler" - ) - neurons_GSL = NeuronGroup( - n, model=eqs, threshold="v > 1", reset="v = 0", method="gsl" - ) - neurons_conventional.v = vrand - neurons_GSL.v = vrand - neurons_conventional.a = "0.05 + 0.7*i/n" - neurons_GSL.a = "0.05 + 0.7*i/n" - - trace_conventional = StateMonitor(neurons_conventional, "v", record=50) - trace_GSL = StateMonitor(neurons_GSL, "v", record=50) - net = Network(neurons_conventional, neurons_GSL, trace_conventional, trace_GSL) - net.run(10 * ms) - - assert ( - max(trace_conventional.v[0] - trace_GSL.v[0]) < max_difference / mV - ), "difference between conventional and GSL output is larger than max_difference" - - -@pytest.mark.standalone_compatible -@skip_if_not_implemented -def 
test_GSL_user_defined_function(): - # phase_locking example with user_defined sin - eqs = """ - dv/dt = (-v + a * sin(2 * pi * freq * t) + b) / tau : 1 - a : 1 - """ - - @implementation( - "cpp", - """ - double user_sin(double phase) - { - return sin(phase); - } - """, - ) - @implementation( - "cython", - """ - cdef double user_sin(double phase): - return sin(phase) - """, - ) - @check_units(phase=1, result=1) - def user_sin(phase): - raise Exception - - tau = 20 * ms - n = 100 - b = 1.2 # constant current mean, the modulation varies - freq = 10 * Hz - eqs = """ - dv/dt = (-v + a * user_sin(2 * pi * freq * t) + b) / tau : 1 - a : 1 - """ - vrand = rand(n) - neurons_conventional = NeuronGroup( - n, model=eqs, threshold="v > 1", reset="v = 0", method="exponential_euler" - ) - neurons_GSL = NeuronGroup( - n, model=eqs, threshold="v > 1", reset="v = 0", method="gsl" - ) - neurons_conventional.v = vrand - neurons_GSL.v = vrand - neurons_conventional.a = "0.05 + 0.7*i/n" - neurons_GSL.a = "0.05 + 0.7*i/n" - - trace_conventional = StateMonitor(neurons_conventional, "v", record=50) - trace_GSL = StateMonitor(neurons_GSL, "v", record=50) - net = Network(neurons_conventional, neurons_GSL, trace_conventional, trace_GSL) - net.run(10 * ms) - - assert ( - max(trace_conventional.v[0] - trace_GSL.v[0]) < max_difference / mV - ), "difference between conventional and GSL output is larger than max_difference" - # assert not all(trace_conventional.v[0]==trace_GSL.v[0]), \ - # ('output of GSL stateupdater is exactly the same as Brians stateupdater (unlikely to be right)') - - -@pytest.mark.standalone_compatible -@skip_if_not_implemented -def test_GSL_x_variable(): - neurons = NeuronGroup( - 2, "dx/dt = 300*Hz : 1", threshold="x>1", reset="x=0", method="gsl" - ) - # just testing compilation - run(0 * ms) - - -@pytest.mark.codegen_independent -def test_GSL_failing_directory(): - def set_dir(arg): - prefs.GSL.directory = arg - - with pytest.raises(PreferenceError): - set_dir(1) - with pytest.raises(PreferenceError): - set_dir("/usr/") - with pytest.raises(PreferenceError): - set_dir("/usr/blablabla/") - - -@pytest.mark.codegen_independent -@skip_if_not_implemented -def test_GSL_stochastic(): - tau = 20 * ms - sigma = 0.015 - eqs = """ - dx/dt = (1.1 - x) / tau + sigma * (2 / tau)**.5 * xi : 1 - """ - neuron = NeuronGroup(1, eqs, method="gsl") - net = Network(neuron) - with pytest.raises(BrianObjectException) as exc: - net.run(0 * ms, namespace={"tau": tau, "sigma": sigma}) - assert exc_isinstance( - exc, UnsupportedEquationsException, raise_not_implemented=True - ) - - -@pytest.mark.standalone_compatible -@skip_if_not_implemented -def test_GSL_error_dimension_mismatch_unit(): - eqs = """ - dv/dt = (v0 - v)/(10*ms) : volt - v0 : volt - """ - options = {"absolute_error_per_variable": {"v": 1 * nS}} - neuron = NeuronGroup( - 1, - eqs, - threshold="v > 10*mV", - reset="v = 0*mV", - method="gsl", - method_options=options, - ) - net = Network(neuron) - with pytest.raises(BrianObjectException) as exc: - net.run(0 * ms, namespace={}) - assert exc_isinstance(exc, DimensionMismatchError, raise_not_implemented=True) - - -@pytest.mark.standalone_compatible -@skip_if_not_implemented -def test_GSL_error_dimension_mismatch_dimensionless1(): - eqs = """ - dv/dt = (v0 - v)/(10*ms) : 1 - v0 : 1 - """ - options = {"absolute_error_per_variable": {"v": 1 * mV}} - neuron = NeuronGroup( - 1, eqs, threshold="v > 10", reset="v = 0", method="gsl", method_options=options - ) - net = Network(neuron) - with 
pytest.raises(BrianObjectException) as exc: - net.run(0 * ms, namespace={}) - assert exc_isinstance(exc, DimensionMismatchError, raise_not_implemented=True) - - -@pytest.mark.standalone_compatible -@skip_if_not_implemented -def test_GSL_error_dimension_mismatch_dimensionless2(): - eqs = """ - dv/dt = (v0 - v)/(10*ms) : volt - v0 : volt - """ - options = {"absolute_error_per_variable": {"v": 1e-3}} - neuron = NeuronGroup( - 1, - eqs, - threshold="v > 10*mV", - reset="v = 0*mV", - method="gsl", - method_options=options, - ) - net = Network(neuron) - with pytest.raises(BrianObjectException) as exc: - net.run(0 * ms, namespace={}) - assert exc_isinstance(exc, DimensionMismatchError, raise_not_implemented=True) - - -@pytest.mark.standalone_compatible -@skip_if_not_implemented -def test_GSL_error_nonexisting_variable(): - eqs = """ - dv/dt = (v0 - v)/(10*ms) : volt - v0 : volt - """ - options = {"absolute_error_per_variable": {"dummy": 1e-3 * mV}} - neuron = NeuronGroup( - 1, - eqs, - threshold="v > 10*mV", - reset="v = 0*mV", - method="gsl", - method_options=options, - ) - net = Network(neuron) - with pytest.raises(BrianObjectException) as exc: - net.run(0 * ms, namespace={}) - assert exc_isinstance(exc, KeyError, raise_not_implemented=True) - - -@pytest.mark.standalone_compatible -@skip_if_not_implemented -def test_GSL_error_incorrect_error_format(): - eqs = """ - dv/dt = (v0 - v)/(10*ms) : volt - v0 : volt - """ - options = {"absolute_error_per_variable": object()} - neuron = NeuronGroup( - 1, - eqs, - threshold="v > 10*mV", - reset="v = 0*mV", - method="gsl", - method_options=options, - ) - net = Network(neuron) - options2 = {"absolute_error": "not a float"} - neuron2 = NeuronGroup( - 1, - eqs, - threshold="v > 10*mV", - reset="v = 0*mV", - method="gsl", - method_options=options2, - ) - net2 = Network(neuron2) - with pytest.raises(BrianObjectException) as exc: - net.run(0 * ms, namespace={}) - assert exc_isinstance(exc, TypeError, raise_not_implemented=True) - with pytest.raises(BrianObjectException) as exc: - net2.run(0 * ms, namespace={}) - assert exc_isinstance(exc, TypeError, raise_not_implemented=True) - - -@pytest.mark.standalone_compatible -@skip_if_not_implemented -def test_GSL_error_nonODE_variable(): - eqs = """ - dv/dt = (v0 - v)/(10*ms) : volt - v0 : volt - """ - options = {"absolute_error_per_variable": {"v0": 1e-3 * mV}} - neuron = NeuronGroup( - 1, - eqs, - threshold="v > 10*mV", - reset="v = 0*mV", - method="gsl", - method_options=options, - ) - net = Network(neuron) - with pytest.raises(BrianObjectException) as exc: - net.run(0 * ms, namespace={}) - assert exc_isinstance(exc, KeyError, raise_not_implemented=True) - - -@pytest.mark.standalone_compatible -@skip_if_not_implemented -def test_GSL_error_bounds(): - runtime = 50 * ms - error1 = 1e-2 * volt - error2 = 1e-4 * volt - error3 = 1e-6 * volt # default error - eqs = """ - dv/dt = (stimulus(t) + -v)/(.1*ms) : volt - """ - stimulus = TimedArray(rand(int(runtime / (10 * ms))) * 3 * volt, dt=5 * ms) - neuron1 = NeuronGroup( - 1, - model=eqs, - reset="v=0*mV", - threshold="v>10*volt", - method="gsl", - method_options={"absolute_error_per_variable": {"v": error1}}, - dt=1 * ms, - ) - neuron2 = NeuronGroup( - 1, - model=eqs, - reset="v=0*mV", - threshold="v>10*volt", - method="gsl", - method_options={"absolute_error_per_variable": {"v": error2}}, - dt=1 * ms, - ) - neuron3 = NeuronGroup( - 1, - model=eqs, - reset="v=0*mV", - threshold="v>10*volt", - method="gsl", - method_options={"absolute_error_per_variable": {}}, - dt=1 * ms, 
- ) # Uses default error - neuron_control = NeuronGroup(1, model=eqs, method="linear", dt=1 * ms) - mon1 = StateMonitor(neuron1, "v", record=True) - mon2 = StateMonitor(neuron2, "v", record=True) - mon3 = StateMonitor(neuron3, "v", record=True) - mon_control = StateMonitor(neuron_control, "v", record=True) - run(runtime) - err1 = abs(mon1.v[0] - mon_control.v[0]) - err2 = abs(mon2.v[0] - mon_control.v[0]) - err3 = abs(mon3.v[0] - mon_control.v[0]) - assert ( - max(err1) < error1 - ), f"Error bound exceeded, error bound: {error1:e}, obtained error: {max(err1):e}" - assert max(err2) < error2, "Error bound exceeded" - assert max(err3) < error3, "Error bound exceeded" - assert max(err1) > max( - err2 - ), "The simulation with smaller error bound produced a bigger maximum error" - assert max(err2) > max( - err3 - ), "The simulation with smaller error bound produced a bigger maximum error" - - -@pytest.mark.standalone_compatible -@skip_if_not_implemented -def test_GSL_non_autonomous(): - eqs = """dv/dt = sin(2*pi*freq*t)/ms : 1 - freq : Hz""" - neuron = NeuronGroup(10, eqs, method="gsl") - neuron.freq = "i*10*Hz + 10*Hz" - neuron2 = NeuronGroup(10, eqs, method="euler") - neuron2.freq = "i*10*Hz + 10*Hz" - mon = StateMonitor(neuron, "v", record=True) - mon2 = StateMonitor(neuron2, "v", record=True) - run(20 * ms) - abs_err = np.abs(mon.v.T - mon2.v.T) - max_allowed = 1000 * np.finfo(prefs.core.default_float_dtype).eps - assert np.max(abs_err) < max_allowed - - -@pytest.mark.standalone_compatible -@skip_if_not_implemented -def test_GSL_non_autonomous(): - eqs = """dv/dt = sin(2*pi*freq*t)/ms : 1 - freq : Hz""" - neuron = NeuronGroup(10, eqs, method="gsl") - neuron.freq = "i*10*Hz + 10*Hz" - neuron2 = NeuronGroup(10, eqs, method="euler") - neuron2.freq = "i*10*Hz + 10*Hz" - mon = StateMonitor(neuron, "v", record=True) - mon2 = StateMonitor(neuron2, "v", record=True) - run(20 * ms) - abs_err = np.abs(mon.v.T - mon2.v.T) - max_allowed = 1000 * np.finfo(prefs.core.default_float_dtype).eps - assert np.max(abs_err) < max_allowed - - -@pytest.mark.standalone_compatible -@skip_if_not_implemented -def test_GSL_refractory(): - eqs = """dv/dt = 99.99*Hz : 1 (unless refractory)""" - neuron = NeuronGroup( - 1, eqs, method="gsl", threshold="v>1", reset="v=0", refractory=3 * ms - ) - neuron2 = NeuronGroup( - 1, eqs, method="euler", threshold="v>1", reset="v=0", refractory=3 * ms - ) - mon = SpikeMonitor(neuron, "v") - mon2 = SpikeMonitor(neuron2, "v") - run(20 * ms) - assert mon.count[0] == mon2.count[0] - - -@skip_if_not_implemented -def test_GSL_save_step_count(): - eqs = """ - dv/dt = -v/(.1*ms) : volt - """ - neuron = NeuronGroup( - 1, model=eqs, method="gsl", method_options={"save_step_count": True}, dt=1 * ms - ) - run(0 * ms) - mon = StateMonitor(neuron, "_step_count", record=True, when="end") - run(10 * ms) - assert mon._step_count[0][0] > 0, "Monitor did not save GSL step count" - - -HH_namespace = { - "Cm": 1 * ufarad * cm**-2, - "gl": 5e-5 * siemens * cm**-2, - "El": -65 * mV, - "EK": -90 * mV, - "ENa": 50 * mV, - "g_na": 100 * msiemens * cm**-2, - "g_kd": 30 * msiemens * cm**-2, - "VT": -63 * mV, -} - -HH_eqs = Equations( - """ - dv/dt = (gl*(El-v) - g_na*(m*m*m)*h*(v-ENa) - g_kd*(n*n*n*n)*(v-EK) + I)/Cm : volt - dm/dt = 0.32*(mV**-1)*(13.*mV-v+VT)/ - (exp((13.*mV-v+VT)/(4.*mV))-1.)/ms*(1-m)-0.28*(mV**-1)*(v-VT-40.*mV)/ - (exp((v-VT-40.*mV)/(5.*mV))-1.)/ms*m : 1 - dn/dt = 0.032*(mV**-1)*(15.*mV-v+VT)/ - (exp((15.*mV-v+VT)/(5.*mV))-1.)/ms*(1.-n)-.5*exp((10.*mV-v+VT)/(40.*mV))/ms*n : 1 - dh/dt = 
0.128*exp((17.*mV-v+VT)/(18.*mV))/ms*(1.-h)-4./(1+exp((40.*mV-v+VT)/(5.*mV)))/ms*h : 1 - I : amp/metre**2 - """ -) - - -@pytest.mark.standalone_compatible -@skip_if_not_implemented -def test_GSL_fixed_timestep_big_dt_small_error(): - # should raise integration error - neuron = NeuronGroup( - 1, - model=HH_eqs, - threshold="v > -40*mV", - refractory="v > -40*mV", - method="gsl", - method_options={"adaptable_timestep": False, "absolute_error": 1e-12}, - dt=0.001 * ms, - namespace=HH_namespace, - ) - neuron.I = 0.7 * nA / (20000 * umetre**2) - neuron.v = HH_namespace["El"] - net = Network(neuron) - with pytest.raises((BrianObjectException, RuntimeError)): - net.run(10 * ms) - - -@pytest.mark.codegen_independent -@skip_if_not_implemented -def test_GSL_internal_variable(): - with pytest.raises(SyntaxError): - Equations("d_p/dt = 300*Hz : 1") - - -@pytest.mark.standalone_compatible -@skip_if_not_implemented -def test_GSL_method_options_neurongroup(): - neuron1 = NeuronGroup( - 1, - model="dp/dt = 300*Hz : 1", - method="gsl", - method_options={"adaptable_timestep": True}, - ) - neuron2 = NeuronGroup( - 1, - model="dp/dt = 300*Hz : 1", - method="gsl", - method_options={"adaptable_timestep": False}, - ) - run(0 * ms) - assert "if (gsl_odeiv2_driver_apply_fixed_step" not in str( - neuron1.state_updater.codeobj.code - ), "This neuron should not call gsl_odeiv2_driver_apply_fixed_step()" - assert "if (gsl_odeiv2_driver_apply_fixed_step" in str( - neuron2.state_updater.codeobj.code - ), "This neuron should call gsl_odeiv2_driver_apply_fixed_step()" - - -@pytest.mark.standalone_compatible -@skip_if_not_implemented -def test_GSL_method_options_spatialneuron(): - morpho = Soma(30 * um) - eqs = """ - Im = g * v : amp/meter**2 - dg/dt = siemens/metre**2/second : siemens/metre**2 - """ - neuron1 = SpatialNeuron( - morphology=morpho, - model=eqs, - Cm=1 * uF / cm**2, - Ri=100 * ohm * cm, - method="gsl_rkf45", - method_options={"adaptable_timestep": True}, - ) - neuron2 = SpatialNeuron( - morphology=morpho, - model=eqs, - Cm=1 * uF / cm**2, - Ri=100 * ohm * cm, - method="gsl_rkf45", - method_options={"adaptable_timestep": False}, - ) - run(0 * ms) - assert "if (gsl_odeiv2_driver_apply_fixed_step" not in str( - neuron1.state_updater.codeobj.code - ), "This neuron should not call gsl_odeiv2_driver_apply_fixed_step()" - assert "if (gsl_odeiv2_driver_apply_fixed_step" in str( - neuron2.state_updater.codeobj.code - ), "This neuron should call gsl_odeiv2_driver_apply_fixed_step()" - - -@skip_if_not_implemented -def test_GSL_method_options_synapses(): - N = 1000 - taum = 10 * ms - taupre = 20 * ms - taupost = taupre - Ee = 0 * mV - vt = -54 * mV - vr = -60 * mV - El = -74 * mV - taue = 5 * ms - F = 15 * Hz - gmax = 0.01 - dApre = 0.01 - dApost = -dApre * taupre / taupost * 1.05 - dApost *= gmax - dApre *= gmax - eqs_neurons = """ - dv/dt = (ge * (Ee-vr) + El - v) / taum : volt - dge/dt = -ge / taue : 1 - """ - poisson_input = PoissonGroup(N, rates=F) - neurons = NeuronGroup( - 1, eqs_neurons, threshold="v>vt", reset="v = vr", method="gsl_rkf45" - ) - S1 = Synapses( - poisson_input, - neurons, - """ - w : 1 - dApre/dt = -Apre / taupre : 1 (clock-driven) - dApost/dt = -Apost / taupost : 1 (clock-driven) - """, - method="gsl_rkf45", - method_options={"adaptable_timestep": True}, - ) - S2 = Synapses( - poisson_input, - neurons, - """ - w : 1 - dApre/dt = -Apre / taupre : 1 (clock-driven) - dApost/dt = -Apost / taupost : 1 (clock-driven) - """, - method="gsl_rkf45", - method_options={"adaptable_timestep": False}, - ) - 
run(0 * ms) - assert "if (gsl_odeiv2_driver_apply_fixed_step" not in str( - S1.state_updater.codeobj.code - ), "This state_updater should not call gsl_odeiv2_driver_apply_fixed_step()" - assert "if (gsl_odeiv2_driver_apply_fixed_step" in str( - S2.state_updater.codeobj.code - ), "This state_updater should call gsl_odeiv2_driver_apply_fixed_step()" - - -if __name__ == "__main__": - from _pytest.outcomes import Skipped - - for t in [ - test_GSL_stateupdater_basic, - test_GSL_different_clocks, - test_GSL_default_function, - test_GSL_user_defined_function, - test_GSL_x_variable, - test_GSL_failing_directory, - test_GSL_stochastic, - test_GSL_error_dimension_mismatch_unit, - test_GSL_error_dimension_mismatch_dimensionless1, - test_GSL_error_dimension_mismatch_dimensionless2, - test_GSL_error_nonexisting_variable, - test_GSL_error_incorrect_error_format, - test_GSL_error_nonODE_variable, - test_GSL_error_bounds, - test_GSL_non_autonomous, - test_GSL_refractory, - test_GSL_save_step_count, - test_GSL_fixed_timestep_big_dt_small_error, - test_GSL_method_options_neurongroup, - test_GSL_method_options_spatialneuron, - test_GSL_method_options_synapses, - ]: - try: - t() - except Skipped as ex: - print(f"Skipped: {t.__name__} ({str(ex)})") diff --git a/brian2/tests/test_base.py b/brian2/tests/test_base.py deleted file mode 100644 index be2a3f68e..000000000 --- a/brian2/tests/test_base.py +++ /dev/null @@ -1,105 +0,0 @@ -import pytest -from numpy.testing import assert_equal - -from brian2 import * -from brian2.devices.device import reinit_and_delete -from brian2.tests.utils import assert_allclose - - -class DerivedBrianObject(BrianObject): - def __init__(self, name="derivedbrianobject*"): - super().__init__(name=name) - - def __str__(self): - return self.name - - __repr__ = __str__ - - -@pytest.mark.codegen_independent -def test_base(): - x = DerivedBrianObject("x") - y = DerivedBrianObject("y") - assert_equal(x.when, "start") - assert_equal(x.order, 0) - assert_equal(len(x.contained_objects), 0) - with pytest.raises(AttributeError): - setattr(x, "contained_objects", []) - x.contained_objects.append(y) - assert_equal(len(x.contained_objects), 1) - assert x.contained_objects[0] is y - - assert_equal(x.active, True) - assert_equal(y.active, True) - y.active = False - assert_equal(x.active, True) - assert_equal(y.active, False) - y.active = True - assert_equal(x.active, True) - assert_equal(y.active, True) - x.active = False - assert_equal(x.active, False) - assert_equal(y.active, False) - - -@pytest.mark.codegen_independent -def test_names(): - obj = BrianObject() - obj2 = BrianObject() - obj3 = DerivedBrianObject() - assert_equal(obj.name, "brianobject") - assert_equal(obj2.name, "brianobject_1") - assert_equal(obj3.name, "derivedbrianobject") - - -@pytest.mark.codegen_independent -def test_duplicate_names(): - # duplicate names are allowed, as long as they are not part of the - # same network - obj1 = BrianObject(name="name1") - obj2 = BrianObject(name="name2") - obj3 = BrianObject(name="name") - obj4 = BrianObject(name="name") - net = Network(obj1, obj2) - # all is good - net.run(0 * ms) - net = Network(obj3, obj4) - with pytest.raises(ValueError): - net.run(0 * ms) - - -@pytest.mark.standalone_compatible -@pytest.mark.multiple_runs -def test_active_flag(): - G = NeuronGroup(1, "dv/dt = 1/ms : 1") - mon = StateMonitor(G, "v", record=0) - mon.active = False - run(1 * ms) - mon.active = True - G.active = False - run(1 * ms) - device.build(direct_call=False, **device.build_options) - # Monitor should 
start recording at 1ms - # Neurongroup should not integrate after 1ms (but should have integrated before) - assert_allclose(mon[0].t[0], 1 * ms) - assert_allclose(mon[0].v, 1.0) - - -@pytest.mark.codegen_independent -def test_version(): - # Check that determining the Brian version works correctly - import brian2 - - version = brian2.__version__ - assert version.startswith("2.") - - # Check that the version tuple is correct - version_tuple = brian2.__version_tuple__ - assert version_tuple == tuple(int(i) for i in version.split(".")[:3]) - - -if __name__ == "__main__": - test_base() - test_names() - test_duplicate_names() - test_active_flag() diff --git a/brian2/tests/test_clocks.py b/brian2/tests/test_clocks.py deleted file mode 100644 index ef9249dd5..000000000 --- a/brian2/tests/test_clocks.py +++ /dev/null @@ -1,66 +0,0 @@ -import pytest -from numpy.testing import assert_array_equal, assert_equal - -from brian2 import * -from brian2.utils.logger import catch_logs - - -@pytest.mark.codegen_independent -def test_clock_attributes(): - clock = Clock(dt=1 * ms) - assert_array_equal(clock.t, 0 * second) - assert_array_equal(clock.timestep, 0) - assert_array_equal(clock.dt, 1 * ms) - - -@pytest.mark.codegen_independent -def test_clock_dt_change(): - clock = Clock(dt=1 * ms) - # at time 0s, all dt changes should be allowed - clock.dt = 0.75 * ms - clock._set_t_update_dt() - clock.dt = 2.5 * ms - clock._set_t_update_dt() - clock.dt = 1 * ms - clock._set_t_update_dt() - - # at 0.1ms only changes that are still representable as an integer of the - # current time 1s are allowed - clock.dt = 0.1 * ms - clock._set_t_update_dt() - - clock.dt = 0.05 * ms - clock._set_t_update_dt(target_t=0.1 * ms) - clock.dt = 0.1 * ms - clock._set_t_update_dt(target_t=0.1 * ms) - clock.dt = 0.3 * ms - with pytest.raises(ValueError): - clock._set_t_update_dt(target_t=0.1 * ms) - - -@pytest.mark.codegen_independent -def test_defaultclock(): - defaultclock.dt = 1 * ms - assert_equal(defaultclock.dt, 1 * ms) - assert defaultclock.name == "defaultclock" - - -@pytest.mark.codegen_independent -def test_set_interval_warning(): - clock = Clock(dt=0.1 * ms) - with catch_logs() as logs: - clock.set_interval(0 * second, 1000 * second) # no problem - assert len(logs) == 0 - with catch_logs() as logs: - clock.set_interval(0 * second, 10000000000 * second) # too long - assert len(logs) == 1 - assert logs[0][1].endswith("many_timesteps") - - -if __name__ == "__main__": - test_clock_attributes() - restore_initial_state() - test_clock_dt_change() - restore_initial_state() - test_defaultclock() - test_set_interval_warning() diff --git a/brian2/tests/test_codegen.py b/brian2/tests/test_codegen.py deleted file mode 100644 index fdc03cdf2..000000000 --- a/brian2/tests/test_codegen.py +++ /dev/null @@ -1,657 +0,0 @@ -import json -import os -import platform -import socket -from collections import namedtuple - -import numpy as np -import pytest - -from brian2 import _cache_dirs_and_extensions, clear_cache, prefs -from brian2.codegen.codeobject import CodeObject -from brian2.codegen.cpp_prefs import compiler_supports_c99, get_compiler_and_args -from brian2.codegen.generators.cython_generator import CythonNodeRenderer -from brian2.codegen.optimisation import optimise_statements -from brian2.codegen.runtime.cython_rt import CythonCodeObject -from brian2.codegen.statements import Statement -from brian2.codegen.translation import ( - analyse_identifiers, - get_identifiers_recursively, - make_statements, - parse_statement, -) -from 
brian2.core.functions import DEFAULT_CONSTANTS, DEFAULT_FUNCTIONS, Function -from brian2.core.variables import ArrayVariable, Constant, Subexpression, Variable -from brian2.devices.device import auto_target, device -from brian2.parsing.rendering import CPPNodeRenderer, NodeRenderer, NumpyNodeRenderer -from brian2.parsing.sympytools import str_to_sympy, sympy_to_str -from brian2.units import ms, second -from brian2.units.fundamentalunits import Unit -from brian2.utils.logger import catch_logs - -FakeGroup = namedtuple("FakeGroup", ["variables"]) - - -@pytest.mark.codegen_independent -def test_auto_target(): - # very basic test that the "auto" codegen target is useable - assert issubclass(auto_target(), CodeObject) - - -@pytest.mark.codegen_independent -def test_analyse_identifiers(): - """ - Test that the analyse_identifiers function works on a simple clear example. - """ - code = """ - a = b+c - d = e+f - """ - known = { - "b": Variable(name="b"), - "c": Variable(name="c"), - "d": Variable(name="d"), - "g": Variable(name="g"), - } - - defined, used_known, dependent = analyse_identifiers(code, known) - assert "a" in defined # There might be an additional constant added by the - # loop-invariant optimisation - assert used_known == {"b", "c", "d"} - assert dependent == {"e", "f"} - - -@pytest.mark.codegen_independent -def test_get_identifiers_recursively(): - """ - Test finding identifiers including subexpressions. - """ - variables = { - "sub1": Subexpression( - name="sub1", - dtype=np.float32, - expr="sub2 * z", - owner=FakeGroup(variables={}), - device=None, - ), - "sub2": Subexpression( - name="sub2", - dtype=np.float32, - expr="5 + y", - owner=FakeGroup(variables={}), - device=None, - ), - "x": Variable(name="x"), - } - identifiers = get_identifiers_recursively(["_x = sub1 + x"], variables) - assert identifiers == {"x", "_x", "y", "z", "sub1", "sub2"} - - -@pytest.mark.codegen_independent -def test_write_to_subexpression(): - variables = { - "a": Subexpression( - name="a", - dtype=np.float32, - owner=FakeGroup(variables={}), - device=None, - expr="2*z", - ), - "z": Variable(name="z"), - } - - # Writing to a subexpression is not allowed - code = "a = z" - with pytest.raises(SyntaxError): - make_statements(code, variables, np.float32) - - -@pytest.mark.codegen_independent -def test_repeated_subexpressions(): - variables = { - "a": Subexpression( - name="a", - dtype=np.float32, - owner=FakeGroup(variables={}), - device=None, - expr="2*z", - ), - "x": Variable(name="x"), - "y": Variable(name="y"), - "z": Variable(name="z"), - } - # subexpression a (referring to z) is used twice, but can be reused the - # second time (no change to z) - code = """ - x = a - y = a - """ - scalar_stmts, vector_stmts = make_statements(code, variables, np.float32) - assert len(scalar_stmts) == 0 - assert [stmt.var for stmt in vector_stmts] == ["a", "x", "y"] - assert vector_stmts[0].constant - - code = """ - x = a - z *= 2 - """ - scalar_stmts, vector_stmts = make_statements(code, variables, np.float32) - assert len(scalar_stmts) == 0 - assert [stmt.var for stmt in vector_stmts] == ["a", "x", "z"] - # Note that we currently do not mark the subexpression as constant in this - # case, because its use after the "z *=2" line would actually redefine it. 
- # Our algorithm is currently not smart enough to detect that it is actually - # not used afterwards - - # a refers to z, therefore we have to redefine a after z changed, and a - # cannot be constant - code = """ - x = a - z *= 2 - y = a - """ - scalar_stmts, vector_stmts = make_statements(code, variables, np.float32) - assert len(scalar_stmts) == 0 - assert [stmt.var for stmt in vector_stmts] == ["a", "x", "z", "a", "y"] - assert not any(stmt.constant for stmt in vector_stmts) - - -@pytest.mark.codegen_independent -def test_nested_subexpressions(): - """ - This test checks that code translation works with nested subexpressions. - """ - code = """ - x = a + b + c - c = 1 - x = a + b + c - d = 1 - x = a + b + c - """ - variables = { - "a": Subexpression( - name="a", - dtype=np.float32, - owner=FakeGroup(variables={}), - device=None, - expr="b*b+d", - ), - "b": Subexpression( - name="b", - dtype=np.float32, - owner=FakeGroup(variables={}), - device=None, - expr="c*c*c", - ), - "c": Variable(name="c"), - "d": Variable(name="d"), - } - scalar_stmts, vector_stmts = make_statements(code, variables, np.float32) - assert len(scalar_stmts) == 0 - evalorder = "".join(stmt.var for stmt in vector_stmts) - # This is the order that variables ought to be evaluated in (note that - # previously this test did not expect the last "b" evaluation, because its - # value did not change (c was not changed). We have since removed this - # subexpression caching, because it did not seem to apply in practical - # use cases) - assert evalorder == "baxcbaxdbax" - - -@pytest.mark.codegen_independent -def test_apply_loop_invariant_optimisation(): - variables = { - "v": Variable("v", scalar=False), - "w": Variable("w", scalar=False), - "dt": Constant("dt", dimensions=second.dim, value=0.1 * ms), - "tau": Constant("tau", dimensions=second.dim, value=10 * ms), - "exp": DEFAULT_FUNCTIONS["exp"], - } - statements = [ - Statement("v", "=", "dt*w*exp(-dt/tau)/tau + v*exp(-dt/tau)", "", np.float32), - Statement("w", "=", "w*exp(-dt/tau)", "", np.float32), - ] - scalar, vector = optimise_statements([], statements, variables) - # The optimisation should pull out at least exp(-dt / tau) - assert len(scalar) >= 1 - assert np.issubdtype(scalar[0].dtype, np.floating) - assert scalar[0].var == "_lio_1" - assert len(vector) == 2 - assert all("_lio_" in stmt.expr for stmt in vector) - - -@pytest.mark.codegen_independent -def test_apply_loop_invariant_optimisation_integer(): - variables = { - "v": Variable("v", scalar=False), - "N": Constant("N", 10), - "b": Variable("b", scalar=True, dtype=int), - "c": Variable("c", scalar=True, dtype=int), - "d": Variable("d", scalar=True, dtype=int), - "y": Variable("y", scalar=True, dtype=float), - "z": Variable("z", scalar=True, dtype=float), - "w": Variable("w", scalar=True, dtype=float), - } - statements = [ - Statement("v", "=", "v % (2*3*N)", "", np.float32), - # integer version doesn't get rewritten but float version does - Statement("a", ":=", "b//(c//d)", "", int), - Statement("x", ":=", "y/(z/w)", "", float), - ] - scalar, vector = optimise_statements([], statements, variables) - assert len(scalar) == 3 - assert np.issubdtype(scalar[0].dtype, np.signedinteger) - assert scalar[0].var == "_lio_1" - expr = scalar[0].expr.replace(" ", "") - assert expr == "6*N" or expr == "N*6" - assert np.issubdtype(scalar[1].dtype, np.signedinteger) - assert scalar[1].var == "_lio_2" - expr = scalar[1].expr.replace(" ", "") - assert expr == "b//(c//d)" - assert np.issubdtype(scalar[2].dtype, np.floating) - 
assert scalar[2].var == "_lio_3" - expr = scalar[2].expr.replace(" ", "") - assert expr == "(y*w)/z" or expr == "(w*y)/z" - - -@pytest.mark.codegen_independent -def test_apply_loop_invariant_optimisation_boolean(): - variables = { - "v1": Variable("v1", scalar=False), - "v2": Variable("v2", scalar=False), - "N": Constant("N", 10), - "b": Variable("b", scalar=True, dtype=bool), - "c": Variable("c", scalar=True, dtype=bool), - "int": DEFAULT_FUNCTIONS["int"], - "foo": Function( - lambda x: None, - arg_units=[Unit(1)], - return_unit=Unit(1), - arg_types=["boolean"], - return_type="float", - stateless=False, - ), - } - # The calls for "foo" cannot be pulled out, since foo is marked as stateful - statements = [ - Statement("v1", "=", "1.0*int(b and c)", "", np.float32), - Statement("v1", "=", "1.0*foo(b and c)", "", np.float32), - Statement("v2", "=", "int(not b and True)", "", np.float32), - Statement("v2", "=", "foo(not b and True)", "", np.float32), - ] - scalar, vector = optimise_statements([], statements, variables) - assert len(scalar) == 4 - assert scalar[0].expr == "1.0 * int(b and c)" - assert scalar[1].expr == "b and c" - assert scalar[2].expr == "int((not b) and True)" - assert scalar[3].expr == "(not b) and True" - assert len(vector) == 4 - assert vector[0].expr == "_lio_1" - assert vector[1].expr == "foo(_lio_2)" - assert vector[2].expr == "_lio_3" - assert vector[3].expr == "foo(_lio_4)" - - -@pytest.mark.codegen_independent -def test_apply_loop_invariant_optimisation_no_optimisation(): - variables = { - "v1": Variable("v1", scalar=False), - "v2": Variable("v2", scalar=False), - "N": Constant("N", 10), - "s1": Variable("s1", scalar=True, dtype=float), - "s2": Variable("s2", scalar=True, dtype=float), - "rand": DEFAULT_FUNCTIONS["rand"], - } - statements = [ - # This should not be simplified to 0! 
- Statement("v1", "=", "rand() - rand()", "", float), - Statement("v1", "=", "3*rand() - 3*rand()", "", float), - Statement("v1", "=", "3*rand() - ((1+2)*rand())", "", float), - # This should not pull out rand()*N - Statement("v1", "=", "s1*rand()*N", "", float), - Statement("v1", "=", "s2*rand()*N", "", float), - # This is not important mathematically, but it would change the numbers - # that are generated - Statement("v1", "=", "0*rand()*N", "", float), - Statement("v1", "=", "0/rand()*N", "", float), - ] - scalar, vector = optimise_statements([], statements, variables) - for vs in vector[:3]: - assert ( - vs.expr.count("rand()") == 2 - ), f"Expression should still contain two rand() calls, but got {str(vs)}" - for vs in vector[3:]: - assert ( - vs.expr.count("rand()") == 1 - ), f"Expression should still contain a rand() call, but got {str(vs)}" - - -@pytest.mark.codegen_independent -def test_apply_loop_invariant_optimisation_simplification(): - variables = { - "v1": Variable("v1", scalar=False), - "v2": Variable("v2", scalar=False), - "i1": Variable("i1", scalar=False, dtype=int), - "N": Constant("N", 10), - } - statements = [ - # Should be simplified to 0.0 - Statement("v1", "=", "v1 - v1", "", float), - Statement("v1", "=", "N*v1 - N*v1", "", float), - Statement("v1", "=", "v1*N * 0", "", float), - Statement("v1", "=", "v1 * 0", "", float), - Statement("v1", "=", "v1 * 0.0", "", float), - Statement("v1", "=", "0.0 / (v1*N)", "", float), - # Should be simplified to 0 - Statement("i1", "=", "i1*N * 0", "", int), - Statement("i1", "=", "0 * i1", "", int), - Statement("i1", "=", "0 * i1*N", "", int), - Statement("i1", "=", "i1 * 0", "", int), - # Should be simplified to v1*N - Statement("v2", "=", "0 + v1*N", "", float), - Statement("v2", "=", "v1*N + 0.0", "", float), - Statement("v2", "=", "v1*N - 0", "", float), - Statement("v2", "=", "v1*N - 0.0", "", float), - Statement("v2", "=", "1 * v1*N", "", float), - Statement("v2", "=", "1.0 * v1*N", "", float), - Statement("v2", "=", "v1*N / 1.0", "", float), - Statement("v2", "=", "v1*N / 1", "", float), - # Should be simplified to i1 - Statement("i1", "=", "i1*1", "", int), - Statement("i1", "=", "i1//1", "", int), - Statement("i1", "=", "i1+0", "", int), - Statement("i1", "=", "0+i1", "", int), - Statement("i1", "=", "i1-0", "", int), - # Should *not* be simplified (because it would change the type, - # important for integer division, for example) - Statement("v1", "=", "i1*1.0", "", float), - Statement("v1", "=", "1.0*i1", "", float), - Statement("v1", "=", "i1/1.0", "", float), - Statement("v1", "=", "i1/1", "", float), - Statement("v1", "=", "i1+0.0", "", float), - Statement("v1", "=", "0.0+i1", "", float), - Statement("v1", "=", "i1-0.0", "", float), - ## Should *not* be simplified, flooring division by 1 changes the value - Statement("v1", "=", "v2//1.0", "", float), - Statement("i1", "=", "i1//1.0", "", float), # changes type - ] - scalar, vector = optimise_statements([], statements, variables) - assert len(scalar) == 0 - for s in vector[:6]: - assert s.expr == "0.0" - for s in vector[6:10]: - assert s.expr == "0", s.expr # integer - for s in vector[10:18]: - expr = s.expr.replace(" ", "") - assert expr == "v1*N" or expr == "N*v1" - for s in vector[18:23]: - expr = s.expr.replace(" ", "") - assert expr == "i1" - for s in vector[23:27]: - expr = s.expr.replace(" ", "") - assert expr == "1.0*i1" or expr == "i1*1.0" or expr == "i1/1.0" - for s in vector[27:30]: - expr = s.expr.replace(" ", "") - assert expr == "0.0+i1" or expr == 
"i1+0.0" - for s in vector[30:31]: - expr = s.expr.replace(" ", "") - assert expr == "v2//1.0" or expr == "v2//1" - for s in vector[31:]: - expr = s.expr.replace(" ", "") - assert expr == "i1//1.0" - - -@pytest.mark.codegen_independent -def test_apply_loop_invariant_optimisation_constant_evaluation(): - variables = { - "v1": Variable("v1", scalar=False), - "v2": Variable("v2", scalar=False), - "i1": Variable("i1", scalar=False, dtype=int), - "N": Constant("N", 10), - "s1": Variable("s1", scalar=True, dtype=float), - "s2": Variable("s2", scalar=True, dtype=float), - "exp": DEFAULT_FUNCTIONS["exp"], - } - statements = [ - Statement("v1", "=", "v1 * (1 + 2 + 3)", "", float), - Statement("v1", "=", "exp(N)*v1", "", float), - Statement("v1", "=", "exp(0)*v1", "", float), - ] - scalar, vector = optimise_statements([], statements, variables) - # exp(N) should be pulled out of the vector statements, the rest should be - # evaluated in place - assert len(scalar) == 1 - assert scalar[0].expr == "exp(N)" - assert len(vector) == 3 - expr = vector[0].expr.replace(" ", "") - assert expr == "_lio_1*v1" or "v1*_lio_1" - expr = vector[1].expr.replace(" ", "") - assert expr == "6.0*v1" or "v1*6.0" - assert vector[2].expr == "v1" - - -@pytest.mark.codegen_independent -def test_automatic_augmented_assignments(): - # We test that statements that could be rewritten as augmented assignments - # are correctly rewritten (using sympy to test for symbolic equality) - variables = { - "x": ArrayVariable("x", owner=None, size=10, device=device), - "y": ArrayVariable("y", owner=None, size=10, device=device), - "z": ArrayVariable("y", owner=None, size=10, device=device), - "b": ArrayVariable("b", owner=None, size=10, dtype=bool, device=device), - "clip": DEFAULT_FUNCTIONS["clip"], - "inf": DEFAULT_CONSTANTS["inf"], - } - statements = [ - # examples that should be rewritten - # Note that using our approach, we will never get -= or /= but always - # the equivalent += or *= statements - ("x = x + 1.0", "x += 1.0"), - ("x = 2.0 * x", "x *= 2.0"), - ("x = x - 3.0", "x += -3.0"), - ("x = x/2.0", "x *= 0.5"), - ("x = y + (x + 1.0)", "x += y + 1.0"), - ("x = x + x", "x *= 2"), - ("x = x + y + z", "x += y + z"), - ("x = x + y + z", "x += y + z"), - # examples that should not be rewritten - ("x = 1.0/x", "x = 1.0/x"), - ("x = 1.0", "x = 1.0"), - ("x = 2.0*(x + 1.0)", "x = 2.0*(x + 1.0)"), - ("x = clip(x + y, 0.0, inf)", "x = clip(x + y, 0.0, inf)"), - ("b = b or False", "b = b or False"), - ] - for orig, rewritten in statements: - scalar, vector = make_statements(orig, variables, np.float32) - try: # we augment the assertion error with the original statement - assert ( - len(scalar) == 0 - ), f"Did not expect any scalar statements but got {str(scalar)}" - assert ( - len(vector) == 1 - ), f"Did expect a single statement but got {str(vector)}" - statement = vector[0] - expected_var, expected_op, expected_expr, _ = parse_statement(rewritten) - assert ( - expected_var == statement.var - ), f"expected write to variable {expected_var}, not to {statement.var}" - assert ( - expected_op == statement.op - ), f"expected operation {expected_op}, not {statement.op}" - # Compare the two expressions using sympy to allow for different order etc. 
- sympy_expected = str_to_sympy(expected_expr) - sympy_actual = str_to_sympy(statement.expr) - assert sympy_expected == sympy_actual, ( - f"RHS expressions '{sympy_to_str(sympy_expected)}' and" - f" '{sympy_to_str(sympy_actual)}' are not identical" - ) - except AssertionError as ex: - raise AssertionError( - f"Transformation for statement '{orig}' gave an unexpected result: {ex}" - ) - - -@pytest.mark.codegen_independent -@pytest.mark.parametrize( - "s", - [ - "x, y = 3", - "x * y", - "x = ", - "x.a = 3", - "x++", - "x[0] = 3", - "dx/dt = -v / tau", - "v == 3*mV", - ], -) -def test_incorrect_statements(s): - with pytest.raises(ValueError): - parse_statement(s) - - -def test_clear_cache(): - target = prefs.codegen.target - if target == "numpy": - assert "numpy" not in _cache_dirs_and_extensions - with pytest.raises(ValueError): - clear_cache("numpy") - else: - assert target in _cache_dirs_and_extensions - cache_dir, _ = _cache_dirs_and_extensions[target] - # Create a file that should not be there - fname = os.path.join(cache_dir, "some_file.py") - open(fname, "w").close() - # clear_cache should refuse to clear the directory - with pytest.raises(IOError): - clear_cache(target) - - os.remove(fname) - - -@pytest.mark.skipif( - platform.system() == "Windows", - reason="CC and CXX variables are ignored on Windows.", -) -def test_compiler_error(): - # In particular on OSX with clang in a conda environment, compilation might fail. - # Switching to a system gcc might help in such cases. Make sure that the error - # message mentions that. - old_CC = os.environ.get("CC", None) - old_CXX = os.environ.get("CXX", None) - os.environ.update({"CC": "non-existing-compiler", "CXX": "non-existing-compiler++"}) - try: - with catch_logs() as l: - assert not CythonCodeObject.is_available() - assert len(l) > 0 # There are additional warnings about compiler flags - last_warning = l[-1] - assert last_warning[1].endswith(".failed_compile_test") - assert "CC" in last_warning[2] and "CXX" in last_warning[2] - - finally: - if old_CC: - os.environ["CC"] = old_CC - else: - del os.environ["CC"] - if old_CXX: - os.environ["CXX"] = old_CXX - else: - del os.environ["CXX"] - - -def test_compiler_c99(): - # On a user's computer, we do not know whether the compiler actually - # has C99 support, so we just check whether the test does not raise an - # error - - # The compiler check previously created spurious '-.o' files (see #1348) - if os.path.exists("-.o"): - os.remove("-.o") - c99_support = compiler_supports_c99() - assert not os.path.exists("-.o") - # On our Azure test server we know that the compilers support C99 - if os.environ.get("AGENT_OS", ""): - assert c99_support - - -def test_cpp_flags_support(): - from distutils.ccompiler import get_default_compiler - - from brian2.codegen.cpp_prefs import _compiler_flag_compatibility - - _compiler_flag_compatibility.clear() # make sure cache is empty - compiler = get_default_compiler() - if compiler == "msvc": - pytest.skip("No flag support check for msvc") - old_prefs = prefs["codegen.cpp.extra_compile_args"] - - # Should always be supported - prefs["codegen.cpp.extra_compile_args"] = ["-w"] - _, compile_args = get_compiler_and_args() - assert compile_args == prefs["codegen.cpp.extra_compile_args"] - - # Should never be supported and raise a warning - prefs["codegen.cpp.extra_compile_args"] = ["-invalidxyz"] - with catch_logs() as l: - _, compile_args = get_compiler_and_args() - assert len(l) == 1 and l[0][0] == "WARNING" - assert compile_args == [] - - 
prefs["codegen.cpp.extra_compile_args"] = old_prefs - - -@pytest.mark.skipif( - platform.system() != "Windows", reason="MSVC flags are only relevant on Windows" -) -@pytest.mark.skipif( - prefs["codegen.target"] == "numpy", reason="Test only relevant for compiled code" -) -def test_msvc_flags(): - # Very basic test that flags are stored to disk - import brian2.codegen.cpp_prefs as cpp_prefs - - user_dir = os.path.join(os.path.expanduser("~"), ".brian") - flag_file = os.path.join(user_dir, "cpu_flags.txt") - assert len(cpp_prefs.msvc_arch_flag) - assert os.path.exists(flag_file) - with open(flag_file, encoding="utf-8") as f: - previously_stored_flags = json.load(f) - hostname = socket.gethostname() - assert hostname in previously_stored_flags - assert len(previously_stored_flags[hostname]) - - -@pytest.mark.codegen_independent -@pytest.mark.parametrize( - "renderer", - [ - NodeRenderer(), - NumpyNodeRenderer(), - CythonNodeRenderer(), - CPPNodeRenderer(), - ], -) -def test_number_rendering(renderer): - import ast - - for number in [0.5, np.float32(0.5), np.float64(0.5)]: - # In numpy 2.0, repr(np.float64(0.5)) is 'np.float64(0.5)' - node = ast.Constant(value=number) - assert renderer.render_node(node) == "0.5" - - -if __name__ == "__main__": - test_auto_target() - test_analyse_identifiers() - test_get_identifiers_recursively() - test_write_to_subexpression() - test_repeated_subexpressions() - test_nested_subexpressions() - test_apply_loop_invariant_optimisation() - test_apply_loop_invariant_optimisation_integer() - test_apply_loop_invariant_optimisation_boolean() - test_apply_loop_invariant_optimisation_no_optimisation() - test_apply_loop_invariant_optimisation_simplification() - test_apply_loop_invariant_optimisation_constant_evaluation() - test_automatic_augmented_assignments() - test_clear_cache() - test_msvc_flags() diff --git a/brian2/tests/test_codestrings.py b/brian2/tests/test_codestrings.py deleted file mode 100644 index eb84075fa..000000000 --- a/brian2/tests/test_codestrings.py +++ /dev/null @@ -1,109 +0,0 @@ -import numpy as np -import pytest -import sympy -from numpy.testing import assert_equal - -import brian2 -from brian2 import ( - DimensionMismatchError, - Expression, - Hz, - Statements, - get_dimensions, - ms, - mV, - second, - volt, -) -from brian2.core.preferences import prefs -from brian2.utils.logger import catch_logs - - -def sympy_equals(expr1, expr2): - """ - Test that whether two string expressions are equal using sympy, allowing - e.g. for ``sympy_equals("x * x", "x ** 2") == True``. - """ - s_expr1 = sympy.nsimplify(sympy.sympify(expr1).expand()) - s_expr2 = sympy.nsimplify(sympy.sympify(expr2).expand()) - return s_expr1 == s_expr2 - - -@pytest.mark.codegen_independent -def test_expr_creation(): - """ - Test creating expressions. 
- """ - expr = Expression("v > 5 * mV") - assert expr.code == "v > 5 * mV" - assert ( - "v" in expr.identifiers - and "mV" in expr.identifiers - and not "V" in expr.identifiers - ) - with pytest.raises(SyntaxError): - Expression("v 5 * mV") - - -@pytest.mark.codegen_independent -def test_split_stochastic(): - tau = 5 * ms - expr = Expression("(-v + I) / tau") - # No stochastic part - assert expr.split_stochastic() == (expr, None) - - # No non-stochastic part -- note that it should return 0 and not None - expr = Expression("sigma*xi/tau**.5") - non_stochastic, stochastic = expr.split_stochastic() - assert sympy_equals(non_stochastic.code, 0) - assert "xi" in stochastic - assert len(stochastic) == 1 - assert sympy_equals(stochastic["xi"].code, "sigma/tau**.5") - - expr = Expression("(-v + I) / tau + sigma*xi/tau**.5") - non_stochastic, stochastic = expr.split_stochastic() - assert "xi" in stochastic - assert len(stochastic) == 1 - assert sympy_equals(non_stochastic.code, "(-v + I) / tau") - assert sympy_equals(stochastic["xi"].code, "sigma/tau**.5") - - expr = Expression("(-v + I) / tau + sigma*xi_1/tau**.5 + xi_2*sigma2/sqrt(tau_2)") - non_stochastic, stochastic = expr.split_stochastic() - assert set(stochastic.keys()) == {"xi_1", "xi_2"} - assert sympy_equals(non_stochastic.code, "(-v + I) / tau") - assert sympy_equals(stochastic["xi_1"].code, "sigma/tau**.5") - assert sympy_equals(stochastic["xi_2"].code, "sigma2/tau_2**.5") - - expr = Expression("-v / tau + 1 / xi") - with pytest.raises(ValueError): - expr.split_stochastic() - - -@pytest.mark.codegen_independent -def test_str_repr(): - """ - Test the string representation of expressions and statements. Assumes that - __str__ returns the complete expression/statement string and __repr__ a - string of the form "Expression(...)" or "Statements(...)" that can be - evaluated. 
- """ - expr_string = "(v - I)/ tau" - expr = Expression(expr_string) - - # use sympy to check for equivalence of expressions (terms may have be - # re-arranged by sympy) - assert sympy_equals(expr_string, str(expr)) - assert sympy_equals(expr_string, eval(repr(expr)).code) - - # Use exact string equivalence for statements - statement_string = "v += w" - statement = Statements(statement_string) - - assert str(statement) == "v += w" - assert repr(statement) == "Statements('v += w')" - - -if __name__ == "__main__": - test_expr_creation() - test_split_stochastic() - test_str_repr() diff --git a/brian2/tests/test_complex_examples.py b/brian2/tests/test_complex_examples.py deleted file mode 100644 index 9b918461f..000000000 --- a/brian2/tests/test_complex_examples.py +++ /dev/null @@ -1,43 +0,0 @@ -import pytest - -from brian2 import * -from brian2.devices.device import reinit_and_delete - - -@pytest.mark.standalone_compatible -def test_cuba(): - taum = 20 * ms - taue = 5 * ms - taui = 10 * ms - Vt = -50 * mV - Vr = -60 * mV - El = -49 * mV - - eqs = """ - dv/dt = (ge+gi-(v-El))/taum : volt (unless refractory) - dge/dt = -ge/taue : volt - dgi/dt = -gi/taui : volt - """ - - P = NeuronGroup(4000, eqs, threshold="v>Vt", reset="v = Vr", refractory=5 * ms) - P.v = "Vr + rand() * (Vt - Vr)" - P.ge = 0 * mV - P.gi = 0 * mV - - we = (60 * 0.27 / 10) * mV # excitatory synaptic weight (voltage) - wi = (-20 * 4.5 / 10) * mV # inhibitory synaptic weight - Ce = Synapses(P, P, on_pre="ge += we") - Ci = Synapses(P, P, on_pre="gi += wi") - Ce.connect("i<3200", p=0.02) - Ci.connect("i>=3200", p=0.02) - - s_mon = SpikeMonitor(P) - - run(10 * ms) - - assert len(Ce) > 0 - assert len(Ci) > 0 - - -if __name__ == "__main__": - test_cuba() diff --git a/brian2/tests/test_devices.py b/brian2/tests/test_devices.py deleted file mode 100644 index 38f68e20b..000000000 --- a/brian2/tests/test_devices.py +++ /dev/null @@ -1,120 +0,0 @@ -import numpy as np -import pytest -from numpy.testing import assert_equal - -from brian2.core.magic import run -from brian2.devices.device import ( - Device, - RuntimeDevice, - all_devices, - get_device, - reset_device, - runtime_device, - set_device, -) -from brian2.groups.neurongroup import NeuronGroup -from brian2.units import ms - - -class ATestDevice(Device): - def activate(self, build_on_run, **kwargs): - super().activate(build_on_run, **kwargs) - self.build_on_run = build_on_run - self._options = kwargs - - # These functions are needed during the setup of the defaultclock - def get_value(self, var): - return np.array([0.0001]) - - def add_array(self, var): - pass - - def init_with_zeros(self, var, dtype): - pass - - def fill_with_array(self, var, arr): - pass - - -@pytest.mark.codegen_independent -def test_set_reset_device_implicit(): - from brian2.devices import device_module - - old_prev_devices = list(device_module.previous_devices) - device_module.previous_devices = [] - test_device1 = ATestDevice() - all_devices["test1"] = test_device1 - test_device2 = ATestDevice() - all_devices["test2"] = test_device2 - - set_device("test1", build_on_run=False, my_opt=1) - set_device("test2", build_on_run=True, my_opt=2) - assert get_device() is test_device2 - assert get_device()._options["my_opt"] == 2 - assert get_device().build_on_run - - reset_device() - assert get_device() is test_device1 - assert get_device()._options["my_opt"] == 1 - assert not get_device().build_on_run - - reset_device() - assert get_device() is runtime_device - - reset_device() # If there is no previous device, will 
reset to runtime device - assert get_device() is runtime_device - del all_devices["test1"] - del all_devices["test2"] - device_module.previous_devices = old_prev_devices - - -@pytest.mark.codegen_independent -def test_set_reset_device_explicit(): - original_device = get_device() - test_device1 = ATestDevice() - all_devices["test1"] = test_device1 - test_device2 = ATestDevice() - all_devices["test2"] = test_device2 - test_device3 = ATestDevice() - all_devices["test3"] = test_device3 - - set_device("test1", build_on_run=False, my_opt=1) - set_device("test2", build_on_run=True, my_opt=2) - set_device("test3", build_on_run=False, my_opt=3) - - reset_device("test1") # Directly jump back to the first device - assert get_device() is test_device1 - assert get_device()._options["my_opt"] == 1 - assert not get_device().build_on_run - - del all_devices["test1"] - del all_devices["test2"] - del all_devices["test3"] - reset_device(original_device) - - -@pytest.mark.skipif( - not isinstance(get_device(), RuntimeDevice), - reason="Getting/setting random number state only supported for runtime device.", -) -def test_get_set_random_generator_state(): - group = NeuronGroup(10, "dv/dt = -v/(10*ms) + (10*ms)**-0.5*xi : 1", method="euler") - group.v = "rand()" - run(10 * ms) - assert np.var(group.v) > 0 # very basic test for randomness ;) - old_v = np.array(group.v) - random_state = get_device().get_random_state() - group.v = "rand()" - run(10 * ms) - assert np.var(group.v - old_v) > 0 # just checking for *some* difference - old_v = np.array(group.v) - get_device().set_random_state(random_state) - group.v = "rand()" - run(10 * ms) - assert_equal(group.v, old_v) - - -if __name__ == "__main__": - test_set_reset_device_implicit() - test_set_reset_device_explicit() - test_get_set_random_generator_state() diff --git a/brian2/tests/test_equations.py b/brian2/tests/test_equations.py deleted file mode 100644 index ed985c465..000000000 --- a/brian2/tests/test_equations.py +++ /dev/null @@ -1,678 +0,0 @@ -import sys - -import numpy as np - -try: - from IPython.lib.pretty import pprint -except ImportError: - pprint = None -import pytest - -from brian2 import Equations, Expression, Hz, Unit, farad, metre, ms, mV, second, volt -from brian2.core.namespace import DEFAULT_UNITS -from brian2.equations.equations import ( - BOOLEAN, - DIFFERENTIAL_EQUATION, - FLOAT, - INTEGER, - PARAMETER, - SUBEXPRESSION, - EquationError, - SingleEquation, - check_identifier_basic, - check_identifier_constants, - check_identifier_functions, - check_identifier_reserved, - check_identifier_units, - dimensions_and_type_from_string, - extract_constant_subexpressions, - parse_string_equations, -) -from brian2.equations.refractory import check_identifier_refractory -from brian2.groups.group import Group -from brian2.units.fundamentalunits import ( - DIMENSIONLESS, - DimensionMismatchError, - get_dimensions, -) - - -# a simple Group for testing -class SimpleGroup(Group): - def __init__(self, variables, namespace=None): - self.variables = variables - self.namespace = namespace - - -@pytest.mark.codegen_independent -def test_utility_functions(): - unit_namespace = DEFAULT_UNITS - - # Some simple tests whether the namespace returned by - # get_default_namespace() makes sense - assert "volt" in unit_namespace - assert "ms" in unit_namespace - assert unit_namespace["ms"] is ms - assert unit_namespace["ms"] is unit_namespace["msecond"] - for unit in unit_namespace.values(): - assert isinstance(unit, Unit) - - assert dimensions_and_type_from_string("second") 
== (second.dim, FLOAT) - assert dimensions_and_type_from_string("1") == (DIMENSIONLESS, FLOAT) - assert dimensions_and_type_from_string("volt") == (volt.dim, FLOAT) - assert dimensions_and_type_from_string("second ** -1") == (Hz.dim, FLOAT) - assert dimensions_and_type_from_string("farad / metre**2") == ( - (farad / metre**2).dim, - FLOAT, - ) - assert dimensions_and_type_from_string("boolean") == (DIMENSIONLESS, BOOLEAN) - assert dimensions_and_type_from_string("integer") == (DIMENSIONLESS, INTEGER) - with pytest.raises(ValueError): - dimensions_and_type_from_string("metr / second") - with pytest.raises(ValueError): - dimensions_and_type_from_string("metre **") - with pytest.raises(ValueError): - dimensions_and_type_from_string("5") - with pytest.raises(ValueError): - dimensions_and_type_from_string("2 / second") - # Only the use of base units is allowed - with pytest.raises(ValueError): - dimensions_and_type_from_string("farad / cm**2") - - -@pytest.mark.codegen_independent -def test_identifier_checks(): - legal_identifiers = ["v", "Vm", "V", "x", "ge", "g_i", "a2", "gaba_123"] - illegal_identifiers = ["_v", "1v", "ü", "ge!", "v.x", "for", "else", "if"] - - for identifier in legal_identifiers: - try: - check_identifier_basic(identifier) - check_identifier_reserved(identifier) - except ValueError as ex: - raise AssertionError( - f'check complained about identifier "{identifier}": {ex}' - ) - - for identifier in illegal_identifiers: - with pytest.raises(SyntaxError): - check_identifier_basic(identifier) - - for identifier in ("t", "dt", "xi", "i", "N"): - with pytest.raises(SyntaxError): - check_identifier_reserved(identifier) - - for identifier in ("not_refractory", "refractory", "refractory_until"): - with pytest.raises(SyntaxError): - check_identifier_refractory(identifier) - - for identifier in ("exp", "sin", "sqrt"): - with pytest.raises(SyntaxError): - check_identifier_functions(identifier) - - for identifier in ("e", "pi", "inf"): - with pytest.raises(SyntaxError): - check_identifier_constants(identifier) - - for identifier in ("volt", "second", "mV", "nA"): - with pytest.raises(SyntaxError): - check_identifier_units(identifier) - - # Check identifier registry - assert check_identifier_basic in Equations.identifier_checks - assert check_identifier_reserved in Equations.identifier_checks - assert check_identifier_refractory in Equations.identifier_checks - assert check_identifier_functions in Equations.identifier_checks - assert check_identifier_constants in Equations.identifier_checks - assert check_identifier_units in Equations.identifier_checks - - # Set up a dummy identifier check that disallows the variable name - # gaba_123 (that is otherwise valid) - def disallow_gaba_123(identifier): - if identifier == "gaba_123": - raise SyntaxError("I do not like this name") - - Equations.check_identifier("gaba_123") - old_checks = set(Equations.identifier_checks) - Equations.register_identifier_check(disallow_gaba_123) - with pytest.raises(SyntaxError): - Equations.check_identifier("gaba_123") - Equations.identifier_checks = old_checks - - # registering a non-function should not work - with pytest.raises(ValueError): - Equations.register_identifier_check("no function") - - -@pytest.mark.codegen_independent -def test_parse_equations(): - """Test the parsing of equation strings""" - # A simple equation - eqs = parse_string_equations("dv/dt = -v / tau : 1") - assert len(eqs) == 1 and "v" in eqs and eqs["v"].type == DIFFERENTIAL_EQUATION - assert eqs["v"].dim is DIMENSIONLESS - - # A complex 
one - eqs = parse_string_equations( - """ - dv/dt = -(v + - ge + # excitatory conductance - I # external current - )/ tau : volt - dge/dt = -ge / tau_ge : volt - I = sin(2 * pi * f * t) : volt - f : Hz (constant) - b : boolean - n : integer - """ - ) - assert len(eqs) == 6 - assert "v" in eqs and eqs["v"].type == DIFFERENTIAL_EQUATION - assert "ge" in eqs and eqs["ge"].type == DIFFERENTIAL_EQUATION - assert "I" in eqs and eqs["I"].type == SUBEXPRESSION - assert "f" in eqs and eqs["f"].type == PARAMETER - assert "b" in eqs and eqs["b"].type == PARAMETER - assert "n" in eqs and eqs["n"].type == PARAMETER - assert eqs["f"].var_type == FLOAT - assert eqs["b"].var_type == BOOLEAN - assert eqs["n"].var_type == INTEGER - assert eqs["v"].dim is volt.dim - assert eqs["ge"].dim is volt.dim - assert eqs["I"].dim is volt.dim - assert eqs["f"].dim is Hz.dim - assert eqs["v"].flags == [] - assert eqs["ge"].flags == [] - assert eqs["I"].flags == [] - assert eqs["f"].flags == ["constant"] - - duplicate_eqs = """ - dv/dt = -v / tau : 1 - v = 2 * t : 1 - """ - with pytest.raises(EquationError): - parse_string_equations(duplicate_eqs) - parse_error_eqs = [ - """ - dv/d = -v / tau : 1 - x = 2 * t : 1 - """, - """ - dv/dt = -v / tau : 1 : volt - x = 2 * t : 1 - """, - "dv/dt = -v / tau : 2 * volt", - "dv/dt = v / second : boolean", - ] - for error_eqs in parse_error_eqs: - with pytest.raises((ValueError, EquationError, TypeError)): - parse_string_equations(error_eqs) - - -@pytest.mark.codegen_independent -def test_correct_replacements(): - """Test replacing variables via keyword arguments""" - # replace a variable name with a new name - eqs = Equations("dv/dt = -v / tau : 1", v="V") - # Correct left hand side - assert ("V" in eqs) and not ("v" in eqs) - # Correct right hand side - assert ("V" in eqs["V"].identifiers) and not ("v" in eqs["V"].identifiers) - - # replace a variable name with a value - eqs = Equations("dv/dt = -v / tau : 1", tau=10 * ms) - assert not "tau" in eqs["v"].identifiers - - -@pytest.mark.codegen_independent -def test_wrong_replacements(): - """Tests for replacements that should not work""" - # Replacing a variable name with an illegal new name - with pytest.raises(SyntaxError): - Equations("dv/dt = -v / tau : 1", v="illegal name") - with pytest.raises(SyntaxError): - Equations("dv/dt = -v / tau : 1", v="_reserved") - with pytest.raises(SyntaxError): - Equations("dv/dt = -v / tau : 1", v="t") - - # Replacing a variable name with a value that already exists - with pytest.raises(EquationError): - Equations( - """ - dv/dt = -v / tau : 1 - dx/dt = -x / tau : 1 - """, - v="x", - ) - - # Replacing a model variable name with a value - with pytest.raises(ValueError): - Equations("dv/dt = -v / tau : 1", v=3 * mV) - - # Replacing with an illegal value - with pytest.raises(SyntaxError): - Equations("dv/dt = -v/tau : 1", tau=np.arange(5)) - - -@pytest.mark.codegen_independent -def test_substitute(): - # Check that Equations.substitute returns an independent copy - eqs = Equations("dx/dt = x : 1") - eqs2 = eqs.substitute(x="y") - - # First equation should be unaffected - assert len(eqs) == 1 and "x" in eqs - assert eqs["x"].expr == Expression("x") - - # Second equation should have x substituted by y - assert len(eqs2) == 1 and "y" in eqs2 - assert eqs2["y"].expr == Expression("y") - - -@pytest.mark.codegen_independent -def test_construction_errors(): - """ - Test that the Equations constructor raises errors correctly - """ - # parse error - with pytest.raises(EquationError): - Equations("dv/dt = -v / 
tau volt") - with pytest.raises(EquationError): - Equations("dv/dt = -v / tau : volt second") - - # incorrect unit definition - with pytest.raises(EquationError): - Equations("dv/dt = -v / tau : mvolt") - with pytest.raises(EquationError): - Equations("dv/dt = -v / tau : voltage") - with pytest.raises(EquationError): - Equations("dv/dt = -v / tau : 1.0*volt") - - # Only a single string or a list of SingleEquation objects is allowed - with pytest.raises(TypeError): - Equations(None) - with pytest.raises(TypeError): - Equations(42) - with pytest.raises(TypeError): - Equations(["dv/dt = -v / tau : volt"]) - - # duplicate variable names - with pytest.raises(EquationError): - Equations( - """ - dv/dt = -v / tau : volt - v = 2 * t/second * volt : volt - """ - ) - - eqs = [ - SingleEquation( - DIFFERENTIAL_EQUATION, "v", volt.dim, expr=Expression("-v / tau") - ), - SingleEquation( - SUBEXPRESSION, "v", volt.dim, expr=Expression("2 * t/second * volt") - ), - ] - with pytest.raises(EquationError): - Equations(eqs) - - # illegal variable names - with pytest.raises(SyntaxError): - Equations("ddt/dt = -dt / tau : volt") - with pytest.raises(SyntaxError): - Equations("dt/dt = -t / tau : volt") - with pytest.raises(SyntaxError): - Equations("dxi/dt = -xi / tau : volt") - with pytest.raises(SyntaxError): - Equations("for : volt") - with pytest.raises((EquationError, SyntaxError)): - Equations("d1a/dt = -1a / tau : volt") - with pytest.raises(SyntaxError): - Equations("d_x/dt = -_x / tau : volt") - - # xi in a subexpression - with pytest.raises(EquationError): - Equations( - """ - dv/dt = -(v + I) / (5 * ms) : volt - I = second**-1*xi**-2*volt : volt - """ - ) - - # more than one xi - with pytest.raises(EquationError): - Equations( - """ - dv/dt = -v / tau + xi/tau**.5 : volt - dx/dt = -x / tau + 2*xi/tau : volt - tau : second - """ - ) - # using not-allowed flags - eqs = Equations("dv/dt = -v / (5 * ms) : volt (flag)") - eqs.check_flags({DIFFERENTIAL_EQUATION: ["flag"]}) # allow this flag - with pytest.raises(ValueError): - eqs.check_flags({DIFFERENTIAL_EQUATION: []}) - with pytest.raises(ValueError): - eqs.check_flags({}) - with pytest.raises(ValueError): - eqs.check_flags({SUBEXPRESSION: ["flag"]}) - with pytest.raises(ValueError): - eqs.check_flags({DIFFERENTIAL_EQUATION: ["otherflag"]}) - eqs = Equations("dv/dt = -v / (5 * ms) : volt (flag1, flag2)") - eqs.check_flags({DIFFERENTIAL_EQUATION: ["flag1", "flag2"]}) # allow both flags - # Don't allow the two flags in combination - with pytest.raises(ValueError): - eqs.check_flags( - {DIFFERENTIAL_EQUATION: ["flag1", "flag2"]}, - incompatible_flags=[("flag1", "flag2")], - ) - eqs = Equations( - """ - dv/dt = -v / (5 * ms) : volt (flag1) - dw/dt = -w / (5 * ms) : volt (flag2) - """ - ) - # They should be allowed when used independently - eqs.check_flags( - {DIFFERENTIAL_EQUATION: ["flag1", "flag2"]}, - incompatible_flags=[("flag1", "flag2")], - ) - - # Circular subexpression - with pytest.raises(ValueError): - Equations( - """ - dv/dt = -(v + w) / (10 * ms) : 1 - w = 2 * x : 1 - x = 3 * w : 1 - """ - ) - - # Boolean/integer differential equations - with pytest.raises(TypeError): - Equations("dv/dt = -v / (10*ms) : boolean") - with pytest.raises(TypeError): - Equations("dv/dt = -v / (10*ms) : integer") - - -@pytest.mark.codegen_independent -def test_unit_checking(): - # dummy Variable class - class S: - def __init__(self, dimensions): - self.dim = get_dimensions(dimensions) - - # inconsistent unit for a differential equation - eqs = Equations("dv/dt = -v : 
volt") - group = SimpleGroup({"v": S(volt)}) - with pytest.raises(DimensionMismatchError): - eqs.check_units(group, {}) - - eqs = Equations("dv/dt = -v / tau: volt") - group = SimpleGroup(namespace={"tau": 5 * mV}, variables={"v": S(volt)}) - with pytest.raises(DimensionMismatchError): - eqs.check_units(group, {}) - group = SimpleGroup(namespace={"I": 3 * second}, variables={"v": S(volt)}) - eqs = Equations("dv/dt = -(v + I) / (5 * ms): volt") - with pytest.raises(DimensionMismatchError): - eqs.check_units(group, {}) - - eqs = Equations( - """ - dv/dt = -(v + I) / (5 * ms): volt - I : second - """ - ) - group = SimpleGroup(variables={"v": S(volt), "I": S(second)}, namespace={}) - with pytest.raises(DimensionMismatchError): - eqs.check_units(group, {}) - - # inconsistent unit for a subexpression - eqs = Equations( - """ - dv/dt = -v / (5 * ms) : volt - I = 2 * v : amp - """ - ) - group = SimpleGroup(variables={"v": S(volt), "I": S(second)}, namespace={}) - with pytest.raises(DimensionMismatchError): - eqs.check_units(group, {}) - - -@pytest.mark.codegen_independent -def test_properties(): - """ - Test accessing the various properties of equation objects - """ - tau = 10 * ms - eqs = Equations( - """ - dv/dt = -(v + I)/ tau : volt - I = sin(2 * 22/7. * f * t)* volt : volt - f = freq * Hz: Hz - freq : 1 - """ - ) - assert ( - len(eqs.diff_eq_expressions) == 1 - and eqs.diff_eq_expressions[0][0] == "v" - and isinstance(eqs.diff_eq_expressions[0][1], Expression) - ) - assert eqs.diff_eq_names == {"v"} - assert ( - len(eqs.eq_expressions) == 3 - and {name for name, _ in eqs.eq_expressions} == {"v", "I", "f"} - and all((isinstance(expr, Expression) for _, expr in eqs.eq_expressions)) - ) - assert len(eqs.eq_names) == 3 and eqs.eq_names == {"v", "I", "f"} - assert set(eqs.keys()) == {"v", "I", "f", "freq"} - # test that the equations object is iterable itself - assert all(isinstance(eq, SingleEquation) for eq in eqs.values()) - assert all(isinstance(eq, str) for eq in eqs) - assert ( - len(eqs.ordered) == 4 - and all(isinstance(eq, SingleEquation) for eq in eqs.ordered) - and [eq.varname for eq in eqs.ordered] == ["f", "I", "v", "freq"] - ) - assert [eq.unit for eq in eqs.ordered] == [Hz, volt, volt, 1] - assert eqs.names == {"v", "I", "f", "freq"} - assert eqs.parameter_names == {"freq"} - assert eqs.subexpr_names == {"I", "f"} - dimensions = eqs.dimensions - assert set(dimensions.keys()) == {"v", "I", "f", "freq"} - assert dimensions["v"] is volt.dim - assert dimensions["I"] is volt.dim - assert dimensions["f"] is Hz.dim - assert dimensions["freq"] is DIMENSIONLESS - assert eqs.names == set(eqs.dimensions.keys()) - assert eqs.identifiers == {"tau", "volt", "Hz", "sin", "t"} - - # stochastic equations - assert len(eqs.stochastic_variables) == 0 - assert eqs.stochastic_type is None - - eqs = Equations("""dv/dt = -v / tau + 0.1*second**-.5*xi : 1""") - assert eqs.stochastic_variables == {"xi"} - assert eqs.stochastic_type == "additive" - - eqs = Equations( - "dv/dt = -v / tau + 0.1*second**-.5*xi_1 + 0.1*second**-.5*xi_2: 1" - ) - assert eqs.stochastic_variables == {"xi_1", "xi_2"} - assert eqs.stochastic_type == "additive" - - eqs = Equations("dv/dt = -v / tau + 0.1*second**-1.5*xi*t : 1") - assert eqs.stochastic_type == "multiplicative" - - eqs = Equations("dv/dt = -v / tau + 0.1*second**-1.5*xi*v : 1") - assert eqs.stochastic_type == "multiplicative" - - -@pytest.mark.codegen_independent -def test_concatenation(): - eqs1 = Equations( - """ - dv/dt = -(v + I) / tau : volt - I = sin(2*pi*freq*t) : 
volt - freq : Hz - """ - ) - - # Concatenate two equation objects - eqs2 = Equations("dv/dt = -(v + I) / tau : volt") + Equations( - """ - I = sin(2*pi*freq*t) : volt - freq : Hz - """ - ) - - # Concatenate using "in-place" addition (which is not actually in-place) - eqs3 = Equations("dv/dt = -(v + I) / tau : volt") - eqs3 += Equations( - """ - I = sin(2*pi*freq*t) : volt - freq : Hz - """ - ) - - # Concatenate with a string (will be parsed first) - eqs4 = Equations("dv/dt = -(v + I) / tau : volt") - eqs4 += """I = sin(2*pi*freq*t) : volt - freq : Hz""" - - # Concatenating with something that is not a string should not work - with pytest.raises(TypeError): - eqs4 + 5 - - # The string representation is canonical, therefore it should be identical - # in all cases - assert str(eqs1) == str(eqs2) - assert str(eqs2) == str(eqs3) - assert str(eqs3) == str(eqs4) - - -@pytest.mark.codegen_independent -def test_extract_subexpressions(): - eqs = Equations( - """ - dv/dt = -v / (10*ms) : 1 - s1 = 2*v : 1 - s2 = -v : 1 (constant over dt) - """ - ) - variable, constant = extract_constant_subexpressions(eqs) - assert [var in variable for var in ["v", "s1", "s2"]] - assert variable["s1"].type == SUBEXPRESSION - assert variable["s2"].type == PARAMETER - assert constant["s2"].type == SUBEXPRESSION - - -@pytest.mark.codegen_independent -def test_repeated_construction(): - eqs1 = Equations("dx/dt = x : 1") - eqs2 = Equations("dx/dt = x : 1", x="y") - assert len(eqs1) == 1 - assert "x" in eqs1 - assert eqs1["x"].expr == Expression("x") - assert len(eqs2) == 1 - assert "y" in eqs2 - assert eqs2["y"].expr == Expression("y") - - -@pytest.mark.codegen_independent -def test_str_repr(): - """ - Test the string representation (only that it does not throw errors). - """ - tau = 10 * ms - eqs = Equations( - """ - dv/dt = -(v + I)/ tau : volt (unless refractory) - I = sin(2 * 22/7. 
* f * t)* volt : volt - f : Hz - """ - ) - assert len(str(eqs)) > 0 - assert len(repr(eqs)) > 0 - - # Test str and repr of SingleEquations explicitly (might already have been - # called by Equations - for eq in eqs.values(): - assert (len(str(eq))) > 0 - assert (len(repr(eq))) > 0 - - -@pytest.mark.codegen_independent -def test_dependency_calculation(): - eqs = Equations( - """ - dv/dt = I_m / C_m : volt - I_m = I_ext + I_pas : amp - I_ext = 1*nA + sin(2*pi*100*Hz*t)*nA : amp - I_pas = g_L*(E_L - v) : amp - """ - ) - deps = eqs.dependencies - assert set(deps.keys()) == {"v", "I_m", "I_ext", "I_pas"} - - # v depends directly on I_m, on I_ext and I_pas via I_m, and on v via I_m -> I_pas - assert len(deps["v"]) == 4 - assert {d.equation.varname for d in deps["v"]} == {"I_m", "I_ext", "I_pas", "v"} - expected_via = { - "I_m": (), - "I_pas": ("I_m",), - "I_ext": ("I_m",), - "v": ("I_m", "I_pas"), - } - assert all([d.via == expected_via[d.equation.varname] for d in deps["v"]]) - - # I_m depends directly on I_ext and I_pas, and on v via I_pas - assert len(deps["I_m"]) == 3 - assert {d.equation.varname for d in deps["I_m"]} == {"I_ext", "I_pas", "v"} - expected_via = {"I_ext": (), "I_pas": (), "v": ("I_pas",)} - assert all([d.via == expected_via[d.equation.varname] for d in deps["I_m"]]) - - # I_ext does not depend on anything - assert len(deps["I_ext"]) == 0 - - # I_pas depends on v directly - assert len(deps["I_pas"]) == 1 - assert deps["I_pas"][0].equation.varname == "v" - assert deps["I_pas"][0].via == () - - -@pytest.mark.codegen_independent -@pytest.mark.skipif(pprint is None, reason="ipython is not installed") -def test_ipython_pprint(): - from io import StringIO - - eqs = Equations( - """ - dv/dt = -(v + I)/ tau : volt (unless refractory) - I = sin(2 * 22/7. 
* f * t)* volt : volt - f : Hz - """ - ) - # Test ipython's pretty printing - old_stdout = sys.stdout - string_output = StringIO() - sys.stdout = string_output - pprint(eqs) - assert len(string_output.getvalue()) > 0 - sys.stdout = old_stdout - - -if __name__ == "__main__": - test_utility_functions() - test_identifier_checks() - test_parse_equations() - test_correct_replacements() - test_substitute() - test_wrong_replacements() - test_construction_errors() - test_concatenation() - test_unit_checking() - test_properties() - test_extract_subexpressions() - test_repeated_construction() - test_str_repr() diff --git a/brian2/tests/test_functions.py b/brian2/tests/test_functions.py deleted file mode 100644 index b5e364f73..000000000 --- a/brian2/tests/test_functions.py +++ /dev/null @@ -1,1200 +0,0 @@ -import os -import shutil -import tempfile - -import pytest -from numpy.testing import assert_equal - -from brian2 import * -from brian2.codegen.codeobject import CodeObject -from brian2.codegen.cpp_prefs import compiler_supports_c99 -from brian2.codegen.generators import CodeGenerator -from brian2.core.functions import timestep -from brian2.devices import RuntimeDevice -from brian2.parsing.sympytools import str_to_sympy, sympy_to_str -from brian2.tests.utils import assert_allclose, exc_isinstance -from brian2.utils.logger import catch_logs - - -@pytest.mark.codegen_independent -def test_constants_sympy(): - """ - Make sure that symbolic constants are understood correctly by sympy - """ - assert sympy_to_str(str_to_sympy("1.0/inf")) == "0" - assert sympy_to_str(str_to_sympy("sin(pi)")) == "0" - assert sympy_to_str(str_to_sympy("log(e)")) == "1" - - -@pytest.mark.standalone_compatible -def test_constants_values(): - """ - Make sure that symbolic constants use the correct values in code - """ - G = NeuronGroup(3, "v : 1") - G.v[0] = "pi" - G.v[1] = "e" - G.v[2] = "inf" - run(0 * ms) - assert_allclose(G.v[:], [np.pi, np.e, np.inf]) - - -def int_(x): - return array(x, dtype=int) - - -int_.__name__ = "int" - - -@pytest.mark.parametrize( - "func,needs_c99_support", - [ - (cos, False), - (tan, False), - (sinh, False), - (cosh, False), - (tanh, False), - (arcsin, False), - (arccos, False), - (arctan, False), - (log, False), - (log10, False), - (log1p, True), - (exp, False), - (np.sqrt, False), - (expm1, True), - (exprel, True), - (np.ceil, False), - (np.floor, False), - (np.sign, False), - (int_, False), - ], -) -@pytest.mark.standalone_compatible -def test_math_functions(func, needs_c99_support): - """ - Test that math functions give the same result, regardless of whether used - directly or in generated Python or C++ code. 
- """ - if not get_device() == RuntimeDevice or prefs.codegen.target != "numpy": - if needs_c99_support and not compiler_supports_c99(): - pytest.skip('Support for function "{}" needs a compiler with C99 support.') - test_array = np.array([-1, -0.5, 0, 0.5, 1]) - - with catch_logs() as _: # Let's suppress warnings about illegal values - # Calculate the result directly - numpy_result = func(test_array) - - # Calculate the result in a somewhat complicated way by using a - # subexpression in a NeuronGroup - if func.__name__ == "absolute": - # we want to use the name abs instead of absolute - func_name = "abs" - else: - func_name = func.__name__ - G = NeuronGroup( - len(test_array), - f"""func = {func_name}(variable) : 1 - variable : 1""", - ) - G.variable = test_array - mon = StateMonitor(G, "func", record=True) - net = Network(G, mon) - net.run(defaultclock.dt) - - assert_allclose( - numpy_result, - mon.func_.flatten(), - err_msg=f"Function {func.__name__} did not return the correct values", - ) - - -@pytest.mark.standalone_compatible -@pytest.mark.parametrize("func,operator", [(np.power, "**"), (np.mod, "%")]) -def test_math_operators(func, operator): - default_dt = defaultclock.dt - test_array = np.array([-1, -0.5, 0, 0.5, 1]) - # Functions/operators - scalar = 3 - - # Calculate the result directly - numpy_result = func(test_array, scalar) - - # Calculate the result in a somewhat complicated way by using a - # subexpression in a NeuronGroup - G = NeuronGroup( - len(test_array), - f"""func = variable {operator} scalar : 1 - variable : 1""", - ) - G.variable = test_array - mon = StateMonitor(G, "func", record=True) - net = Network(G, mon) - net.run(defaultclock.dt) - - assert_allclose( - numpy_result, - mon.func_.flatten(), - err_msg=f"Function {func.__name__} did not return the correct values", - ) - - -@pytest.mark.standalone_compatible -def test_clip(): - G = NeuronGroup( - 4, - """ - clipexpr1 = clip(integer_var1, 0, 1) : integer - clipexpr2 = clip(integer_var2, -0.5, 1.5) : integer - clipexpr3 = clip(float_var1, 0, 1) : 1 - clipexpr4 = clip(float_var2, -0.5, 1.5) : 1 - integer_var1 : integer - integer_var2 : integer - float_var1 : 1 - float_var2 : 1 - """, - ) - G.integer_var1 = [0, 1, -1, 2] - G.integer_var2 = [0, 1, -1, 2] - G.float_var1 = [0.0, 1.0, -1.0, 2.0] - G.float_var2 = [0.0, 1.0, -1.0, 2.0] - s_mon = StateMonitor( - G, ["clipexpr1", "clipexpr2", "clipexpr3", "clipexpr4"], record=True - ) - run(defaultclock.dt) - assert_equal(s_mon.clipexpr1.flatten(), [0, 1, 0, 1]) - assert_equal(s_mon.clipexpr2.flatten(), [0, 1, 0, 1]) - assert_allclose(s_mon.clipexpr3.flatten(), [0, 1, 0, 1]) - assert_allclose(s_mon.clipexpr4.flatten(), [0, 1, -0.5, 1.5]) - - -@pytest.mark.standalone_compatible -def test_bool_to_int(): - # Test that boolean expressions and variables are correctly converted into - # integers - G = NeuronGroup( - 2, - """ - intexpr1 = int(bool_var) : integer - intexpr2 = int(float_var > 1.0) : integer - bool_var : boolean - float_var : 1 - """, - ) - G.bool_var = [True, False] - G.float_var = [2.0, 0.5] - s_mon = StateMonitor(G, ["intexpr1", "intexpr2"], record=True) - run(defaultclock.dt) - assert_equal(s_mon.intexpr1.flatten(), [1, 0]) - assert_equal(s_mon.intexpr2.flatten(), [1, 0]) - - -@pytest.mark.standalone_compatible -def test_integer_power(): - # See github issue #1500 - G = NeuronGroup( - 3, - """ - intval1 : integer - intval2 : integer - k : integer (constant) - """, - ) - G.k = [0, 1, 2] - G.run_regularly("intval1 = 2**k; intval2 = (-1)**k") - run(defaultclock.dt) 
- assert_equal(G.intval1[:], [1, 2, 4]) - assert_equal(G.intval2[:], [1, -1, 1]) - - -@pytest.mark.codegen_independent -def test_timestep_function(): - dt = defaultclock.dt_ - # Check that multiples of dt end up in the correct time step - t = np.arange(100000) * dt - assert_equal(timestep(t, dt), np.arange(100000)) - - # Scalar values should stay scalar - ts = timestep(0.0005, 0.0001) - assert np.isscalar(ts) and ts == 5 - - # Length-1 arrays should stay arrays - ts = timestep(np.array([0.0005]), 0.0001) - assert ts.shape == (1,) and ts == 5 - - -@pytest.mark.standalone_compatible -def test_timestep_function_during_run(): - group = NeuronGroup( - 2, - """ref_t : second - ts = timestep(ref_t, dt) + timestep(t, dt) : integer""", - ) - group.ref_t = [-1e4 * second, 5 * defaultclock.dt] - mon = StateMonitor(group, "ts", record=True) - run(5 * defaultclock.dt) - assert all(mon.ts[0] <= -1e4) - assert_equal(mon.ts[1], [5, 6, 7, 8, 9]) - - -@pytest.mark.standalone_compatible -def test_user_defined_function(): - @implementation( - "cpp", - """ - inline double usersin(double x) - { - return sin(x); - } - """, - ) - @implementation( - "cython", - """ - cdef double usersin(double x): - return sin(x) - """, - ) - @check_units(x=1, result=1) - def usersin(x): - return np.sin(x) - - default_dt = defaultclock.dt - test_array = np.array([0, 1, 2, 3]) - G = NeuronGroup( - len(test_array), - """ - func = usersin(variable) : 1 - variable : 1 - """, - ) - G.variable = test_array - mon = StateMonitor(G, "func", record=True) - run(default_dt) - assert_allclose(np.sin(test_array), mon.func_.flatten()) - - -def test_user_defined_function_units(): - """ - Test the preparation of functions for use in code with check_units. - """ - prefs.codegen.target = "numpy" - if prefs.codegen.target != "numpy": - pytest.skip("numpy-only test") - - def nothing_specified(x, y, z): - return x * (y + z) - - no_result_unit = check_units(x=1, y=second, z=second)(nothing_specified) - all_specified = check_units(x=1, y=second, z=second, result=second)( - nothing_specified - ) - consistent_units = check_units(x=None, y=None, z="y", result=lambda x, y, z: x * y)( - nothing_specified - ) - - G = NeuronGroup( - 1, - """ - a : 1 - b : second - c : second - """, - namespace={ - "nothing_specified": nothing_specified, - "no_result_unit": no_result_unit, - "all_specified": all_specified, - "consistent_units": consistent_units, - }, - ) - net = Network(G) - net.run(0 * ms) # make sure we have a clock and therefore a t - G.c = "all_specified(a, b, t)" - G.c = "consistent_units(a, b, t)" - with pytest.raises(ValueError): - setattr(G, "c", "no_result_unit(a, b, t)") - with pytest.raises(KeyError): - setattr(G, "c", "nothing_specified(a, b, t)") - with pytest.raises(DimensionMismatchError): - setattr(G, "a", "all_specified(a, b, t)") - with pytest.raises(DimensionMismatchError): - setattr(G, "a", "all_specified(b, a, t)") - with pytest.raises(DimensionMismatchError): - setattr(G, "a", "consistent_units(a, b, t)") - with pytest.raises(DimensionMismatchError): - setattr(G, "a", "consistent_units(b, a, t)") - - -def test_simple_user_defined_function(): - # Make sure that it's possible to use a Python function directly, without - # additional wrapping - @check_units(x=1, result=1) - def usersin(x): - return np.sin(x) - - usersin.stateless = True - - default_dt = defaultclock.dt - test_array = np.array([0, 1, 2, 3]) - G = NeuronGroup( - len(test_array), - """func = usersin(variable) : 1 - variable : 1""", - codeobj_class=NumpyCodeObject, - ) - 
G.variable = test_array - mon = StateMonitor(G, "func", record=True, codeobj_class=NumpyCodeObject) - net = Network(G, mon) - net.run(default_dt) - - assert_allclose(np.sin(test_array), mon.func_.flatten()) - - -def test_manual_user_defined_function(): - if prefs.codegen.target != "numpy": - pytest.skip("numpy-only test") - - default_dt = defaultclock.dt - - # User defined function without any decorators - def foo(x, y): - return x + y + 3 * volt - - orig_foo = foo - # Since the function is not annotated with check units, we need to specify - # both the units of the arguments and the return unit - with pytest.raises(ValueError): - Function(foo, return_unit=volt) - with pytest.raises(ValueError): - Function(foo, arg_units=[volt, volt]) - # If the function uses the string syntax for "same units", it needs to - # specify the names of the arguments - with pytest.raises(TypeError): - Function(foo, arg_units=[volt, "x"]) - with pytest.raises(TypeError): - Function(foo, arg_units=[volt, "x"], arg_names=["x"]) # Needs two entries - - foo = Function(foo, arg_units=[volt, volt], return_unit=volt) - - assert foo(1 * volt, 2 * volt) == 6 * volt - - # a can be any unit, b and c need to be the same unit - def bar(a, b, c): - return a * (b + c) - - bar = Function( - bar, - arg_units=[None, None, "b"], - arg_names=["a", "b", "c"], - return_unit=lambda a, b, c: a * b, - ) - assert bar(2, 3 * volt, 5 * volt) == 16 * volt - assert bar(2 * amp, 3 * volt, 5 * volt) == 16 * watt - assert bar(2 * volt, 3 * amp, 5 * amp) == 16 * watt - - with pytest.raises(DimensionMismatchError): - bar(2, 3 * volt, 5 * amp) - - # Incorrect argument units - group = NeuronGroup( - 1, - """ - dv/dt = foo(x, y)/ms : volt - x : 1 - y : 1 - """, - ) - net = Network(group) - with pytest.raises(BrianObjectException) as exc: - net.run(0 * ms, namespace={"foo": foo}) - assert exc_isinstance(exc, DimensionMismatchError) - - # Incorrect output unit - group = NeuronGroup( - 1, - """ - dv/dt = foo(x, y)/ms : 1 - x : volt - y : volt - """, - ) - net = Network(group) - with pytest.raises(BrianObjectException) as exc: - net.run(0 * ms, namespace={"foo": foo}) - assert exc_isinstance(exc, DimensionMismatchError) - - G = NeuronGroup( - 1, - """ - func = foo(x, y) : volt - x : volt - y : volt - """, - ) - G.x = 1 * volt - G.y = 2 * volt - mon = StateMonitor(G, "func", record=True) - net = Network(G, mon) - net.run(default_dt) - - assert mon[0].func == [6] * volt - - # discard units - foo.implementations.add_numpy_implementation(orig_foo, discard_units=True) - G = NeuronGroup( - 1, - """ - func = foo(x, y) : volt - x : volt - y : volt - """, - ) - G.x = 1 * volt - G.y = 2 * volt - mon = StateMonitor(G, "func", record=True) - net = Network(G, mon) - net.run(default_dt) - - assert mon[0].func == [6] * volt - - -@pytest.mark.cpp_standalone -@pytest.mark.standalone_only -def test_manual_user_defined_function_cpp_standalone_compiler_args(): - set_device("cpp_standalone", directory=None) - - @implementation( - "cpp", - """ - static inline double foo(const double x, const double y) - { - return x + y + _THREE; - } - """, # just check whether we can specify the supported compiler args, - # only the define macro is actually used - headers=[], - sources=[], - libraries=[], - include_dirs=[], - library_dirs=[], - runtime_library_dirs=[], - define_macros=[("_THREE", "3")], - ) - @check_units(x=volt, y=volt, result=volt) - def foo(x, y): - return x + y + 3 * volt - - G = NeuronGroup( - 1, - """ - func = foo(x, y) : volt - x : volt - y : volt - """, - ) - G.x = 1 
* volt - G.y = 2 * volt - mon = StateMonitor(G, "func", record=True) - net = Network(G, mon) - net.run(defaultclock.dt) - assert mon[0].func == [6] * volt - - -@pytest.mark.cpp_standalone -@pytest.mark.standalone_only -def test_manual_user_defined_function_cpp_standalone_wrong_compiler_args1(): - set_device("cpp_standalone", directory=None) - - @implementation( - "cpp", - """ - static inline double foo(const double x, const double y) - { - return x + y + _THREE; - } - """, - some_arg=[], - ) # non-existing argument - @check_units(x=volt, y=volt, result=volt) - def foo(x, y): - return x + y + 3 * volt - - G = NeuronGroup( - 1, - """ - func = foo(x, y) : volt - x : volt - y : volt - """, - ) - mon = StateMonitor(G, "func", record=True) - net = Network(G, mon) - with pytest.raises(BrianObjectException) as exc: - net.run(defaultclock.dt, namespace={"foo": foo}) - assert exc_isinstance(exc, ValueError) - - -@pytest.mark.cpp_standalone -@pytest.mark.standalone_only -def test_manual_user_defined_function_cpp_standalone_wrong_compiler_args2(): - set_device("cpp_standalone", directory=None) - - @implementation( - "cpp", - """ - static inline double foo(const double x, const double y) - { - return x + y + _THREE; - } - """, - headers="", - ) # existing argument, wrong value type - @check_units(x=volt, y=volt, result=volt) - def foo(x, y): - return x + y + 3 * volt - - G = NeuronGroup( - 1, - """ - func = foo(x, y) : volt - x : volt - y : volt - """, - ) - mon = StateMonitor(G, "func", record=True) - net = Network(G, mon) - with pytest.raises(BrianObjectException) as exc: - net.run(defaultclock.dt, namespace={"foo": foo}) - assert exc_isinstance(exc, TypeError) - - -def test_manual_user_defined_function_cython_compiler_args(): - if prefs.codegen.target != "cython": - pytest.skip("Cython-only test") - - @implementation( - "cython", - """ - cdef double foo(double x, const double y): - return x + y + 3 - """, # just check whether we can specify the supported compiler args, - libraries=[], - include_dirs=[], - library_dirs=[], - runtime_library_dirs=[], - ) - @check_units(x=volt, y=volt, result=volt) - def foo(x, y): - return x + y + 3 * volt - - G = NeuronGroup( - 1, - """ - func = foo(x, y) : volt - x : volt - y : volt""", - ) - G.x = 1 * volt - G.y = 2 * volt - mon = StateMonitor(G, "func", record=True) - net = Network(G, mon) - net.run(defaultclock.dt) - assert mon[0].func == [6] * volt - - -def test_manual_user_defined_function_cython_wrong_compiler_args1(): - if prefs.codegen.target != "cython": - pytest.skip("Cython-only test") - - @implementation( - "cython", - """ - cdef double foo(double x, const double y): - return x + y + 3 - """, - some_arg=[], - ) # non-existing argument - @check_units(x=volt, y=volt, result=volt) - def foo(x, y): - return x + y + 3 * volt - - G = NeuronGroup( - 1, - """ - func = foo(x, y) : volt - x : volt - y : volt - """, - ) - mon = StateMonitor(G, "func", record=True) - net = Network(G, mon) - with pytest.raises(BrianObjectException) as exc: - net.run(defaultclock.dt, namespace={"foo": foo}) - assert exc_isinstance(exc, ValueError) - - -def test_manual_user_defined_function_cython_wrong_compiler_args2(): - if prefs.codegen.target != "cython": - pytest.skip("Cython-only test") - - @implementation( - "cython", - """ - cdef double foo(double x, const double y): - return x + y + 3 - """, - libraries="cstdio", - ) # existing argument, wrong value type - @check_units(x=volt, y=volt, result=volt) - def foo(x, y): - return x + y + 3 * volt - - G = NeuronGroup( - 1, - """ - 
func = foo(x, y) : volt - x : volt - y : volt - """, - ) - mon = StateMonitor(G, "func", record=True) - net = Network(G, mon) - with pytest.raises(BrianObjectException) as exc: - net.run(defaultclock.dt, namespace={"foo": foo}) - assert exc_isinstance(exc, TypeError) - - -def test_external_function_cython(): - if prefs.codegen.target != "cython": - pytest.skip("Cython-only test") - - this_dir = os.path.abspath(os.path.dirname(__file__)) - - @implementation( - "cython", - "from func_def_cython cimport foo", - sources=[os.path.join(this_dir, "func_def_cython.pyx")], - ) - @check_units(x=volt, y=volt, result=volt) - def foo(x, y): - return x + y + 3 * volt - - G = NeuronGroup( - 1, - """ - func = foo(x, y) : volt - x : volt - y : volt - """, - ) - G.x = 1 * volt - G.y = 2 * volt - mon = StateMonitor(G, "func", record=True) - net = Network(G, mon) - net.run(defaultclock.dt) - assert mon[0].func == [6] * volt - - -@pytest.mark.cpp_standalone -@pytest.mark.standalone_only -def test_external_function_cpp_standalone(): - set_device("cpp_standalone", directory=None) - this_dir = os.path.abspath(os.path.dirname(__file__)) - with tempfile.TemporaryDirectory(prefix="brian_testsuite_") as tmpdir: - # copy the test function to the temporary directory - # this avoids issues with the file being in a directory that is not writable - shutil.copy(os.path.join(this_dir, "func_def_cpp.h"), tmpdir) - shutil.copy(os.path.join(this_dir, "func_def_cpp.cpp"), tmpdir) - - @implementation( - "cpp", - "//all code in func_def_cpp.cpp", - headers=['"func_def_cpp.h"'], - include_dirs=[tmpdir], - sources=[os.path.join(tmpdir, "func_def_cpp.cpp")], - ) - @check_units(x=volt, y=volt, result=volt) - def foo(x, y): - return x + y + 3 * volt - - G = NeuronGroup( - 1, - """ - func = foo(x, y) : volt - x : volt - y : volt - """, - ) - G.x = 1 * volt - G.y = 2 * volt - mon = StateMonitor(G, "func", record=True) - net = Network(G, mon) - net.run(defaultclock.dt) - assert mon[0].func == [6] * volt - - -@pytest.mark.codegen_independent -def test_user_defined_function_discarding_units(): - # A function with units that should discard units also inside the function - @implementation("numpy", discard_units=True) - @check_units(v=volt, result=volt) - def foo(v): - return v + 3 * volt # this normally raises an error for unitless v - - assert foo(5 * volt) == 8 * volt - - # Test the function that is used during a run - assert foo.implementations[NumpyCodeObject].get_code(None)(5) == 8 - - -@pytest.mark.codegen_independent -def test_user_defined_function_discarding_units_2(): - # Add a numpy implementation explicitly (as in TimedArray) - unit = volt - - @check_units(v=volt, result=unit) - def foo(v): - return v + 3 * unit # this normally raises an error for unitless v - - foo = Function(pyfunc=foo) - - def unitless_foo(v): - return v + 3 - - foo.implementations.add_implementation("numpy", code=unitless_foo) - - assert foo(5 * volt) == 8 * volt - - # Test the function that is used during a run - assert foo.implementations[NumpyCodeObject].get_code(None)(5) == 8 - - -@pytest.mark.codegen_independent -def test_function_implementation_container(): - import brian2.codegen.targets as targets - - class ACodeGenerator(CodeGenerator): - class_name = "A Language" - - class BCodeGenerator(CodeGenerator): - class_name = "B Language" - - class ACodeObject(CodeObject): - generator_class = ACodeGenerator - class_name = "A" - - class A2CodeObject(CodeObject): - generator_class = ACodeGenerator - class_name = "A2" - - class BCodeObject(CodeObject): - 
generator_class = BCodeGenerator - class_name = "B" - - # Register the code generation targets - _previous_codegen_targets = set(targets.codegen_targets) - targets.codegen_targets = {ACodeObject, BCodeObject} - - @check_units(x=volt, result=volt) - def foo(x): - return x - - f = Function(foo) - - container = f.implementations - - # inserting into the container with a CodeGenerator class - container.add_implementation(BCodeGenerator, code="implementation B language") - assert container[BCodeGenerator].get_code(None) == "implementation B language" - - # inserting into the container with a CodeObject class - container.add_implementation(ACodeObject, code="implementation A CodeObject") - assert container[ACodeObject].get_code(None) == "implementation A CodeObject" - - # inserting into the container with a name of a CodeGenerator - container.add_implementation("A Language", "implementation A Language") - assert container["A Language"].get_code(None) == "implementation A Language" - assert container[ACodeGenerator].get_code(None) == "implementation A Language" - assert container[A2CodeObject].get_code(None) == "implementation A Language" - - # inserting into the container with a name of a CodeObject - container.add_implementation("B", "implementation B CodeObject") - assert container["B"].get_code(None) == "implementation B CodeObject" - assert container[BCodeObject].get_code(None) == "implementation B CodeObject" - - with pytest.raises(KeyError): - container["unknown"] - - # some basic dictionary properties - assert len(container) == 4 - assert {key for key in container} == { - "A Language", - "B", - ACodeObject, - BCodeGenerator, - } - - # Restore the previous codegeneration targets - targets.codegen_targets = _previous_codegen_targets - - -def test_function_dependencies_cython(): - if prefs.codegen.target != "cython": - pytest.skip("cython-only test") - - @implementation( - "cython", - """ - cdef float foo(float x): - return 42*0.001 - """, - ) - @check_units(x=volt, result=volt) - def foo(x): - return 42 * mV - - # Second function with an independent implementation for numpy and an - # implementation for C++ that makes use of the previous function. - - @implementation( - "cython", - """ - cdef float bar(float x): - return 2*foo(x) - """, - dependencies={"foo": foo}, - ) - @check_units(x=volt, result=volt) - def bar(x): - return 84 * mV - - G = NeuronGroup(5, "v : volt") - G.run_regularly("v = bar(v)") - net = Network(G) - net.run(defaultclock.dt) - - assert_allclose(G.v_[:], 84 * 0.001) - - -def test_function_dependencies_cython_rename(): - if prefs.codegen.target != "cython": - pytest.skip("cython-only test") - - @implementation( - "cython", - """ - cdef float _foo(float x): - return 42*0.001 - """, - name="_foo", - ) - @check_units(x=volt, result=volt) - def foo(x): - return 42 * mV - - # Second function with an independent implementation for numpy and an - # implementation for C++ that makes use of the previous function. 
- - @implementation( - "cython", - """ - cdef float bar(float x): - return 2*my_foo(x) - """, - dependencies={"my_foo": foo}, - ) - @check_units(x=volt, result=volt) - def bar(x): - return 84 * mV - - G = NeuronGroup(5, "v : volt") - G.run_regularly("v = bar(v)") - net = Network(G) - net.run(defaultclock.dt) - - assert_allclose(G.v_[:], 84 * 0.001) - - -def test_function_dependencies_numpy(): - if prefs.codegen.target != "numpy": - pytest.skip("numpy-only test") - - @implementation( - "cpp", - """ - float foo(float x) - { - return 42*0.001; - }""", - ) - @check_units(x=volt, result=volt) - def foo(x): - return 42 * mV - - # Second function with an independent implementation for C++ and an - # implementation for numpy that makes use of the previous function. - - # Note that we don't need to use the explicit dependencies mechanism for - # numpy, since the Python function stores a reference to the referenced - # function directly - - @implementation( - "cpp", - """ - float bar(float x) - { - return 84*0.001; - }""", - ) - @check_units(x=volt, result=volt) - def bar(x): - return 2 * foo(x) - - G = NeuronGroup(5, "v : volt") - G.run_regularly("v = bar(v)") - net = Network(G) - net.run(defaultclock.dt) - - assert_allclose(G.v_[:], 84 * 0.001) - - -@pytest.mark.standalone_compatible -def test_repeated_function_dependencies(): - # each of the binomial functions adds randn as a depency, see #988 - test_neuron = NeuronGroup( - 1, - "x : 1", - namespace={ - "bino_1": BinomialFunction(10, 0.5), - "bino_2": BinomialFunction(10, 0.6), - }, - ) - test_neuron.x = "bino_1()+bino_2()" - - run(0 * ms) - - -@pytest.mark.standalone_compatible -def test_binomial(): - binomial_f_approximated = BinomialFunction(100, 0.1, approximate=True) - binomial_f = BinomialFunction(100, 0.1, approximate=False) - - # Just check that it does not raise an error and that it produces some - # values - G = NeuronGroup( - 1, - """ - x : 1 - y : 1 - """, - ) - G.run_regularly( - """ - x = binomial_f_approximated() - y = binomial_f() - """ - ) - mon = StateMonitor(G, ["x", "y"], record=0) - run(1 * ms) - assert np.var(mon[0].x) > 0 - assert np.var(mon[0].y) > 0 - - -@pytest.mark.standalone_compatible -def test_poisson(): - # Just check that it does not raise an error and that it produces some - # values - G = NeuronGroup( - 5, - """ - l : 1 - x : integer - y : integer - z : integer - """, - ) - G.l = [0, 1, 5, 15, 25] - G.run_regularly( - """ - x = poisson(l) - y = poisson(5) - z = poisson(0) - """ - ) - mon = StateMonitor(G, ["x", "y", "z"], record=True) - run(100 * defaultclock.dt) - assert_equal(mon.x[0], 0) - assert all(np.var(mon.x[1:], axis=1) > 0) - assert all(np.var(mon.y, axis=1) > 0) - assert_equal(mon.z, 0) - - -def test_declare_types(): - if prefs.codegen.target != "numpy": - pytest.skip("numpy-only test") - - @declare_types(a="integer", b="float", result="highest") - def f(a, b): - return a * b - - assert f._arg_types == ["integer", "float"] - assert f._return_type == "highest" - - @declare_types(b="float") - def f(a, b, c): - return a * b * c - - assert f._arg_types == ["any", "float", "any"] - assert f._return_type == "float" - - def bad_argtype(): - @declare_types(b="floating") - def f(a, b, c): - return a * b * c - - with pytest.raises(ValueError): - bad_argtype() - - def bad_argname(): - @declare_types(d="floating") - def f(a, b, c): - return a * b * c - - with pytest.raises(ValueError): - bad_argname() - - @check_units(a=volt, b=1) - @declare_types(a="float", b="integer") - def f(a, b): - return a * b - - 
@declare_types(a="float", b="integer") - @check_units(a=volt, b=1) - def f(a, b): - return a * b - - def bad_units(): - @declare_types(a="integer", b="float") - @check_units(a=volt, b=1, result=volt) - def f(a, b): - return a * b - - eqs = """ - dv/dt = f(v, 1)/second : 1 - """ - G = NeuronGroup(1, eqs) - Network(G).run(1 * ms) - - with pytest.raises(BrianObjectException) as exc: - bad_units() - assert exc_isinstance(exc, TypeError) - - def bad_type(): - @implementation("numpy", discard_units=True) - @declare_types(a="float", result="float") - @check_units(a=1, result=1) - def f(a): - return a - - eqs = """ - a : integer - dv/dt = f(a)*v/second : 1 - """ - G = NeuronGroup(1, eqs) - Network(G).run(1 * ms) - - with pytest.raises(BrianObjectException) as exc: - bad_type() - assert exc_isinstance(exc, TypeError) - - -def test_multiple_stateless_function_calls(): - # Check that expressions such as rand() + rand() (which might be incorrectly - # simplified to 2*rand()) raise an error - G = NeuronGroup(1, "dv/dt = (rand() - rand())/second : 1") - net = Network(G) - with pytest.raises(BrianObjectException) as exc: - net.run(0 * ms) - assert exc_isinstance(exc, NotImplementedError) - G2 = NeuronGroup(1, "v:1", threshold="v>1", reset="v=rand() - rand()") - net2 = Network(G2) - with pytest.raises(BrianObjectException) as exc: - net2.run(0 * ms) - assert exc_isinstance(exc, NotImplementedError) - G3 = NeuronGroup(1, "v:1") - G3.run_regularly("v = rand() - rand()") - net3 = Network(G3) - with pytest.raises(BrianObjectException) as exc: - net3.run(0 * ms) - assert exc_isinstance(exc, NotImplementedError) - G4 = NeuronGroup(1, "x : 1") - # Verify that synaptic equations are checked as well, see #1146 - S = Synapses(G4, G4, "dy/dt = (rand() - rand())/second : 1 (clock-driven)") - S.connect() - net = Network(G4, S) - with pytest.raises(BrianObjectException) as exc: - net.run(0 * ms) - assert exc_isinstance(exc, NotImplementedError) - - -@pytest.mark.codegen_independent -def test_parse_dimension_errors(): - from brian2.parsing.expressions import parse_expression_dimensions - - @check_units(x=1, result=1) - def foo(x): - return x - - # Function call with keyword arguments - with pytest.raises(ValueError): - parse_expression_dimensions("foo(a=1, b=2)", {"foo": foo}) - # Unknown function - with pytest.raises(SyntaxError): - parse_expression_dimensions("bar(1, 2)", {"foo": foo}) - # Function without unit definition - with pytest.raises(ValueError): - parse_expression_dimensions("bar(1, 2)", {"bar": lambda x, y: x + y}) - # Function with wrong number of arguments - with pytest.raises(SyntaxError): - parse_expression_dimensions("foo(1, 2)", {"foo": foo}) - - -if __name__ == "__main__": - # prefs.codegen.target = 'numpy' - import time - - from _pytest.outcomes import Skipped - - from brian2 import prefs - - for f in [ - test_constants_sympy, - test_constants_values, - test_math_functions, - test_clip, - test_bool_to_int, - test_timestep_function, - test_timestep_function_during_run, - test_user_defined_function, - test_user_defined_function_units, - test_simple_user_defined_function, - test_manual_user_defined_function, - test_external_function_cython, - test_user_defined_function_discarding_units, - test_user_defined_function_discarding_units_2, - test_function_implementation_container, - test_function_dependencies_numpy, - test_function_dependencies_cython, - test_function_dependencies_cython_rename, - test_repeated_function_dependencies, - test_binomial, - test_poisson, - test_declare_types, - 
test_multiple_stateless_function_calls, - ]: - try: - start = time.time() - f() - print("Test", f.__name__, "took", time.time() - start) - except Skipped as e: - print("Skipping test", f.__name__, e) diff --git a/brian2/tests/test_logger.py b/brian2/tests/test_logger.py deleted file mode 100644 index d1b8bff30..000000000 --- a/brian2/tests/test_logger.py +++ /dev/null @@ -1,173 +0,0 @@ -import multiprocessing -import os - -import pytest - -from brian2.core.preferences import prefs -from brian2.utils.logger import BrianLogger, catch_logs, get_logger - -logger = get_logger("brian2.tests.test_logger") - - -@pytest.mark.codegen_independent -def test_file_logging(): - BrianLogger.initialize() - logger.error("error message xxx") - logger.warn("warning message xxx") - logger.info("info message xxx") - logger.debug("debug message xxx") - logger.diagnostic("diagnostic message xxx") - BrianLogger.file_handler.flush() - # By default, only >= debug messages should show up - assert os.path.isfile(BrianLogger.tmp_log) - with open(BrianLogger.tmp_log, encoding="utf-8") as f: - log_content = f.readlines() - for level, line in zip(["error", "warning", "info", "debug"], log_content[-4:]): - assert "brian2.tests.test_logger" in line - assert f"{level} message xxx" in line - assert level.upper() in line - - -@pytest.mark.codegen_independent -def test_file_logging_special_characters(): - BrianLogger.initialize() - # Test logging with special characters that could occur in log messages and - # require UTF-8 - special_chars = "→ ≠ ≤ ≥ ← ∞ µ ∝ ∂ ∅" - logger.debug(special_chars) - BrianLogger.file_handler.flush() - assert os.path.isfile(BrianLogger.tmp_log) - with open(BrianLogger.tmp_log, encoding="utf-8") as f: - log_content = f.readlines() - last_line = log_content[-1] - assert "brian2.tests.test_logger" in last_line - assert special_chars in last_line - - -def run_in_process(x): - logger.info(f"subprocess info message {x}") - - -def run_in_process_with_logger(x): - prefs.logging.delete_log_on_exit = False - BrianLogger.initialize() - logger.info(f"subprocess info message {x}") - BrianLogger.file_handler.flush() - return BrianLogger.tmp_log - - -@pytest.mark.codegen_independent -def test_file_logging_multiprocessing(): - logger.info("info message before multiprocessing") - p = multiprocessing.Pool() - - try: - p.map(run_in_process, range(3)) - finally: - p.close() - p.join() - - BrianLogger.file_handler.flush() - assert os.path.isfile(BrianLogger.tmp_log) - with open(BrianLogger.tmp_log, encoding="utf-8") as f: - log_content = f.readlines() - # The subprocesses should not have written to the log file - assert "info message before multiprocessing" in log_content[-1] - - -@pytest.mark.codegen_independent -def test_file_logging_multiprocessing_with_loggers(): - logger.info("info message before multiprocessing") - - p = multiprocessing.Pool() - try: - log_files = p.map(run_in_process_with_logger, range(3)) - finally: - p.close() - p.join() - - BrianLogger.file_handler.flush() - assert os.path.isfile(BrianLogger.tmp_log) - with open(BrianLogger.tmp_log, encoding="utf-8") as f: - log_content = f.readlines() - # The subprocesses should not have written to the main log file - assert "info message before multiprocessing" in log_content[-1] - - # Each subprocess should have their own log file - for x, log_file in enumerate(log_files): - assert os.path.isfile(log_file) - with open(log_file, encoding="utf-8") as f: - log_content = f.readlines() - assert f"subprocess info message {x}" in log_content[-1] - - 
prefs.logging.delete_log_on_exit = True - - -@pytest.mark.codegen_independent -def test_submodule_logging(): - submodule_logger = get_logger("submodule.dummy") - BrianLogger.initialize() - submodule_logger.error("error message xxx") - submodule_logger.warn("warning message xxx") - submodule_logger.info("info message xxx") - submodule_logger.debug("debug message xxx") - submodule_logger.diagnostic("diagnostic message xxx") - BrianLogger.file_handler.flush() - # By default, only >= debug messages should show up - assert os.path.isfile(BrianLogger.tmp_log) - with open(BrianLogger.tmp_log, encoding="utf-8") as f: - log_content = f.readlines() - for level, line in zip(["error", "warning", "info", "debug"], log_content[-4:]): - assert "submodule.dummy" in line - # The logger name has brian2 internally prefixed, but this shouldn't show up in logs - assert not "brian2.submodule.dummy" in line - assert f"{level} message xxx" in line - assert level.upper() in line - - with catch_logs() as l: - logger.warn("warning message from Brian") - submodule_logger.warn("warning message from submodule") - # only the warning from Brian should be logged - assert len(l) == 1 - assert "warning message from Brian" in l[0] - - with catch_logs(only_from=("submodule",)) as l: - logger.warn("warning message from Brian") - submodule_logger.warn("warning message from submodule") - # only the warning from submodule should be logged - assert len(l) == 1 - assert "warning message from submodule" in l[0] - - # Make sure that a submodule with a name starting with "brian2" gets handled correctly - submodule_logger = get_logger("brian2submodule.dummy") - BrianLogger.initialize() - submodule_logger.error("error message xxx") - submodule_logger.warn("warning message xxx") - submodule_logger.info("info message xxx") - submodule_logger.debug("debug message xxx") - submodule_logger.diagnostic("diagnostic message xxx") - BrianLogger.file_handler.flush() - # By default, only >= debug messages should show up - assert os.path.isfile(BrianLogger.tmp_log) - with open(BrianLogger.tmp_log, encoding="utf-8") as f: - log_content = f.readlines() - for level, line in zip(["error", "warning", "info", "debug"], log_content[-4:]): - assert "submodule.dummy" in line - # The logger name has brian2 internally prefixed, but this shouldn't show up in logs - assert not "brian2.submodule.dummy" in line - assert f"{level} message xxx" in line - assert level.upper() in line - - with catch_logs() as l: - logger.warn("warning message from Brian") - submodule_logger.warn("warning message from submodule") - # only the warning from Brian should be logged - assert len(l) == 1 - assert "warning message from Brian" in l[0] - - -if __name__ == "__main__": - test_file_logging() - test_file_logging_special_characters() - test_file_logging_multiprocessing() - test_file_logging_multiprocessing_with_loggers() diff --git a/brian2/tests/test_memory.py b/brian2/tests/test_memory.py deleted file mode 100644 index 1c15abc1a..000000000 --- a/brian2/tests/test_memory.py +++ /dev/null @@ -1,140 +0,0 @@ -import numpy as np -import pytest -from numpy.testing import assert_equal - -from brian2.memory.dynamicarray import DynamicArray, DynamicArray1D - - -@pytest.mark.codegen_independent -def test_dynamic_array_1d_access(): - da = DynamicArray1D(10) - da[:] = np.arange(10) - assert da[7] == 7 - assert len(da) == 10 - assert da.shape == (10,) - assert len(str(da)) - assert len(repr(da)) - da[:] += 1 - da.data[:] += 1 - assert all(da[:] == (np.arange(10) + 2)) - - 
-@pytest.mark.codegen_independent -def test_dynamic_array_1d_resize_up_down(): - for numpy_resize in [True, False]: - da = DynamicArray1D(10, use_numpy_resize=numpy_resize, refcheck=False) - da[:] = np.arange(10) - da.resize(15) - assert len(da) == 15 - assert da.shape == (15,) - assert all(da[10:] == 0) - assert all(da[:10] == np.arange(10)) - da.resize(5) - assert len(da) == 5 - assert da.shape == (5,) - assert all(da[:] == np.arange(5)) - - -@pytest.mark.codegen_independent -def test_dynamic_array_1d_resize_down_up(): - for numpy_resize in [True, False]: - da = DynamicArray1D(10, use_numpy_resize=numpy_resize) - da[:] = np.arange(10) - da.resize(5) - assert len(da) == 5 - assert da.shape == (5,) - assert all(da[:5] == np.arange(5)) - da.resize(10) - assert len(da) == 10 - assert da.shape == (10,) - assert all(da[:5] == np.arange(5)) - assert all(da[5:] == 0) - - -@pytest.mark.codegen_independent -def test_dynamic_array_1d_shrink(): - for numpy_resize in [True, False]: - da = DynamicArray1D(10, use_numpy_resize=numpy_resize, refcheck=False) - da[:] = np.arange(10) - da.shrink(5) - assert len(da) == 5 - assert all(da[:] == np.arange(5)) - # After using shrink, the underlying array should have changed - assert len(da._data) == 5 - - -@pytest.mark.codegen_independent -def test_dynamic_array_2d_access(): - da = DynamicArray1D((10, 20)) - da[:, :] = np.arange(200).reshape((10, 20)) - assert da[5, 10] == 5 * 20 + 10 - assert da.shape == (10, 20) - assert len(str(da)) - assert len(repr(da)) - da[:] += 1 - da.data[:] += 1 - assert_equal(da[:, :], np.arange(200).reshape((10, 20)) + 2) - - -@pytest.mark.codegen_independent -def test_dynamic_array_2d_resize_up_down(): - for numpy_resize in [True, False]: - da = DynamicArray((10, 20), use_numpy_resize=numpy_resize, refcheck=False) - da[:, :] = np.arange(200).reshape((10, 20)) - da.resize((15, 20)) - assert da.shape == (15, 20) - assert_equal(da[10:, :], np.zeros((5, 20))) - assert_equal(da[:10, :], np.arange(200).reshape((10, 20))) - da.resize((15, 25)) - assert da.shape == (15, 25) - assert_equal(da[:10, 20:], np.zeros((10, 5))) - assert_equal(da[:10, :20], np.arange(200).reshape((10, 20))) - - da.resize((10, 20)) - assert da.shape == (10, 20) - assert_equal(da[:, :], np.arange(200).reshape((10, 20))) - - -@pytest.mark.codegen_independent -def test_dynamic_array_2d_resize_down_up(): - for numpy_resize in [True, False]: - da = DynamicArray((10, 20), use_numpy_resize=numpy_resize, refcheck=False) - da[:, :] = np.arange(200).reshape((10, 20)) - da.resize((5, 20)) - assert da.shape == (5, 20) - assert_equal(da, np.arange(100).reshape((5, 20))) - da.resize((5, 15)) - assert da.shape == (5, 15) - for row_idx, row in enumerate(da): - assert_equal(row, 20 * row_idx + np.arange(15)) - - da.resize((10, 20)) - assert da.shape == (10, 20) - for row_idx, row in enumerate(da[:5, :15]): - assert_equal(row, 20 * row_idx + np.arange(15)) - assert_equal(da[5:, 15:], 0) - - -@pytest.mark.codegen_independent -def test_dynamic_array_2d_shrink(): - for numpy_resize in [True, False]: - da = DynamicArray((10, 20), use_numpy_resize=numpy_resize, refcheck=False) - da[:, :] = np.arange(200).reshape((10, 20)) - da.shrink((5, 15)) - assert da.shape == (5, 15) - # After using shrink, the underlying array should have changed - assert da._data.shape == (5, 15) - assert_equal( - da[:, :], np.arange(15).reshape((1, 15)) + 20 * np.arange(5).reshape((5, 1)) - ) - - -if __name__ == "__main__": - test_dynamic_array_1d_access() - test_dynamic_array_1d_resize_up_down() - 
test_dynamic_array_1d_resize_down_up() - test_dynamic_array_1d_shrink() - test_dynamic_array_2d_access() - test_dynamic_array_2d_resize_up_down() - test_dynamic_array_2d_resize_down_up() - test_dynamic_array_2d_shrink() diff --git a/brian2/tests/test_monitor.py b/brian2/tests/test_monitor.py deleted file mode 100644 index efb2259b4..000000000 --- a/brian2/tests/test_monitor.py +++ /dev/null @@ -1,727 +0,0 @@ -import logging -import tempfile -import uuid - -import pytest -from numpy.testing import assert_array_equal - -from brian2 import * -from brian2.devices.cpp_standalone.device import CPPStandaloneDevice -from brian2.tests.utils import assert_allclose -from brian2.utils.logger import catch_logs - - -@pytest.mark.standalone_compatible -def test_spike_monitor(): - G_without_threshold = NeuronGroup(5, "x : 1") - G = NeuronGroup( - 3, - """ - dv/dt = rate : 1 - rate: Hz - """, - threshold="v>1", - reset="v=0", - ) - # We don't use 100 and 1000Hz, because then the membrane potential would - # be exactly at 1 after 10 resp. 100 timesteps. Due to floating point - # issues this will not be exact, - G.rate = [101, 0, 1001] * Hz - - mon = SpikeMonitor(G) - - with pytest.raises(ValueError): - SpikeMonitor(G, order=1) # need to specify 'when' as well - with pytest.raises(ValueError) as ex: - SpikeMonitor(G_without_threshold) - assert "threshold" in str(ex) - - # Creating a SpikeMonitor for a Synapses object should not work - S = Synapses(G, G, on_pre="v += 0") - S.connect() - with pytest.raises(TypeError): - SpikeMonitor(S) - - run(10 * ms) - - spike_trains = mon.spike_trains() - - assert_allclose(mon.t[mon.i == 0], [9.9] * ms) - assert len(mon.t[mon.i == 1]) == 0 - assert_allclose(mon.t[mon.i == 2], np.arange(10) * ms + 0.9 * ms) - assert_allclose(mon.t_[mon.i == 0], np.array([9.9 * float(ms)])) - assert len(mon.t_[mon.i == 1]) == 0 - assert_allclose(mon.t_[mon.i == 2], (np.arange(10) + 0.9) * float(ms)) - assert_allclose(spike_trains[0], [9.9] * ms) - assert len(spike_trains[1]) == 0 - assert_allclose(spike_trains[2], np.arange(10) * ms + 0.9 * ms) - assert_array_equal(mon.count, np.array([1, 0, 10])) - - i, t = mon.it - i_, t_ = mon.it_ - assert_array_equal(i, mon.i) - assert_array_equal(i, i_) - assert_array_equal(t, mon.t) - assert_array_equal(t_, mon.t_) - - with pytest.raises(KeyError): - spike_trains[3] - with pytest.raises(KeyError): - spike_trains[-1] - with pytest.raises(KeyError): - spike_trains["string"] - - # Check that indexing into the VariableView works (this fails if we do not - # update the N variable correctly) - assert_allclose(mon.t[:5], [0.9, 1.9, 2.9, 3.9, 4.9] * ms) - - -def test_spike_monitor_indexing(): - generator = SpikeGeneratorGroup(3, [0, 0, 0, 1], [0, 1, 2, 1] * ms) - mon = SpikeMonitor(generator) - run(3 * ms) - - assert_array_equal(mon.t["i == 1"], [1] * ms) - assert_array_equal(mon.t[mon.i == 1], [1] * ms) - assert_array_equal(mon.i[mon.t > 1.5 * ms], [0] * ms) - assert_array_equal(mon.i["t > 1.5 * ms"], [0] * ms) - - -@pytest.mark.standalone_compatible -def test_spike_monitor_variables(): - G = NeuronGroup( - 3, - """ - dv/dt = rate : 1 - rate : Hz - prev_spikes : integer - """, - threshold="v>1", - reset="v=0; prev_spikes += 1", - ) - # We don't use 100 and 1000Hz, because then the membrane potential would - # be exactly at 1 after 10 resp. 100 timesteps. 
Due to floating point - # issues this will not be exact, - G.rate = [101, 0, 1001] * Hz - mon1 = SpikeMonitor(G, variables="prev_spikes") - mon2 = SpikeMonitor(G, variables="prev_spikes", when="after_resets") - run(10 * ms) - all_values = mon1.all_values() - prev_spikes_values = mon1.values("prev_spikes") - assert_array_equal(mon1.prev_spikes[mon1.i == 0], [0]) - assert_array_equal(prev_spikes_values[0], [0]) - assert_array_equal(all_values["prev_spikes"][0], [0]) - assert_array_equal(mon1.prev_spikes[mon1.i == 1], []) - assert_array_equal(prev_spikes_values[1], []) - assert_array_equal(all_values["prev_spikes"][1], []) - assert_array_equal(mon1.prev_spikes[mon1.i == 2], np.arange(10)) - assert_array_equal(prev_spikes_values[2], np.arange(10)) - assert_array_equal(all_values["prev_spikes"][2], np.arange(10)) - assert_array_equal(mon2.prev_spikes[mon2.i == 0], [1]) - assert_array_equal(mon2.prev_spikes[mon2.i == 1], []) - assert_array_equal(mon2.prev_spikes[mon2.i == 2], np.arange(10) + 1) - - -@pytest.mark.standalone_compatible -def test_spike_monitor_get_states(): - G = NeuronGroup( - 3, - """dv/dt = rate : 1 - rate : Hz - prev_spikes : integer""", - threshold="v>1", - reset="v=0; prev_spikes += 1", - ) - # We don't use 100 and 1000Hz, because then the membrane potential would - # be exactly at 1 after 10 resp. 100 timesteps. Due to floating point - # issues this will not be exact, - G.rate = [101, 0, 1001] * Hz - mon1 = SpikeMonitor(G, variables="prev_spikes") - run(10 * ms) - all_states = mon1.get_states() - assert set(all_states.keys()) == {"count", "i", "t", "prev_spikes", "N"} - assert_array_equal(all_states["count"], mon1.count[:]) - assert_array_equal(all_states["i"], mon1.i[:]) - assert_array_equal(all_states["t"], mon1.t[:]) - assert_array_equal(all_states["prev_spikes"], mon1.prev_spikes[:]) - assert_array_equal(all_states["N"], mon1.N) - - -@pytest.mark.standalone_compatible -def test_spike_monitor_subgroups(): - G = NeuronGroup(6, """do_spike : boolean""", threshold="do_spike") - G.do_spike = [True, False, False, False, True, True] - spikes_all = SpikeMonitor(G) - spikes_1 = SpikeMonitor(G[:2]) - spikes_2 = SpikeMonitor(G[2:4]) - spikes_3 = SpikeMonitor(G[4:]) - run(defaultclock.dt) - assert_allclose(spikes_all.i, [0, 4, 5]) - assert_allclose(spikes_all.t, [0, 0, 0] * ms) - assert_allclose(spikes_1.i, [0]) - assert_allclose(spikes_1.t, [0] * ms) - assert len(spikes_2.i) == 0 - assert len(spikes_2.t) == 0 - assert_allclose(spikes_3.i, [0, 1]) # recorded spike indices are relative - assert_allclose(spikes_3.t, [0, 0] * ms) - - -def test_spike_monitor_bug_824(): - # See github issue #824 - if prefs.codegen.target != "numpy": - pytest.skip("numpy-only test") - - G = NeuronGroup(6, """do_spike : boolean""", threshold="do_spike") - G.do_spike = [True, False, False, True, False, False] - spikes_1 = SpikeMonitor(G[:3]) - spikes_2 = SpikeMonitor(G[3:]) - run(4 * defaultclock.dt) - assert_array_equal(spikes_1.count, [4, 0, 0]) - assert_array_equal(spikes_2.count, [4, 0, 0]) - - -@pytest.mark.standalone_compatible -def test_event_monitor(): - G = NeuronGroup( - 3, - """ - dv/dt = rate : 1 - rate: Hz - """, - events={"my_event": "v>1"}, - ) - G.run_on_event("my_event", "v=0") - # We don't use 100 and 1000Hz, because then the membrane potential would - # be exactly at 1 after 10 resp. 100 timesteps. 
Due to floating point - # issues this will not be exact, - G.rate = [101, 0, 1001] * Hz - - mon = EventMonitor(G, "my_event") - net = Network(G, mon) - net.run(10 * ms) - - event_trains = mon.event_trains() - - assert_allclose(mon.t[mon.i == 0], [9.9] * ms) - assert len(mon.t[mon.i == 1]) == 0 - assert_allclose(mon.t[mon.i == 2], np.arange(10) * ms + 0.9 * ms) - assert_allclose(mon.t_[mon.i == 0], np.array([9.9 * float(ms)])) - assert len(mon.t_[mon.i == 1]) == 0 - assert_allclose(mon.t_[mon.i == 2], (np.arange(10) + 0.9) * float(ms)) - assert_allclose(event_trains[0], [9.9] * ms) - assert len(event_trains[1]) == 0 - assert_allclose(event_trains[2], np.arange(10) * ms + 0.9 * ms) - assert_array_equal(mon.count, np.array([1, 0, 10])) - - i, t = mon.it - i_, t_ = mon.it_ - assert_array_equal(i, mon.i) - assert_array_equal(i, i_) - assert_array_equal(t, mon.t) - assert_array_equal(t_, mon.t_) - - with pytest.raises(KeyError): - event_trains[3] - with pytest.raises(KeyError): - event_trains[-1] - with pytest.raises(KeyError): - event_trains["string"] - - -@pytest.mark.standalone_compatible -def test_event_monitor_no_record(): - # Check that you can switch off recording spike times/indices - G = NeuronGroup( - 3, - """ - dv/dt = rate : 1 - rate: Hz - """, - events={"my_event": "v>1"}, - threshold="v>1", - reset="v=0", - ) - # We don't use 100 and 1000Hz, because then the membrane potential would - # be exactly at 1 after 10 resp. 100 timesteps. Due to floating point - # issues this will not be exact, - G.rate = [101, 0, 1001] * Hz - - event_mon = EventMonitor(G, "my_event", record=False) - event_mon2 = EventMonitor(G, "my_event", variables="rate", record=False) - spike_mon = SpikeMonitor(G, record=False) - spike_mon2 = SpikeMonitor(G, variables="rate", record=False) - net = Network(G, event_mon, event_mon2, spike_mon, spike_mon2) - net.run(10 * ms) - - # i and t should not be there - assert "i" not in event_mon.variables - assert "t" not in event_mon.variables - assert "i" not in spike_mon.variables - assert "t" not in spike_mon.variables - - assert_array_equal(event_mon.count, np.array([1, 0, 10])) - assert_array_equal(spike_mon.count, np.array([1, 0, 10])) - assert spike_mon.num_spikes == sum(spike_mon.count) - assert event_mon.num_events == sum(event_mon.count) - - # Other variables should still have been recorded - assert len(spike_mon2.rate) == spike_mon.num_spikes - assert len(event_mon2.rate) == event_mon.num_events - - -@pytest.mark.standalone_compatible -def test_spike_trains(): - # An arbitrary combination of indices that has been shown to sort in an - # unstable way with quicksort and therefore lead to out-of-order values - # in the dictionary returned by spike_trains() of several neurons (see #725) - generator = SpikeGeneratorGroup( - 10, - [6, 1, 2, 4, 6, 9, 1, 4, 7, 8, 0, 6, 3, 6, 4, 8, 9, 2, 5, 3], - [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19] * ms, - dt=1 * ms, - ) - monitor = SpikeMonitor(generator) - run(19.1 * ms) - trains = monitor.spike_trains() - for idx, spike_times in trains.items(): - assert all( - np.diff(spike_times) > 0 * ms - ), f"Spike times for neuron {int(idx)} are not sorted" - - -def test_synapses_state_monitor(): - G = NeuronGroup(2, "") - S = Synapses(G, G, "w: siemens") - S.connect(True) - S.w = "j*nS" - - # record from a Synapses object (all synapses connecting to neuron 1) - synapse_mon = StateMonitor(S, "w", record=S[:, 1]) - synapse_mon2 = StateMonitor(S, "w", record=S["j==1"]) - - net = Network(G, S, synapse_mon, synapse_mon2) 
- net.run(10 * ms) - # Synaptic variables - assert_allclose(synapse_mon[S[0, 1]].w, 1 * nS) - assert_allclose(synapse_mon.w[1], 1 * nS) - assert_allclose(synapse_mon2[S[0, 1]].w, 1 * nS) - assert_allclose(synapse_mon2[S["i==0 and j==1"]].w, 1 * nS) - assert_allclose(synapse_mon2.w[1], 1 * nS) - - -@pytest.mark.standalone_compatible -def test_state_monitor(): - # Unique name to get the warning even for repeated runs of the test - unique_name = f"neurongroup_{str(uuid.uuid4()).replace('-', '_')}" - # Check that all kinds of variables can be recorded - G = NeuronGroup( - 2, - """ - dv/dt = -v / (10*ms) : 1 - f = clip(v, 0.1, 0.9) : 1 - rate: Hz - """, - threshold="v>1", - reset="v=0", - refractory=2 * ms, - name=unique_name, - ) - G.rate = [100, 1000] * Hz - G.v = 1 - - S = Synapses(G, G, "w: siemens") - S.connect(True) - S.w = "j*nS" - - # A bit peculiar, but in principle one should be allowed to record - # nothing except for the time - nothing_mon = StateMonitor(G, [], record=True) - no_record = StateMonitor(G, "v", record=False) - - # Use a single StateMonitor - v_mon = StateMonitor(G, "v", record=True) - v_mon1 = StateMonitor(G, "v", record=[1]) - - # Use a StateMonitor for specified variables - multi_mon = StateMonitor(G, ["v", "f", "rate"], record=True) - multi_mon1 = StateMonitor(G, ["v", "f", "rate"], record=[1]) - - # Use a StateMonitor recording everything - all_mon = StateMonitor(G, True, record=True) - - # Record synapses with explicit indices (the only way allowed in standalone) - synapse_mon = StateMonitor(S, "w", record=np.arange(len(G) ** 2)) - - run(10 * ms) - - # Check time recordings - assert_array_equal(nothing_mon.t, v_mon.t) - assert_array_equal(nothing_mon.t_, np.asarray(nothing_mon.t)) - assert_array_equal(nothing_mon.t_, v_mon.t_) - assert_allclose(nothing_mon.t, np.arange(len(nothing_mon.t)) * defaultclock.dt) - assert_array_equal(no_record.t, v_mon.t) - - # Check v recording - assert_allclose(v_mon.v.T, np.exp(np.tile(-v_mon.t, (2, 1)).T / (10 * ms))) - assert_allclose(v_mon.v_.T, np.exp(np.tile(-v_mon.t_, (2, 1)).T / float(10 * ms))) - assert_array_equal(v_mon.v, multi_mon.v) - assert_array_equal(v_mon.v_, multi_mon.v_) - assert_array_equal(v_mon.v, all_mon.v) - assert_array_equal(v_mon.v[1:2], v_mon1.v) - assert_array_equal(multi_mon.v[1:2], multi_mon1.v) - assert len(no_record.v) == 0 - - # Other variables - assert_array_equal( - multi_mon.rate_.T, np.tile(np.atleast_2d(G.rate_), (multi_mon.rate.shape[1], 1)) - ) - assert_array_equal(multi_mon.rate[1:2], multi_mon1.rate) - assert_allclose(np.clip(multi_mon.v, 0.1, 0.9), multi_mon.f) - assert_allclose(np.clip(multi_mon1.v, 0.1, 0.9), multi_mon1.f) - - assert all( - all_mon[0].not_refractory[:] == True - ), "not_refractory should be True, but got(not_refractory, v):%s " % str( - (all_mon.not_refractory, all_mon.v) - ) - - # Synapses - assert_allclose( - synapse_mon.w[:], np.tile(S.j[:] * nS, (synapse_mon.w[:].shape[1], 1)).T - ) - - -@pytest.mark.standalone_compatible -@pytest.mark.multiple_runs -def test_state_monitor_record_single_timestep(): - G = NeuronGroup(1, "dv/dt = -v/(5*ms) : 1") - G.v = 1 - mon = StateMonitor(G, "v", record=True) - # Recording before a run should not work - with pytest.raises(TypeError): - mon.record_single_timestep() - run(0.5 * ms) - mon.record_single_timestep() - device.build(direct_call=False, **device.build_options) - assert mon.t[0] == 0 * ms - assert mon[0].v[0] == 1 - assert_allclose(mon.t[-1], 0.5 * ms) - assert len(mon.t) == 6 - assert mon[0].v[-1] == G.v - - 
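A minimal sketch of the record_single_timestep pattern, restating only what the preceding test asserts (Brian's default dt of 0.1 ms is assumed): the monitor samples at the start of every time step during run(), and one manual call afterwards appends the final sample:

    from brian2 import NeuronGroup, StateMonitor, ms, run

    G = NeuronGroup(1, "dv/dt = -v/(5*ms) : 1")
    G.v = 1
    mon = StateMonitor(G, "v", record=True)
    run(0.5 * ms)                  # records at t = 0, 0.1, ..., 0.4 ms
    mon.record_single_timestep()   # appends the t = 0.5 ms sample
    assert len(mon.t) == 6         # five samples from the run plus the manual one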
-@pytest.mark.standalone_compatible -def test_state_monitor_bigger_dt(): - # Check that it is possible to record with a slower clock, i.e. bigger dt - G = NeuronGroup(1, "dv/dt = -v/(5*ms) : 1", method="exact") - G.v = 1 - mon = StateMonitor(G, "v", record=True) - slow_mon = StateMonitor(G, "v", record=True, dt=defaultclock.dt * 5) - run(defaultclock.dt * 11) - assert len(mon.t) == len(mon.v[0]) == 11 - assert len(slow_mon.t) == len(slow_mon.v[0]) == 3 - for timepoint in [0, 5, 10]: - assert mon.t[timepoint] == slow_mon.t[timepoint // 5] - assert mon.v[0, timepoint] == slow_mon.v[0, timepoint // 5] - - -@pytest.mark.standalone_compatible -def test_state_monitor_indexing(): - # Check indexing semantics - G = NeuronGroup(10, "v:volt") - G.v = np.arange(10) * volt - mon = StateMonitor(G, "v", record=[5, 6, 7]) - - run(2 * defaultclock.dt) - - assert_array_equal(mon.v, np.array([[5, 5], [6, 6], [7, 7]]) * volt) - assert_array_equal(mon.v_, np.array([[5, 5], [6, 6], [7, 7]])) - assert_array_equal(mon[5].v, mon.v[0]) - assert_array_equal(mon[7].v, mon.v[2]) - assert_array_equal(mon[[5, 7]].v, mon.v[[0, 2]]) - assert_array_equal(mon[np.array([5, 7])].v, mon.v[[0, 2]]) - - assert_allclose(mon.t[1:], Quantity([defaultclock.dt])) - - with pytest.raises(IndexError): - mon[8] - with pytest.raises(TypeError): - mon["string"] - with pytest.raises(TypeError): - mon[5.0] - with pytest.raises(TypeError): - mon[[5.0, 6.0]] - - -@pytest.mark.standalone_compatible -def test_state_monitor_get_states(): - G = NeuronGroup( - 2, - """ - dv/dt = -v / (10*ms) : 1 - f = clip(v, 0.1, 0.9) : 1 - rate: Hz - """, - threshold="v>1", - reset="v=0", - refractory=2 * ms, - ) - G.rate = [100, 1000] * Hz - G.v = 1 - mon = StateMonitor(G, ["v", "f", "rate"], record=True) - run(10 * defaultclock.dt) - all_states = mon.get_states() - assert set(all_states.keys()) == {"v", "f", "rate", "t", "N"} - assert_array_equal(all_states["v"].T, mon.v[:]) - assert_array_equal(all_states["f"].T, mon.f[:]) - assert_array_equal(all_states["rate"].T, mon.rate[:]) - assert_array_equal(all_states["t"], mon.t[:]) - assert_array_equal(all_states["N"], mon.N) - - -@pytest.mark.standalone_compatible -def test_state_monitor_resize(): - # Test for issue #518 (cython did not resize the Variable object) - G = NeuronGroup(2, "v : 1") - mon = StateMonitor(G, "v", record=True) - defaultclock.dt = 0.1 * ms - run(1 * ms) - # On standalone, the size information of the variables is only updated - # after the variable has been accessed, so we can not only check the size - # information of the variables object - assert len(mon.t) == 10 - assert mon.v.shape == (2, 10) - assert mon.variables["t"].size == 10 - # Note that the internally stored variable has the transposed shape of the - # variable that is visible to the user - assert mon.variables["v"].size == (10, 2) - - -@pytest.mark.standalone_compatible -def test_state_monitor_synapses(): - # Check that recording from synapses works correctly - G = NeuronGroup(5, "v : 1", threshold="False", reset="v = 0") - S1 = Synapses(G, G, "w : 1", on_pre="v_post += w") - S1.run_regularly("w += 1") - S1.connect(i=[0, 1], j=[2, 3]) - S1.w = "i" - # We can check the record argument even in standalone mode, since we created - # the synapses with an array of indices of known length - with catch_logs() as l: - S1_mon = StateMonitor(S1, "w", record=[0, 1]) - assert len(l) == 0 - - with pytest.raises(IndexError): - StateMonitor(S1, "w", record=[0, 2]) - - S2 = Synapses(G, G, "w : 1", on_pre="v_post += w") - S2.run_regularly("w += 
1") - S2.connect("i==j") # Not yet executed for standalone mode - S2.w = "i" - with catch_logs() as l: - S2_mon = StateMonitor(S2, "w", record=[0, 4]) - - if isinstance(get_device(), CPPStandaloneDevice): - assert len(l) == 1 - assert l[0][0] == "WARNING" - assert l[0][1].endswith(".cannot_check_statemonitor_indices") - else: - assert len(l) == 0 - run(2 * defaultclock.dt) - assert_array_equal(S1_mon.w[:], [[0, 1], [1, 2]]) - assert_array_equal(S2_mon.w[:], [[0, 1], [4, 5]]) - - -@pytest.mark.codegen_independent -def test_statemonitor_namespace(): - # Make sure that StateMonitor is correctly inheriting its source's namespace - G = NeuronGroup(2, "x = i + y : integer", namespace={"y": 3}) - mon = StateMonitor(G, "x", record=True) - run(defaultclock.dt, namespace={}) - assert_array_equal(mon.x, [[3], [4]]) - - -@pytest.mark.standalone_compatible -def test_rate_monitor_1(): - G = NeuronGroup(5, "v : 1", threshold="v>1") # no reset - G.v = 1.1 # All neurons spike every time step - rate_mon = PopulationRateMonitor(G) - run(10 * defaultclock.dt) - - assert_allclose(rate_mon.t, np.arange(10) * defaultclock.dt) - assert_allclose(rate_mon.t_, np.arange(10) * defaultclock.dt_) - assert_allclose(rate_mon.t, np.arange(10) * defaultclock.dt) - assert_allclose(rate_mon.rate, np.ones(10) / defaultclock.dt) - assert_allclose(rate_mon.rate_, np.asarray(np.ones(10) / defaultclock.dt_)) - # Check that indexing into the VariableView works (this fails if we do not - # update the N variable correctly) - assert_allclose(rate_mon.t[:5], np.arange(5) * defaultclock.dt) - - -@pytest.mark.standalone_compatible -def test_rate_monitor_2(): - G = NeuronGroup(10, "v : 1", threshold="v>1") # no reset - G.v["i<5"] = 1.1 # Half of the neurons fire every time step - rate_mon = PopulationRateMonitor(G) - net = Network(G, rate_mon) - net.run(10 * defaultclock.dt) - assert_allclose(rate_mon.rate, 0.5 * np.ones(10) / defaultclock.dt) - assert_allclose(rate_mon.rate_, 0.5 * np.asarray(np.ones(10) / defaultclock.dt_)) - - -@pytest.mark.codegen_independent -def test_rate_monitor_smoothed_rate(): - # Test the filter response by having a single spiking neuron - G = SpikeGeneratorGroup(1, [0], [1] * ms) - r_mon = PopulationRateMonitor(G) - run(3 * ms) - index = int(np.round(1 * ms / defaultclock.dt)) - except_index = np.array([idx for idx in range(len(r_mon.rate)) if idx != index]) - assert_array_equal(r_mon.rate[except_index], 0 * Hz) - assert_allclose(r_mon.rate[index], 1 / defaultclock.dt) - ### Flat window - # Using a flat window of size = dt should not change anything - assert_allclose(r_mon.rate, r_mon.smooth_rate(window="flat", width=defaultclock.dt)) - smoothed = r_mon.smooth_rate(window="flat", width=5 * defaultclock.dt) - assert_array_equal(smoothed[: index - 2], 0 * Hz) - assert_array_equal(smoothed[index + 3 :], 0 * Hz) - assert_allclose(smoothed[index - 2 : index + 3], 0.2 / defaultclock.dt) - with catch_logs(log_level=logging.INFO): - smoothed2 = r_mon.smooth_rate(window="flat", width=5.4 * defaultclock.dt) - assert_array_equal(smoothed, smoothed2) - - ### Gaussian window - width = 5 * defaultclock.dt - smoothed = r_mon.smooth_rate(window="gaussian", width=width) - # 0 outside of window - assert_array_equal(smoothed[: index - 10], 0 * Hz) - assert_array_equal(smoothed[index + 11 :], 0 * Hz) - # Gaussian around the spike - gaussian = np.exp( - -((r_mon.t[index - 10 : index + 11] - 1 * ms) ** 2) / (2 * width**2) - ) - gaussian /= sum(gaussian) - assert_allclose(smoothed[index - 10 : index + 11], 1 / defaultclock.dt * 
gaussian) - - ### Arbitrary window - window = np.ones(5) - smoothed_flat = r_mon.smooth_rate(window="flat", width=5 * defaultclock.dt) - smoothed_custom = r_mon.smooth_rate(window=window) - assert_allclose(smoothed_flat, smoothed_custom) - - -@pytest.mark.codegen_independent -def test_rate_monitor_smoothed_rate_incorrect(): - # Test the filter response by having a single spiking neuron - G = SpikeGeneratorGroup(1, [0], [1] * ms) - r_mon = PopulationRateMonitor(G) - run(2 * ms) - - with pytest.raises(TypeError): - r_mon.smooth_rate(window="flat") # no width - with pytest.raises(TypeError): - r_mon.smooth_rate(window=np.ones(5), width=1 * ms) - with pytest.raises(NotImplementedError): - r_mon.smooth_rate(window="unknown", width=1 * ms) - with pytest.raises(TypeError): - r_mon.smooth_rate(window=object()) - with pytest.raises(TypeError): - r_mon.smooth_rate(window=np.ones(5, 2)) - with pytest.raises(TypeError): - r_mon.smooth_rate(window=np.ones(4)) # even number - - -@pytest.mark.standalone_compatible -def test_rate_monitor_get_states(): - G = NeuronGroup(5, "v : 1", threshold="v>1") # no reset - G.v = 1.1 # All neurons spike every time step - rate_mon = PopulationRateMonitor(G) - run(10 * defaultclock.dt) - all_states = rate_mon.get_states() - assert set(all_states.keys()) == {"rate", "t", "N"} - assert_array_equal(all_states["rate"], rate_mon.rate[:]) - assert_array_equal(all_states["t"], rate_mon.t[:]) - assert_array_equal(all_states["N"], rate_mon.N) - - -@pytest.mark.standalone_compatible -def test_rate_monitor_subgroups(): - old_dt = defaultclock.dt - defaultclock.dt = 0.01 * ms - G = NeuronGroup( - 4, - """ - dv/dt = rate : 1 - rate : Hz - """, - threshold="v>0.999", - reset="v=0", - ) - G.rate = [100, 200, 400, 800] * Hz - rate_all = PopulationRateMonitor(G) - rate_1 = PopulationRateMonitor(G[:2]) - rate_2 = PopulationRateMonitor(G[2:]) - run(1 * second) - assert_allclose(mean(G.rate_[:]), mean(rate_all.rate_[:])) - assert_allclose(mean(G.rate_[:2]), mean(rate_1.rate_[:])) - assert_allclose(mean(G.rate_[2:]), mean(rate_2.rate_[:])) - - defaultclock.dt = old_dt - - -@pytest.mark.standalone_compatible -def test_rate_monitor_subgroups_2(): - G = NeuronGroup(6, """do_spike : boolean""", threshold="do_spike") - G.do_spike = [True, False, False, False, True, True] - rate_all = PopulationRateMonitor(G) - rate_1 = PopulationRateMonitor(G[:2]) - rate_2 = PopulationRateMonitor(G[2:4]) - rate_3 = PopulationRateMonitor(G[4:]) - run(2 * defaultclock.dt) - assert_allclose(rate_all.rate, 0.5 / defaultclock.dt) - assert_allclose(rate_1.rate, 0.5 / defaultclock.dt) - assert_allclose(rate_2.rate, 0 * Hz) - assert_allclose(rate_3.rate, 1 / defaultclock.dt) - - -@pytest.mark.codegen_independent -def test_monitor_str_repr(): - # Basic test that string representations are not empty - G = NeuronGroup(2, "dv/dt = -v/(10*ms) : 1", threshold="v>1", reset="v=0") - spike_mon = SpikeMonitor(G) - state_mon = StateMonitor(G, "v", record=True) - rate_mon = PopulationRateMonitor(G) - for obj in [spike_mon, state_mon, rate_mon]: - assert len(str(obj)) - assert len(repr(obj)) - - -if __name__ == "__main__": - from _pytest.outcomes import Skipped - - test_spike_monitor() - test_spike_monitor_indexing() - test_spike_monitor_get_states() - test_spike_monitor_subgroups() - try: - test_spike_monitor_bug_824() - except Skipped: - pass - test_spike_monitor_variables() - test_event_monitor() - test_event_monitor_no_record() - test_spike_trains() - test_synapses_state_monitor() - test_state_monitor() - 
test_state_monitor_record_single_timestep() - test_state_monitor_get_states() - test_state_monitor_indexing() - test_state_monitor_resize() - test_rate_monitor_1() - test_rate_monitor_2() - test_rate_monitor_smoothed_rate() - test_rate_monitor_smoothed_rate_incorrect() - test_rate_monitor_get_states() - test_rate_monitor_subgroups() - test_rate_monitor_subgroups_2() - test_monitor_str_repr() diff --git a/brian2/tests/test_morphology.py b/brian2/tests/test_morphology.py deleted file mode 100644 index e287aaf1a..000000000 --- a/brian2/tests/test_morphology.py +++ /dev/null @@ -1,1601 +0,0 @@ -import os -import tempfile - -import pytest -from numpy.testing import assert_equal - -from brian2 import numpy as np -from brian2.spatialneuron import * -from brian2.tests.utils import assert_allclose -from brian2.units import DimensionMismatchError, cm, second, um -from brian2.utils.logger import catch_logs - - -@pytest.mark.codegen_independent -def test_attributes_soma(): - soma = Soma(diameter=10 * um) - assert isinstance(soma, Morphology) - # Single compartment - assert soma.n == 1 - assert soma.total_sections == 1 - assert soma.total_compartments == 1 - with pytest.raises(TypeError): - len(soma) # ambiguous - # Compartment attributes - assert_equal(soma.diameter, [10] * um) - assert_equal(soma.length, [10] * um) - assert_equal(soma.distance, [0] * um) - assert_equal(soma.end_distance, 0 * um) - assert soma.r_length_1 > 1 * cm - assert soma.r_length_2 > 1 * cm - assert_equal(soma.area, np.pi * soma.diameter**2) - assert_allclose(soma.volume, 1.0 / 6.0 * np.pi * (10 * um) ** 3) - - # No coordinates were specified - assert soma.start_x is None - assert soma.start_y is None - assert soma.start_z is None - assert soma.x is None - assert soma.y is None - assert soma.z is None - assert soma.end_x is None - assert soma.end_y is None - assert soma.end_z is None - - -@pytest.mark.codegen_independent -def test_attributes_soma_coordinates(): - # Specify only one of the coordinates - xyz = {"x", "y", "z"} - for coord in xyz: - kwds = {coord: 5 * um} - soma = Soma(diameter=10 * um, **kwds) - # Length shouldn't change (not defined by coordinates but by the diameter) - assert_equal(soma.length, [10] * um) - assert_equal(soma.distance, [0] * um) - - # Coordinates should be specified now, with 0 values for the other - # coordinates - for other_coord in xyz - {coord}: - assert_equal(getattr(soma, f"start_{other_coord}"), [0] * um) - assert_equal(getattr(soma, other_coord), [0] * um) - assert_equal(getattr(soma, f"end_{other_coord}"), [0] * um) - - assert_equal(getattr(soma, f"start_{coord}"), [5] * um) - assert_equal(getattr(soma, coord), [5] * um) - assert_equal(getattr(soma, f"end_{coord}"), [5] * um) - - # Specify all coordinates - soma = Soma(diameter=10 * um, x=1 * um, y=2 * um, z=3 * um) - # Length shouldn't change (not defined by coordinates but by the diameter) - assert_equal(soma.length, [10] * um) - assert_equal(soma.distance, [0] * um) - - assert_equal(soma.start_x, 1 * um) - assert_equal(soma.x, 1 * um) - assert_equal(soma.end_x, 1 * um) - assert_equal(soma.start_y, 2 * um) - assert_equal(soma.y, 2 * um) - assert_equal(soma.end_y, 2 * um) - assert_equal(soma.start_z, 3 * um) - assert_equal(soma.z, 3 * um) - assert_equal(soma.end_z, 3 * um) - - -@pytest.mark.codegen_independent -def test_attributes_cylinder(): - n = 10 - cylinder = Cylinder(n=n, diameter=10 * um, length=200 * um) - assert isinstance(cylinder, Morphology) - # Single section with 10 compartments - assert cylinder.n == n - assert 
cylinder.total_sections == 1 - assert cylinder.total_compartments == n - with pytest.raises(TypeError): - len(cylinder) # ambiguous - - # Compartment attributes - assert_equal(cylinder.diameter, np.ones(n) * 10 * um) - assert_equal(cylinder.length, np.ones(n) * 20 * um) - assert_equal(cylinder.distance, np.arange(n) * 20 * um + 10 * um) - assert_equal(cylinder.end_distance, 200 * um) - # TODO: r_length - assert_allclose(cylinder.area, np.pi * cylinder.diameter * cylinder.length) - assert_allclose( - cylinder.volume, 1.0 / 4.0 * np.pi * cylinder.diameter**2 * cylinder.length - ) - - # No coordinates were specified - assert cylinder.start_x is None - assert cylinder.start_y is None - assert cylinder.start_z is None - assert cylinder.x is None - assert cylinder.y is None - assert cylinder.z is None - assert cylinder.end_x is None - assert cylinder.end_y is None - assert cylinder.end_z is None - - -@pytest.mark.codegen_independent -def test_attributes_cylinder_coordinates(): - # Specify only the end-point of the section - n = 10 - # Specify only one of the coordinates - xyz = {"x", "y", "z"} - for coord in xyz: - kwds = {coord: [0, 200] * um} - cylinder = Cylinder(n=n, diameter=10 * um, **kwds) - assert_equal(cylinder.diameter, np.ones(n) * 10 * um) - assert_equal(cylinder.length, np.ones(n) * 20 * um) - assert_equal(cylinder.distance, np.arange(n) * 20 * um + 10 * um) - assert_equal(cylinder.end_distance, 200 * um) - - # Coordinates should be specified now, with 0 values for the other - # coordinates - for other_coord in xyz - {coord}: - assert_equal(getattr(cylinder, f"start_{other_coord}"), np.zeros(n) * um) - assert_equal(getattr(cylinder, other_coord), np.zeros(n) * um) - assert_equal(getattr(cylinder, f"end_{other_coord}"), np.zeros(n) * um) - - assert_equal(getattr(cylinder, f"start_{coord}"), np.arange(n) * 20 * um) - assert_equal(getattr(cylinder, coord), np.arange(n) * 20 * um + 10 * um) - assert_equal( - getattr(cylinder, f"end_{coord}"), np.arange(n) * 20 * um + 20 * um - ) - - # Specify all coordinates - val = [0, 200.0 / np.sqrt(3.0)] * um - cylinder = Cylinder(n=n, diameter=10 * um, x=val, y=val, z=val) - - assert_equal(cylinder.diameter, np.ones(n) * 10 * um) - assert_allclose(cylinder.length, np.ones(n) * 20 * um) - assert_allclose(cylinder.distance, np.arange(n) * 20 * um + 10 * um) - assert_allclose(cylinder.end_distance, 200 * um) - - for coord in ["x", "y", "z"]: - assert_allclose(getattr(cylinder, f"start_{coord}"), np.arange(n) * val[1] / n) - assert_allclose( - getattr(cylinder, coord), np.arange(n) * val[1] / n + 0.5 * val[1] / n - ) - assert_allclose( - getattr(cylinder, f"end_{coord}"), np.arange(n) * val[1] / n + val[1] / n - ) - - -@pytest.mark.codegen_independent -def test_attributes_section(): - n = 10 - # No difference to a cylinder - sec = Section(n=n, diameter=np.ones(n + 1) * 10 * um, length=np.ones(n) * 20 * um) - cyl = Cylinder(n=1, diameter=10 * um, length=0 * um) # dummy cylinder - cyl.child = sec - assert isinstance(sec, Morphology) - # Single section with 10 compartments - assert sec.n == n - assert sec.total_sections == 1 - assert sec.total_compartments == n - with pytest.raises(TypeError): - len(sec) # ambiguous - - # Compartment attributes - assert_allclose(sec.diameter, np.ones(n) * 10 * um) - assert_allclose(sec.length, np.ones(n) * 20 * um) - assert_allclose(sec.distance, np.arange(n) * 20 * um + 10 * um) - assert_allclose(sec.end_distance, 200 * um) - # TODO: r_length - assert_allclose( - sec.area, np.pi * 0.5 * (sec.start_diameter + 
sec.end_diameter) * sec.length - ) - assert_allclose(sec.volume, 1.0 / 4.0 * np.pi * sec.diameter**2 * sec.length) - - # No coordinates were specified - assert sec.start_x is None - assert sec.start_y is None - assert sec.start_z is None - assert sec.x is None - assert sec.y is None - assert sec.z is None - assert sec.end_x is None - assert sec.end_y is None - assert sec.end_z is None - - -@pytest.mark.codegen_independent -def test_attributes_section_coordinates_single(): - # Specify only the end-point of the section (no difference to cylinder) - n = 10 - # Specify only one of the coordinates - xyz = {"x", "y", "z"} - for coord in xyz: - kwds = {coord: np.linspace(0 * um, 200 * um, n + 1)} - sec = Section(n=n, diameter=np.ones(n + 1) * 10 * um, **kwds) - cyl = Cylinder(n=1, diameter=10 * um, length=0 * um) # dummy cylinder - cyl.child = sec - assert_equal(sec.diameter, np.ones(n) * 10 * um) - assert_equal(sec.length, np.ones(n) * 20 * um) - assert_equal(sec.distance, np.arange(n) * 20 * um + 10 * um) - assert_equal(sec.end_distance, 200 * um) - - # Coordinates should be specified now, with 0 values for the other - # coordinates - for other_coord in xyz - {coord}: - assert_equal(getattr(sec, f"start_{other_coord}"), np.zeros(n) * um) - assert_equal(getattr(sec, other_coord), np.zeros(n) * um) - assert_equal(getattr(sec, f"end_{other_coord}"), np.zeros(n) * um) - - assert_equal(getattr(sec, f"start_{coord}"), np.arange(n) * 20 * um) - assert_equal(getattr(sec, coord), np.arange(n) * 20 * um + 10 * um) - assert_equal(getattr(sec, f"end_{coord}"), np.arange(n) * 20 * um + 20 * um) - - # Specify all coordinates - val = 200.0 / np.sqrt(3.0) * um - sec = Section( - n=n, - diameter=np.ones(n + 1) * 10 * um, - x=np.linspace(0 * um, val, n + 1), - y=np.linspace(0 * um, val, n + 1), - z=np.linspace(0 * um, val, n + 1), - ) - cyl = Cylinder(n=1, diameter=10 * um, length=0 * um) - cyl.child = sec - assert_equal(sec.diameter, np.ones(n) * 10 * um) - assert_allclose(sec.length, np.ones(n) * 20 * um) - assert_allclose(sec.distance, np.arange(n) * 20 * um + 10 * um) - assert_allclose(sec.end_distance, 200 * um) - - for coord in ["x", "y", "z"]: - assert_allclose(getattr(sec, f"start_{coord}"), np.arange(n) * val / n) - assert_allclose(getattr(sec, coord), np.arange(n) * val / n + 0.5 * val / n) - assert_allclose(getattr(sec, f"end_{coord}"), np.arange(n) * val / n + val / n) - - -@pytest.mark.codegen_independent -def test_attributes_section_coordinates_all(): - n = 3 - # Specify all coordinates - sec = Section( - n=n, - diameter=[10, 10, 10, 10] * um, - x=[10, 11, 11, 11] * um, - y=[100, 100, 101, 101] * um, - z=[1000, 1000, 1000, 1001] * um, - ) - - assert_equal(sec.diameter, np.ones(n) * 10 * um) - assert_allclose(sec.length, np.ones(n) * um) - assert_allclose(sec.distance, np.arange(n) * um + 0.5 * um) - assert_allclose(sec.end_distance, 3 * um) - - assert_allclose(sec.start_x, [10, 11, 11] * um) - assert_allclose(sec.x, [10.5, 11, 11] * um) - assert_allclose(sec.end_x, [11, 11, 11] * um) - assert_allclose(sec.start_y, [100, 100, 101] * um) - assert_allclose(sec.y, [100, 100.5, 101] * um) - assert_allclose(sec.end_y, [100, 101, 101] * um) - assert_allclose(sec.start_z, [1000, 1000, 1000] * um) - assert_allclose(sec.z, [1000, 1000, 1000.5] * um) - assert_allclose(sec.end_z, [1000, 1000, 1001] * um) - - # Specify varying diameters - sec = Section( - n=n, - diameter=[20, 10, 5, 2.5] * um, - x=[0, 1, 1, 1] * um, - y=[0, 0, 1, 1] * um, - z=[0, 0, 0, 1] * um, - ) - assert_allclose(sec.start_diameter, [20, 10, 
5] * um) - # diameter at midpoint - assert_allclose(sec.diameter, 0.5 * (sec.start_diameter + sec.end_diameter)) - assert_allclose(sec.end_diameter, [10, 5, 2.5] * um) - # TODO: Check area and volume - - -def _check_tree_cables(morphology, coordinates=False): - # number of compartments per section - assert morphology.n == 10 - assert morphology["1"].n == 5 - assert morphology["2"].n == 5 - assert morphology["21"].n == 5 - assert morphology["22"].n == 5 - # number of compartments per subtree - assert morphology.total_compartments == 30 - assert morphology["1"].total_compartments == 5 - assert morphology["2"].total_compartments == 15 - assert morphology["21"].total_compartments == 5 - assert morphology["22"].total_compartments == 5 - # number of sections per subtree - assert morphology.total_sections == 5 - assert morphology["1"].total_sections == 1 - assert morphology["2"].total_sections == 3 - assert morphology["21"].total_sections == 1 - assert morphology["22"].total_sections == 1 - # Check that distances (= distance to root at electrical midpoint) - # correctly follow the tree structure - assert_allclose(morphology.distance, np.arange(10) * 10 * um + 5 * um) - assert_allclose( - morphology["2"].distance, 100 * um + np.arange(5) * 10 * um + 5 * um - ) - assert_allclose( - morphology["21"].distance, 150 * um + np.arange(5) * 10 * um + 5 * um - ) - assert_allclose(morphology.end_distance, 100 * um) - assert_allclose(morphology["1"].end_distance, 200 * um) - assert_allclose(morphology["2"].end_distance, 150 * um) - assert_allclose(morphology["21"].end_distance, 200 * um) - assert_allclose(morphology["22"].end_distance, 200 * um) - # Check that section diameters are correctly inherited from the parent - # sections - assert_allclose(morphology["1"].start_diameter, [10, 8, 6, 4, 2] * um) - assert_allclose(morphology["22"].start_diameter, [5, 4, 3, 2, 1] * um) - - if coordinates: - # Coordinates should be absolute - # section: cable - assert_allclose(morphology.start_x, np.arange(10) * 10 * um) - assert_allclose(morphology.x, np.arange(10) * 10 * um + 5 * um) - assert_allclose(morphology.end_x, np.arange(10) * 10 * um + 10 * um) - assert_allclose(morphology.y, np.zeros(10) * um) - assert_allclose(morphology.z, np.zeros(10) * um) - # section: cable['1'] - step = 20 / np.sqrt(2) * um - assert_allclose(morphology["1"].start_x, 100 * um + np.arange(5) * step) - assert_allclose(morphology["1"].x, 100 * um + np.arange(5) * step + step / 2) - assert_allclose(morphology["1"].end_x, 100 * um + np.arange(5) * step + step) - assert_allclose(morphology["1"].start_y, np.arange(5) * step) - assert_allclose(morphology["1"].y, np.arange(5) * step + step / 2) - assert_allclose(morphology["1"].end_y, np.arange(5) * step + step) - assert_allclose(morphology["1"].z, np.zeros(5) * um) - # section: cable['2'] - step = 10 / np.sqrt(2) * um - assert_allclose(morphology["2"].start_x, 100 * um + np.arange(5) * step) - assert_allclose(morphology["2"].x, 100 * um + np.arange(5) * step + step / 2) - assert_allclose(morphology["2"].end_x, 100 * um + np.arange(5) * step + step) - assert_allclose(morphology["2"].start_y, -np.arange(5) * step) - assert_allclose(morphology["2"].y, -(np.arange(5) * step + step / 2)) - assert_allclose(morphology["2"].end_y, -(np.arange(5) * step + step)) - assert_allclose(morphology["2"].z, np.zeros(5) * um) - # section: cable ['21'] - step = 10 / np.sqrt(2) * um - assert_allclose( - morphology["21"].start_x, - 100 * um + 50 / np.sqrt(2) * um + np.arange(5) * step, - ) - assert_allclose( - 
morphology["21"].x, - 100 * um + 50 / np.sqrt(2) * um + np.arange(5) * step + step / 2, - ) - assert_allclose( - morphology["21"].end_x, - 100 * um + 50 / np.sqrt(2) * um + np.arange(5) * step + step, - ) - assert_allclose(morphology["21"].start_y, -np.ones(5) * 50 / np.sqrt(2) * um) - assert_allclose(morphology["21"].y, -np.ones(5) * 50 / np.sqrt(2) * um) - assert_allclose(morphology["21"].end_y, -np.ones(5) * 50 / np.sqrt(2) * um) - assert_allclose(morphology["21"].start_z, np.arange(5) * step) - assert_allclose(morphology["21"].z, np.arange(5) * step + step / 2) - assert_allclose(morphology["21"].end_z, np.arange(5) * step + step) - # section: cable['22'] - step = 10 / np.sqrt(2) * um - assert_allclose( - morphology["22"].start_x, - 100 * um + 50 / np.sqrt(2) * um + np.arange(5) * step, - ) - assert_allclose( - morphology["22"].x, - 100 * um + 50 / np.sqrt(2) * um + np.arange(5) * step + step / 2, - ) - assert_allclose( - morphology["22"].end_x, - 100 * um + 50 / np.sqrt(2) * um + np.arange(5) * step + step, - ) - assert_allclose(morphology["22"].start_y, -np.ones(5) * 50 / np.sqrt(2) * um) - assert_allclose(morphology["22"].y, -np.ones(5) * 50 / np.sqrt(2) * um) - assert_allclose(morphology["22"].end_y, -np.ones(5) * 50 / np.sqrt(2) * um) - assert_allclose(morphology["22"].start_z, -np.arange(5) * step) - assert_allclose(morphology["22"].z, -(np.arange(5) * step + step / 2)) - assert_allclose(morphology["22"].end_z, -(np.arange(5) * step + step)) - - -@pytest.mark.codegen_independent -def test_tree_cables_schematic(): - cable = Cylinder(n=10, diameter=10 * um, length=100 * um) - cable.L = Section( - n=5, diameter=[10, 8, 6, 4, 2, 0] * um, length=np.ones(5) * 20 * um - ) # tapering truncated cones - cable.R = Cylinder(n=5, diameter=5 * um, length=50 * um) - cable.RL = Cylinder(n=5, diameter=2.5 * um, length=50 * um) - cable.RR = Section( - n=5, diameter=[5, 4, 3, 2, 1, 0] * um, length=np.ones(5) * 10 * um - ) - - _check_tree_cables(cable) - - -@pytest.mark.codegen_independent -def test_tree_cables_coordinates(): - # The lengths of the sections should be identical to the previous test - cable = Cylinder(n=10, x=[0, 100] * um, diameter=10 * um) - cable.L = Section( - n=5, - diameter=[10, 8, 6, 4, 2, 0] * um, - x=np.linspace(0, 100, 6) / np.sqrt(2) * um, - y=np.linspace(0, 100, 6) / np.sqrt(2) * um, - ) - cable.R = Cylinder( - n=5, diameter=5 * um, x=[0, 50] * um / np.sqrt(2), y=[0, -50] * um / np.sqrt(2) - ) - cable.RL = Cylinder( - n=5, diameter=2.5 * um, x=[0, 50] * um / np.sqrt(2), z=[0, 50] * um / np.sqrt(2) - ) - cable.RR = Section( - n=5, - diameter=[5, 4, 3, 2, 1, 0] * um, - x=np.linspace(0, 50, 6) * um / np.sqrt(2), - z=np.linspace(0, -50, 6) * um / np.sqrt(2), - ) - - _check_tree_cables(cable, coordinates=True) - - -@pytest.mark.codegen_independent -def test_tree_cables_from_points(): - # The coordinates should be identical to the previous test - # fmt: off - points = [ # cable - (1, None, 0, 0, 0, 10, -1), - (2, None, 10, 0, 0, 10, 1), - (3, None, 20, 0, 0, 10, 2), - (4, None, 30, 0, 0, 10, 3), - (5, None, 40, 0, 0, 10, 4), - (6, None, 50, 0, 0, 10, 5), - (7, None, 60, 0, 0, 10, 6), - (8, None, 70, 0, 0, 10, 7), - (9, None, 80, 0, 0, 10, 8), - (10, None, 90, 0, 0, 10, 9), - (11, None, 100, 0, 0, 10, 10), - # cable.L (using automatic names) - (12, None, 100+20/np.sqrt(2), 20/np.sqrt(2), 0, 8 , 11), - (13, None, 100+40/np.sqrt(2), 40/np.sqrt(2), 0, 6 , 12), - (14, None, 100+60/np.sqrt(2), 60/np.sqrt(2), 0, 4 , 13), - (15, None, 100+80/np.sqrt(2), 80/np.sqrt(2), 0, 2 , 14), - (16, 
None, 100+100/np.sqrt(2), 100/np.sqrt(2), 0, 0 , 15), - # cable.R (using automatic names) - (17, None, 100+10/np.sqrt(2), -10/np.sqrt(2), 0, 5 , 11), - (18, None, 100+20/np.sqrt(2), -20/np.sqrt(2), 0, 5 , 17), - (19, None, 100+30/np.sqrt(2), -30/np.sqrt(2), 0, 5 , 18), - (20, None, 100+40/np.sqrt(2), -40/np.sqrt(2), 0, 5 , 19), - (21, None, 100+50/np.sqrt(2), -50/np.sqrt(2), 0, 5 , 20), - # cable.RL (using explicit names) - (22, 'L' , 100+60/np.sqrt(2), -50/np.sqrt(2), 10/np.sqrt(2), 2.5, 21), - (23, 'L' , 100+70/np.sqrt(2), -50/np.sqrt(2), 20/np.sqrt(2), 2.5, 22), - (24, 'L' , 100+80/np.sqrt(2), -50/np.sqrt(2), 30/np.sqrt(2), 2.5, 23), - (25, 'L' , 100+90/np.sqrt(2), -50/np.sqrt(2), 40/np.sqrt(2), 2.5, 24), - (26, 'L' , 100+100/np.sqrt(2), -50/np.sqrt(2), 50/np.sqrt(2), 2.5, 25), - # cable.RR (using explicit names) - (27, 'R' , 100+60/np.sqrt(2), -50/np.sqrt(2), -10/np.sqrt(2), 4, 21), - (28, 'R' , 100+70/np.sqrt(2), -50/np.sqrt(2), -20/np.sqrt(2), 3, 27), - (29, 'R' , 100+80/np.sqrt(2), -50/np.sqrt(2), -30/np.sqrt(2), 2, 28), - (30, 'R' , 100+90/np.sqrt(2), -50/np.sqrt(2), -40/np.sqrt(2), 1, 29), - (31, 'R' , 100+100/np.sqrt(2), -50/np.sqrt(2), -50/np.sqrt(2), 0, 30), - ] - # fmt: on - cable = Morphology.from_points(points) - - # Check that the names are used - assert cable.L.n == 5 - assert cable.R.n == 5 - assert cable.RL.n == 5 - assert cable.RR.n == 5 - _check_tree_cables(cable, coordinates=True) - - -def test_tree_cables_from_swc(): - swc_content = """ -# Test file -1 0 0 0 0 5 -1 -2 0 10 0 0 5 1 -3 0 20 0 0 5 2 -4 0 30 0 0 5 3 -5 0 40 0 0 5 4 -6 0 50 0 0 5 5 -7 0 60 0 0 5 6 -8 0 70 0 0 5 7 -9 0 80 0 0 5 8 -10 0 90 0 0 5 9 -11 0 100 0 0 5 10 -12 2 114.14213562373095 14.142135623730949 0 4 11 -13 2 128.2842712474619 28.284271247461898 0 3 12 -14 2 142.42640687119285 42.426406871192846 0 2 13 -15 2 156.5685424949238 56.568542494923797 0 1 14 -16 2 170.71067811865476 70.710678118654741 0 0 15 -17 2 107.07106781186548 -7.0710678118654746 0 2.5 11 -18 2 114.14213562373095 -14.142135623730949 0 2.5 17 -19 2 121.21320343559643 -21.213203435596423 0 2.5 18 -20 2 128.2842712474619 -28.284271247461898 0 2.5 19 -21 2 135.35533905932738 -35.35533905932737 0 2.5 20 -22 2 142.42640687119285 -35.35533905932737 7.0710678118654746 1.25 21 -23 2 149.49747468305833 -35.35533905932737 14.142135623730949 1.25 22 -24 2 156.5685424949238 -35.35533905932737 21.213203435596423 1.25 23 -25 2 163.63961030678928 -35.35533905932737 28.284271247461898 1.25 24 -26 2 170.71067811865476 -35.35533905932737 35.35533905932737 1.25 25 -27 2 142.42640687119285 -35.35533905932737 -7.0710678118654746 2 21 -28 2 149.49747468305833 -35.35533905932737 -14.142135623730949 1.5 27 -29 2 156.5685424949238 -35.35533905932737 -21.213203435596423 1 28 -30 2 163.63961030678928 -35.35533905932737 -28.284271247461898 0.5 29 -31 2 170.71067811865476 -35.35533905932737 -35.35533905932737 0 30 -""" - tmp_filename = tempfile.mktemp("cable_morphology.swc") - with open(tmp_filename, "w") as f: - f.write(swc_content) - cable = Morphology.from_file(tmp_filename) - os.remove(tmp_filename) - _check_tree_cables(cable, coordinates=True) - - -def _check_tree_soma(morphology, coordinates=False, use_cylinders=True): - # number of compartments per section - assert morphology.n == 1 - assert morphology["1"].n == 5 - assert morphology["2"].n == 5 - - # number of compartments per subtree - assert morphology.total_compartments == 11 - assert morphology["1"].total_compartments == 5 - assert morphology["2"].total_compartments == 5 - - # number of sections 
per subtree - assert morphology.total_sections == 3 - assert morphology["1"].total_sections == 1 - assert morphology["2"].total_sections == 1 - - assert_allclose(morphology.diameter, [30] * um) - - # Check that distances (= distance to root at midpoint) - # correctly follow the tree structure - # Note that the soma does add nothing to the distance - assert_equal(morphology.distance, 0 * um) - assert_allclose(morphology["1"].distance, np.arange(5) * 20 * um + 10 * um) - assert_allclose(morphology["2"].distance, np.arange(5) * 10 * um + 5 * um) - assert_allclose(morphology.end_distance, 0 * um) - assert_allclose(morphology["1"].end_distance, 100 * um) - assert_allclose(morphology["2"].end_distance, 50 * um) - - assert_allclose(morphology.diameter, 30 * um) - assert_allclose(morphology["1"].start_diameter, [8, 8, 6, 4, 2] * um) - assert_allclose(morphology["1"].diameter, [8, 7, 5, 3, 1] * um) - assert_allclose(morphology["1"].end_diameter, [8, 6, 4, 2, 0] * um) - assert_allclose(morphology["2"].start_diameter, np.ones(5) * 5 * um) - assert_allclose(morphology["2"].diameter, np.ones(5) * 5 * um) - assert_allclose(morphology["2"].end_diameter, np.ones(5) * 5 * um) - - if coordinates: - # Coordinates should be absolute - # section: soma - assert_allclose(morphology.start_x, 100 * um) - assert_allclose(morphology.x, 100 * um) - assert_allclose(morphology.end_x, 100 * um) - assert_allclose(morphology.y, 0 * um) - assert_allclose(morphology.z, 0 * um) - # section: cable['1'] - step = 20 / np.sqrt(2) * um - assert_allclose(morphology["1"].start_x, 100 * um + np.arange(5) * step) - assert_allclose(morphology["1"].x, 100 * um + np.arange(5) * step + step / 2) - assert_allclose(morphology["1"].end_x, 100 * um + np.arange(5) * step + step) - assert_allclose(morphology["1"].start_y, np.arange(5) * step) - assert_allclose(morphology["1"].y, np.arange(5) * step + step / 2) - assert_allclose(morphology["1"].end_y, np.arange(5) * step + step) - assert_allclose(morphology["1"].z, np.zeros(5) * um) - # section: cable['2'] - step = 10 / np.sqrt(2) * um - assert_allclose(morphology["2"].start_x, 100 * um + np.arange(5) * step) - if use_cylinders: - assert_allclose( - morphology["2"].x, 100 * um + np.arange(5) * step + step / 2 - ) - assert_allclose(morphology["2"].end_x, 100 * um + np.arange(5) * step + step) - assert_allclose(morphology["2"].start_y, -np.arange(5) * step) - if use_cylinders: - assert_allclose(morphology["2"].y, -(np.arange(5) * step + step / 2)) - assert_allclose(morphology["2"].end_y, -(np.arange(5) * step + step)) - if use_cylinders: - assert_allclose(morphology["2"].z, np.zeros(5) * um) - - -@pytest.mark.codegen_independent -def test_tree_soma_schematic(): - soma = Soma(diameter=30 * um) - soma.L = Section( - n=5, diameter=[8, 8, 6, 4, 2, 0] * um, length=np.ones(5) * 20 * um - ) # tapering truncated cones - soma.R = Cylinder(n=5, diameter=5 * um, length=50 * um) - - _check_tree_soma(soma) - - -@pytest.mark.codegen_independent -def test_tree_soma_coordinates(): - soma = Soma(diameter=30 * um, x=100 * um) - soma.L = Section( - n=5, - diameter=[8, 8, 6, 4, 2, 0] * um, - x=np.linspace(0, 100, 6) / np.sqrt(2) * um, - y=np.linspace(0, 100, 6) / np.sqrt(2) * um, - ) # tapering truncated cones - soma.R = Cylinder( - n=5, diameter=5 * um, x=[0, 50] * um / np.sqrt(2), y=[0, -50] * um / np.sqrt(2) - ) - - _check_tree_soma(soma, coordinates=True) - - -@pytest.mark.codegen_independent -def test_tree_soma_from_points(): - # The coordinates should be identical to the previous test - # fmt: on - points = [ 
# soma - (1, "soma", 100, 0, 0, 30, -1), - # soma.L - (2, "L", 100 + 20 / np.sqrt(2), 20 / np.sqrt(2), 0, 8, 1), - (3, "L", 100 + 40 / np.sqrt(2), 40 / np.sqrt(2), 0, 6, 2), - (4, "L", 100 + 60 / np.sqrt(2), 60 / np.sqrt(2), 0, 4, 3), - (5, "L", 100 + 80 / np.sqrt(2), 80 / np.sqrt(2), 0, 2, 4), - (6, "L", 100 + 100 / np.sqrt(2), 100 / np.sqrt(2), 0, 0, 5), - # soma.R - (7, "R", 100 + 10 / np.sqrt(2), -10 / np.sqrt(2), 0, 5, 1), - (8, "R", 100 + 20 / np.sqrt(2), -20 / np.sqrt(2), 0, 5, 7), - (9, "R", 100 + 30 / np.sqrt(2), -30 / np.sqrt(2), 0, 5, 8), - (10, "R", 100 + 40 / np.sqrt(2), -40 / np.sqrt(2), 0, 5, 9), - (11, "R", 100 + 50 / np.sqrt(2), -50 / np.sqrt(2), 0, 5, 10), - ] - # fmt: on - cable = Morphology.from_points(points) - _check_tree_soma(cable, coordinates=True, use_cylinders=False) - - -@pytest.mark.codegen_independent -def test_tree_soma_from_points_3_point_soma(): - # The coordinates should be identical to the previous test - # fmt: off - points = [ # soma - (1, 'soma', 100, 0, 0, 30, -1), - (2, 'soma', 100, 15, 0, 30, 1), - (3, 'soma', 100, -15, 0, 30, 1), - # soma.L - (4, 'L' , 100+20/np.sqrt(2), 20/np.sqrt(2), 0, 8 , 1), - (5, 'L' , 100+40/np.sqrt(2), 40/np.sqrt(2), 0, 6 , 4), - (6, 'L' , 100+60/np.sqrt(2), 60/np.sqrt(2), 0, 4 , 5), - (7, 'L' , 100+80/np.sqrt(2), 80/np.sqrt(2), 0, 2 , 6), - (8, 'L' , 100+100/np.sqrt(2), 100/np.sqrt(2), 0, 0 , 7), - # soma.R - (9, 'R' , 100+10/np.sqrt(2), -10/np.sqrt(2), 0, 5 , 1), - (10, 'R' , 100+20/np.sqrt(2), -20/np.sqrt(2), 0, 5 , 9), - (11, 'R' , 100+30/np.sqrt(2), -30/np.sqrt(2), 0, 5 , 10), - (12, 'R' , 100+40/np.sqrt(2), -40/np.sqrt(2), 0, 5 , 11), - (13, 'R' , 100+50/np.sqrt(2), -50/np.sqrt(2), 0, 5 , 12), - ] - # fmt: on - cable = Morphology.from_points(points) - _check_tree_soma(cable, coordinates=True, use_cylinders=False) - # The first compartment should be a spherical soma! 
- assert isinstance(cable, Soma) - - -@pytest.mark.codegen_independent -def test_tree_soma_from_points_3_point_soma_incorrect(): - # Inconsistent diameters - # fmt: off - points = [ # soma - (1, 'soma', 100, 0, 0, 30, -1), - (2, 'soma', 100, 15, 0, 28, 1), - (3, 'soma', 100, -15, 0, 30, 1), - # soma.L - (4, 'L' , 100+20/np.sqrt(2), 20/np.sqrt(2), 0, 8 , 1), - (5, 'L' , 100+40/np.sqrt(2), 40/np.sqrt(2), 0, 6 , 4), - (6, 'L' , 100+60/np.sqrt(2), 60/np.sqrt(2), 0, 4 , 5), - (7, 'L' , 100+80/np.sqrt(2), 80/np.sqrt(2), 0, 2 , 6), - (8, 'L' , 100+100/np.sqrt(2), 100/np.sqrt(2), 0, 0 , 7) - ] - # fmt: on - with pytest.raises(ValueError): - Morphology.from_points(points) - - # Inconsistent coordinates - # fmt: off - points = [ # soma - (1, 'soma', 100, 0, 0, 30, -1), - (2, 'soma', 100, 15, 0, 30, 1), - (3, 'soma', 100, -16, 0, 30, 1), - # soma.L - (4, 'L', 100 + 20 / np.sqrt(2), 20 / np.sqrt(2), 0, 8, 1), - (5, 'L', 100 + 40 / np.sqrt(2), 40 / np.sqrt(2), 0, 6, 4), - (6, 'L', 100 + 60 / np.sqrt(2), 60 / np.sqrt(2), 0, 4, 5), - (7, 'L', 100 + 80 / np.sqrt(2), 80 / np.sqrt(2), 0, 2, 6), - (8, 'L', 100 + 100 / np.sqrt(2), 100 / np.sqrt(2), 0, 0, 7) - ] - # fmt: on - with pytest.raises(ValueError): - Morphology.from_points(points) - - -@pytest.mark.codegen_independent -def test_tree_soma_from_points_somas_not_at_start(): - # A single soma in the middle (ununusal but possible) - points = [ # dendrite - (1, "dend", 100, 0, 0, 10, -1), - (2, "dend", 100, 0, 0, 10, 1), - (3, "dend", 100, 0, 0, 10, 2), - (4, "soma", 100, 0, 0, 30, 3), - (5, "dend2", 130, 0, 0, 10, 4), - (6, "dend2", 160, 0, 0, 10, 5), - ] - morpho = Morphology.from_points(points) - assert morpho.total_sections == 3 - assert morpho.total_compartments == 5 - - # Several somata (probably an error) - points = [ # dendrite - (1, "dend", 0, 0, 0, 10, -1), - (2, "dend", 30, 0, 0, 10, 1), - (3, "dend", 60, 0, 0, 10, 2), - (4, "soma", 90, 0, 0, 30, 3), - (5, "dend2", 120, 70, 0, 10, 4), - (6, "dend2", 150, 70, 0, 10, 5), - (7, "soma", 180, 40, 0, 30, 6), - ] - with catch_logs() as logs: - morpho = Morphology.from_points(points) - assert len(logs) == 1 - assert logs[0][1].endswith("soma_compartments") - assert morpho.total_sections == 4 - assert morpho.total_compartments == 6 - - -@pytest.mark.codegen_independent -def test_tree_soma_from_swc(): - swc_content = """ -# Test file -1 1 100 0 0 15 -1 -2 2 114.14213562373095 14.142135623730949 0 4 1 -3 2 128.2842712474619 28.284271247461898 0 3 2 -4 2 142.42640687119285 42.426406871192846 0 2 3 -5 2 156.5685424949238 56.568542494923797 0 1 4 -6 2 170.71067811865476 70.710678118654741 0 0 5 -7 2 107.07106781186548 -7.0710678118654746 0 2.5 1 -8 2 114.14213562373095 -14.142135623730949 0 2.5 7 -9 2 121.21320343559643 -21.213203435596423 0 2.5 8 -10 2 128.2842712474619 -28.284271247461898 0 2.5 9 -11 2 135.35533905932738 -35.35533905932737 0 2.5 10 -""" - tmp_filename = tempfile.mktemp("cable_morphology.swc") - with open(tmp_filename, "w") as f: - f.write(swc_content) - soma = Morphology.from_file(tmp_filename) - os.remove(tmp_filename) - _check_tree_soma(soma, coordinates=True, use_cylinders=False) - - -@pytest.mark.codegen_independent -def test_tree_soma_from_swc_3_point_soma(): - swc_content = """ -# Test file -1 1 100 0 0 15 -1 -2 1 100 15 0 15 1 -3 1 100 -15 0 15 1 -4 2 114.14213562373095 14.142135623730949 0 4 1 -5 2 128.2842712474619 28.284271247461898 0 3 4 -6 2 142.42640687119285 42.426406871192846 0 2 5 -7 2 156.5685424949238 56.568542494923797 0 1 6 -8 2 170.71067811865476 70.710678118654741 0 0 7 
-9 2 107.07106781186548 -7.0710678118654746 0 2.5 1 -10 2 114.14213562373095 -14.142135623730949 0 2.5 9 -11 2 121.21320343559643 -21.213203435596423 0 2.5 10 -12 2 128.2842712474619 -28.284271247461898 0 2.5 11 -13 2 135.35533905932738 -35.35533905932737 0 2.5 12 -""" - tmp_filename = tempfile.mktemp("cable_morphology.swc") - with open(tmp_filename, "w") as f: - f.write(swc_content) - soma = Morphology.from_file(tmp_filename) - os.remove(tmp_filename) - _check_tree_soma(soma, coordinates=True, use_cylinders=False) - - -@pytest.mark.codegen_independent -def test_tree_soma_from_swc_3_point_soma_incorrect_points(): - swc_content = """ -# Test file -1 1 100 0 0 15 -1 -2 1 100 15 0 15 1 -3 1 100 -10 0 15 1 -4 2 114.14213562373095 14.142135623730949 0 4 1 -5 2 128.2842712474619 28.284271247461898 0 3 4 -6 2 142.42640687119285 42.426406871192846 0 2 5 -7 2 156.5685424949238 56.568542494923797 0 1 6 -8 2 170.71067811865476 70.710678118654741 0 0 7 -9 2 107.07106781186548 -7.0710678118654746 0 2.5 1 -10 2 114.14213562373095 -14.142135623730949 0 2.5 9 -11 2 121.21320343559643 -21.213203435596423 0 2.5 10 -12 2 128.2842712474619 -28.284271247461898 0 2.5 11 -13 2 135.35533905932738 -35.35533905932737 0 2.5 12 -""" - tmp_filename = tempfile.mktemp("cable_morphology.swc") - with open(tmp_filename, "w") as f: - f.write(swc_content) - with pytest.raises(ValueError, match=r".*radius is 15.000um.*"): - Morphology.from_file(tmp_filename) - os.remove(tmp_filename) - - -@pytest.mark.codegen_independent -def test_tree_soma_from_swc_3_point_soma_incorrect_radius(): - swc_content = """ -# Test file -1 1 100 0 0 15 -1 -2 1 100 15 0 10 1 -3 1 100 -15 0 15 1 -4 2 114.14213562373095 14.142135623730949 0 4 1 -5 2 128.2842712474619 28.284271247461898 0 3 4 -6 2 142.42640687119285 42.426406871192846 0 2 5 -7 2 156.5685424949238 56.568542494923797 0 1 6 -8 2 170.71067811865476 70.710678118654741 0 0 7 -9 2 107.07106781186548 -7.0710678118654746 0 2.5 1 -10 2 114.14213562373095 -14.142135623730949 0 2.5 9 -11 2 121.21320343559643 -21.213203435596423 0 2.5 10 -12 2 128.2842712474619 -28.284271247461898 0 2.5 11 -13 2 135.35533905932738 -35.35533905932737 0 2.5 12 -""" - tmp_filename = tempfile.mktemp("cable_morphology.swc") - with open(tmp_filename, "w") as f: - f.write(swc_content) - with pytest.raises(ValueError, match=r".*not all the diameters are identical.*"): - Morphology.from_file(tmp_filename) - os.remove(tmp_filename) - - -@pytest.mark.codegen_independent -def test_construction_incorrect_arguments(): - ### Morphology - dummy_self = Soma(10 * um) # To allow testing of Morphology.__init__ - with pytest.raises(TypeError): - Morphology.__init__(dummy_self, n=1.5) - with pytest.raises(ValueError): - Morphology.__init__(dummy_self, n=0) - with pytest.raises(TypeError): - Morphology.__init__(dummy_self, "filename.swc") - - ### Soma - with pytest.raises(DimensionMismatchError): - Soma(10) - with pytest.raises(TypeError): - Soma([10, 20] * um) - with pytest.raises(TypeError): - Soma(x=[10, 20] * um) - with pytest.raises(TypeError): - Soma(y=[10, 20] * um) - with pytest.raises(TypeError): - Soma(z=[10, 20] * um) - with pytest.raises(DimensionMismatchError): - Soma(x=10) - with pytest.raises(DimensionMismatchError): - Soma(y=10) - with pytest.raises(DimensionMismatchError): - Soma(z=10) - - ### Cylinder - # Diameter can only be single value - with pytest.raises(TypeError): - Cylinder(n=3, diameter=[10, 20] * um, length=100 * um) - with pytest.raises(TypeError): - Cylinder(n=3, diameter=[10, 20, 30] * um, length=100 * um) 
- with pytest.raises(TypeError): - Cylinder(n=3, diameter=np.ones(3, 2) * um, length=100 * um) - # Length can only be single value - with pytest.raises(TypeError): - Cylinder(n=3, diameter=10 * um, length=[10, 20] * um) - with pytest.raises(TypeError): - Cylinder(n=3, diameter=10 * um, length=[10, 20, 30] * um) - with pytest.raises(TypeError): - Cylinder(n=3, diameter=10 * um, length=np.ones(3, 2) * um) - # Coordinates have to be two values - with pytest.raises(TypeError): - Cylinder(n=3, diameter=10 * um, x=[10] * um) - with pytest.raises(TypeError): - Cylinder(n=3, diameter=10 * um, x=[10, 20, 30] * um) - with pytest.raises(TypeError): - Cylinder(n=3, diameter=10 * um, y=[10] * um) - with pytest.raises(TypeError): - Cylinder(n=3, diameter=10 * um, y=[10, 20, 30] * um) - with pytest.raises(TypeError): - Cylinder(n=3, diameter=10 * um, z=[10] * um) - with pytest.raises(TypeError): - Cylinder(n=3, diameter=10 * um, z=[10, 20, 30] * um) - # Need either coordinates or lengths - with pytest.raises(TypeError): - Cylinder(n=3, diameter=10 * um) - # But not both - with pytest.raises(TypeError): - Cylinder(n=3, diameter=10 * um, length=30 * um, x=[0, 30] * um) - - ### Section - # Diameter have to be n+1 values - with pytest.raises(TypeError): - Section(n=3, diameter=10 * um, length=np.ones(3) * 10 * um) - with pytest.raises(TypeError): - Section(n=3, diameter=[10, 20, 30] * um, length=np.ones(3) * 10 * um) - with pytest.raises(TypeError): - Section(n=3, diameter=np.ones(4, 2) * um, length=np.ones(3) * 10 * um) - # Length have to be n values - with pytest.raises(TypeError): - Section(n=3, diameter=np.ones(4) * 10 * um, length=10 * um) - with pytest.raises(TypeError): - Section(n=3, diameter=np.ones(4) * 10 * um, length=[10, 20] * um) - with pytest.raises(TypeError): - Section(n=3, diameter=np.ones(4) * 10 * um, length=np.ones(3, 2) * um) - # Coordinates have to be n+1 values - with pytest.raises(TypeError): - Section(n=3, diameter=np.ones(4) * 10 * um, x=10 * um) - with pytest.raises(TypeError): - Section(n=3, diameter=np.ones(4) * 10 * um, x=[10, 20, 30] * um) - with pytest.raises(TypeError): - Section(n=3, diameter=np.ones(4) * 10 * um, y=10 * um) - with pytest.raises(TypeError): - Section(n=3, diameter=np.ones(4) * 10 * um, y=[10, 20, 30] * um) - with pytest.raises(TypeError): - Section(n=3, diameter=np.ones(4) * 10 * um, z=10 * um) - with pytest.raises(TypeError): - Section(n=3, diameter=np.ones(4) * 10 * um, z=[10, 20, 30] * um) - # Need either coordinates or lengths - with pytest.raises(TypeError): - Section(n=3, diameter=np.ones(4) * 10 * um) - # But not both - with pytest.raises(TypeError): - Section( - n=3, - diameter=np.ones(4) * 10 * um, - length=[10, 20, 30] * um, - x=[0, 10, 20, 30] * um, - ) - - -@pytest.mark.codegen_independent -def test_from_points_long_chain(): - # in previous versions, this led to a recursion error - points = [(1, "soma", 0, 0, 0, 1, -1)] - for i in range(2, 10000): - points.append((i, "dend", 0 + i * 10, 0, 0, 5, i - 1)) - morph = Morphology.from_points(points) - assert ( - morph.total_compartments == 10000 - 1 - ) # compartments are in-between the points - assert morph.total_sections == 2 - - -@pytest.mark.codegen_independent -def test_from_points_minimal(): - points = [(1, "soma", 10, 20, 30, 30, -1)] - morph = Morphology.from_points(points) - assert morph.total_compartments == 1 - assert_allclose(morph.diameter, 30 * um) - assert_allclose(morph.x, 10 * um) - assert_allclose(morph.y, 20 * um) - assert_allclose(morph.z, 30 * um) - - 
-@pytest.mark.codegen_independent -def test_from_points_incorrect(): - # The coordinates should be identical to the previous test - # fmt: off - points = [ - (1, None, 0, 0, 0, 10, -1), - (2, None, 10, 0, 0, 10, 1), - (2, None, 20, 0, 0, 10, 2), - ] - points2 = [ - (1, None, 0, 0, 0, 10, -1), - (2, None, 10, 0, 0, 10, 1), - (3, None, 20, 0, 0, 10, 3), - ] - points3 = [ - (1, None, 0, 0, 0, 10, -1), - (2, None, 10, 0, 0, 10, 1), - (3, None, 20, 0, 0, 10, 4), - ] - points4 = [ - (1, 0, 0, 0, 10, -1), - (2, 10, 0, 0, 10, 1), - (3, 20, 0, 0, 10, 2), - ] - with pytest.raises(ValueError): - Morphology.from_points(points) - with pytest.raises(ValueError): - Morphology.from_points(points2) - with pytest.raises(ValueError): - Morphology.from_points(points3) - with pytest.raises(ValueError): - Morphology.from_points(points4) - - -@pytest.mark.codegen_independent -def test_subtree_deletion(): - soma = Soma(diameter=30 * um) - first_dendrite = Cylinder(n=5, diameter=5 * um, length=50 * um) - second_dendrite = Cylinder(n=5, diameter=5 * um, length=50 * um) - second_dendrite.L = Cylinder(n=5, diameter=5 * um, length=50 * um) - second_dendrite.R = Cylinder(n=5, diameter=5 * um, length=50 * um) - soma.dend1 = first_dendrite - soma.dend2 = second_dendrite - soma.dend3 = Cylinder(n=5, diameter=5 * um, length=50 * um) - soma.dend3.L = Cylinder(n=5, diameter=5 * um, length=50 * um) - soma.dend3.L.L = Cylinder(n=5, diameter=5 * um, length=50 * um) - - assert soma.total_compartments == 36 - - del soma.dend1 - assert soma.total_compartments == 31 - with pytest.raises(AttributeError): - soma.dend1 - with pytest.raises(AttributeError): - delattr(soma, "dend1") - with pytest.raises(AttributeError): - soma.__delitem__("dend1") - assert first_dendrite not in soma.children - - del soma["dend2"] - assert soma.total_compartments == 16 - with pytest.raises(AttributeError): - soma.dend2 - assert second_dendrite not in soma.children - - del soma.dend3.LL - assert soma.total_compartments == 11 - with pytest.raises(AttributeError): - soma.dend3.LL - with pytest.raises(AttributeError): - soma.dend3.L.L - - -@pytest.mark.codegen_independent -def test_subgroup_indices(): - morpho = Soma(diameter=30 * um) - morpho.L = Cylinder(length=10 * um, diameter=1 * um, n=10) - morpho.LL = Cylinder(length=5 * um, diameter=2 * um, n=5) - morpho.right = Cylinder(length=3 * um, diameter=1 * um, n=7) - - assert_equal(morpho.LL.indices[:], [11, 12, 13, 14, 15]) - assert_equal(morpho.L.indices[3 * um : 5 * um], [4, 5]) - assert_equal( - morpho.L.indices[3 * um : 5 * um], morpho.L[3 * um : 5 * um].indices[:] - ) - assert_equal(morpho.L.indices[: 5 * um], [1, 2, 3, 4, 5]) - assert_equal(morpho.L.indices[3 * um :], [4, 5, 6, 7, 8, 9, 10]) - assert_equal(morpho.L.indices[3.5 * um], 4) - assert_equal(morpho.L.indices[3 * um], 4) - assert_equal(morpho.L.indices[3.9 * um], 4) - assert_equal(morpho.L.indices[3], 4) - assert_equal(morpho.L.indices[-1], 10) - assert_equal(morpho.L.indices[3:5], [4, 5]) - assert_equal(morpho.L.indices[3:], [4, 5, 6, 7, 8, 9, 10]) - assert_equal(morpho.L.indices[:5], [1, 2, 3, 4, 5]) - - -@pytest.mark.codegen_independent -def test_subgroup_attributes(): - morpho = Soma(diameter=30 * um) - morpho.L = Cylinder(length=10 * um, diameter=1 * um, n=10) - morpho.LL = Cylinder(x=[0, 5] * um, diameter=2 * um, n=5) - morpho.right = Cylinder(length=3 * um, diameter=1 * um, n=7) - - # # Getting a single compartment by index - assert_allclose(morpho.L[2].area, morpho.L.area[2]) - assert_allclose(morpho.L[2].volume, morpho.L.volume[2]) - 
assert_allclose(morpho.L[2].length, morpho.L.length[2]) - assert_allclose(morpho.L[2].r_length_1, morpho.L.r_length_1[2]) - assert_allclose(morpho.L[2].r_length_2, morpho.L.r_length_2[2]) - assert_allclose(morpho.L[2].distance, morpho.L.distance[2]) - assert_allclose(morpho.L[2].diameter, morpho.L.diameter[2]) - assert morpho.L[2].x is None - assert morpho.L[2].y is None - assert morpho.L[2].z is None - assert morpho.L[2].start_x is None - assert morpho.L[2].start_y is None - assert morpho.L[2].start_z is None - assert morpho.L[2].end_x is None - assert morpho.L[2].end_y is None - assert morpho.L[2].end_z is None - - # # Getting a single compartment by position - assert_allclose(morpho.LL[1.5 * um].area, morpho.LL.area[1]) - assert_allclose(morpho.LL[1.5 * um].volume, morpho.LL.volume[1]) - assert_allclose(morpho.LL[1.5 * um].length, morpho.LL.length[1]) - assert_allclose(morpho.LL[1.5 * um].r_length_1, morpho.LL.r_length_1[1]) - assert_allclose(morpho.LL[1.5 * um].r_length_2, morpho.LL.r_length_2[1]) - assert_allclose(morpho.LL[1.5 * um].distance, morpho.LL.distance[1]) - assert_allclose(morpho.LL[1.5 * um].diameter, morpho.LL.diameter[1]) - assert_allclose(morpho.LL[1.5 * um].x, morpho.LL.x[1]) - assert_allclose(morpho.LL[1.5 * um].y, morpho.LL.y[1]) - assert_allclose(morpho.LL[1.5 * um].z, morpho.LL.z[1]) - assert_allclose(morpho.LL[1.5 * um].start_x, morpho.LL.start_x[1]) - assert_allclose(morpho.LL[1.5 * um].start_y, morpho.LL.start_y[1]) - assert_allclose(morpho.LL[1.5 * um].start_z, morpho.LL.start_z[1]) - assert_allclose(morpho.LL[1.5 * um].end_x, morpho.LL.end_x[1]) - assert_allclose(morpho.LL[1.5 * um].end_y, morpho.LL.end_y[1]) - assert_allclose(morpho.LL[1.5 * um].end_z, morpho.LL.end_z[1]) - - # Getting several compartments by indices - assert_allclose(morpho.right[3:6].area, morpho.right.area[3:6]) - assert_allclose(morpho.right[3:6].volume, morpho.right.volume[3:6]) - assert_allclose(morpho.right[3:6].length, morpho.right.length[3:6]) - assert_allclose(morpho.right[3:6].r_length_1, morpho.right.r_length_1[3:6]) - assert_allclose(morpho.right[3:6].r_length_2, morpho.right.r_length_2[3:6]) - assert_allclose(morpho.right[3:6].distance, morpho.right.distance[3:6]) - assert_allclose(morpho.right[3:6].diameter, morpho.right.diameter[3:6]) - assert morpho.right[3:6].x is None - assert morpho.right[3:6].y is None - assert morpho.right[3:6].z is None - assert morpho.right[3:6].start_x is None - assert morpho.right[3:6].start_y is None - assert morpho.right[3:6].start_z is None - assert morpho.right[3:6].end_x is None - assert morpho.right[3:6].end_y is None - assert morpho.right[3:6].end_z is None - - # Getting several compartments by position - assert_allclose(morpho.L[3 * um : 5 * um].distance, [3.5, 4.5] * um) - assert_allclose(morpho.L[3.5 * um : 4.5 * um].distance, [3.5, 4.5] * um) - - -@pytest.mark.codegen_independent -def test_subgroup_incorrect(): - # Incorrect indexing - morpho = Soma(diameter=30 * um) - morpho.L = Cylinder(length=10 * um, diameter=1 * um, n=10) - morpho.LL = Cylinder(length=5 * um, diameter=2 * um, n=5) - morpho.right = Cylinder(length=3 * um, diameter=1 * um, n=7) - - # Non-existing branch - with pytest.raises(AttributeError): - morpho.axon - - # Incorrect indexing - # wrong units or mixing units - with pytest.raises(TypeError): - morpho.L[3 * second : 5 * second] - with pytest.raises(TypeError): - morpho.L[3.4:5.3] - with pytest.raises(TypeError): - morpho.L[3 : 5 * um] - with pytest.raises(TypeError): - morpho.L[3 * um : 5] - # providing a step - with 
pytest.raises(TypeError): - morpho.L[3 * um : 5 * um : 2 * um] - with pytest.raises(TypeError): - morpho.L[3:5:2] - # incorrect type - with pytest.raises(TypeError): - morpho.L[object()] - # out of range - with pytest.raises(IndexError): - morpho.L[-10 * um] - with pytest.raises(IndexError): - morpho.L[15 * um] - with pytest.raises(IndexError): - morpho.L[10] - - -@pytest.mark.codegen_independent -def test_topology(): - soma = Soma(diameter=30 * um) - soma.L = Section( - n=5, diameter=[10, 8, 6, 4, 2, 0] * um, length=np.ones(5) * 20 * um - ) # tapering truncated cones - soma.R = Cylinder(n=10, diameter=5 * um, length=50 * um) - soma.R.left = Cylinder(n=10, diameter=2.5 * um, length=50 * um) - soma.R.right = Section( - n=5, diameter=[5, 4, 3, 2, 1, 0] * um, length=np.ones(5) * 10 * um - ) - - str_topology = str(soma.topology()) - lines = [l for l in str_topology.split("\n") if len(l.strip())] - assert len(lines) == 5 # one line for each section - for line, name in zip(lines, ["root", ".L", ".R", ".R.left", "R.right"]): - assert name in line - - -@pytest.mark.codegen_independent -def test_copy_section_soma(): - soma = Soma(diameter=30 * um) - soma_copy = soma.copy_section() - assert soma_copy.diameter[0] == 30 * um - assert soma_copy.x is None - assert soma_copy.y is None - assert soma_copy.z is None - assert soma_copy.type == "soma" - - soma = Soma(diameter=30 * um, x=5 * um, z=-10 * um) - soma_copy = soma.copy_section() - assert soma_copy.diameter[0] == 30 * um - assert_allclose(soma_copy.x[0], 5 * um) - assert_allclose(soma_copy.y[0], 0 * um) - assert_allclose(soma_copy.z[0], -10 * um) - assert soma_copy.type == "soma" - - -@pytest.mark.codegen_independent -def test_copy_section_section(): - # No coordinates - sec = Section( - diameter=[10, 5, 4, 3, 2, 1] * um, n=5, length=np.ones(5) * 10 * um, type="dend" - ) - sec_copy = sec.copy_section() - assert_allclose(sec_copy.start_diameter, sec.start_diameter) - assert_allclose(sec_copy.end_diameter, sec.end_diameter) - assert_allclose(sec_copy.length, sec.length) - assert sec_copy.n == sec.n - assert sec_copy.x is None - assert sec_copy.y is None - assert sec_copy.z is None - assert sec_copy.type == "dend" - - # With coordinates - sec = Section( - diameter=[10, 5, 4, 3, 2, 1] * um, - n=5, - x=[0, 1, 2, 3, 4, 5] * um, - y=[0, -1, -2, -3, -4, -5] * um, - ) - sec_copy = sec.copy_section() - assert_allclose(sec_copy.start_diameter, sec.start_diameter) - assert_allclose(sec_copy.end_diameter, sec.end_diameter) - assert_allclose(sec_copy.length, sec.length) - assert sec_copy.n == sec.n - assert_allclose(sec_copy.x, sec.x) - assert_allclose(sec_copy.y, sec.y) - assert_allclose(sec_copy.z, sec.z) - - assert sec_copy.type is None - - -@pytest.mark.codegen_independent -def test_copy_section_cylinder(): - # no coordinates - sec = Section( - diameter=[10, 5, 4, 3, 2, 1] * um, n=5, length=np.ones(5) * 20 * um, type="dend" - ) - sec_copy = sec.copy_section() - assert_allclose(sec_copy.end_diameter, sec.end_diameter) - assert_allclose(sec_copy.length, sec.length) - assert sec_copy.n == sec.n - assert sec_copy.x is None - assert sec_copy.y is None - assert sec_copy.z is None - assert sec_copy.type == "dend" - - # with coordinates - sec = Section( - diameter=[10, 5, 4, 3, 2, 1] * um, - n=5, - x=[0, 1, 2, 3, 4, 5] * um, - y=[0, -1, -2, -3, -4, -5] * um, - ) - sec_copy = sec.copy_section() - assert_allclose(sec_copy.end_diameter, sec.end_diameter) - assert_allclose(sec_copy.length, sec.length) - assert sec_copy.n == sec.n - assert_allclose(sec_copy.x, sec.x) - 
assert_allclose(sec_copy.y, sec.y) - assert_allclose(sec_copy.z, sec.z) - - assert sec_copy.type is None - - -def _check_length_coord_consistency(morph_with_coords): - if not isinstance(morph_with_coords, Soma): - vectors = np.diff(morph_with_coords.coordinates, axis=0) - calculated_length = np.sqrt(np.sum(vectors**2, axis=1)) - assert_allclose(calculated_length, morph_with_coords.length) - for child in morph_with_coords.children: - _check_length_coord_consistency(child) - - -@pytest.mark.codegen_independent -def test_generate_coordinates_deterministic(): - morph = Soma(diameter=30 * um) - morph.L = Section( - n=5, diameter=[10, 8, 6, 4, 2, 0] * um, length=np.ones(5) * 20 * um - ) # tapering truncated cones - morph.R = Cylinder(n=10, diameter=5 * um, length=50 * um) - morph.R.left = Cylinder(n=10, diameter=2.5 * um, length=50 * um) - morph.R.right = Section( - n=5, diameter=[5, 4, 3, 2, 1, 0] * um, length=np.ones(5) * 10 * um - ) - - morph_with_coords = morph.generate_coordinates() - assert morph_with_coords.total_compartments == morph.total_compartments - assert morph_with_coords.total_sections == morph.total_sections - - for new, old in [ - (morph_with_coords, morph), - (morph_with_coords.L, morph.L), - (morph_with_coords.R, morph.R), - (morph_with_coords.R.left, morph.R.left), - (morph_with_coords.R.right, morph.R.right), - ]: - assert new.n == old.n - assert_allclose(new.length, old.length) - assert_allclose(new.diameter, old.diameter) - # The morphology should be in the x/y plane - assert_equal(new.z, 0 * um) - - _check_length_coord_consistency(morph_with_coords) - - -@pytest.mark.codegen_independent -def test_generate_coordinates_random_sections(): - morph = Soma(diameter=30 * um) - morph.L = Section( - n=5, diameter=[10, 8, 6, 4, 2, 0] * um, length=np.ones(5) * 20 * um - ) # tapering truncated cones - morph.R = Cylinder(n=10, diameter=5 * um, length=50 * um) - morph.R.left = Cylinder(n=10, diameter=2.5 * um, length=50 * um) - morph.R.right = Section( - n=5, diameter=[5, 4, 3, 2, 1, 0] * um, length=np.ones(5) * 10 * um - ) - - morph_with_coords = morph.generate_coordinates(section_randomness=25) - assert morph_with_coords.total_compartments == morph.total_compartments - assert morph_with_coords.total_sections == morph.total_sections - - for new, old in [ - (morph_with_coords, morph), - (morph_with_coords.L, morph.L), - (morph_with_coords.R, morph.R), - (morph_with_coords.R.left, morph.R.left), - (morph_with_coords.R.right, morph.R.right), - ]: - assert new.n == old.n - assert_allclose(new.length, old.length) - assert_allclose(new.diameter, old.diameter) - - _check_length_coord_consistency(morph_with_coords) - - -@pytest.mark.codegen_independent -def test_generate_coordinates_random_compartments(): - morph = Soma(diameter=30 * um) - morph.L = Section( - n=5, diameter=[10, 8, 6, 4, 2, 0] * um, length=np.ones(5) * 20 * um - ) # tapering truncated cones - morph.R = Cylinder(n=10, diameter=5 * um, length=50 * um) - morph.R.left = Cylinder(n=10, diameter=2.5 * um, length=50 * um) - morph.R.right = Section( - n=5, diameter=[5, 4, 3, 2, 1, 0] * um, length=np.ones(5) * 10 * um - ) - - morph_with_coords = morph.generate_coordinates(compartment_randomness=15) - assert morph_with_coords.total_compartments == morph.total_compartments - assert morph_with_coords.total_sections == morph.total_sections - - for new, old in [ - (morph_with_coords, morph), - (morph_with_coords.L, morph.L), - (morph_with_coords.R, morph.R), - (morph_with_coords.R.left, morph.R.left), - (morph_with_coords.R.right, 
morph.R.right), - ]: - assert new.n == old.n - assert_allclose(new.length, old.length) - assert_allclose(new.diameter, old.diameter) - - _check_length_coord_consistency(morph_with_coords) - - -@pytest.mark.codegen_independent -def test_generate_coordinates_random_all(): - morph = Soma(diameter=30 * um) - morph.L = Section( - n=5, diameter=[10, 8, 6, 4, 2, 0] * um, length=np.ones(5) * 20 * um - ) # tapering truncated cones - morph.R = Cylinder(n=10, diameter=5 * um, length=50 * um) - morph.R.left = Cylinder(n=10, diameter=2.5 * um, length=50 * um) - morph.R.right = Section( - n=5, diameter=[5, 4, 3, 2, 1, 0] * um, length=np.ones(5) * 10 * um - ) - - morph_with_coords = morph.generate_coordinates( - section_randomness=25, compartment_randomness=15 - ) - assert morph_with_coords.total_compartments == morph.total_compartments - assert morph_with_coords.total_sections == morph.total_sections - - for new, old in [ - (morph_with_coords, morph), - (morph_with_coords.L, morph.L), - (morph_with_coords.R, morph.R), - (morph_with_coords.R.left, morph.R.left), - (morph_with_coords.R.right, morph.R.right), - ]: - assert new.n == old.n - assert_allclose(new.length, old.length) - assert_allclose(new.diameter, old.diameter) - - _check_length_coord_consistency(morph_with_coords) - - -@pytest.mark.codegen_independent -def test_generate_coordinates_no_overwrite(): - morph = Soma(diameter=30 * um) - morph.L = Section( - n=5, diameter=[10, 8, 6, 4, 2, 0] * um, length=np.ones(5) * 20 * um - ) # tapering truncated cones - morph.R = Cylinder(n=10, diameter=5 * um, length=50 * um) - morph.R.left = Cylinder(n=10, diameter=2.5 * um, length=50 * um) - morph.R.right = Section( - n=5, diameter=[5, 4, 3, 2, 1, 0] * um, length=np.ones(5) * 10 * um - ) - - morph_with_coords = morph.generate_coordinates(compartment_randomness=15) - # This should not change anything because the morphology already has coordinates! 
- morph_with_coords2 = morph_with_coords.generate_coordinates( - section_randomness=25, compartment_randomness=15 - ) - - for new, old in [ - (morph_with_coords2, morph_with_coords), - (morph_with_coords2.L, morph_with_coords.L), - (morph_with_coords2.R, morph_with_coords.R), - (morph_with_coords2.R.left, morph_with_coords.R.left), - (morph_with_coords2.R.right, morph_with_coords.R.right), - ]: - assert new.n == old.n - assert_allclose(new.length, old.length) - assert_allclose(new.diameter, old.diameter) - assert_allclose(new.x, old.x) - assert_allclose(new.y, old.y) - assert_allclose(new.z, old.z) - - -@pytest.mark.codegen_independent -def test_generate_coordinates_overwrite(): - morph = Soma(diameter=30 * um) - morph.L = Section( - n=5, diameter=[10, 8, 6, 4, 2, 0] * um, length=np.ones(5) * 20 * um - ) # tapering truncated cones - morph.R = Cylinder(n=10, diameter=5 * um, length=50 * um) - morph.R.left = Cylinder(n=10, diameter=2.5 * um, length=50 * um) - morph.R.right = Section( - n=5, diameter=[5, 4, 3, 2, 1, 0] * um, length=np.ones(5) * 10 * um - ) - - morph_with_coords = morph.generate_coordinates(compartment_randomness=15) - # This should change things since we explicitly ask for it - morph_with_coords2 = morph_with_coords.generate_coordinates( - section_randomness=25, compartment_randomness=15, overwrite_existing=True - ) - - for new, old in [ # ignore the root compartment - (morph_with_coords2.L, morph_with_coords.L), - (morph_with_coords2.R, morph_with_coords.R), - (morph_with_coords2.R.left, morph_with_coords.R.left), - (morph_with_coords2.R.right, morph_with_coords.R.right), - ]: - assert new.n == old.n - assert_allclose(new.length, old.length) - assert_allclose(new.diameter, old.diameter) - assert all(np.abs(new.x - old.x) > 0) - assert all(np.abs(new.y - old.y) > 0) - assert all(np.abs(new.z - old.z) > 0) - - _check_length_coord_consistency(morph_with_coords2) - - -@pytest.mark.codegen_independent -def test_generate_coordinates_mixed_overwrite(): - morph = Soma(diameter=30 * um) - morph.L = Section( - n=5, diameter=[10, 8, 6, 4, 2, 0] * um, length=np.ones(5) * 20 * um - ) # tapering truncated cones - morph.R = Cylinder(n=10, diameter=5 * um, length=50 * um) - morph_with_coords = morph.generate_coordinates( - section_randomness=25, compartment_randomness=15 - ) - # The following just returns a copy, as all coordinates are already - # specified - morph_copy = morph_with_coords.generate_coordinates() - - # Add new sections that do not yet have coordinates - morph_with_coords.R.left = Cylinder(n=10, diameter=2.5 * um, length=50 * um) - morph_with_coords.R.right = Section( - n=5, diameter=[5, 4, 3, 2, 1, 0] * um, length=np.ones(5) * 10 * um - ) - - # This should change things since we explicitly ask for it - morph_with_coords2 = morph_with_coords.generate_coordinates( - section_randomness=25, compartment_randomness=15 - ) - - for new, old in [ - (morph_with_coords2, morph_with_coords), - (morph_with_coords2.L, morph_with_coords.L), - (morph_with_coords2.R, morph_with_coords.R), - ]: - assert new.n == old.n - assert_allclose(new.length, old.length) - assert_allclose(new.diameter, old.diameter) - assert_allclose(new.x, old.x) - assert_allclose(new.y, old.y) - assert_allclose(new.z, old.z) - - assert morph_with_coords.R.left.x is None - assert len(morph_with_coords2.R.left.x) == morph_with_coords2.R.left.n - - _check_length_coord_consistency(morph_with_coords2) - - -@pytest.mark.codegen_independent -def test_str_repr(): - # A very basic test, make sure that the str/repr functions 
return - # something and do not raise an error - for morph in [ - Soma(diameter=30 * um), - Soma(diameter=30 * um, x=5 * um, y=10 * um), - Cylinder(n=5, diameter=10 * um, length=50 * um), - Cylinder(n=5, diameter=10 * um, x=[0, 50] * um), - Section( - n=5, diameter=[2.5, 5, 10, 5, 10, 5] * um, length=[10, 20, 5, 5, 10] * um - ), - Section( - n=5, diameter=[2.5, 5, 10, 5, 10, 5] * um, x=[0, 10, 30, 35, 40, 50] * um - ), - ]: - assert len(repr(morph)) > 0 - assert len(str(morph)) > 0 - morph = Soma(30 * um) - assert len(repr(morph.children)) > 0 - assert len(str(morph.children)) > 0 - morph.axon = Cylinder(1 * um, n=10, length=100 * um) - morph.dend = Cylinder(1 * um, n=10, length=50 * um) - assert len(repr(morph.children)) > 0 - assert len(str(morph.children)) > 0 - - -if __name__ == "__main__": - test_attributes_soma() - test_attributes_soma_coordinates() - test_attributes_cylinder() - test_attributes_cylinder_coordinates() - test_attributes_section() - test_attributes_section_coordinates_single() - test_attributes_section_coordinates_all() - test_tree_cables_schematic() - test_tree_cables_coordinates() - test_tree_cables_from_points() - test_tree_cables_from_swc() - test_tree_soma_schematic() - test_tree_soma_coordinates() - test_tree_soma_from_points() - test_tree_soma_from_points_3_point_soma() - test_tree_soma_from_points_3_point_soma_incorrect() - test_tree_soma_from_swc() - test_tree_soma_from_swc_3_point_soma() - test_construction_incorrect_arguments() - test_from_points_minimal() - test_from_points_incorrect() - test_subtree_deletion() - test_subgroup_indices() - test_subgroup_attributes() - test_subgroup_incorrect() - test_topology() - test_copy_section_soma() - test_copy_section_section() - test_copy_section_cylinder() - test_generate_coordinates_deterministic() - test_generate_coordinates_random_sections() - test_generate_coordinates_random_compartments() - test_generate_coordinates_random_all() - test_generate_coordinates_no_overwrite() - test_generate_coordinates_overwrite() - test_generate_coordinates_mixed_overwrite() - test_str_repr() diff --git a/brian2/tests/test_namespaces.py b/brian2/tests/test_namespaces.py deleted file mode 100644 index a00cfd245..000000000 --- a/brian2/tests/test_namespaces.py +++ /dev/null @@ -1,163 +0,0 @@ -import uuid - -import numpy -import pytest -import sympy - -from brian2.core.namespace import get_local_namespace -from brian2.core.variables import Constant -from brian2.groups.group import Group -from brian2.units import second, volt -from brian2.units.fundamentalunits import Unit -from brian2.units.stdunits import Hz, ms, mV -from brian2.units.unitsafefunctions import exp, log, sin -from brian2.utils.logger import catch_logs - - -# a simple Group for testing -class SimpleGroup(Group): - def __init__(self, variables, namespace=None): - self.variables = variables - # We use a unique name to get repeated warnings - Group.__init__( - self, - namespace=namespace, - name=f"simplegroup_{str(uuid.uuid4()).replace('-', '_')}", - ) - - -def _assert_one_warning(l): - assert len(l) == 1, f"expected one warning got {len(l)}" - assert l[0][0] == "WARNING", f"expected a WARNING, got {l[0][0]} instead" - - -@pytest.mark.codegen_independent -def test_default_content(): - """ - Test that the default namespace contains standard units and functions. 
- """ - group = Group() - # Units - assert group._resolve("second", {}).get_value_with_unit() == second - assert group._resolve("volt", {}).get_value_with_unit() == volt - assert group._resolve("ms", {}).get_value_with_unit() == ms - assert group._resolve("Hz", {}).get_value_with_unit() == Hz - assert group._resolve("mV", {}).get_value_with_unit() == mV - - # Functions - assert group._resolve("sin", {}).pyfunc == sin - assert group._resolve("log", {}).pyfunc == log - assert group._resolve("exp", {}).pyfunc == exp - - # Constants - assert group._resolve("e", {}).sympy_obj == sympy.E - assert group._resolve("e", {}).get_value() == numpy.e - assert group._resolve("pi", {}).sympy_obj == sympy.pi - assert group._resolve("pi", {}).get_value() == numpy.pi - assert group._resolve("inf", {}).sympy_obj == sympy.oo - assert group._resolve("inf", {}).get_value() == numpy.inf - - -@pytest.mark.codegen_independent -def test_explicit_namespace(): - """Test resolution with an explicitly provided namespace""" - group = SimpleGroup(namespace={"variable": 42}, variables={}) - - # Explicitly provided - with catch_logs() as l: - assert group._resolve("variable", {}).get_value_with_unit() == 42 - assert len(l) == 0 - - # Value from an explicit run namespace - with catch_logs() as l: - assert ( - group._resolve( - "yet_another_var", run_namespace={"yet_another_var": 17} - ).get_value_with_unit() - == 17 - ) - assert len(l) == 0 - - -@pytest.mark.codegen_independent -def test_errors(): - # No explicit namespace - group = SimpleGroup(namespace=None, variables={}) - with pytest.raises(KeyError): - group._resolve("nonexisting_variable", {}) - - # Empty explicit namespace - group = SimpleGroup(namespace={}, variables={}) - with pytest.raises(KeyError): - group._resolve("nonexisting_variable", {}) - - # Illegal name - with pytest.raises(ValueError): - SimpleGroup(namespace={"_illegal": 3.0}, variables={}) - - -@pytest.mark.codegen_independent -def test_resolution(): - # implicit namespace - tau = 10 * ms - group = SimpleGroup(namespace=None, variables={}) - namespace = get_local_namespace(level=0) - resolved = group.resolve_all( - ["tau", "ms"], namespace, user_identifiers=["tau", "ms"] - ) - assert len(resolved) == 2 - assert isinstance(resolved, dict) - assert resolved["tau"].get_value_with_unit() == tau - assert resolved["ms"].get_value_with_unit() == ms - del tau - - # explicit namespace - group = SimpleGroup(namespace={"tau": 20 * ms}, variables={}) - namespace = get_local_namespace(level=0) - resolved = group.resolve_all(["tau", "ms"], namespace, ["tau", "ms"]) - assert len(resolved) == 2 - assert resolved["tau"].get_value_with_unit() == 20 * ms - - -@pytest.mark.codegen_independent -def test_warning(): - from brian2.core.functions import DEFAULT_FUNCTIONS - from brian2.units.stdunits import cm as brian_cm - - # Name in external namespace clashes with unit/function name - exp = 23 - cm = 42 - group = SimpleGroup(namespace=None, variables={}) - namespace = get_local_namespace(level=0) - with catch_logs() as l: - resolved = group.resolve_all(["exp"], namespace)["exp"] - assert resolved == DEFAULT_FUNCTIONS["exp"] - assert len(l) == 1, f"got warnings: {str(l)}" - assert l[0][1].endswith(".resolution_conflict") - with catch_logs() as l: - resolved = group.resolve_all(["cm"], namespace)["cm"] - assert resolved.get_value_with_unit() == brian_cm - assert len(l) == 1, f"got warnings: {str(l)}" - assert l[0][1].endswith(".resolution_conflict") - - -@pytest.mark.codegen_independent -def test_warning_internal_variables(): 
- group1 = SimpleGroup(namespace=None, variables={"N": Constant("N", 5)}) - group2 = SimpleGroup(namespace=None, variables={"N": Constant("N", 7)}) - with catch_logs() as l: - group1.resolve_all(["N"], run_namespace={"N": 5}) # should not raise a warning - assert len(l) == 0, f"got warnings: {str(l)}" - with catch_logs() as l: - group2.resolve_all(["N"], run_namespace={"N": 5}) # should raise a warning - assert len(l) == 1, f"got warnings: {str(l)}" - assert l[0][1].endswith(".resolution_conflict") - - -if __name__ == "__main__": - test_default_content() - test_explicit_namespace() - test_errors() - test_resolution() - test_warning() - test_warning_internal_variables() diff --git a/brian2/tests/test_network.py b/brian2/tests/test_network.py deleted file mode 100644 index 57351c33e..000000000 --- a/brian2/tests/test_network.py +++ /dev/null @@ -1,1865 +0,0 @@ -import copy -import logging -import os -import tempfile -import uuid -import weakref - -import numpy as np -import pytest -from numpy.testing import assert_array_equal, assert_equal - -from brian2 import ( - BrianLogger, - BrianObject, - Clock, - Hz, - MagicError, - MagicNetwork, - Network, - NetworkOperation, - NeuronGroup, - PoissonGroup, - PopulationRateMonitor, - Quantity, - SpikeGeneratorGroup, - SpikeMonitor, - StateMonitor, - Synapses, - TimedArray, - collect, - defaultclock, - magic_network, - ms, - network_operation, - prefs, - profiling_summary, - restore, - run, - second, - start_scope, - stop, - store, - us, -) -from brian2.core.network import schedule_propagation_offset, scheduling_summary -from brian2.devices.device import ( - Device, - RuntimeDevice, - all_devices, - device, - get_device, - reinit_and_delete, - reset_device, - set_device, -) -from brian2.tests.utils import assert_allclose -from brian2.utils.logger import catch_logs - - -@pytest.mark.codegen_independent -def test_incorrect_network_use(): - """Test some wrong uses of `Network` and `MagicNetwork`""" - with pytest.raises(TypeError): - Network(name="mynet", anotherkwd="does not exist") - with pytest.raises(TypeError): - Network("not a BrianObject") - net = Network() - with pytest.raises(TypeError): - net.add("not a BrianObject") - with pytest.raises(ValueError): - MagicNetwork() - G = NeuronGroup(10, "v:1") - net.add(G) - with pytest.raises(TypeError): - net.remove(object()) - with pytest.raises(MagicError): - magic_network.add(G) - with pytest.raises(MagicError): - magic_network.remove(G) - - -@pytest.mark.codegen_independent -def test_network_contains(): - """ - Test `Network.__contains__`. 
- """ - G = NeuronGroup(1, "v:1", name="mygroup") - net = Network(G) - assert "mygroup" in net - assert "neurongroup" not in net - - -@pytest.mark.codegen_independent -def test_empty_network(): - # Check that an empty network functions correctly - net = Network() - net.run(1 * second) - - -class Counter(BrianObject): - add_to_magic_network = True - - def __init__(self, **kwds): - super().__init__(**kwds) - self.count = 0 - self.state = {"state": 0} - - def get_states(self, *args, **kwds): - return dict(self.state) - - def set_states(self, values, *args, **kwds): - for k, v in values.items(): - self.state[k] = v - - def run(self): - self.count += 1 - - -class CounterWithContained(Counter): - add_to_magic_network = True - - def __init__(self, **kwds): - super().__init__(**kwds) - self.sub_counter = Counter() - self.contained_objects.append(self.sub_counter) - - -@pytest.mark.codegen_independent -def test_network_single_object(): - # Check that a network with a single object functions correctly - x = Counter() - net = Network(x) - net.run(1 * ms) - assert_equal(x.count, 10) - - -@pytest.mark.codegen_independent -def test_network_two_objects(): - # Check that a network with two objects and the same clock function correctly - x = Counter(order=5) - y = Counter(order=6) - net = Network() - net.add([x, [y]]) # check that a funky way of adding objects work correctly - net.run(1 * ms) - assert_equal(len(net.objects), 2) - assert_equal(x.count, 10) - assert_equal(y.count, 10) - - -@pytest.mark.codegen_independent -def test_network_from_dict(): - # Check that a network from a dictionary works - x = Counter() - y = Counter() - d = dict(a=x, b=y) - net = Network() - net.add(d) - net.run(1 * ms) - assert_equal(len(net.objects), 2) - assert_equal(x.count, 10) - assert_equal(y.count, 10) - - -class NameLister(BrianObject): - add_to_magic_network = True - updates = [] - - def __init__(self, **kwds): - super().__init__(**kwds) - - def run(self): - NameLister.updates.append(self.name) - - -@pytest.mark.codegen_independent -def test_network_different_clocks(): - NameLister.updates[:] = [] - # Check that a network with two different clocks functions correctly - x = NameLister(name="x", dt=0.1 * ms, order=0) - y = NameLister(name="y", dt=1 * ms, order=1) - net = Network(x, y) - net.run(100 * second + defaultclock.dt, report="text") - updates = "".join(NameLister.updates)[2:] # ignore the first time step - assert updates == ("xxxxxxxxxxy" * 100000) - - -@pytest.mark.codegen_independent -def test_network_different_when(): - # Check that a network with different when attributes functions correctly - NameLister.updates[:] = [] - x = NameLister(name="x", when="start") - y = NameLister(name="y", when="end") - net = Network(x, y) - net.run(0.3 * ms) - assert_equal("".join(NameLister.updates), "xyxyxy") - - -@pytest.mark.codegen_independent -def test_network_default_schedule(): - net = Network() - assert net.schedule == [ - "start", - "groups", - "thresholds", - "synapses", - "resets", - "end", - ] - # Set the preference and check that the change is taken into account - prefs.core.network.default_schedule = list( - reversed(["start", "groups", "thresholds", "synapses", "resets", "end"]) - ) - assert net.schedule == list( - reversed(["start", "groups", "thresholds", "synapses", "resets", "end"]) - ) - - -@pytest.mark.codegen_independent -def test_network_schedule_change(): - # Check that a changed schedule is taken into account correctly - NameLister.updates[:] = [] - x = NameLister(name="x", when="thresholds") - y = 
NameLister(name="y", when="resets") - net = Network(x, y) - net.run(0.3 * ms) - assert_equal("".join(NameLister.updates), "xyxyxy") - NameLister.updates[:] = [] - net.schedule = ["start", "groups", "synapses", "resets", "thresholds", "end"] - net.run(0.3 * ms) - assert_equal("".join(NameLister.updates), "yxyxyx") - - -@pytest.mark.codegen_independent -def test_network_before_after_schedule(): - # Test that before... and after... slot names can be used - NameLister.updates[:] = [] - x = NameLister(name="x", when="before_resets") - y = NameLister(name="y", when="after_thresholds") - net = Network(x, y) - net.schedule = ["thresholds", "resets", "end"] - net.run(0.3 * ms) - assert_equal("".join(NameLister.updates), "yxyxyx") - - -@pytest.mark.codegen_independent -def test_network_custom_slots(): - # Check that custom slots can be inserted into the schedule - NameLister.updates[:] = [] - x = NameLister(name="x", when="thresholds") - y = NameLister(name="y", when="in_between") - z = NameLister(name="z", when="resets") - net = Network(x, y, z) - net.schedule = [ - "start", - "groups", - "thresholds", - "in_between", - "synapses", - "resets", - "end", - ] - net.run(0.3 * ms) - assert_equal("".join(NameLister.updates), "xyzxyzxyz") - - -@pytest.mark.codegen_independent -def test_network_incorrect_schedule(): - # Test that incorrect arguments provided to schedule raise errors - net = Network() - # net.schedule = object() - with pytest.raises(TypeError): - setattr(net, "schedule", object()) - # net.schedule = 1 - with pytest.raises(TypeError): - setattr(net, "schedule", 1) - # net.schedule = {'slot1', 'slot2'} - with pytest.raises(TypeError): - setattr(net, "schedule", {"slot1", "slot2"}) - # net.schedule = ['slot', 1] - with pytest.raises(TypeError): - setattr(net, "schedule", ["slot", 1]) - # net.schedule = ['start', 'after_start'] - with pytest.raises(ValueError): - setattr(net, "schedule", ["start", "after_start"]) - # net.schedule = ['before_start', 'start'] - with pytest.raises(ValueError): - setattr(net, "schedule", ["before_start", "start"]) - - -@pytest.mark.codegen_independent -def test_schedule_warning(): - previous_device = get_device() - from uuid import uuid4 - - # TestDevice1 supports arbitrary schedules, TestDevice2 does not - class TestDevice1(Device): - # These functions are needed during the setup of the defaultclock - def get_value(self, var): - return np.array([0.0001]) - - def add_array(self, var): - pass - - def init_with_zeros(self, var, dtype): - pass - - def fill_with_array(self, var, arr): - pass - - class TestDevice2(TestDevice1): - def __init__(self): - super().__init__() - self.network_schedule = [ - "start", - "groups", - "synapses", - "thresholds", - "resets", - "end", - ] - - # Unique names are important for getting the warnings again for multiple - # runs of the test suite - name1 = f"testdevice_{str(uuid4())}" - name2 = f"testdevice_{str(uuid4())}" - all_devices[name1] = TestDevice1() - all_devices[name2] = TestDevice2() - - set_device(name1) - assert schedule_propagation_offset() == 0 * ms - net = Network() - assert schedule_propagation_offset(net) == 0 * ms - - # Any schedule should work - net.schedule = list(reversed(net.schedule)) - with catch_logs() as l: - net.run(0 * ms) - assert len(l) == 0, "did not expect a warning" - - assert schedule_propagation_offset(net) == defaultclock.dt - - set_device(name2) - assert schedule_propagation_offset() == defaultclock.dt - - # Using the correct schedule should work - net.schedule = ["start", "groups", "synapses", 
"thresholds", "resets", "end"] - with catch_logs() as l: - net.run(0 * ms) - assert len(l) == 0, "did not expect a warning" - assert schedule_propagation_offset(net) == defaultclock.dt - - # Using another (e.g. the default) schedule should raise a warning - net.schedule = None - with catch_logs() as l: - net.run(0 * ms) - assert len(l) == 1 and l[0][1].endswith("schedule_conflict") - reset_device(previous_device) - - -@pytest.mark.codegen_independent -def test_scheduling_summary_magic(): - basename = f"name{str(uuid.uuid4()).replace('-', '_')}" - group = NeuronGroup( - 10, "dv/dt = -v/(10*ms) : 1", threshold="v>1", reset="v=1", name=basename - ) - group.run_regularly("v = rand()", dt=defaultclock.dt * 10, when="end") - state_mon = StateMonitor(group, "v", record=True, name=f"{basename}_sm") - inactive_state_mon = StateMonitor( - group, "v", record=True, name=f"{basename}_sm_ia", when="after_end" - ) - inactive_state_mon.active = False - summary_before = scheduling_summary() - - assert [entry.name for entry in summary_before.entries] == [ - f"{basename}_sm", - f"{basename}_stateupdater", - f"{basename}_spike_thresholder", - f"{basename}_spike_resetter", - f"{basename}_run_regularly", - f"{basename}_sm_ia", - ] - assert [entry.when for entry in summary_before.entries] == [ - "start", - "groups", - "thresholds", - "resets", - "end", - "after_end", - ] - assert [entry.dt for entry in summary_before.entries] == [ - defaultclock.dt, - defaultclock.dt, - defaultclock.dt, - defaultclock.dt, - defaultclock.dt * 10, - defaultclock.dt, - ] - assert [entry.active for entry in summary_before.entries] == [ - True, - True, - True, - True, - True, - False, - ] - assert len(str(summary_before)) - assert len(summary_before._repr_html_()) - run(defaultclock.dt) - summary_after = scheduling_summary() - assert str(summary_after) == str(summary_before) - assert summary_after._repr_html_() == summary_before._repr_html_() - - -@pytest.mark.codegen_independent -def test_scheduling_summary(): - basename = f"name{str(uuid.uuid4()).replace('-', '_')}" - group = NeuronGroup( - 10, "dv/dt = -v/(10*ms) : 1", threshold="v>1", reset="v=1", name=basename - ) - group.run_regularly("v = rand()", dt=defaultclock.dt * 10, when="end") - state_mon = StateMonitor(group, "v", record=True, name=f"{basename}_sm") - inactive_state_mon = StateMonitor( - group, "v", record=True, name=f"{basename}_sm_ia", when="after_end" - ) - inactive_state_mon.active = False - - @network_operation(name=f"{basename}_net_op", when="before_end") - def foo(): - pass - - net = Network(group, state_mon, inactive_state_mon, foo) - summary_before = scheduling_summary(net) - assert [entry.name for entry in summary_before.entries] == [ - f"{basename}_sm", - f"{basename}_stateupdater", - f"{basename}_spike_thresholder", - f"{basename}_spike_resetter", - f"{basename}_net_op", - f"{basename}_run_regularly", - f"{basename}_sm_ia", - ] - assert [entry.when for entry in summary_before.entries] == [ - "start", - "groups", - "thresholds", - "resets", - "before_end", - "end", - "after_end", - ] - assert [entry.dt for entry in summary_before.entries] == [ - defaultclock.dt, - defaultclock.dt, - defaultclock.dt, - defaultclock.dt, - defaultclock.dt, - defaultclock.dt * 10, - defaultclock.dt, - ] - assert [entry.active for entry in summary_before.entries] == [ - True, - True, - True, - True, - True, - True, - False, - ] - assert len(str(summary_before)) - assert len(summary_before._repr_html_()) - run(defaultclock.dt) - summary_after = scheduling_summary(net) - assert 
str(summary_after) == str(summary_before) - assert summary_after._repr_html_() == summary_before._repr_html_() - - -class Preparer(BrianObject): - add_to_magic_network = True - - def __init__(self, **kwds): - super().__init__(**kwds) - self.did_reinit = False - self.did_pre_run = False - self.did_post_run = False - - def reinit(self, level=0): - self.did_reinit = True - - def before_run(self, namespace=None, level=0): - self.did_pre_run = True - - def after_run(self): - self.did_post_run = True - - -@pytest.mark.codegen_independent -def test_magic_network(): - # test that magic network functions correctly - x = Counter() - y = Counter() - run(10 * ms) - assert_equal(x.count, 100) - assert_equal(y.count, 100) - - assert len(repr(magic_network)) # very basic test... - assert len(str(magic_network)) # very basic test... - - -class Stopper(BrianObject): - add_to_magic_network = True - - def __init__(self, stoptime, stopfunc, **kwds): - super().__init__(**kwds) - self.stoptime = stoptime - self.stopfunc = stopfunc - - def run(self): - self.stoptime -= 1 - if self.stoptime <= 0: - self.stopfunc() - - -@pytest.mark.codegen_independent -def test_network_stop(): - # test that Network.stop and global stop() work correctly - net = Network() - x = Stopper(10, net.stop) - net.add(x) - net.run(10 * ms) - assert_equal(defaultclock.t, 1 * ms) - - x = Stopper(10, stop) - net = Network(x) - net.run(10 * ms) - assert_equal(defaultclock.t, 1 * ms) - - -@pytest.mark.codegen_independent -def test_network_operations(): - # test NetworkOperation and network_operation - seq = [] - - def f1(): - seq.append("a") - - op1 = NetworkOperation(f1, when="start", order=1) - - @network_operation - def f2(): - seq.append("b") - - @network_operation(when="end", order=1) - def f3(): - seq.append("c") - - # In complex frameworks, network operations might be object methods that - # access some common data - class Container: - def __init__(self): - self.g1_data = "B" - self.g2_data = "C" - - def g1(self): - seq.append(self.g1_data) - - def g2(self): - seq.append(self.g2_data) - - c = Container() - c_op1 = NetworkOperation(c.g1) - c_op2 = NetworkOperation(c.g2, when="end", order=1) - net = Network(op1, f2, f3, c_op1, c_op2) - net.run(1 * ms) - - assert_equal("".join(seq), "bBacC" * 10) - - -@pytest.mark.codegen_independent -def test_incorrect_network_operations(): - # Network operations with more than one argument are not allowed - def func(x, y): - pass - - class Container: - def func(self, x, y): - pass - - c = Container() - - with pytest.raises(TypeError): - NetworkOperation(func) - with pytest.raises(TypeError): - NetworkOperation(c.func) - - # Incorrect use of @network_operation -- it does not work on an instance - # method - try: - - class Container: - @network_operation - def func(self): - pass - - raise AssertionError("expected a TypeError") - except TypeError: - pass # this is what we expected - - -@pytest.mark.codegen_independent -def test_network_operations_name(): - # test NetworkOperation name input - seq = [] - - def f1(): - seq.append("a") - - def f2(): - seq.append("b") - - def x(): - pass - - op = NetworkOperation(lambda: x) - assert_equal(op.name, "networkoperation") - - op0 = NetworkOperation(lambda: x, name="named_network") - assert_equal(op0.name, "named_network") - - op1 = NetworkOperation(f1, name="networkoperation_1") - op2 = NetworkOperation(f1, name="networkoperation_3") - op3 = NetworkOperation(f2, name="networkoperation_2") - - net = Network(op1, op2, op3) - net.run(1 * ms) - - 
assert_equal("".join(seq), "aba" * 10) - - -@pytest.mark.codegen_independent -def test_network_active_flag(): - # test that the BrianObject.active flag is recognised by Network.run - x = Counter() - y = Counter() - y.active = False - run(1 * ms) - assert_equal(x.count, 10) - assert_equal(y.count, 0) - - -@pytest.mark.standalone_compatible -@pytest.mark.multiple_runs -def test_spikes_after_deactivating(): - # Make sure that a spike in the last time step gets cleared. See #1319 - always_spike = NeuronGroup(1, "", threshold="True", reset="") - spike_mon = SpikeMonitor(always_spike) - run(defaultclock.dt) - always_spike.active = False - run(defaultclock.dt) - device.build(direct_call=False, **device.build_options) - assert_equal(spike_mon.t[:], [0] * second) - - -@pytest.mark.codegen_independent -def test_network_t(): - # test that Network.t works as expected - x = Counter(dt=1 * ms) - y = Counter(dt=2 * ms) - net = Network(x, y) - net.run(4 * ms) - assert_equal(net.t, 4 * ms) - net.run(1 * ms) - assert_equal(net.t, 5 * ms) - assert_equal(x.count, 5) - assert_equal(y.count, 3) - net.run(0.5 * ms) # should only update x - assert_equal(net.t, 5.5 * ms) - assert_equal(x.count, 6) - assert_equal(y.count, 3) - net.run(0.5 * ms) # shouldn't do anything - assert_equal(net.t, 6 * ms) - assert_equal(x.count, 6) - assert_equal(y.count, 3) - net.run(0.5 * ms) # should update x and y - assert_equal(net.t, 6.5 * ms) - assert_equal(x.count, 7) - assert_equal(y.count, 4) - - del x, y, net - - # now test with magic run - x = Counter(dt=1 * ms) - y = Counter(dt=2 * ms) - run(4 * ms) - assert_equal(magic_network.t, 4 * ms) - assert_equal(x.count, 4) - assert_equal(y.count, 2) - run(4 * ms) - assert_equal(magic_network.t, 8 * ms) - assert_equal(x.count, 8) - assert_equal(y.count, 4) - run(1 * ms) - assert_equal(magic_network.t, 9 * ms) - assert_equal(x.count, 9) - assert_equal(y.count, 5) - - -@pytest.mark.codegen_independent -def test_incorrect_dt_defaultclock(): - defaultclock.dt = 0.5 * ms - G = NeuronGroup(1, "dv/dt = -v / (10*ms) : 1") - net = Network(G) - net.run(0.5 * ms) - defaultclock.dt = 1 * ms - with pytest.raises(ValueError): - net.run(0 * ms) - - -@pytest.mark.codegen_independent -def test_incorrect_dt_custom_clock(): - clock = Clock(dt=0.5 * ms) - G = NeuronGroup(1, "dv/dt = -v / (10*ms) : 1", clock=clock) - net = Network(G) - net.run(0.5 * ms) - clock.dt = 1 * ms - with pytest.raises(ValueError): - net.run(0 * ms) - - -@pytest.mark.codegen_independent -def test_network_remove(): - x = Counter() - y = Counter() - net = Network(x, y) - net.remove(y) - net.run(1 * ms) - assert_equal(x.count, 10) - assert_equal(y.count, 0) - # the relevance of this test is when we use weakref.proxy objects in - # Network.objects, we should be able to add and remove these from - # the Network just as much as the original objects - # TODO: Does this test make sense now that Network does not store weak - # references by default? 
- for obj in copy.copy(net.objects): - net.remove(obj) - net.run(1 * ms) - assert_equal(x.count, 10) - assert_equal(y.count, 0) - - -@pytest.mark.codegen_independent -def test_contained_objects(): - obj = CounterWithContained() - net = Network(obj) - # The contained object should not be stored explicitly - assert len(net.objects) == 1 - # It should be accessible via the network interface, though - assert len(net) == 2 - net.run(defaultclock.dt) - - # The contained object should be executed during the run - assert obj.count == 1 - assert obj.sub_counter.count == 1 - - # contained objects should be accessible via get_states/set_states - states = net.get_states() - assert len(states) == 2 - assert set(states.keys()) == {obj.name, obj.sub_counter.name} - assert set(states[obj.name].keys()) == {"state"} - assert set(states[obj.sub_counter.name].keys()) == {"state"} - net[obj.name].set_states({"state": 1}) - net[obj.sub_counter.name].set_states({"state": 2}) - - net.remove(obj) - assert len(net.objects) == 0 - assert len(net) == 0 - assert len(net.get_states()) == 0 - net.run(defaultclock.dt) - assert obj.count == 1 - assert obj.sub_counter.count == 1 - - -class NoninvalidatingCounter(Counter): - add_to_magic_network = True - invalidates_magic_network = False - - -@pytest.mark.codegen_independent -def test_invalid_magic_network(): - x = Counter() - run(1 * ms) - assert_equal(x.count, 10) - y = Counter() - try: - run(1 * ms) - raise AssertionError("Expected a MagicError") - except MagicError: - pass # this is expected - del x, y - x = Counter() - run(1 * ms) - y = NoninvalidatingCounter() - run(1 * ms) - assert_equal(x.count, 20) - assert_equal(y.count, 10) - del y - run(1 * ms) - assert_equal(x.count, 30) - del x - x = Counter() - run(1 * ms) - assert_equal(magic_network.t, 1 * ms) - del x - x = Counter() - y = Counter() - run(1 * ms) - assert_equal(x.count, 10) - assert_equal(y.count, 10) - - -@pytest.mark.codegen_independent -def test_multiple_networks_invalid(): - x = Counter() - net = Network(x) - net.run(1 * ms) - try: - run(1 * ms) - raise AssertionError("Expected a RuntimeError") - except RuntimeError: - pass # this is expected - - try: - net2 = Network(x) - raise AssertionError("Expected a RuntimeError") - except RuntimeError: - pass # this is expected - - -@pytest.mark.codegen_independent -def test_magic_weak_reference(): - """ - Test that holding a weak reference to an object does not make it get - simulated.""" - - G1 = NeuronGroup(1, "v:1") - - # this object should not be included - G2 = weakref.ref(NeuronGroup(1, "v:1")) - - with catch_logs(log_level=logging.DEBUG) as l: - run(1 * ms) - # Check the debug messages for the number of included objects - magic_objects = [ - msg[2] for msg in l if msg[1] == "brian2.core.magic.magic_objects" - ][0] - assert "2 objects" in magic_objects, f"Unexpected log message: {magic_objects}" - - -@pytest.mark.codegen_independent -def test_magic_unused_object(): - """Test that creating unused objects does not affect the magic system.""" - - def create_group(): - # Produce two objects but return only one - G1 = NeuronGroup(1, "v:1") # no Thresholder or Resetter - G2 = NeuronGroup(1, "v:1") # This object should be garbage collected - return G1 - - G = create_group() - with catch_logs(log_level=logging.DEBUG) as l: - run(1 * ms) - - # Check the debug messages for the number of included objects - magic_objects = [ - msg[2] for msg in l if msg[1] == "brian2.core.magic.magic_objects" - ][0] - assert "2 objects" in magic_objects, f"Unexpected log message: 
{magic_objects}" - - -@pytest.mark.codegen_independent -def test_network_access(): - x = Counter(name="counter") - net = Network(x) - assert len(net) == 1 - assert len(repr(net)) # very basic test... - assert len(str(net)) # very basic test... - - # accessing objects - assert net["counter"] is x - with pytest.raises(TypeError): - net[123] - with pytest.raises(TypeError): - net[1:3] - with pytest.raises(KeyError): - net["non-existing"] - - objects = [obj for obj in net] - assert set(objects) == set(net.objects) - - # deleting objects - del net["counter"] - with pytest.raises(TypeError): - net.__delitem__(123) - with pytest.raises(TypeError): - net.__delitem__(slice(1, 3)) - with pytest.raises(KeyError): - net.__delitem__("counter") - - -@pytest.mark.codegen_independent -def test_dependency_check(): - def create_net(): - G = NeuronGroup(10, "v: 1", threshold="False") - dependent_objects = [ - StateMonitor(G, "v", record=True), - SpikeMonitor(G), - PopulationRateMonitor(G), - Synapses(G, G, on_pre="v+=1"), - ] - return dependent_objects - - dependent_objects = create_net() - # Trying to simulate the monitors/synapses without the group should fail - for obj in dependent_objects: - with pytest.raises(ValueError): - Network(obj).run(0 * ms) - - # simulation with a magic network should work when we have an explicit - # reference to one of the objects, but the object should be inactive and - # we should get a warning - assert all(obj.active for obj in dependent_objects) - for obj in dependent_objects: # obj is our explicit reference - with catch_logs() as l: - run(0 * ms) - dependency_warnings = [ - msg[2] for msg in l if msg[1] == "brian2.core.magic.dependency_warning" - ] - assert len(dependency_warnings) == 1 - assert not obj.active - - -def test_loop(): - """ - Somewhat realistic test with a loop of magic networks - """ - - def run_simulation(): - G = NeuronGroup(10, "dv/dt = -v / (10*ms) : 1", reset="v=0", threshold="v>1") - G.v = np.linspace(0, 1, 10) - run(1 * ms) - # We return potentially problematic references to a VariableView - return G.v - - # First run - with catch_logs(log_level=logging.DEBUG) as l: - v = run_simulation() - assert v[0] == 0 and 0 < v[-1] < 1 - # Check the debug messages for the number of included objects - magic_objects = [ - msg[2] for msg in l if msg[1] == "brian2.core.magic.magic_objects" - ][0] - assert "4 objects" in magic_objects - - # Second run - with catch_logs(log_level=logging.DEBUG) as l: - v = run_simulation() - assert v[0] == 0 and 0 < v[-1] < 1 - # Check the debug messages for the number of included objects - magic_objects = [ - msg[2] for msg in l if msg[1] == "brian2.core.magic.magic_objects" - ][0] - assert "4 objects" in magic_objects - - -@pytest.mark.codegen_independent -def test_magic_collect(): - """ - Make sure all expected objects are collected in a magic network - """ - P = PoissonGroup(10, rates=100 * Hz) - G = NeuronGroup(10, "v:1", threshold="False") - S = Synapses(G, G, "") - - state_mon = StateMonitor(G, "v", record=True) - spike_mon = SpikeMonitor(G) - rate_mon = PopulationRateMonitor(G) - - objects = collect() - - assert len(objects) == 6, f"expected {int(6)} objects, got {len(objects)}" - - -import sys -from contextlib import contextmanager -from io import BytesIO, StringIO - - -@contextmanager -def captured_output(): - new_out, new_err = StringIO(), StringIO() - old_out, old_err = sys.stdout, sys.stderr - try: - sys.stdout, sys.stderr = new_out, new_err - yield sys.stdout, sys.stderr - finally: - sys.stdout, sys.stderr = old_out, 
old_err - - -@pytest.mark.codegen_independent -def test_progress_report(): - """ - Very basic test of progress reporting - """ - G = NeuronGroup(1, "") - net = Network(G) - - # No output - with captured_output() as (out, err): - net.run(1 * ms, report=None) - # There should be at least two lines of output - out, err = out.getvalue(), err.getvalue() - assert len(out) == 0 and len(err) == 0 - - with captured_output() as (out, err): - net.run(1 * ms) - # There should be at least two lines of output - out, err = out.getvalue(), err.getvalue() - assert len(out) == 0 and len(err) == 0 - - # Progress should go to stdout - with captured_output() as (out, err): - net.run(1 * ms, report="text") - # There should be at least two lines of output - out, err = out.getvalue(), err.getvalue() - assert len(out.split("\n")) >= 2 and len(err) == 0 - - with captured_output() as (out, err): - net.run(1 * ms, report="stdout") - # There should be at least two lines of output - out, err = out.getvalue(), err.getvalue() - assert len(out.split("\n")) >= 2 and len(err) == 0 - - # Progress should go to stderr - with captured_output() as (out, err): - net.run(1 * ms, report="stderr") - # There should be at least two lines of output - out, err = out.getvalue(), err.getvalue() - assert len(err.split("\n")) >= 2 and len(out) == 0 - - # Custom function - calls = [] - - def capture_progress(elapsed, complete, start, duration): - calls.append((elapsed, complete, start, duration)) - - with captured_output() as (out, err): - net.run(1 * ms, report=capture_progress) - out, err = out.getvalue(), err.getvalue() - - assert len(err) == 0 and len(out) == 0 - # There should be at least a call for the start and the end - assert len(calls) >= 2 and calls[0][1] == 0.0 and calls[-1][1] == 1.0 - - -@pytest.mark.codegen_independent -def test_progress_report_incorrect(): - """ - Test wrong use of the report option - """ - G = NeuronGroup(1, "") - net = Network(G) - with pytest.raises(ValueError): - net.run(1 * ms, report="unknown") - with pytest.raises(TypeError): - net.run(1 * ms, report=object()) - - -@pytest.mark.standalone_compatible -@pytest.mark.multiple_runs -def test_multiple_runs_report_standalone(): - group = NeuronGroup(1, "dv/dt = 1*Hz : 1") - run(1 * ms, report="text") - run(1 * ms) - device.build(direct_call=False, **device.build_options) - - -@pytest.mark.standalone_compatible -@pytest.mark.multiple_runs -def test_multiple_runs_report_standalone_2(): - group = NeuronGroup(1, "dv/dt = 1*Hz : 1") - run(1 * ms) - run(1 * ms, report="text") - device.build(direct_call=False, **device.build_options) - - -@pytest.mark.standalone_compatible -@pytest.mark.multiple_runs -def test_multiple_runs_report_standalone_3(): - group = NeuronGroup(1, "dv/dt = 1*Hz : 1") - run(1 * ms, report="text") - run(1 * ms, report="text") - device.build(direct_call=False, **device.build_options) - - -# This tests a specific limitation of the C++ standalone mode (cannot mix -# multiple report methods) -@pytest.mark.cpp_standalone -@pytest.mark.standalone_only -def test_multiple_runs_report_standalone_incorrect(): - set_device("cpp_standalone", build_on_run=False) - group = NeuronGroup(1, "dv/dt = 1*Hz : 1") - run(1 * ms, report="text") - with pytest.raises(NotImplementedError): - run(1 * ms, report="stderr") - - -@pytest.mark.codegen_independent -def test_store_restore(): - source = NeuronGroup( - 10, - """dv/dt = rates : 1 - rates : Hz""", - threshold="v>1", - reset="v=0", - ) - source.rates = "i*100*Hz" - target = NeuronGroup(10, "v:1") - synapses = 
Synapses(source, target, model="w:1", on_pre="v+=w") - synapses.connect(j="i") - synapses.w = "i*1.0" - synapses.delay = "i*ms" - state_mon = StateMonitor(target, "v", record=True) - spike_mon = SpikeMonitor(source) - net = Network(source, target, synapses, state_mon, spike_mon) - net.store() # default time slot - net.run(10 * ms) - net.store("second") - net.run(10 * ms) - v_values = state_mon.v[:, :] - spike_indices, spike_times = spike_mon.it_ - net.restore() # Go back to beginning - assert defaultclock.t == 0 * ms - assert net.t == 0 * ms - net.run(20 * ms) - assert_equal(v_values, state_mon.v[:, :]) - assert_equal(spike_indices, spike_mon.i[:]) - assert_equal(spike_times, spike_mon.t_[:]) - - # Go back to middle - net.restore("second") - assert defaultclock.t == 10 * ms - assert net.t == 10 * ms - net.run(10 * ms) - assert_equal(v_values, state_mon.v[:, :]) - assert_equal(spike_indices, spike_mon.i[:]) - assert_equal(spike_times, spike_mon.t_[:]) - - # Go back again (see github issue #681) - net.restore("second") - assert defaultclock.t == 10 * ms - assert net.t == 10 * ms - - -@pytest.mark.codegen_independent -def test_store_restore_to_file(): - filename = tempfile.mktemp(suffix="state", prefix="brian_test") - source = NeuronGroup( - 10, - """ - dv/dt = rates : 1 - rates : Hz - """, - threshold="v>1", - reset="v=0", - ) - source.rates = "i*100*Hz" - target = NeuronGroup(10, "v:1") - synapses = Synapses(source, target, model="w:1", on_pre="v+=w") - synapses.connect(j="i") - synapses.w = "i*1.0" - synapses.delay = "i*ms" - state_mon = StateMonitor(target, "v", record=True) - spike_mon = SpikeMonitor(source) - net = Network(source, target, synapses, state_mon, spike_mon) - net.store(filename=filename) # default time slot - net.run(10 * ms) - net.store("second", filename=filename) - net.run(10 * ms) - v_values = state_mon.v[:, :] - spike_indices, spike_times = spike_mon.it_ - - net.restore(filename=filename) # Go back to beginning - assert defaultclock.t == 0 * ms - assert net.t == 0 * ms - net.run(20 * ms) - assert_equal(v_values, state_mon.v[:, :]) - assert_equal(spike_indices, spike_mon.i[:]) - assert_equal(spike_times, spike_mon.t_[:]) - - # Go back to middle - net.restore("second", filename=filename) - assert defaultclock.t == 10 * ms - assert net.t == 10 * ms - net.run(10 * ms) - assert_equal(v_values, state_mon.v[:, :]) - assert_equal(spike_indices, spike_mon.i[:]) - assert_equal(spike_times, spike_mon.t_[:]) - try: - os.remove(filename) - except OSError: - pass - - -@pytest.mark.codegen_independent -def test_store_restore_to_file_new_objects(): - # A more realistic test where the objects are completely re-created - filename = tempfile.mktemp(suffix="state", prefix="brian_test") - - def create_net(): - # Use a bit of a complicated spike and connection pattern with - # heterogeneous delays - - # Note: it is important that all objects have the same name, this would - # be the case if we were running this in a new process but to not rely - # on garbage collection we will assign explicit names here - source = SpikeGeneratorGroup( - 5, - np.arange(5).repeat(3), - [3, 4, 1, 2, 3, 7, 5, 4, 1, 0, 5, 9, 7, 8, 9] * ms, - name="source", - ) - target = NeuronGroup(10, "v:1", name="target") - synapses = Synapses(source, target, model="w:1", on_pre="v+=w", name="synapses") - synapses.connect("j>=i") - synapses.w = "i*1.0 + j*2.0" - synapses.delay = "(5-i)*ms" - state_mon = StateMonitor(target, "v", record=True, name="statemonitor") - input_spikes = SpikeMonitor(source, name="input_spikes") - net 
= Network(source, target, synapses, state_mon, input_spikes) - return net - - net = create_net() - net.store(filename=filename) # default time slot - net.run(5 * ms) - net.store("second", filename=filename) - net.run(5 * ms) - input_spike_indices = np.array(net["input_spikes"].i) - input_spike_times = Quantity(net["input_spikes"].t, copy=True) - v_values_full_sim = Quantity(net["statemonitor"].v[:, :], copy=True) - - net = create_net() - net.restore(filename=filename) # Go back to beginning - net.run(10 * ms) - assert_equal(input_spike_indices, net["input_spikes"].i) - assert_equal(input_spike_times, net["input_spikes"].t) - assert_equal(v_values_full_sim, net["statemonitor"].v[:, :]) - - net = create_net() - net.restore("second", filename=filename) # Go back to middle - net.run(5 * ms) - assert_equal(input_spike_indices, net["input_spikes"].i) - assert_equal(input_spike_times, net["input_spikes"].t) - assert_equal(v_values_full_sim, net["statemonitor"].v[:, :]) - - try: - os.remove(filename) - except OSError: - pass - - -@pytest.mark.codegen_independent -def test_store_restore_to_file_differing_nets(): - # Check that the store/restore mechanism is not used with differing - # networks - filename = tempfile.mktemp(suffix="state", prefix="brian_test") - - source = SpikeGeneratorGroup( - 5, [0, 1, 2, 3, 4], [0, 1, 2, 3, 4] * ms, name="source_1" - ) - mon = SpikeMonitor(source, name="monitor") - net = Network(source, mon) - net.store(filename=filename) - - source_2 = SpikeGeneratorGroup( - 5, [0, 1, 2, 3, 4], [0, 1, 2, 3, 4] * ms, name="source_2" - ) - mon = SpikeMonitor(source_2, name="monitor") - net = Network(source_2, mon) - with pytest.raises(KeyError): - net.restore(filename=filename) - - net = Network(source) # Without the monitor - with pytest.raises(KeyError): - net.restore(filename=filename) - - -@pytest.mark.codegen_independent -def test_store_restore_magic(): - source = NeuronGroup( - 10, - """ - dv/dt = rates : 1 - rates : Hz - """, - threshold="v>1", - reset="v=0", - ) - source.rates = "i*100*Hz" - target = NeuronGroup(10, "v:1") - synapses = Synapses(source, target, model="w:1", on_pre="v+=w") - synapses.connect(j="i") - synapses.w = "i*1.0" - synapses.delay = "i*ms" - state_mon = StateMonitor(target, "v", record=True) - spike_mon = SpikeMonitor(source) - store() # default time slot - run(10 * ms) - store("second") - run(10 * ms) - v_values = state_mon.v[:, :] - spike_indices, spike_times = spike_mon.it_ - - restore() # Go back to beginning - assert magic_network.t == 0 * ms - run(20 * ms) - assert defaultclock.t == 20 * ms - assert_equal(v_values, state_mon.v[:, :]) - assert_equal(spike_indices, spike_mon.i[:]) - assert_equal(spike_times, spike_mon.t_[:]) - - # Go back to middle - restore("second") - assert magic_network.t == 10 * ms - run(10 * ms) - assert defaultclock.t == 20 * ms - assert_equal(v_values, state_mon.v[:, :]) - assert_equal(spike_indices, spike_mon.i[:]) - assert_equal(spike_times, spike_mon.t_[:]) - - -@pytest.mark.codegen_independent -def test_store_restore_magic_to_file(): - filename = tempfile.mktemp(suffix="state", prefix="brian_test") - source = NeuronGroup( - 10, - """ - dv/dt = rates : 1 - rates : Hz - """, - threshold="v>1", - reset="v=0", - ) - source.rates = "i*100*Hz" - target = NeuronGroup(10, "v:1") - synapses = Synapses(source, target, model="w:1", on_pre="v+=w") - synapses.connect(j="i") - synapses.w = "i*1.0" - synapses.delay = "i*ms" - state_mon = StateMonitor(target, "v", record=True) - spike_mon = SpikeMonitor(source) - 
store(filename=filename) # default time slot - run(10 * ms) - store("second", filename=filename) - run(10 * ms) - v_values = state_mon.v[:, :] - spike_indices, spike_times = spike_mon.it_ - - restore(filename=filename) # Go back to beginning - assert magic_network.t == 0 * ms - run(20 * ms) - assert defaultclock.t == 20 * ms - assert_equal(v_values, state_mon.v[:, :]) - assert_equal(spike_indices, spike_mon.i[:]) - assert_equal(spike_times, spike_mon.t_[:]) - - # Go back to middle - restore("second", filename=filename) - assert magic_network.t == 10 * ms - run(10 * ms) - assert defaultclock.t == 20 * ms - assert_equal(v_values, state_mon.v[:, :]) - assert_equal(spike_indices, spike_mon.i[:]) - assert_equal(spike_times, spike_mon.t_[:]) - try: - os.remove(filename) - except OSError: - pass - - - @pytest.mark.codegen_independent - def test_store_restore_spikequeue(): - # See github issue #938 - source = SpikeGeneratorGroup(1, [0], [0] * ms) - target = NeuronGroup(1, "v : 1") - conn = Synapses(source, target, on_pre="v += 1", delay=2 * defaultclock.dt) - conn.connect() - run(defaultclock.dt) # Spike is not yet delivered - store() - run(2 * defaultclock.dt) - assert target.v[0] == 1 - restore() - run(2 * defaultclock.dt) - assert target.v[0] == 1 - restore() - run(2 * defaultclock.dt) - assert target.v[0] == 1 - - - @pytest.mark.skipif( - not isinstance(get_device(), RuntimeDevice), - reason="Getting/setting random number state only supported for runtime device.", - ) - def test_restore_with_random_state(): - group = NeuronGroup(10, "dv/dt = -v/(10*ms) + (10*ms)**-0.5*xi : 1", method="euler") - group.v = "rand()" - mon = StateMonitor(group, "v", record=True) - store() - run(10 * ms) - old_v = np.array(group.v) - - restore() # Random state is not restored - run(10 * ms) - assert np.var(old_v - group.v) > 0 # very basic test for difference - - restore(restore_random_state=True) # Random state is restored - run(10 * ms) - assert_equal(old_v, group.v) - - - @pytest.mark.codegen_independent - def test_store_restore_restore_synapses(): - group = NeuronGroup(10, "x : 1", threshold="False", reset="", name="group") - synapses = Synapses(group, group, on_pre="x += 1", name="synapses") - synapses.connect(i=[1, 3, 5], j=[6, 4, 2]) - net = Network(group, synapses) - - tmp_file = tempfile.mktemp() - net.store(filename=tmp_file) - - # clear up - del net - del synapses - del group - - # Recreate the network without connecting the synapses - group = NeuronGroup(10, "x: 1", threshold="False", reset="", name="group") - synapses = Synapses(group, group, "", on_pre="x += 1", name="synapses") - net = Network(group, synapses) - try: - net.restore(filename=tmp_file) - - assert len(synapses) == 3 - assert_array_equal(synapses.i, [1, 3, 5]) - assert_array_equal(synapses.j, [6, 4, 2]) - - # Running the network should not raise an error, despite the lack - # of Synapses.connect - net.run(0 * ms) - finally: - os.remove(tmp_file) - - - @pytest.mark.codegen_independent - def test_defaultclock_dt_changes(): - BrianLogger.suppress_name("resolution_conflict") - for dt in [0.1 * ms, 0.01 * ms, 0.5 * ms, 1 * ms, 3.3 * ms]: - defaultclock.dt = dt - G = NeuronGroup(1, "v:1") - mon = StateMonitor(G, "v", record=True) - net = Network(G, mon) - net.run(2 * dt) - assert_equal(mon.t[:], [0, dt / ms] * ms) - - - @pytest.mark.standalone_compatible - @pytest.mark.multiple_runs - def test_dt_changes_between_runs(): - defaultclock.dt = 0.1 * ms - G = NeuronGroup(1, "v:1") - mon = StateMonitor(G, "v", record=True) - run(0.5 * ms) - defaultclock.dt = 0.5 
* ms - run(0.5 * ms) - defaultclock.dt = 0.1 * ms - run(0.5 * ms) - device.build(direct_call=False, **device.build_options) - assert len(mon.t[:]) == 5 + 1 + 5 - assert_allclose( - mon.t[:], [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 1.0, 1.1, 1.2, 1.3, 1.4] * ms - ) - - - @pytest.mark.codegen_independent - def test_dt_restore(): - defaultclock.dt = 0.5 * ms - G = NeuronGroup(1, "dv/dt = -v/(10*ms) : 1") - mon = StateMonitor(G, "v", record=True) - net = Network(G, mon) - net.store() - - net.run(1 * ms) - assert_equal(mon.t[:], [0, 0.5] * ms) - defaultclock.dt = 1 * ms - net.run(2 * ms) - assert_equal(mon.t[:], [0, 0.5, 1, 2] * ms) - net.restore() - assert_equal(mon.t[:], []) - net.run(1 * ms) - assert defaultclock.dt == 0.5 * ms - assert_equal(mon.t[:], [0, 0.5] * ms) - - - @pytest.mark.codegen_independent - def test_continuation(): - defaultclock.dt = 1 * ms - G = NeuronGroup(1, "dv/dt = -v / (10*ms) : 1") - G.v = 1 - mon = StateMonitor(G, "v", record=True) - net = Network(G, mon) - net.run(2 * ms) - - # Run the same simulation but with two runs that use sub-dt run times - G2 = NeuronGroup(1, "dv/dt = -v / (10*ms) : 1") - G2.v = 1 - mon2 = StateMonitor(G2, "v", record=True) - net2 = Network(G2, mon2) - net2.run(0.5 * ms) - net2.run(1.5 * ms) - - assert_equal(mon.t[:], mon2.t[:]) - assert_equal(mon.v[:], mon2.v[:]) - - - @pytest.mark.codegen_independent - def test_get_set_states(): - G = NeuronGroup(10, "v:1", name="a_neurongroup") - G.v = "i" - net = Network(G) - states1 = net.get_states() - states2 = magic_network.get_states() - states3 = net.get_states(read_only_variables=False) - assert ( - set(states1.keys()) - == set(states2.keys()) - == set(states3.keys()) - == {"a_neurongroup"} - ) - assert ( - set(states1["a_neurongroup"].keys()) - == set(states2["a_neurongroup"].keys()) - == {"i", "dt", "N", "t", "v", "t_in_timesteps"} - ) - assert set(states3["a_neurongroup"]) == {"v"} - - # Try re-setting the state - G.v = 0 - net.set_states(states3) - assert_equal(G.v, np.arange(10)) - - - @pytest.mark.codegen_independent - def test_multiple_runs_defaultclock(): - defaultclock.dt = 0.1 * ms - G = NeuronGroup(1, "dv/dt = -v / (10*ms) : 1") - net = Network(G) - net.run(0.5 * ms) - - # The new dt is not compatible with the previous time but it should not - # raise an error because we start a new simulation at time 0 - defaultclock.dt = 1 * ms - G = NeuronGroup(1, "dv/dt = -v / (10*ms) : 1") - net = Network(G) - net.run(1 * ms) - - - @pytest.mark.codegen_independent - def test_multiple_runs_defaultclock_incorrect(): - defaultclock.dt = 0.1 * ms - G = NeuronGroup(1, "dv/dt = -v / (10*ms) : 1") - net = Network(G) - net.run(0.5 * ms) - - # The new dt is not compatible with the previous time since we cannot - # continue at 0.5ms with a dt of 1ms - defaultclock.dt = 1 * ms - with pytest.raises(ValueError): - net.run(1 * ms) - - - @pytest.mark.standalone_compatible - def test_profile(): - G = NeuronGroup( - 10, - "dv/dt = -v / (10*ms) : 1", - threshold="v>1", - reset="v=0", - name="profile_test", - ) - G.v = 1.1 - net = Network(G) - net.run(1 * ms, profile=True) - # There should be four simulated CodeObjects, one for the group and one each - # for state update, threshold and reset + 1 for the clock - info = net.profiling_info - info_dict = dict(info) - # Standalone does not include the NeuronGroup object (which is not doing - # anything during the run) in the profiling information, while runtime - # does - assert 3 <= len(info) <= 4 - assert len(info) == 3 or "profile_test" in info_dict - for obj in ["stateupdater", 
"spike_thresholder", "spike_resetter"]: - name = f"profile_test_{obj}" - assert name in info_dict or f"{name}_codeobject" in info_dict - assert all([t >= 0 * second for _, t in info]) - - -@pytest.mark.standalone_compatible -def test_profile_off(): - G = NeuronGroup( - 10, - "dv/dt = -v / (10*ms) : 1", - threshold="v>1", - reset="v=0", - name="profile_test", - ) - net = Network(G) - net.run(1 * ms, profile=False) - with pytest.raises(ValueError): - profiling_summary(net) - - -@pytest.mark.codegen_independent -def test_profile_ipython_html(): - G = NeuronGroup( - 10, - "dv/dt = -v / (10*ms) : 1", - threshold="v>1", - reset="v=0", - name="profile_test", - ) - G.v = 1.1 - net = Network(G) - net.run(1 * ms, profile=True) - summary = profiling_summary(net) - assert len(summary._repr_html_()) - - -@pytest.mark.codegen_independent -def test_magic_scope(): - """ - Check that `start_scope` works as expected. - """ - G1 = NeuronGroup(1, "v:1", name="G1") - G2 = NeuronGroup(1, "v:1", name="G2") - objs1 = {obj.name for obj in collect()} - start_scope() - G3 = NeuronGroup(1, "v:1", name="G3") - G4 = NeuronGroup(1, "v:1", name="G4") - objs2 = {obj.name for obj in collect()} - assert objs1 == {"G1", "G2"} - assert objs2 == {"G3", "G4"} - - -@pytest.mark.standalone_compatible -def test_runtime_rounding(): - # Test that runtime and standalone round in the same way, see github issue - # #695 for details - defaultclock.dt = 20.000000000020002 * us - G = NeuronGroup(1, "v:1") - mon = StateMonitor(G, "v", record=True) - run(defaultclock.dt * 250) - assert len(mon.t) == 250 - - -@pytest.mark.codegen_independent -def test_small_runs(): - # One long run and multiple small runs should give the same results - group_1 = NeuronGroup(10, "dv/dt = -v / (10*ms) : 1") - group_1.v = "(i + 1) / N" - mon_1 = StateMonitor(group_1, "v", record=True) - net_1 = Network(group_1, mon_1) - net_1.run(1 * second) - - group_2 = NeuronGroup(10, "dv/dt = -v / (10*ms) : 1") - group_2.v = "(i + 1) / N" - mon_2 = StateMonitor(group_2, "v", record=True) - net_2 = Network(group_2, mon_2) - runtime = 1 * ms - while True: - runtime *= 3 - runtime = min([runtime, 1 * second - net_2.t]) - net_2.run(runtime) - if net_2.t >= 1 * second: - break - - assert_allclose(mon_1.t_[:], mon_2.t_[:]) - assert_allclose(mon_1.v_[:], mon_2.v_[:]) - - -@pytest.mark.codegen_independent -def test_both_equal(): - # check all objects added by Network.add() also have their contained_objects added to 'Network' - tau = 10 * ms - diff_eqn = """dv/dt = (1-v)/tau : 1""" - chg_code = """v = 2*v""" - - Ng = NeuronGroup(1, diff_eqn, method="exact") - M1 = StateMonitor(Ng, "v", record=True) - netObj = Network(Ng, M1) - Ng.run_regularly(chg_code, dt=20 * ms) - netObj.run(100 * ms) - - start_scope() - Ng = NeuronGroup(1, diff_eqn, method="exact") - M2 = StateMonitor(Ng, "v", record=True) - Ng.run_regularly(chg_code, dt=20 * ms) - run(100 * ms) - - assert (M1.v == M2.v).all() - - -@pytest.mark.standalone_compatible -@pytest.mark.multiple_runs -def test_long_run(): - defaultclock.dt = 0.1 * ms - group = NeuronGroup(1, "x : 1") - group.run_regularly("x += 1") - # Timesteps are internally stored as 64bit integers, but previous versions - # converted them into 32bit integers along the way. We'll make sure that - # this is not the case and everything runs fine. 
To not actually run such a - # long simulation we run a single huge time step - start_step = 2**31 - 5 - defaultclock.dt = 0.1 * ms - start_time = start_step * defaultclock.dt - defaultclock.dt = start_time - run(start_time) # A single, *very* long time step - defaultclock.dt = 0.1 * ms - run(6 * defaultclock.dt) - device.build(direct_call=False, **device.build_options) - assert group.x == 7 - - -@pytest.mark.codegen_independent -def test_long_run_dt_change(): - # Check that the dt check is not too restrictive, see issue #730 for details - group = NeuronGroup(1, "") # does nothing... - defaultclock.dt = 0.1 * ms - run(100 * second) - # print profiling_summary() - defaultclock.dt = 0.01 * ms - run(1 * second) - - -@pytest.mark.standalone_compatible -@pytest.mark.multiple_runs -def test_multiple_runs_constant_change(): - const_v = 1 - group = NeuronGroup(1, "v = const_v : 1") - mon = StateMonitor(group, "v", record=0) - run(defaultclock.dt) - const_v = 2 - run(defaultclock.dt) - device.build(direct_call=False, **device.build_options) - assert_equal(mon.v[0], [1, 2]) - - -@pytest.mark.standalone_compatible -@pytest.mark.multiple_runs -def test_multiple_runs_function_change(): - inp = TimedArray([1, 2], dt=defaultclock.dt) - group = NeuronGroup(1, "v = inp(t) : 1") - mon = StateMonitor(group, "v", record=0) - run(2 * defaultclock.dt) - inp = TimedArray([0, 0, 3, 4], dt=defaultclock.dt) - run(2 * defaultclock.dt) - device.build(direct_call=False, **device.build_options) - assert_equal(mon.v[0], [1, 2, 3, 4]) - - -@pytest.mark.codegen_independent -def test_unused_object_warning(): - with catch_logs() as logs: - # Create a NeuronGroup that is not used in the network - NeuronGroup(1, "v:1", name="never_used") - # Make sure that it gets garbage collected - import gc - - gc.collect() - assert len(logs) == 1 - assert logs[0][0] == "WARNING" - assert logs[0][1].endswith("unused_brian_object") - assert "never_used" in logs[0][2] - - -if __name__ == "__main__": - BrianLogger.log_level_warn() - for t in [ - test_incorrect_network_use, - test_network_contains, - test_empty_network, - test_network_single_object, - test_network_two_objects, - test_network_from_dict, - test_network_different_clocks, - test_network_different_when, - test_network_default_schedule, - test_network_schedule_change, - test_network_before_after_schedule, - test_network_custom_slots, - test_network_incorrect_schedule, - test_schedule_warning, - test_scheduling_summary_magic, - test_scheduling_summary, - test_magic_network, - test_network_stop, - test_network_operations, - test_incorrect_network_operations, - test_network_operations_name, - test_network_active_flag, - test_network_t, - test_incorrect_dt_defaultclock, - test_incorrect_dt_custom_clock, - test_network_remove, - test_magic_weak_reference, - test_magic_unused_object, - test_invalid_magic_network, - test_multiple_networks_invalid, - test_network_access, - test_loop, - test_magic_collect, - test_progress_report, - test_progress_report_incorrect, - test_multiple_runs_report_standalone, - test_multiple_runs_report_standalone_2, - test_multiple_runs_report_standalone_3, - test_multiple_runs_report_standalone_incorrect, - test_store_restore, - test_store_restore_to_file, - test_store_restore_to_file_new_objects, - test_store_restore_to_file_differing_nets, - test_store_restore_magic, - test_store_restore_magic_to_file, - test_store_restore_spikequeue, - test_store_restore_restore_synapses, - test_defaultclock_dt_changes, - test_dt_changes_between_runs, - test_dt_restore, - 
test_continuation, - test_get_set_states, - test_multiple_runs_defaultclock, - test_multiple_runs_defaultclock_incorrect, - test_profile, - test_profile_off, - test_profile_ipython_html, - test_magic_scope, - test_runtime_rounding, - test_small_runs, - test_both_equal, - test_long_run, - test_long_run_dt_change, - test_multiple_runs_constant_change, - test_multiple_runs_function_change, - ]: - set_device(all_devices["runtime"]) - t() - reinit_and_delete() diff --git a/brian2/tests/test_neurongroup.py b/brian2/tests/test_neurongroup.py index 3d2f75391..8ba48d958 100644 --- a/brian2/tests/test_neurongroup.py +++ b/brian2/tests/test_neurongroup.py @@ -1974,18 +1974,25 @@ def test_random_values_fixed_seed(): """ v1 : 1 v2 : 1 + v3 : 1 + v4 : 1 """, ) seed(12345678) G.v1 = "rand()" - G.v1 = "v1 + randn()" + G.v2 = "randn()" seed(12345678) - G.v2 = "rand()" - G.v2 = "v2 + randn()" + G.v3 = "rand()" + G.v4 = "randn()" run(0 * ms) # for standalone assert np.var(G.v1[:]) > 0 + assert np.all((G.v1[:] >= 0) & (G.v1[:] < 1)) assert np.var(G.v2[:]) > 0 - assert_allclose(G.v1[:], G.v2[:]) + assert np.var(G.v3[:]) > 0 + assert np.all((G.v3[:] >= 0) & (G.v3[:] < 1)) + assert np.var(G.v4[:]) > 0 + assert_allclose(G.v1[:], G.v3[:]) + assert_allclose(G.v2[:], G.v4[:]) @pytest.mark.standalone_compatible diff --git a/brian2/tests/test_numpy_codegen.py b/brian2/tests/test_numpy_codegen.py deleted file mode 100644 index 6a3a763db..000000000 --- a/brian2/tests/test_numpy_codegen.py +++ /dev/null @@ -1,24 +0,0 @@ -import pytest - -from brian2 import * - - -def test_error_message(): - if prefs.codegen.target != "numpy": - pytest.skip("numpy-only test") - - @check_units(x=1, result=1) - def foo(x): - raise ValueError() - - G = NeuronGroup(1, "v : 1") - G.run_regularly("v = foo(3)") - with pytest.raises(BrianObjectException) as exc: - run(defaultclock.dt) - # The actual code line should be mentioned in the error message - exc.match("v = foo(3)") - - -if __name__ == "__main__": - prefs.codegen.target = "numpy" - test_error_message() diff --git a/brian2/tests/test_parsing.py b/brian2/tests/test_parsing.py deleted file mode 100644 index dfe0f8271..000000000 --- a/brian2/tests/test_parsing.py +++ /dev/null @@ -1,586 +0,0 @@ -""" -Tests the brian2.parsing package -""" - -from collections import namedtuple - -import numpy as np -import pytest - -from brian2 import Function -from brian2.codegen.generators.cpp_generator import CPPCodeGenerator -from brian2.core.functions import DEFAULT_FUNCTIONS -from brian2.core.preferences import prefs -from brian2.core.variables import Constant -from brian2.groups.group import Group -from brian2.parsing.dependencies import abstract_code_dependencies -from brian2.parsing.expressions import ( - _get_value_from_expression, - is_boolean_expression, - parse_expression_dimensions, -) -from brian2.parsing.functions import ( - abstract_code_from_function, - extract_abstract_code_functions, - substitute_abstract_code_functions, -) -from brian2.parsing.rendering import CPPNodeRenderer, NodeRenderer, NumpyNodeRenderer -from brian2.parsing.sympytools import str_to_sympy, sympy_to_str -from brian2.tests.utils import assert_allclose -from brian2.units import ( - DimensionMismatchError, - Unit, - amp, - get_unit, - have_same_dimensions, - volt, -) -from brian2.units.fundamentalunits import DIMENSIONLESS, Dimension -from brian2.utils.logger import std_silent -from brian2.utils.stringtools import deindent, get_identifiers - - -# a simple Group for testing -class SimpleGroup(Group): - def __init__(self, 
variables, namespace=None): - self.variables = variables - self.namespace = namespace - - -TEST_EXPRESSIONS = """ - a+b+c*d-f+g-(b+d)-(a-c) - a**b**2 - a**(b**2) - (a**b)**2 - a*(b+c*(a+b)*(a-(c*d))) - a/b/c-a/(b/c) - 10//n - n//10 - n//m - 10/n - 10.0/n - n/10 - n/10.0 - n/m - ab - a>=b - a==b - a!=b - a+1 - 1+a - 1+3 - a>0.5 and b>0.5 - a>0.5 and b>0.5 or c>0.5 - a>0.5 and b>0.5 or not c>0.5 - 2%4 - -1%4 - 2.3%5.6 - 2.3%5 - -1.2%3.4 - 17e-12 - 42e17 - """ - - -def parse_expressions(renderer, evaluator, numvalues=10): - exprs = [ - ([m for m in get_identifiers(l) if len(m) == 1], [], l.strip()) - for l in TEST_EXPRESSIONS.split("\n") - if l.strip() - ] - i, imod = 1, 33 - for varids, funcids, expr in exprs: - pexpr = renderer.render_expr(expr) - n = 0 - for _ in range(numvalues): - # assign some random values - ns = {} - for v in varids: - if v in ["n", "m"]: # integer values - ns[v] = i - else: - ns[v] = float(i) / imod - i = i % imod + 1 - r1 = eval(expr.replace("&", " and ").replace("|", " or "), ns) - n += 1 - r2 = evaluator(pexpr, ns) - try: - # Use all close because we can introduce small numerical - # difference through sympy's rearrangements - assert_allclose(r1, r2, atol=1e-8) - except AssertionError as e: - raise AssertionError( - f"In expression {str(expr)} translated to {str(pexpr)} {str(e)}" - ) - - -def numpy_evaluator(expr, userns): - ns = {} - # exec 'from numpy import logical_not' in ns - ns["logical_not"] = np.logical_not - ns.update(**userns) - for k in userns: - if not k.startswith("_"): - ns[k] = np.array([userns[k]]) - try: - x = eval(expr, ns) - except Exception as e: - raise ValueError( - f"Could not evaluate numpy expression {expr} exception {str(e)}" - ) - if isinstance(x, np.ndarray): - return x[0] - else: - return x - - -@pytest.mark.codegen_independent -def test_parse_expressions_python(): - parse_expressions(NodeRenderer(), eval) - - -@pytest.mark.codegen_independent -def test_parse_expressions_numpy(): - parse_expressions(NumpyNodeRenderer(), numpy_evaluator) - - -@pytest.mark.codegen_independent -def test_parse_expressions_sympy(): - # sympy is about symbolic calculation, the string returned by the renderer - # contains "Symbol('a')" etc. so we cannot simply evaluate it in a - # namespace. 
- # We therefore use a different approach: Convert the expression to a - # sympy expression via str_to_sympy (uses the SympyNodeRenderer internally), - # then convert it back to a string via sympy_to_str and evaluate it - - class SympyRenderer: - def render_expr(self, expr): - return str_to_sympy(expr) - - def evaluator(expr, ns): - expr = sympy_to_str(expr) - ns = dict(ns) - # Add the floor function which is used to implement floor division - ns["floor"] = DEFAULT_FUNCTIONS["floor"] - return eval(expr, ns) - - parse_expressions(SympyRenderer(), evaluator) - - -@pytest.mark.codegen_independent -def test_abstract_code_dependencies(): - code = """ - a = b+c - d = b+c - a = func_a() - a = func_b() - a = x+d - """ - known_vars = {"a", "b", "c"} - known_funcs = {"func_a"} - res = abstract_code_dependencies(code, known_vars, known_funcs) - expected_res = dict( - all=[ - "a", - "b", - "c", - "d", - "x", - "func_a", - "func_b", - ], - read=["b", "c", "d", "x"], - write=["a", "d"], - funcs=["func_a", "func_b"], - known_all=["a", "b", "c", "func_a"], - known_read=["b", "c"], - known_write=["a"], - known_funcs=["func_a"], - unknown_read=["d", "x"], - unknown_write=["d"], - unknown_funcs=["func_b"], - undefined_read=["x"], - newly_defined=["d"], - ) - for k, v in expected_res.items(): - if not getattr(res, k) == set(v): - raise AssertionError( - f"For '{k}' result is {getattr(res, k)} expected {set(v)}" - ) - - -@pytest.mark.codegen_independent -def test_is_boolean_expression(): - # dummy "Variable" class - Var = namedtuple("Var", ["is_boolean"]) - - # dummy function object - class Func: - def __init__(self, returns_bool=False): - self._returns_bool = returns_bool - - # variables / functions - a = Constant("a", value=True) - b = Constant("b", value=False) - c = Constant("c", value=5) - f = Func(returns_bool=True) - g = Func(returns_bool=False) - s1 = Var(is_boolean=True) - s2 = Var(is_boolean=False) - - variables = {"a": a, "b": b, "c": c, "f": f, "g": g, "s1": s1, "s2": s2} - - EVF = [ - (True, "a or b"), - (False, "c"), - (False, "s2"), - (False, "g(s1)"), - (True, "s2 > c"), - (True, "c > 5"), - (True, "True"), - (True, "a=b)"), - (False, "a+b"), - (True, "f(c)"), - (False, "g(c)"), - ( - True, - "f(c) or a= b*c)"), - (DimensionMismatchError, "a or b 30))", "|", "or"), - ("int((v > 30) & (w < 20))", "&", "and"), - ("x +* 3", "", ""), - ("v[index]", "indexing", ""), - ("v.value", "attribute", ""), - ("(v, w)", "tuple", ""), - ] - for expr, expected_1, expected_2 in expr_expected: - try: - nr.render_expr(expr) - raise AssertionError(f"Excepted {expr} to raise a SyntaxError.") - except SyntaxError as exc: - message = str(exc) - assert expected_1 in message - assert expected_2 in message - - -@pytest.mark.codegen_independent -def test_sympy_infinity(): - # See github issue #1061 - assert sympy_to_str(str_to_sympy("inf")) == "inf" - assert sympy_to_str(str_to_sympy("-inf")) == "-inf" - - -if __name__ == "__main__": - from _pytest.outcomes import Skipped - - test_parse_expressions_python() - test_parse_expressions_numpy() - try: - test_parse_expressions_cpp() - except Skipped: - pass - test_parse_expressions_sympy() - test_abstract_code_dependencies() - test_is_boolean_expression() - test_parse_expression_unit() - test_value_from_expression() - test_abstract_code_from_function() - test_extract_abstract_code_functions() - test_substitute_abstract_code_functions() - test_sympytools() - test_error_messages() - test_sympy_infinity() diff --git a/brian2/tests/test_poissongroup.py 
b/brian2/tests/test_poissongroup.py deleted file mode 100644 index 6318038f3..000000000 --- a/brian2/tests/test_poissongroup.py +++ /dev/null @@ -1,131 +0,0 @@ -import uuid - -import pytest -from numpy.testing import assert_equal - -from brian2 import * -from brian2.core.network import schedule_propagation_offset -from brian2.devices.device import reinit_and_delete -from brian2.tests.utils import exc_isinstance -from brian2.utils.logger import catch_logs - - -@pytest.mark.standalone_compatible -def test_single_rates(): - # Specifying single rates - P0 = PoissonGroup(10, 0 * Hz) - Pfull = PoissonGroup(10, 1.0 / defaultclock.dt) - - # Basic properties - assert len(P0) == len(Pfull) == 10 - assert len(repr(P0)) and len(str(P0)) - spikes_P0 = SpikeMonitor(P0) - spikes_Pfull = SpikeMonitor(Pfull) - run(2 * defaultclock.dt) - assert_equal(spikes_P0.count, np.zeros(len(P0))) - assert_equal(spikes_Pfull.count, 2 * np.ones(len(P0))) - - -@pytest.mark.standalone_compatible -def test_rate_arrays(): - P = PoissonGroup(2, np.array([0, 1.0 / defaultclock.dt]) * Hz) - spikes = SpikeMonitor(P) - run(2 * defaultclock.dt) - - assert_equal(spikes.count, np.array([0, 2])) - - -@pytest.mark.codegen_independent -def test_rate_unit_check(): - with pytest.raises(DimensionMismatchError): - PoissonGroup(1, np.array([1, 2])) - with pytest.raises(DimensionMismatchError): - PoissonGroup(1, np.array([1, 2]) * ms) - P = PoissonGroup(1, "i*mV") - net = Network(P) - with pytest.raises(BrianObjectException) as exc: - net.run(0 * ms) - assert exc_isinstance(exc, DimensionMismatchError) - - P = PoissonGroup(1, "i") - net = Network(P) - with pytest.raises(BrianObjectException) as exc: - net.run(0 * ms) - assert exc_isinstance(exc, DimensionMismatchError) - - -@pytest.mark.standalone_compatible -def test_time_dependent_rate(): - # The following two groups should show the same behaviour - timed_array = TimedArray( - np.array([[0, 0], [1.0 / defaultclock.dt, 0]]) * Hz, dt=1 * ms - ) - group_1 = PoissonGroup(2, rates="timed_array(t, i)") - group_2 = PoissonGroup(2, rates="int(i==0)*int(t>1*ms-dt/2)*(1/dt)") - spikes_1 = SpikeMonitor(group_1) - spikes_2 = SpikeMonitor(group_2) - run(2 * ms) - - assert_equal(spikes_1.count, np.array([int(round(1 * ms / defaultclock.dt)), 0])) - assert_equal(spikes_2.count, np.array([int(round(1 * ms / defaultclock.dt)), 0])) - assert sum(spikes_1.t < 1 * ms) == 0 - assert sum(spikes_2.t < 1 * ms) == 0 - - -@pytest.mark.standalone_compatible -def test_propagation(): - # Using a PoissonGroup as a source for Synapses should work as expected - P = PoissonGroup(2, np.array([0, 1.0 / defaultclock.dt]) * Hz) - G = NeuronGroup(2, "v:1") - S = Synapses(P, G, on_pre="v+=1") - S.connect(j="i") - run(2 * defaultclock.dt + schedule_propagation_offset()) - - assert_equal(G.v[:], np.array([0.0, 2.0])) - - -@pytest.mark.standalone_compatible -def test_poissongroup_subgroup(): - # It should be possible to take a subgroup of a PoissonGroup - P = PoissonGroup(4, [0, 0, 0, 0] * Hz) - P1 = P[:2] - P2 = P[2:] - P2.rates = 1.0 / defaultclock.dt - G = NeuronGroup(4, "v:1") - S1 = Synapses(P1, G[:2], on_pre="v+=1") - S1.connect(j="i") - S2 = Synapses(P2, G[2:], on_pre="v+=1") - S2.connect(j="i") - run(2 * defaultclock.dt + schedule_propagation_offset()) - - assert_equal(G.v[:], np.array([0.0, 0.0, 2.0, 2.0])) - - -@pytest.mark.codegen_independent -def test_poissongroup_namespace(): - rate_const = 0 * Hz - P = PoissonGroup( - 1, - rates="rate_const", - namespace={"rate_const": 1 / defaultclock.dt}, - 
name=f"poissongroup_{uuid.uuid4().hex}", - ) - P2 = PoissonGroup(1, rates="rate_const") - mon = SpikeMonitor(P) - mon2 = SpikeMonitor(P2) - with catch_logs() as l: - run(2 * defaultclock.dt) - assert len(l) == 1 - assert l[0][1].endswith("resolution_conflict") - assert mon.num_spikes == 2 - assert mon2.num_spikes == 0 - - -if __name__ == "__main__": - test_single_rates() - test_rate_arrays() - test_rate_unit_check() - test_time_dependent_rate() - test_propagation() - test_poissongroup_subgroup() - test_poissongroup_namespace() diff --git a/brian2/tests/test_poissoninput.py b/brian2/tests/test_poissoninput.py deleted file mode 100644 index a665cc1a0..000000000 --- a/brian2/tests/test_poissoninput.py +++ /dev/null @@ -1,109 +0,0 @@ -import pytest -from numpy.testing import assert_equal - -from brian2 import * -from brian2.core.network import schedule_propagation_offset -from brian2.devices.device import reinit_and_delete -from brian2.tests.utils import assert_allclose, exc_isinstance - - -@pytest.mark.standalone_compatible -def test_poissoninput(): - # Test extreme cases and do a very basic test of an intermediate case, we - # don't want tests to be stochastic - G = NeuronGroup( - 10, - """ - x : volt - y : volt - y2 : volt - z : volt - z2 : volt - w : 1 - """, - ) - G.w = 0.5 - - never_update = PoissonInput(G, "x", 100, 0 * Hz, weight=1 * volt) - always_update = PoissonInput(G, "y", 50, 1 / defaultclock.dt, weight=2 * volt) - always_update2 = PoissonInput( - G, "y2", 50, 1 / defaultclock.dt, weight="1*volt + 1*volt" - ) - sometimes_update = PoissonInput(G, "z", 10000, 50 * Hz, weight=0.5 * volt) - sometimes_update2 = PoissonInput(G, "z2", 10000, 50 * Hz, weight="w*volt") - - assert_equal(never_update.rate, 0 * Hz) - assert_equal(never_update.N, 100) - assert_equal(always_update.rate, 1 / defaultclock.dt) - assert_equal(always_update.N, 50) - assert_equal(sometimes_update.rate, 50 * Hz) - assert_equal(sometimes_update.N, 10000) - - mon = StateMonitor(G, ["x", "y", "y2", "z", "z2"], record=True, when="end") - - run(1 * ms) - assert_equal(0, mon.x[:]) - assert_equal( - np.tile((1 + np.arange(mon.y[:].shape[1])) * 50 * 2 * volt, (10, 1)), mon.y[:] - ) - assert_equal( - np.tile((1 + np.arange(mon.y[:].shape[1])) * 50 * 2 * volt, (10, 1)), mon.y2[:] - ) - assert all(np.var(np.diff(mon.z[:]), axis=1) > 0) # variability over time - assert all(np.var(mon.z[:], axis=0) > 0) # variability over neurons - assert all(np.var(np.diff(mon.z2[:]), axis=1) > 0) # variability over time - assert all(np.var(mon.z2[:], axis=0) > 0) # variability over neurons - - -@pytest.mark.codegen_independent -def test_poissoninput_errors(): - # Targeting non-existing variable - G = NeuronGroup( - 10, - """ - x : volt - y : 1 - """, - ) - with pytest.raises(KeyError): - PoissonInput(G, "z", 100, 100 * Hz, weight=1.0) - - # Incorrect units - with pytest.raises(DimensionMismatchError): - PoissonInput(G, "x", 100, 100 * Hz, weight=1.0) - with pytest.raises(DimensionMismatchError): - PoissonInput(G, "y", 100, 100 * Hz, weight=1.0 * volt) - - # dt change - old_dt = defaultclock.dt - inp = PoissonInput(G, "x", 100, 100 * Hz, weight=1 * volt) - defaultclock.dt = 2 * old_dt - net = Network(collect()) - with pytest.raises(BrianObjectException) as exc: - net.run(0 * ms) - assert exc_isinstance(exc, NotImplementedError) - defaultclock.dt = old_dt - - -@pytest.mark.standalone_compatible -def test_poissoninput_refractory(): - eqs = """ - dv/dt = 0/second : 1 (unless refractory) - """ - G = NeuronGroup( - 10, eqs, reset="v=0", 
threshold="v>4.5", refractory=5 * defaultclock.dt - ) - # Will increase the value by 1.0 at each time step - P = PoissonInput(G, "v", 1, 1 / defaultclock.dt, weight=1.0) - mon = StateMonitor(G, "v", record=5) - run(10 * defaultclock.dt) - expected = np.arange(10, dtype=float) - expected[6 - int(schedule_propagation_offset() / defaultclock.dt) :] = 0 - assert_allclose(mon[5].v[:], expected) - - -if __name__ == "__main__": - test_poissoninput() - reinit_and_delete() - test_poissoninput_errors() - test_poissoninput_refractory() diff --git a/brian2/tests/test_preferences.py b/brian2/tests/test_preferences.py deleted file mode 100644 index 4dbdec452..000000000 --- a/brian2/tests/test_preferences.py +++ /dev/null @@ -1,342 +0,0 @@ -from io import StringIO - -import pytest -from numpy import float32, float64 -from numpy.testing import assert_equal - -from brian2 import amp, restore_initial_state, volt -from brian2.core.preferences import ( - BrianGlobalPreferences, - BrianGlobalPreferencesView, - BrianPreference, - DefaultValidator, - PreferenceError, -) - - -@pytest.mark.codegen_independent -def test_defaultvalidator(): - # Test that the default validator checks the class - validator = DefaultValidator(5) - assert validator(3) - assert not validator("3") - validator = DefaultValidator("astring") - assert validator("another") - assert not validator(3) - # test that the default validator checks the units - validator = DefaultValidator(3 * volt) - assert validator(2 * volt) - assert not validator(1 * amp) - - -@pytest.mark.codegen_independent -def test_brianpreference(): - # check default args - pref = BrianPreference(1.0 / 3, "docs") - assert not pref.validator(1) - assert pref.docs == "docs" - assert pref.default == 1.0 / 3 - assert pref.representor(pref.default) == repr(1.0 / 3) - - -@pytest.mark.codegen_independent -def test_preference_name_checking(): - """ - Test that you cannot set illegal preference names. 
- """ - gp = BrianGlobalPreferences() - - # Name that starts with an underscore - with pytest.raises(PreferenceError): - gp.register_preferences( - "dummy", - "dummy doc", - _notalegalname=BrianPreference(True, "some preference"), - ) - - # Name that clashes with a method name - with pytest.raises(PreferenceError): - gp.register_preferences( - "dummy", "dummy doc", update=BrianPreference(True, "some preference") - ) - - gp.register_preferences( - "a", "dummy doc", b=BrianPreference(True, "some preference") - ) - - # Trying to register a subcategory that would shadow a preference - with pytest.raises(PreferenceError): - gp.register_preferences( - "a.b", "dummy doc", name=BrianPreference(True, "some preference") - ) - - gp.register_preferences( - "b.c", "dummy doc", name=BrianPreference(True, "some preference") - ) - - # Trying to register a preference that clashes with an existing category - with pytest.raises(PreferenceError): - gp.register_preferences( - "b", "dummy doc", c=BrianPreference(True, "some preference") - ) - - -@pytest.mark.codegen_independent -def test_brianglobalpreferences(): - # test that pre-setting a nonexistent preference in a subsequently - # existing base name raises an error at the correct point - gp = BrianGlobalPreferences() - - # This shouldn't work, in user code only registered preferences can be set - with pytest.raises(PreferenceError): - gp.__setitem__("a.b", 5) - - # This uses the method that is used when reading preferences from a file - gp._set_preference("a.b", 5) - gp._set_preference("a.c", 5) - with pytest.raises(PreferenceError): - gp.register_preferences("a", "docs for a", b=BrianPreference(5, "docs for b")) - # test that post-setting a nonexistent preference in an existing base - # name raises an error - gp = BrianGlobalPreferences() - gp.register_preferences("a", "docs for a", b=BrianPreference(5, "docs for b")) - with pytest.raises(PreferenceError): - gp.__setitem__("a.c", 5) - # Test pre and post-setting some correct names but valid and invalid values - gp = BrianGlobalPreferences() - gp._set_preference("a.b", 5) - gp.register_preferences( - "a", - "docs for a", - b=BrianPreference(5, "docs for b"), - c=BrianPreference(1 * volt, "docs for c"), - d=BrianPreference(0, "docs for d", validator=lambda x: x >= 0), - e=BrianPreference(float64, "docs for e", representor=lambda x: x.__name__), - ) - assert gp["a.c"] == 1 * volt - gp["a.c"] = 2 * volt - with pytest.raises(PreferenceError): - gp.__setitem__("a.c", 3 * amp) - gp["a.d"] = 2.0 - with pytest.raises(PreferenceError): - gp.__setitem__("a.d", -1) - gp["a.e"] = float32 - with pytest.raises(PreferenceError): - gp.__setitem__("a.e", 0) - # test backup and restore - gp._backup() - gp["a.d"] = 10 - assert gp["a.d"] == 10 - gp._restore() - assert gp["a.d"] == 2.0 - # test that documentation and as_file generation runs without error, but - # don't test for values because we might change the organisation of it - assert len(gp.get_documentation()) - gp.as_file - gp.defaults_as_file - # test that reading a preference file works as expected - pref_file = StringIO( - """ - # a comment - a.b = 10 - [a] - c = 5*volt - d = 1 - e = float64 - """ - ) - gp.read_preference_file(pref_file) - assert gp["a.b"] == 10 - assert gp["a.c"] == 5 * volt - assert gp["a.d"] == 1 - assert gp["a.e"] == float64 - # test that reading a badly formatted prefs file fails - pref_file = StringIO( - """ - [a - b = 10 - """ - ) - with pytest.raises(PreferenceError): - gp.read_preference_file(pref_file) - # test that reading a well formatted 
prefs file with an invalid value fails - pref_file = StringIO( - """ - a.b = 'oh no, not a string' - """ - ) - with pytest.raises(PreferenceError): - gp.read_preference_file(pref_file) - # assert that writing the prefs to a file and loading them gives the - # same values - gp = BrianGlobalPreferences() - gp.register_preferences( - "a", - "docs for a", - b=BrianPreference(5, "docs for b"), - ) - gp._backup() - gp["a.b"] = 10 - str_modified = gp.as_file - str_defaults = gp.defaults_as_file - gp["a.b"] = 15 - gp.read_preference_file(StringIO(str_modified)) - assert gp["a.b"] == 10 - gp.read_preference_file(StringIO(str_defaults)) - assert gp["a.b"] == 5 - # check that load_preferences works, but nothing about its values - gp = BrianGlobalPreferences() - gp.load_preferences() - - # Check that resetting to default preferences works - gp = BrianGlobalPreferences() - gp.register_preferences("a", "docs for a", b=BrianPreference(5, "docs for b")) - assert gp["a.b"] == 5 - gp["a.b"] = 7 - assert gp["a.b"] == 7 - gp.reset_to_defaults() - assert gp["a.b"] == 5 - - -@pytest.mark.codegen_independent -def test_preference_name_access(): - """ - Test various ways of accessing preferences - """ - - gp = BrianGlobalPreferences() - - gp.register_preferences( - "main", "main category", name=BrianPreference(True, "some preference") - ) - gp.register_preferences( - "main.sub", "subcategory", name2=BrianPreference(True, "some preference") - ) - - gp.register_preferences("main.sub_no_pref", "subcategory without preference") - gp.register_preferences( - "main.sub_no_pref.sub", - "deep subcategory", - name=BrianPreference(True, "some preference"), - ) - - # Keyword based access - assert gp["main.name"] - assert gp["main.sub.name2"] - assert gp["main.sub_no_pref.sub.name"] - gp["main.name"] = False - gp["main.sub.name2"] = False - gp["main.sub_no_pref.sub.name"] = False - - # Attribute based access - assert not gp.main.name # we set it to False above - assert not gp.main.sub.name2 - assert not gp.main.sub_no_pref.sub.name - gp.main.name = True - gp.main.sub.name2 = True - gp.main.sub_no_pref.sub.name = True - - # Mixed access - assert gp.main["name"] - assert gp["main"].name - assert gp.main["sub"].name2 - assert gp["main"].sub["name2"] - - # Accessing categories - assert isinstance(gp["main"], BrianGlobalPreferencesView) - assert isinstance(gp["main.sub"], BrianGlobalPreferencesView) - assert isinstance(gp.main, BrianGlobalPreferencesView) - assert isinstance(gp.main.sub, BrianGlobalPreferencesView) - - # Setting categories shouldn't work - with pytest.raises(PreferenceError): - gp.__setitem__("main", None) - with pytest.raises(PreferenceError): - gp.__setattr__("main", None) - with pytest.raises(PreferenceError): - gp.main.__setitem__("sub", None) - with pytest.raises(PreferenceError): - gp.main.__setattr__("sub", None) - - # Neither should deleting categories or preferences - with pytest.raises(PreferenceError): - gp.__delitem__("main") - with pytest.raises(PreferenceError): - gp.__delattr__("main") - with pytest.raises(PreferenceError): - gp.main.__delitem__("name") - with pytest.raises(PreferenceError): - gp.main.__delattr__("name") - with pytest.raises(PreferenceError): - gp.main.__delitem__("sub") - with pytest.raises(PreferenceError): - gp.main.__delattr__("sub") - - # Errors for accessing non-existing preferences - with pytest.raises(KeyError): - gp["main.doesnotexist"] - with pytest.raises(KeyError): - gp["nonexisting.name"] - with pytest.raises(KeyError): - gp.main.doesnotexist - with 
pytest.raises(KeyError): - gp.nonexisting.name - - # Check dictionary functionality - for name, value in gp.items(): - assert gp[name] == value - - for name, value in gp.main.items(): - assert gp.main[name] == value - - assert len(gp) == 3 # three preferences in total - assert len(gp["main"]) == 3 # all preferences are in the main category - assert len(gp["main.sub"]) == 1 # one preference in main.sub - - assert "main.name" in gp - assert "name" in gp["main"] - assert "name2" in gp["main.sub"] - assert not "name" in gp["main.sub"] - - gp["main.name"] = True - gp.update({"main.name": False}) - assert not gp["main.name"] - - gp.main.update({"name": True}) - assert gp["main.name"] - - # Class based functionality - assert "main" in dir(gp) - assert "sub" in dir(gp.main) - assert "name" in dir(gp.main) - - # Check that the fiddling with getattr and setattr did not destroy the - # access to standard attributes - assert len(gp.prefs) - assert gp.main._basename == "main" - - -@pytest.mark.codegen_independent -def test_str_repr(): - # Just test whether str and repr do not throw an error and return something - gp = BrianGlobalPreferences() - gp.register_preferences( - "main", "main category", name=BrianPreference(True, "some preference") - ) - - assert len(str(gp)) - assert len(repr(gp)) - assert len(str(gp.main)) - assert len(repr(gp.main)) - - -if __name__ == "__main__": - for t in [ - test_defaultvalidator, - test_brianpreference, - test_brianglobalpreferences, - test_preference_name_checking, - test_preference_name_access, - ]: - t() - restore_initial_state() diff --git a/brian2/tests/test_refractory.py b/brian2/tests/test_refractory.py deleted file mode 100644 index 8c8471691..000000000 --- a/brian2/tests/test_refractory.py +++ /dev/null @@ -1,386 +0,0 @@ -from collections import Counter - -import pytest -from numpy.testing import assert_equal - -from brian2 import * -from brian2.core.functions import timestep -from brian2.devices.device import reinit_and_delete -from brian2.equations.refractory import add_refractoriness -from brian2.tests.utils import assert_allclose, exc_isinstance -from brian2.utils.logger import catch_logs - - -@pytest.mark.codegen_independent -def test_add_refractoriness(): - eqs = Equations( - """ - dv/dt = -x*v/second : volt (unless refractory) - dw/dt = -w/second : amp - x : 1 - """ - ) - # only make sure it does not throw an error - eqs = add_refractoriness(eqs) - # Check that the parameters were added - assert "not_refractory" in eqs - assert "lastspike" in eqs - - -@pytest.mark.codegen_independent -def test_missing_refractory_warning(): - # Forgotten refractory argument - with catch_logs() as l: - group = NeuronGroup( - 1, - "dv/dt = -v / (10*ms) : 1 (unless refractory)", - threshold="v > 1", - reset="v = 0", - ) - assert len(l) == 1 - assert l[0][0] == "WARNING" and l[0][1].endswith("no_refractory") - - -@pytest.mark.standalone_compatible -def test_refractoriness_basic(): - G = NeuronGroup( - 1, - """ - dv/dt = 99.999*Hz : 1 (unless refractory) - dw/dt = 99.999*Hz : 1 - """, - threshold="v>1", - reset="v=0;w=0", - refractory=5 * ms, - ) - # It should take 10ms to reach the threshold, then v should stay at 0 - # for 5ms, while w continues to increase - mon = StateMonitor(G, ["v", "w"], record=True, when="end") - run(20 * ms) - # No difference before the spike - assert_allclose( - mon[0].v[: timestep(10 * ms, defaultclock.dt)], - mon[0].w[: timestep(10 * ms, defaultclock.dt)], - ) - # v is not updated during refractoriness - in_refractoriness = mon[0].v[ - timestep(10 
* ms, defaultclock.dt) : timestep(15 * ms, defaultclock.dt) - ] - assert_equal(in_refractoriness, np.zeros_like(in_refractoriness)) - # w should evolve as before - assert_allclose( - mon[0].w[: timestep(5 * ms, defaultclock.dt)], - mon[0].w[ - timestep(10 * ms, defaultclock.dt) - + 1 : timestep(15 * ms, defaultclock.dt) - + 1 - ], - ) - assert np.all( - mon[0].w[ - timestep(10 * ms, defaultclock.dt) - + 1 : timestep(15 * ms, defaultclock.dt) - + 1 - ] - > 0 - ) - # After refractoriness, v should increase again - assert np.all( - mon[0].v[ - timestep(15 * ms, defaultclock.dt) : timestep(20 * ms, defaultclock.dt) - ] - > 0 - ) - - -@pytest.mark.standalone_compatible -@pytest.mark.parametrize( - "ref_time", - [ - "5*ms", - "(t-lastspike + 1e-3*dt) < 5*ms", - "time_since_spike + 1e-3*dt < 5*ms", - "ref_subexpression", - "(t-lastspike + 1e-3*dt) < ref", - "ref", - "ref_no_unit*ms", - ], -) -def test_refractoriness_variables(ref_time): - # Try a string evaluating to a quantity, and an explicit boolean - # condition -- all should do the same thing - G = NeuronGroup( - 1, - """ - dv/dt = 99.999*Hz : 1 (unless refractory) - dw/dt = 99.999*Hz : 1 - ref : second - ref_no_unit : 1 - time_since_spike = (t - lastspike) +1e-3*dt : second - ref_subexpression = (t - lastspike + 1e-3*dt) < ref : boolean - """, - threshold="v>1", - reset="v=0;w=0", - refractory=ref_time, - dtype={ - "ref": defaultclock.variables["t"].dtype, - "ref_no_unit": defaultclock.variables["t"].dtype, - "lastspike": defaultclock.variables["t"].dtype, - "time_since_spike": defaultclock.variables["t"].dtype, - }, - ) - G.ref = 5 * ms - G.ref_no_unit = 5 - # It should take 10ms to reach the threshold, then v should stay at 0 - # for 5ms, while w continues to increase - mon = StateMonitor(G, ["v", "w"], record=True, when="end") - run(20 * ms) - try: - # No difference before the spike - assert_allclose( - mon[0].v[: timestep(10 * ms, defaultclock.dt)], - mon[0].w[: timestep(10 * ms, defaultclock.dt)], - ) - # v is not updated during refractoriness - in_refractoriness = mon[0].v[ - timestep(10 * ms, defaultclock.dt) : timestep(15 * ms, defaultclock.dt) - ] - assert_allclose(in_refractoriness, np.zeros_like(in_refractoriness)) - # w should evolve as before - assert_allclose( - mon[0].w[: timestep(5 * ms, defaultclock.dt)], - mon[0].w[ - timestep(10 * ms, defaultclock.dt) - + 1 : timestep(15 * ms, defaultclock.dt) - + 1 - ], - ) - assert np.all( - mon[0].w[ - timestep(10 * ms, defaultclock.dt) - + 1 : timestep(15 * ms, defaultclock.dt) - + 1 - ] - > 0 - ) - # After refractoriness, v should increase again - assert np.all( - mon[0].v[ - timestep(15 * ms, defaultclock.dt) : timestep(20 * ms, defaultclock.dt) - ] - > 0 - ) - except AssertionError as ex: - raise - raise AssertionError( - f"Assertion failed when using {ref_time!r} as refractory argument:\n{ex}" - ) - - -@pytest.mark.standalone_compatible -def test_refractoriness_threshold_basic(): - G = NeuronGroup( - 1, - """ - dv/dt = 199.99*Hz : 1 - """, - threshold="v > 1", - reset="v=0", - refractory=10 * ms, - ) - # The neuron should spike after 5ms but then not spike for the next - # 10ms. 
The state variable should continue to integrate so there should - # be a spike after 15ms - spike_mon = SpikeMonitor(G) - run(16 * ms) - assert_allclose(spike_mon.t, [5, 15] * ms) - - -@pytest.mark.standalone_compatible -def test_refractoriness_repeated(): - # Create a group that spikes whenever it can - group = NeuronGroup(1, "", threshold="True", refractory=10 * defaultclock.dt) - spike_mon = SpikeMonitor(group) - run(10000 * defaultclock.dt) - assert spike_mon.t[0] == 0 * ms - assert_allclose(np.diff(spike_mon.t), 10 * defaultclock.dt) - - -@pytest.mark.standalone_compatible -def test_refractoriness_repeated_legacy(): - if prefs.core.default_float_dtype == np.float32: - pytest.skip( - "Not testing legacy refractory mechanism with single precision floats." - ) - # Switch on behaviour from versions <= 2.1.2 - prefs.legacy.refractory_timing = True - # Create a group that spikes whenever it can - group = NeuronGroup(1, "", threshold="True", refractory=10 * defaultclock.dt) - spike_mon = SpikeMonitor(group) - run(10000 * defaultclock.dt) - assert spike_mon.t[0] == 0 * ms - - # Empirical values from running with earlier Brian versions - assert_allclose( - np.diff(spike_mon.t)[:10], [1.1, 1, 1.1, 1, 1.1, 1.1, 1.1, 1.1, 1, 1.1] * ms - ) - steps = Counter(np.diff(np.int_(np.round(spike_mon.t / defaultclock.dt)))) - assert len(steps) == 2 and steps[10] == 899 and steps[11] == 91 - prefs.legacy.refractory_timing = False - - -@pytest.mark.standalone_compatible -@pytest.mark.parametrize( - "ref_time", - [ - 10 * ms, - "10*ms", - "timestep(t-lastspike, dt) < timestep(10*ms, dt)", - "timestep(t-lastspike, dt) < timestep(ref, dt)", - "ref", - "ref_no_unit*ms", - ], -) -def test_refractoriness_threshold(ref_time): - # Try a quantity, a string evaluating to a quantity, and an explicit boolean - # condition -- all should do the same thing - G = NeuronGroup( - 1, - """ - dv/dt = 199.999*Hz : 1 - ref : second - ref_no_unit : 1 - """, - threshold="v > 1", - reset="v=0", - refractory=ref_time, - dtype={ - "ref": defaultclock.variables["t"].dtype, - "ref_no_unit": defaultclock.variables["t"].dtype, - }, - ) - G.ref = 10 * ms - G.ref_no_unit = 10 - # The neuron should spike after 5ms but then not spike for the next - # 10ms. 
The state variable should continue to integrate so there should - # be a spike after 15ms - spike_mon = SpikeMonitor(G) - run(16 * ms) - assert_allclose(spike_mon.t, [5, 15] * ms) - - -@pytest.mark.codegen_independent -def test_refractoriness_types(): - # make sure that using a wrong type of refractoriness does not work - group = NeuronGroup(1, "", refractory="3*Hz") - with pytest.raises(BrianObjectException) as exc: - Network(group).run(0 * ms) - assert exc_isinstance(exc, TypeError) - group = NeuronGroup(1, "ref: Hz", refractory="ref") - with pytest.raises(BrianObjectException) as exc: - Network(group).run(0 * ms) - assert exc_isinstance(exc, TypeError) - group = NeuronGroup(1, "", refractory="3") - with pytest.raises(BrianObjectException) as exc: - Network(group).run(0 * ms) - assert exc_isinstance(exc, TypeError) - group = NeuronGroup(1, "ref: 1", refractory="ref") - with pytest.raises(BrianObjectException) as exc: - Network(group).run(0 * ms) - assert exc_isinstance(exc, TypeError) - - -@pytest.mark.codegen_independent -def test_conditional_write_set(): - """ - Test that the conditional_write attribute is set correctly - """ - G = NeuronGroup( - 1, - """ - dv/dt = 10*Hz : 1 (unless refractory) - dw/dt = 10*Hz : 1 - """, - refractory=2 * ms, - ) - assert G.variables["v"].conditional_write is G.variables["not_refractory"] - assert G.variables["w"].conditional_write is None - - -@pytest.mark.standalone_compatible -def test_conditional_write_behaviour(): - H = NeuronGroup(1, "v:1", threshold="v>-1") - - tau = 1 * ms - eqs = """ - dv/dt = (2-v)/tau : 1 (unless refractory) - dx/dt = 0/tau : 1 (unless refractory) - dy/dt = 0/tau : 1 - """ - reset = """ - v = 0 - x -= 0.05 - y -= 0.05 - """ - G = NeuronGroup(1, eqs, threshold="v>1", reset=reset, refractory=1 * ms) - - Sx = Synapses(H, G, on_pre="x += dt*100*Hz") - Sx.connect(True) - - Sy = Synapses(H, G, on_pre="y += dt*100*Hz") - Sy.connect(True) - - M = StateMonitor(G, variables=True, record=True) - - run(10 * ms) - - assert G.x[0] < 0.2 - assert G.y[0] > 0.2 - assert G.v[0] < 1.1 - - -@pytest.mark.standalone_compatible -def test_conditional_write_automatic_and_manual(): - source = NeuronGroup(1, "", threshold="True") # spiking all the time - target = NeuronGroup( - 2, - """ - dv/dt = 0/ms : 1 (unless refractory) - dw/dt = 0/ms : 1 - """, - threshold="t == 0*ms", - refractory="False", - ) # only refractory for the first time step - # Cell is spiking/refractory only in the first time step - syn = Synapses( - source, - target, - on_pre=""" - v += 1 - w += 1 * int(not_refractory_post) - """, - ) - syn.connect() - mon = StateMonitor(target, ["v", "w"], record=True, when="end") - run(2 * defaultclock.dt) - - # Synapse should not have been effective in the first time step - assert_allclose(mon.v[:, 0], 0) - assert_allclose(mon.v[:, 1], 1) - assert_allclose(mon.w[:, 0], 0) - assert_allclose(mon.w[:, 1], 1) - - -if __name__ == "__main__": - test_add_refractoriness() - test_missing_refractory_warning() - test_refractoriness_basic() - test_refractoriness_variables() - test_refractoriness_threshold() - test_refractoriness_threshold_basic() - test_refractoriness_repeated() - test_refractoriness_repeated_legacy() - test_refractoriness_types() - test_conditional_write_set() - test_conditional_write_behaviour() - test_conditional_write_automatic_and_manual() diff --git a/brian2/tests/test_spatialneuron.py b/brian2/tests/test_spatialneuron.py deleted file mode 100644 index 67906ed73..000000000 --- a/brian2/tests/test_spatialneuron.py +++ /dev/null @@ 
-1,996 +0,0 @@ -import itertools -import os - -import pytest -from numpy.testing import assert_equal - -from brian2 import * -from brian2.devices.device import reinit_and_delete -from brian2.tests.utils import assert_allclose - -try: - import scipy -except ImportError: - scipy = None - - -numpy_needs_scipy = pytest.mark.skipif( - # Using condition string, since we cannot yet know - # prefs.codegen.target at module import time - "prefs.codegen.target == 'numpy' and not scipy", - reason="multi-compartmental models need scipy to run with numpy", -) - - -@pytest.mark.codegen_independent -@numpy_needs_scipy -def test_custom_events(): - # Set (could be moved in a setup) - EL = -65 * mV - gL = 0.0003 * siemens / cm**2 - ev = """ - Im = gL * (EL - v) : amp/meter**2 - event_time1 : second - """ - # Create a three compartments morphology - morpho = Soma(diameter=10 * um) - morpho.dend1 = Cylinder(n=1, diameter=1 * um, length=10 * um) - morpho.dend2 = Cylinder(n=1, diameter=1 * um, length=10 * um) - G = SpatialNeuron( - morphology=morpho, model=ev, events={"event1": "t>=i*ms and t= neuron.diffusion_state_updater._starts[:].flat - ) - - # Check that length and distances make sense - assert_allclose(sum(morpho.L.length), 10 * um) - assert_allclose(morpho.L.distance, (0.5 + np.arange(10)) * um) - assert_allclose(sum(morpho.LL.length), 5 * um) - assert_allclose(morpho.LL.distance, (10 + 0.5 + np.arange(5)) * um) - assert_allclose(sum(morpho.LR.length), 5 * um) - assert_allclose(morpho.LR.distance, (10 + 0.25 + np.arange(10) * 0.5) * um) - assert_allclose(sum(morpho.right.length), 3 * um) - assert_allclose(morpho.right.distance, (0.5 + np.arange(7)) * 3.0 / 7.0 * um) - assert_allclose(sum(morpho.right.nextone.length), 2 * um) - assert_allclose( - morpho.right.nextone.distance, 3 * um + (0.5 + np.arange(3)) * 2.0 / 3.0 * um - ) - - -@pytest.mark.codegen_independent -def test_construction_coordinates(): - # Same as test_construction, but uses coordinates instead of lengths to - # set up everything - # Note that all coordinates here are relative to the origin of the - # respective cylinder - BrianLogger.suppress_name("resolution_conflict") - morpho = Soma(diameter=30 * um) - morpho.L = Cylinder(x=[0, 10] * um, diameter=1 * um, n=10) - morpho.LL = Cylinder(y=[0, 5] * um, diameter=2 * um, n=5) - morpho.LR = Cylinder(z=[0, 5] * um, diameter=2 * um, n=10) - morpho.right = Cylinder( - x=[0, sqrt(2) * 1.5] * um, y=[0, sqrt(2) * 1.5] * um, diameter=1 * um, n=7 - ) - morpho.right.nextone = Cylinder( - y=[0, sqrt(2)] * um, z=[0, sqrt(2)] * um, diameter=1 * um, n=3 - ) - gL = 1e-4 * siemens / cm**2 - EL = -70 * mV - eqs = """ - Im=gL*(EL-v) : amp/meter**2 - I : meter (point current) - """ - - # Check units of currents - with pytest.raises(DimensionMismatchError): - SpatialNeuron(morphology=morpho, model=eqs) - - eqs = """ - Im=gL*(EL-v) : amp/meter**2 - """ - neuron = SpatialNeuron( - morphology=morpho, model=eqs, Cm=1 * uF / cm**2, Ri=100 * ohm * cm - ) - - # Test initialization of values - neuron.LL.v = EL - assert_allclose(neuron.L.main.v, 0 * mV) - assert_allclose(neuron.LL.v, EL) - neuron.LL[1 * um : 3 * um].v = 0 * mV - assert_allclose(neuron.LL.v, Quantity([EL, 0 * mV, 0 * mV, EL, EL])) - assert_allclose(neuron.Cm, 1 * uF / cm**2) - - # Test morphological variables - assert_allclose(neuron.L.main.x, morpho.L.x) - assert_allclose(neuron.LL.main.x, morpho.LL.x) - assert_allclose(neuron.right.main.x, morpho.right.x) - assert_allclose(neuron.L.main.distance, morpho.L.distance) - # 
assert_allclose(neuron.L.main.diameter, morpho.L.diameter) - assert_allclose(neuron.L.main.area, morpho.L.area) - assert_allclose(neuron.L.main.length, morpho.L.length) - - # Check basic consistency of the flattened representation - assert all( - neuron.diffusion_state_updater._ends[:].flat - >= neuron.diffusion_state_updater._starts[:].flat - ) - - # Check that length and distances make sense - assert_allclose(sum(morpho.L.length), 10 * um) - assert_allclose(morpho.L.distance, (0.5 + np.arange(10)) * um) - assert_allclose(sum(morpho.LL.length), 5 * um) - assert_allclose(morpho.LL.distance, (10 + 0.5 + np.arange(5)) * um) - assert_allclose(sum(morpho.LR.length), 5 * um) - assert_allclose(morpho.LR.distance, (10 + 0.25 + np.arange(10) * 0.5) * um) - assert_allclose(sum(morpho.right.length), 3 * um) - assert_allclose(morpho.right.distance, (0.5 + np.arange(7)) * 3.0 / 7.0 * um) - assert_allclose(sum(morpho.right.nextone.length), 2 * um) - assert_allclose( - morpho.right.nextone.distance, 3 * um + (0.5 + np.arange(3)) * 2.0 / 3.0 * um - ) - - -@pytest.mark.long -@numpy_needs_scipy -def test_infinitecable(): - """ - Test simulation of an infinite cable vs. theory for current pulse (Green function) - """ - BrianLogger.suppress_name("resolution_conflict") - defaultclock.dt = 0.001 * ms - - # Morphology - diameter = 1 * um - Cm = 1 * uF / cm**2 - Ri = 100 * ohm * cm - N = 500 - morpho = Cylinder(diameter=diameter, length=3 * mm, n=N) - - # Passive channels - gL = 1e-4 * siemens / cm**2 - eqs = """ - Im=-gL*v : amp/meter**2 - I : amp (point current) - """ - - neuron = SpatialNeuron(morphology=morpho, model=eqs, Cm=Cm, Ri=Ri) - - # Monitors - mon = StateMonitor(neuron, "v", record=N / 2 - 20) - - neuron.I[len(neuron) // 2] = 1 * nA # injecting in the middle - run(0.02 * ms) - neuron.I = 0 * amp - run(3 * ms) - t = mon.t - v = mon[N // 2 - 20].v - # Theory (incorrect near cable ends) - x = 20 * morpho.length[0] - la = neuron.space_constant[0] - taum = Cm / gL # membrane time constant - theory = ( - 1.0 - / (la * Cm * pi * diameter) - * sqrt(taum / (4 * pi * (t + defaultclock.dt))) - * exp( - -(t + defaultclock.dt) / taum - - taum / (4 * (t + defaultclock.dt)) * (x / la) ** 2 - ) - ) - theory = theory * 1 * nA * 0.02 * ms - assert_allclose( - v[t > 0.5 * ms], theory[t > 0.5 * ms], atol=float(6.32 * uvolt) - ) # high error tolerance (not exact because not infinite cable) - - -@pytest.mark.standalone_compatible -@numpy_needs_scipy -def test_finitecable(): - """ - Test simulation of short cylinder vs. theory for constant current. 
- """ - BrianLogger.suppress_name("resolution_conflict") - - defaultclock.dt = 0.01 * ms - - # Morphology - diameter = 1 * um - length = 300 * um - Cm = 1 * uF / cm**2 - Ri = 150 * ohm * cm - N = 200 - morpho = Cylinder(diameter=diameter, length=length, n=N) - - # Passive channels - gL = 1e-4 * siemens / cm**2 - EL = -70 * mV - eqs = """ - Im=gL*(EL-v) : amp/meter**2 - I : amp (point current) - """ - - neuron = SpatialNeuron(morphology=morpho, model=eqs, Cm=Cm, Ri=Ri) - neuron.v = EL - - neuron.I[0] = 0.02 * nA # injecting at the left end - - run(100 * ms) - - # Theory - x = neuron.distance - v = neuron.v - la = neuron.space_constant[0] - ra = la * 4 * Ri / (pi * diameter**2) - theory = EL + ra * neuron.I[0] * cosh((length - x) / la) / sinh(length / la) - assert_allclose(v - EL, theory - EL, atol=1e-6) - - -@pytest.mark.standalone_compatible -@numpy_needs_scipy -def test_rallpack1(): - """ - Rallpack 1 - """ - if prefs.core.default_float_dtype is np.float32: - pytest.skip("Need double precision for this test") - defaultclock.dt = 0.05 * ms - - # Morphology - diameter = 1 * um - length = 1 * mm - Cm = 1 * uF / cm**2 - Ri = 100 * ohm * cm - N = 1000 - morpho = Cylinder(diameter=diameter, length=length, n=N) - - # Passive channels - gL = 1.0 / (40000 * ohm * cm**2) - EL = -65 * mV - eqs = """ - Im = gL*(EL - v) : amp/meter**2 - I : amp (point current, constant) - """ - neuron = SpatialNeuron(morphology=morpho, model=eqs, Cm=Cm, Ri=Ri) - neuron.v = EL - - neuron.I[0] = 0.1 * nA # injecting at the left end - - # Record at the two ends - mon = StateMonitor(neuron, "v", record=[0, 999], when="start", dt=0.05 * ms) - - run(250 * ms + defaultclock.dt) - - # Load the theoretical results - basedir = os.path.join(os.path.abspath(os.path.dirname(__file__)), "rallpack_data") - data_0 = np.loadtxt(os.path.join(basedir, "ref_cable.0")) - data_x = np.loadtxt(os.path.join(basedir, "ref_cable.x")) - - scale_0 = max(data_0[:, 1] * volt) - min(data_0[:, 1] * volt) - scale_x = max(data_x[:, 1] * volt) - min(data_x[:, 1] * volt) - squared_diff_0 = (data_0[:, 1] * volt - mon[0].v) ** 2 - squared_diff_x = (data_x[:, 1] * volt - mon[999].v) ** 2 - rel_RMS_0 = sqrt(mean(squared_diff_0)) / scale_0 - rel_RMS_x = sqrt(mean(squared_diff_x)) / scale_x - max_rel_0 = sqrt(max(squared_diff_0)) / scale_0 - max_rel_x = sqrt(max(squared_diff_x)) / scale_x - - # sanity check: times are the same - assert_allclose(mon.t / second, data_0[:, 0]) - assert_allclose(mon.t / second, data_x[:, 0]) - - # RMS error should be < 0.1%, maximum error along the curve should be < 0.5% - assert 100 * rel_RMS_0 < 0.1 - assert 100 * rel_RMS_x < 0.1 - assert 100 * max_rel_0 < 0.5 - assert 100 * max_rel_x < 0.5 - - -@pytest.mark.standalone_compatible -@numpy_needs_scipy -def test_rallpack2(): - """ - Rallpack 2 - """ - if prefs.core.default_float_dtype is np.float32: - pytest.skip("Need double precision for this test") - defaultclock.dt = 0.1 * ms - - # Morphology - diameter = 32 * um - length = 16 * um - Cm = 1 * uF / cm**2 - Ri = 100 * ohm * cm - - # Construct binary tree according to Rall's formula - morpho = Cylinder(n=1, diameter=diameter, y=[0, float(length)] * meter) - endpoints = {morpho} - for depth in range(1, 10): - diameter /= 2.0 ** (1.0 / 3.0) - length /= 2.0 ** (2.0 / 3.0) - new_endpoints = set() - for endpoint in endpoints: - new_L = Cylinder(n=1, diameter=diameter, length=length) - new_R = Cylinder(n=1, diameter=diameter, length=length) - new_endpoints.add(new_L) - new_endpoints.add(new_R) - endpoint.L = new_L - endpoint.R = new_R - 
endpoints = new_endpoints - - # Passive channels - gL = 1.0 / (40000 * ohm * cm**2) - EL = -65 * mV - eqs = """ - Im = gL*(EL - v) : amp/meter**2 - I : amp (point current, constant) - """ - neuron = SpatialNeuron(morphology=morpho, model=eqs, Cm=Cm, Ri=Ri, method="rk4") - neuron.v = EL - - neuron.I[0] = 0.1 * nA # injecting at the origin - - endpoint_indices = [endpoint.indices[0] for endpoint in endpoints] - mon = StateMonitor( - neuron, "v", record=[0] + endpoint_indices, when="start", dt=0.1 * ms - ) - - run(250 * ms + defaultclock.dt) - - # Load the theoretical results - basedir = os.path.join(os.path.abspath(os.path.dirname(__file__)), "rallpack_data") - # Only use very second time step, since we run with 0.1ms instead of 0.05ms - data_0 = np.loadtxt(os.path.join(basedir, "ref_branch.0"))[::2] - data_x = np.loadtxt(os.path.join(basedir, "ref_branch.x"))[::2] - - # sanity check: times are the same - assert_allclose(mon.t / second, data_0[:, 0]) - assert_allclose(mon.t / second, data_x[:, 0]) - - # Check that all endpoints are the same: - for endpoint in endpoints: - assert_allclose(mon[endpoint].v, mon[endpoint[0]].v) - - scale_0 = max(data_0[:, 1] * volt) - min(data_0[:, 1] * volt) - scale_x = max(data_x[:, 1] * volt) - min(data_x[:, 1] * volt) - squared_diff_0 = (data_0[:, 1] * volt - mon[0].v) ** 2 - - # One endpoint - squared_diff_x = (data_x[:, 1] * volt - mon[endpoint_indices[0]].v) ** 2 - rel_RMS_0 = sqrt(mean(squared_diff_0)) / scale_0 - rel_RMS_x = sqrt(mean(squared_diff_x)) / scale_x - max_rel_0 = sqrt(max(squared_diff_0)) / scale_0 - max_rel_x = sqrt(max(squared_diff_x)) / scale_x - - # RMS error should be < 0.25%, maximum error along the curve should be < 0.5% - assert 100 * rel_RMS_0 < 0.25 - assert 100 * rel_RMS_x < 0.25 - assert 100 * max_rel_0 < 0.5 - assert 100 * max_rel_x < 0.5 - - -@pytest.mark.standalone_compatible -@pytest.mark.long -@numpy_needs_scipy -def test_rallpack3(): - """ - Rallpack 3 - """ - if prefs.core.default_float_dtype is np.float32: - pytest.skip("Need double precision for this test") - defaultclock.dt = 1 * usecond - - # Morphology - diameter = 1 * um - length = 1 * mm - N = 1000 - morpho = Cylinder(diameter=diameter, length=length, n=N) - # Passive properties - gl = 1.0 / (40000 * ohm * cm**2) - El = -65 * mV - Cm = 1 * uF / cm**2 - Ri = 100 * ohm * cm - # Active properties - ENa = 50 * mV - EK = -77 * mV - gNa = 120 * msiemens / cm**2 - gK = 36 * msiemens / cm**2 - eqs = """ - Im = gl * (El-v) + gNa * m**3 * h * (ENa-v) + gK * n**4 * (EK-v) : amp/meter**2 - dm/dt = alpham * (1-m) - betam * m : 1 - dn/dt = alphan * (1-n) - betan * n : 1 - dh/dt = alphah * (1-h) - betah * h : 1 - v_shifted = v - El : volt - alpham = (0.1/mV) * (-v_shifted+25*mV) / (exp((-v_shifted+25*mV) / (10*mV)) - 1)/ms : Hz - betam = 4 * exp(-v_shifted/(18*mV))/ms : Hz - alphah = 0.07 * exp(-v_shifted/(20*mV))/ms : Hz - betah = 1/(exp((-v_shifted+30*mV) / (10*mV)) + 1)/ms : Hz - alphan = (0.01/mV) * (-v_shifted+10*mV) / (exp((-v_shifted+10*mV) / (10*mV)) - 1)/ms : Hz - betan = 0.125*exp(-v_shifted/(80*mV))/ms : Hz - I : amp (point current, constant) - """ - axon = SpatialNeuron( - morphology=morpho, model=eqs, Cm=Cm, Ri=Ri, method="exponential_euler" - ) - axon.v = El - # Pre-calculated equilibrium values at v = El - axon.m = 0.0529324852572 - axon.n = 0.317676914061 - axon.h = 0.596120753508 - axon.I[0] = 0.1 * nA # injecting at the left end - - # Record at the two ends - mon = StateMonitor(axon, "v", record=[0, 999], when="start", dt=0.05 * ms) - - run(250 * ms + 
defaultclock.dt) - - # Load the theoretical results - basedir = os.path.join(os.path.abspath(os.path.dirname(__file__)), "rallpack_data") - data_0 = np.loadtxt(os.path.join(basedir, "ref_axon.0.neuron")) - data_x = np.loadtxt(os.path.join(basedir, "ref_axon.x.neuron")) - - # sanity check: times are the same - assert_allclose(mon.t / second, data_0[:, 0]) - assert_allclose(mon.t / second, data_x[:, 0]) - - scale_0 = max(data_0[:, 1] * volt) - min(data_0[:, 1] * volt) - scale_x = max(data_x[:, 1] * volt) - min(data_x[:, 1] * volt) - squared_diff_0 = (data_0[:, 1] * volt - mon[0].v) ** 2 - squared_diff_x = (data_x[:, 1] * volt - mon[999].v) ** 2 - - rel_RMS_0 = sqrt(mean(squared_diff_0)) / scale_0 - rel_RMS_x = sqrt(mean(squared_diff_x)) / scale_x - max_rel_0 = sqrt(max(squared_diff_0)) / scale_0 - max_rel_x = sqrt(max(squared_diff_x)) / scale_x - - # RMS error should be < 0.1%, maximum error along the curve should be < 0.5% - # Note that this is much stricter than the original Rallpack evaluation, but - # with the 1us time step, the voltage traces are extremely similar - assert 100 * rel_RMS_0 < 0.1 - assert 100 * rel_RMS_x < 0.1 - assert 100 * max_rel_0 < 0.5 - assert 100 * max_rel_x < 0.5 - - -@pytest.mark.standalone_compatible -@numpy_needs_scipy -def test_rall(): - """ - Test simulation of a cylinder plus two branches, with diameters according to Rall's formula - """ - BrianLogger.suppress_name("resolution_conflict") - - defaultclock.dt = 0.01 * ms - - # Passive channels - gL = 1e-4 * siemens / cm**2 - EL = -70 * mV - - # Morphology - diameter = 1 * um - length = 300 * um - Cm = 1 * uF / cm**2 - Ri = 150 * ohm * cm - N = 500 - rm = 1 / (gL * pi * diameter) # membrane resistance per unit length - ra = (4 * Ri) / (pi * diameter**2) # axial resistance per unit length - la = sqrt(rm / ra) # space length - morpho = Cylinder(diameter=diameter, length=length, n=N) - d1 = 0.5 * um - L1 = 200 * um - rm = 1 / (gL * pi * d1) # membrane resistance per unit length - ra = (4 * Ri) / (pi * d1**2) # axial resistance per unit length - l1 = sqrt(rm / ra) # space length - morpho.L = Cylinder(diameter=d1, length=L1, n=N) - d2 = (diameter**1.5 - d1**1.5) ** (1.0 / 1.5) - rm = 1 / (gL * pi * d2) # membrane resistance per unit length - ra = (4 * Ri) / (pi * d2**2) # axial resistance per unit length - l2 = sqrt(rm / ra) # space length - L2 = (L1 / l1) * l2 - morpho.R = Cylinder(diameter=d2, length=L2, n=N) - - eqs = """ - Im=gL*(EL-v) : amp/meter**2 - I : amp (point current) - """ - - neuron = SpatialNeuron(morphology=morpho, model=eqs, Cm=Cm, Ri=Ri) - neuron.v = EL - - neuron.I[0] = 0.02 * nA # injecting at the left end - run(100 * ms) - - # Check space constant calculation - assert_allclose(la, neuron.space_constant[0]) - assert_allclose(l1, neuron.L.space_constant[0]) - assert_allclose(l2, neuron.R.space_constant[0]) - - # Theory - x = neuron.main.distance - ra = la * 4 * Ri / (pi * diameter**2) - l = length / la + L1 / l1 - theory = EL + ra * neuron.I[0] * cosh(l - x / la) / sinh(l) - v = neuron.main.v - assert_allclose(v - EL, theory - EL, atol=2e-6) - x = neuron.L.distance - theory = EL + ra * neuron.I[0] * cosh( - l - neuron.main.distance[-1] / la - (x - neuron.main.distance[-1]) / l1 - ) / sinh(l) - v = neuron.L.v - assert_allclose(v - EL, theory - EL, atol=2e-6) - x = neuron.R.distance - theory = EL + ra * neuron.I[0] * cosh( - l - neuron.main.distance[-1] / la - (x - neuron.main.distance[-1]) / l2 - ) / sinh(l) - v = neuron.R.v - assert_allclose(v - EL, theory - EL, atol=2e-6) - - 
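The construction in test_rall above follows Rall's 3/2 power rule together with matched electrotonic length, so the two daughter branches are electrically equivalent to a continuation of the parent cylinder and the voltage can be compared against the single-cable steady-state solution EL + ra*I*cosh(l - x/la)/sinh(l). The following is a minimal, dimensionless sketch of those two relations (plain floats instead of Brian quantities; the numbers mirror the test's diameter, d1 and L1, and are illustrative only):

    import numpy as np

    # Parent cylinder and first daughter branch, as in test_rall (dimensionless)
    diameter, d1 = 1.0, 0.5
    L1 = 200.0

    # Rall's 3/2 power rule fixes the second daughter's diameter:
    #     diameter**(3/2) == d1**(3/2) + d2**(3/2)
    d2 = (diameter**1.5 - d1**1.5) ** (1.0 / 1.5)
    assert np.isclose(diameter**1.5, d1**1.5 + d2**1.5)

    # For a passive cable the space constant scales as sqrt(diameter)
    # (lambda = sqrt(rm/ra), with rm ~ 1/d and ra ~ 1/d**2), so matching the
    # electrotonic length L/lambda of the two daughters gives:
    L2 = L1 * np.sqrt(d2 / d1)
    assert np.isclose(L1 / np.sqrt(d1), L2 / np.sqrt(d2))

This reproduces the L2 = (L1 / l1) * l2 step in the test, since l2/l1 reduces to sqrt(d2/d1) for the passive parameters used there (gL and Ri cancel in the ratio).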
-@pytest.mark.standalone_compatible -@numpy_needs_scipy -def test_basic_diffusion(): - # A very basic test that shows that propagation is working in a very basic - # sense, testing all morphological classes - - defaultclock.dt = 0.01 * ms - - EL = -70 * mV - gL = 1e-4 * siemens / cm**2 - target = -10 * mV - eqs = """ - Im = gL*(EL-v) + gClamp*(target-v): amp/meter**2 - gClamp : siemens/meter**2 - """ - - morph = Soma(diameter=30 * um) - morph.axon = Cylinder(n=10, diameter=10 * um, length=100 * um) - morph.dend = Section( - n=10, - diameter=[10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0.1] * um, - length=np.ones(10) * 10 * um, - ) - - neuron = SpatialNeuron(morph, eqs) - neuron.v = EL - neuron.axon.gClamp[0] = 100 * siemens / cm**2 - - mon = StateMonitor(neuron, "v", record=True) - - run(0.25 * ms) - assert all(abs(mon.v[:, -1] / mV + 10) < 0.25), mon.v[:, -1] / mV - - -@pytest.mark.codegen_independent -def test_allowed_integration(): - morph = Soma(diameter=30 * um) - EL = -70 * mV - gL = 1e-4 * siemens / cm**2 - ENa = 115 * mV - gNa = 120 * msiemens / cm**2 - VT = -50.4 * mV - DeltaT = 2 * mV - ENMDA = 0.0 * mV - - @check_units(voltage=volt, result=volt) - def user_fun(voltage): - return voltage # could be an arbitrary function and is therefore unsafe - - allowed_eqs = [ - "Im = gL*(EL-v) : amp/meter**2", - """ - Im = gl * (El-v) + gNa * m**3 * h * (ENa-v) : amp/meter**2 - dm/dt = alpham * (1-m) - betam * m : 1 - dh/dt = alphah * (1-h) - betah * h : 1 - alpham = (0.1/mV) * (-v+25*mV) / (exp((-v+25*mV) / (10*mV)) - 1)/ms : Hz - betam = 4 * exp(-v/(18*mV))/ms : Hz - alphah = 0.07 * exp(-v/(20*mV))/ms : Hz - betah = 1/(exp((-v+30*mV) / (10*mV)) + 1)/ms : Hz - """, - """ - Im = gl * (El-v) : amp/meter**2 - I_ext = 1*nA + sin(2*pi*100*Hz*t)*nA : amp (point current) - """, - """ - Im = I_leak + I_spike : amp/meter**2 - I_leak = gL*(EL - v) : amp/meter**2 - I_spike = gL*DeltaT*exp((v - VT)/DeltaT): amp/meter**2 (constant over dt) - """, - """ - Im = gL*(EL-v) : amp/meter**2 - I_NMDA = gNMDA*(ENMDA-v)*Mgblock : amp (point current) - gNMDA : siemens - Mgblock = 1./(1. + exp(-0.062*v/mV)/3.57) : 1 (constant over dt) - """, - "Im = gL*(EL - v) + gL*DeltaT*exp((v - VT)/DeltaT) : amp/meter**2", - """ - Im = I_leak + I_spike : amp/meter**2 - I_leak = gL*(EL - v) : amp/meter**2 - I_spike = gL*DeltaT*exp((v - VT)/DeltaT): amp/meter**2 - """, - """ - Im = gL*(EL-v) : amp/meter**2 - I_NMDA = gNMDA*(ENMDA-v)*Mgblock : amp (point current) - gNMDA : siemens - Mgblock = 1./(1. 
+ exp(-0.062*v/mV)/3.57) : 1 - """, - ] - forbidden_eqs = [ - """Im = gl * (El-v + user_fun(v)) : amp/meter**2""", - """Im = gl * clip(El-v, -100*mV, 100*mV) : amp/meter**2""", - ] - for eqs in allowed_eqs: - # Should not raise an error - neuron = SpatialNeuron(morph, eqs) - - for eqs in forbidden_eqs: - # Should raise an error - with pytest.raises(TypeError): - SpatialNeuron(morph, eqs) - - -@pytest.mark.codegen_independent -def test_spatialneuron_indexing(): - sec = Cylinder(length=50 * um, diameter=10 * um, n=1) - sec.sec1 = Cylinder(length=50 * um, diameter=10 * um, n=2) - sec.sec1.sec11 = Cylinder(length=50 * um, diameter=10 * um, n=4) - sec.sec1.sec12 = Cylinder(length=50 * um, diameter=10 * um, n=8) - sec.sec2 = Cylinder(length=50 * um, diameter=10 * um, n=16) - sec.sec2.sec21 = Cylinder(length=50 * um, diameter=10 * um, n=32) - neuron = SpatialNeuron(sec, "Im = 0*amp/meter**2 : amp/meter**2") - neuron.v = "i*volt" - # Accessing indices/variables of a subtree refers to the full subtree - assert len(neuron.indices[:]) == 1 + 2 + 4 + 8 + 16 + 32 - assert len(neuron.sec1.indices[:]) == 2 + 4 + 8 - assert len(neuron.sec1.sec11.indices[:]) == 4 - assert len(neuron.sec1.sec12.indices[:]) == 8 - assert len(neuron.sec2.indices[:]) == 16 + 32 - assert len(neuron.sec2.sec21.indices[:]) == 32 - assert len(neuron.v[:]) == 1 + 2 + 4 + 8 + 16 + 32 - assert len(neuron.sec1.v[:]) == 2 + 4 + 8 - assert len(neuron.sec1.sec11.v[:]) == 4 - assert len(neuron.sec1.sec12.v[:]) == 8 - assert len(neuron.sec2.v[:]) == 16 + 32 - assert len(neuron.sec2.sec21.v[:]) == 32 - # Accessing indices/variables with ".main" only refers to the section - assert len(neuron.main.indices[:]) == 1 - assert len(neuron.sec1.main.indices[:]) == 2 - assert len(neuron.sec1.sec11.main.indices[:]) == 4 - assert len(neuron.sec1.sec12.main.indices[:]) == 8 - assert len(neuron.sec2.main.indices[:]) == 16 - assert len(neuron.sec2.sec21.main.indices[:]) == 32 - assert len(neuron.main.v[:]) == 1 - assert len(neuron.sec1.main.v[:]) == 2 - assert len(neuron.sec1.sec11.main.v[:]) == 4 - assert len(neuron.sec1.sec12.main.v[:]) == 8 - assert len(neuron.sec2.main.v[:]) == 16 - assert len(neuron.sec2.sec21.main.v[:]) == 32 - # Accessing subgroups - assert len(neuron[0].indices[:]) == 1 - assert len(neuron[0 * um : 50 * um].indices[:]) == 1 - assert len(neuron[0:1].indices[:]) == 1 - assert len(neuron[sec.sec2.indices[:]]) == 16 - assert len(neuron[sec.sec2]) == 16 - assert_equal(neuron.sec1.sec11.v, [3, 4, 5, 6] * volt) - assert_equal(neuron.sec1.sec11[1].v, neuron.sec1.sec11.v[1]) - assert_equal(neuron.sec1.sec11[1:3].v, neuron.sec1.sec11.v[1:3]) - assert_equal(neuron.sec1.sec11[1:3].v, [4, 5] * volt) - - -@pytest.mark.codegen_independent -def test_tree_index_consistency(): - # Test all possible trees with depth 3 and a maximum of 3 branches subtree - # (a total of 84 trees) - # This tests whether the indices (i.e. where the compartments are placed in - # the overall flattened 1D structure) make sense: for the `SpatialSubgroup` - # mechanism to work correctly, each subtree has to have contiguous indices. - # Separate subtrees should of course have non-overlapping indices. 
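    # As a concrete illustration (the numbering assumes the depth-first layout
    # that test_spatialneuron_indexing above relies on, e.g. sec1.sec11
    # occupying indices 3..6): with root n=1, sec1 n=2 (children sec11 n=4 and
    # sec12 n=8) and sec2 n=16 (child sec21 n=32), the sec1 subtree covers the
    # contiguous block 1..14 and the sec2 subtree the contiguous block 15..62,
    # so the two subtrees do not overlap.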
- for tree_description in itertools.product( - [1, 2, 3], # children of root - [0, 1, 2, 3], # children of first branch - [0, 1, 2, 3], # children of second branch - [0, 1, 2, 3], # children of third branch - ): - sec = Cylinder(length=50 * um, diameter=10 * um, n=1) - root_children = tree_description[0] - if not all([tree_description[x] == 0 for x in range(root_children + 1, 4)]): - # skip redundant descriptions (differing number of branches in a - # subtree that does not exist) - continue - - # Create a tree according to the description - for idx in range(root_children): - setattr( - sec, - f"sec{int(idx + 1)}", - Cylinder(length=50 * um, diameter=10 * um, n=2 * (idx + 1)), - ) - for child in range(root_children): - subsec = getattr(sec, f"sec{int(child + 1)}") - subsec_children = tree_description[child + 1] - for idx in range(subsec_children): - setattr( - subsec, - f"sec{int(child + 1)}{int(idx + 1)}", - Cylinder(length=50 * um, diameter=10 * um, n=1 + (child + 1) * idx), - ) - - neuron = SpatialNeuron(sec, "Im = 0*amp/meter**2 : amp/meter**2") - # Check the indicies for the full neuron: - assert_equal(neuron.indices[:], np.arange(sec.total_compartments)) - - all_subsec_indices = [] - for child in range(root_children): - subsec = getattr(neuron, f"sec{int(child + 1)}") - sub_indices = set(subsec.main.indices[:]) - subsec_children = tree_description[child + 1] - for idx in range(subsec_children): - subsubsec = getattr(subsec, f"sec{int(child + 1)}{int(idx + 1)}") - sub_indices |= set(subsubsec.main.indices[:]) - # The indices for a full subtree should be the union of the indices - # for all subsections within that subtree - assert sub_indices == set(subsec.indices[:]) - all_subsec_indices.extend(subsec.indices[:]) - # Separate subtrees should not overlap - assert len(all_subsec_indices) == len(set(all_subsec_indices)) - - -@pytest.mark.codegen_independent -def test_spatialneuron_subtree_assignment(): - sec = Cylinder(length=50 * um, diameter=10 * um, n=2) - sec.sec1 = Cylinder(length=50 * um, diameter=10 * um, n=2) - sec.sec1.sec11 = Cylinder(length=50 * um, diameter=10 * um, n=2) - sec.sec1.sec12 = Cylinder(length=50 * um, diameter=10 * um, n=2) - sec.sec2 = Cylinder(length=50 * um, diameter=10 * um, n=2) - sec.sec2.sec21 = Cylinder(length=50 * um, diameter=10 * um, n=2) - neuron = SpatialNeuron(sec, "Im = 0*amp/meter**2 : amp/meter**2") - - neuron.v = 1 * volt - assert_allclose(neuron.v[:], np.ones(12) * volt) - neuron.sec1.v += 1 * volt - assert_allclose(neuron.main.v[:], np.ones(2) * volt) - assert_allclose(neuron.sec1.v[:], np.ones(6) * 2 * volt) - assert_allclose(neuron.sec1.main.v[:], np.ones(2) * 2 * volt) - assert_allclose(neuron.sec1.sec11.v[:], np.ones(2) * 2 * volt) - assert_allclose(neuron.sec1.sec12.v[:], np.ones(2) * 2 * volt) - assert_allclose(neuron.sec2.v[:], np.ones(4) * volt) - neuron.sec2.v = 5 * volt - assert_allclose(neuron.sec2.v[:], np.ones(4) * 5 * volt) - assert_allclose(neuron.sec2.main.v[:], np.ones(2) * 5 * volt) - assert_allclose(neuron.sec2.sec21.v[:], np.ones(2) * 5 * volt) - - -@pytest.mark.codegen_independent -def test_spatialneuron_morphology_assignment(): - sec = Cylinder(length=50 * um, diameter=10 * um, n=2) - sec.sec1 = Cylinder(length=50 * um, diameter=10 * um, n=2) - sec.sec1.sec11 = Cylinder(length=50 * um, diameter=10 * um, n=2) - sec.sec1.sec12 = Cylinder(length=50 * um, diameter=10 * um, n=2) - sec.sec2 = Cylinder(length=50 * um, diameter=10 * um, n=2) - sec.sec2.sec21 = Cylinder(length=50 * um, diameter=10 * um, n=2) - neuron = 
SpatialNeuron(sec, "Im = 0*amp/meter**2 : amp/meter**2") - - neuron.v[sec.sec1.sec11] = 1 * volt - assert_allclose(neuron.sec1.sec11.v[:], np.ones(2) * volt) - assert_allclose(neuron.sec1.sec12.v[:], np.zeros(2) * volt) - assert_allclose(neuron.sec1.main.v[:], np.zeros(2) * volt) - assert_allclose(neuron.main.v[:], np.zeros(2) * volt) - assert_allclose(neuron.sec2.v[:], np.zeros(4) * volt) - - neuron.v[sec.sec2[25 * um :]] = 2 * volt - neuron.v[sec.sec1[: 25 * um]] = 3 * volt - assert_allclose(neuron.main.v[:], np.zeros(2) * volt) - assert_allclose(neuron.sec2.main.v[:], [0, 2] * volt) - assert_allclose(neuron.sec2.sec21.v[:], np.zeros(2) * volt) - assert_allclose(neuron.sec1.main.v[:], [3, 0] * volt) - assert_allclose(neuron.sec1.sec11.v[:], np.ones(2) * volt) - assert_allclose(neuron.sec1.sec12.v[:], np.zeros(2) * volt) - - -@pytest.mark.standalone_compatible -@pytest.mark.multiple_runs -@numpy_needs_scipy -def test_spatialneuron_capacitive_currents(): - if prefs.core.default_float_dtype is np.float32: - pytest.skip("Need double precision for this test") - defaultclock.dt = 0.1 * ms - morpho = Cylinder(x=[0, 10] * cm, diameter=2 * 238 * um, n=200, type="axon") - - El = 10.613 * mV - ENa = 115 * mV - EK = -12 * mV - gl = 0.3 * msiemens / cm**2 - gNa0 = 120 * msiemens / cm**2 - gK = 36 * msiemens / cm**2 - - # Typical equations - eqs = """ - # The same equations for the whole neuron, but possibly different parameter values - # distributed transmembrane current - Im = gl * (El-v) + gNa * m**3 * h * (ENa-v) + gK * n**4 * (EK-v) : amp/meter**2 - I : amp (point current) # applied current - dm/dt = alpham * (1-m) - betam * m : 1 - dn/dt = alphan * (1-n) - betan * n : 1 - dh/dt = alphah * (1-h) - betah * h : 1 - alpham = (0.1/mV) * (-v+25*mV) / (exp((-v+25*mV) / (10*mV)) - 1)/ms : Hz - betam = 4 * exp(-v/(18*mV))/ms : Hz - alphah = 0.07 * exp(-v/(20*mV))/ms : Hz - betah = 1/(exp((-v+30*mV) / (10*mV)) + 1)/ms : Hz - alphan = (0.01/mV) * (-v+10*mV) / (exp((-v+10*mV) / (10*mV)) - 1)/ms : Hz - betan = 0.125*exp(-v/(80*mV))/ms : Hz - gNa : siemens/meter**2 - """ - - neuron = SpatialNeuron( - morphology=morpho, - model=eqs, - Cm=1 * uF / cm**2, - Ri=35.4 * ohm * cm, - method="exponential_euler", - ) - mon = StateMonitor(neuron, ["Im", "Ic"], record=True, when="end") - run(10 * ms) - neuron.I[0] = 1 * uA # current injection at one end - run(3 * ms) - neuron.I = 0 * amp - run(10 * ms) - device.build(direct_call=False, **device.build_options) - assert_allclose( - (mon.Im - mon.Ic).sum(axis=0) / (mA / cm**2), np.zeros(230), atol=1e6 - ) - - -@pytest.mark.codegen_independent -def test_point_current(): - soma = Soma(10 * um) - eqs = """Im = 0*nA/cm**2 : amp/meter**2 - I1 = 1*nA : amp (point current) - I2 = 1*nA : amp (point current, constant over dt)""" - neuron = SpatialNeuron(soma, eqs) - assert "I1/area" in neuron.equations["Im"].expr.code - assert "I2/area" in neuron.equations["Im"].expr.code # see issue #1160 - - -@pytest.mark.standalone_compatible -@pytest.mark.multiple_runs -@numpy_needs_scipy -def test_spatialneuron_threshold_location(): - morpho = Soma(10 * um) - morpho.axon = Cylinder(1 * um, n=2, length=20 * um) - model = """ - Im = 0*nA/cm**2 : amp/meter**2 - should_spike : boolean (constant) - """ - neuron = SpatialNeuron( - morpho, model, threshold_location=morpho.axon[15 * um], threshold="should_spike" - ) - # Different variants that should do the same thing - neuron2 = SpatialNeuron( - morpho, - model, - threshold_location=morpho.axon.indices[15 * um], - threshold="should_spike", - ) - 
neuron3 = SpatialNeuron( - morpho, model, threshold_location=2, threshold="should_spike" - ) - # Cannot use multiple compartments - with pytest.raises(AttributeError): - SpatialNeuron( - morpho, model, threshold_location=[2, 3], threshold="should_spike" - ) - with pytest.raises(AttributeError): - SpatialNeuron( - morpho, - model, - threshold_location=morpho.axon[5 * um : 15 * um], - threshold="should_spike", - ) - neurons = [neuron, neuron2, neuron3] - monitors = [SpikeMonitor(n) for n in neurons] - - net = Network(neurons, monitors) - for n in neurons: - n.should_spike = True # all compartments want to spike - net.run(defaultclock.dt) - for n in neurons: - n.should_spike = False # no compartment wants to spike - net.run(defaultclock.dt) - for n in neurons: - n.should_spike = [False, False, True] - net.run(defaultclock.dt) - for n in neurons: - n.should_spike = [True, True, False] - net.run(defaultclock.dt) - device.build(direct_call=False, **device.build_options) - for mon in monitors: - assert len(mon.i) == 2 - assert all(mon.i == 2) - assert_allclose(mon.t, [0 * ms, 2 * defaultclock.dt]) - - -@pytest.mark.standalone_compatible -@numpy_needs_scipy -def test_spatialneuron_timedarray(): - # See GitHub issue 1427 - ta = TimedArray([0, 1] * nA, dt=1 * ms) - morpho = Soma(diameter=10 * um) - neuron = SpatialNeuron(morpho, "Im = ta(t)/area : amp/meter**2", method="euler") - mon = StateMonitor(neuron, "v", record=0, when="after_groups") - run(2 * ms) - assert_allclose( - np.diff(mon.v_[0]), - np.r_[ - np.zeros(9), - np.array( - np.ones(10) * 1 * nA / neuron.area[0] / neuron.Cm * defaultclock.dt - ), - ], - ) - - -if __name__ == "__main__": - test_custom_events() - test_construction() - test_construction_coordinates() - test_infinitecable() - test_finitecable() - test_rallpack1() - test_rallpack2() - test_rallpack3() - test_rall() - test_basic_diffusion() - test_allowed_integration() - test_spatialneuron_indexing() - test_tree_index_consistency() - test_spatialneuron_subtree_assignment() - test_spatialneuron_morphology_assignment() - test_spatialneuron_capacitive_currents() - test_spatialneuron_timedarray() diff --git a/brian2/tests/test_spikegenerator.py b/brian2/tests/test_spikegenerator.py deleted file mode 100644 index 50aa7f7d9..000000000 --- a/brian2/tests/test_spikegenerator.py +++ /dev/null @@ -1,455 +0,0 @@ -""" -Tests for `SpikeGeneratorGroup` -""" - -import os -import tempfile - -import pytest -from numpy.testing import assert_array_equal, assert_equal - -from brian2 import * -from brian2.core.network import schedule_propagation_offset -from brian2.devices.device import reinit_and_delete -from brian2.tests.utils import assert_allclose, exc_isinstance -from brian2.utils.logger import catch_logs - - -@pytest.mark.standalone_compatible -def test_spikegenerator_connected(): - """ - Test that `SpikeGeneratorGroup` connects properly. 
- """ - G = NeuronGroup(10, "v:1") - mon = StateMonitor(G, "v", record=True, when="end") - indices = np.array([3, 2, 1, 1, 4, 5]) - times = np.array([6, 5, 4, 3, 3, 1]) * ms - SG = SpikeGeneratorGroup(10, indices, times) - S = Synapses(SG, G, on_pre="v+=1") - S.connect(j="i") - run(7 * ms) - # The following neurons should not receive any spikes - for idx in [0, 6, 7, 8, 9]: - assert all(mon[idx].v == 0) - offset = schedule_propagation_offset() - # The following neurons should receive a single spike - for idx, time in zip([2, 3, 4, 5], [5, 6, 3, 1] * ms): - assert all(mon[idx].v[mon.t < time + offset] == 0) - assert all(mon[idx].v[mon.t >= time + offset] == 1) - # This neuron receives two spikes - assert all(mon[1].v[mon.t < 3 * ms + offset] == 0) - assert all(mon[1].v[(mon.t >= 3 * ms + offset) & (mon.t < 4 * ms + offset)] == 1) - assert all(mon[1].v[(mon.t >= 4 * ms + offset)] == 2) - - -@pytest.mark.standalone_compatible -def test_spikegenerator_basic(): - """ - Basic test for `SpikeGeneratorGroup`. - """ - indices = np.array([3, 2, 1, 1, 2, 3, 3, 2, 1]) - times = np.array([1, 4, 4, 3, 2, 4, 2, 3, 2]) * ms - SG = SpikeGeneratorGroup(5, indices, times) - s_mon = SpikeMonitor(SG) - run(5 * ms) - _compare_spikes(5, indices, times, s_mon) - - -@pytest.mark.standalone_compatible -def test_spikegenerator_basic_sorted(): - """ - Basic test for `SpikeGeneratorGroup` with already sorted spike events. - """ - indices = np.array([3, 1, 2, 3, 1, 2, 1, 2, 3]) - times = np.array([1, 2, 2, 2, 3, 3, 4, 4, 4]) * ms - SG = SpikeGeneratorGroup(5, indices, times) - s_mon = SpikeMonitor(SG) - run(5 * ms) - _compare_spikes(5, indices, times, s_mon) - - -@pytest.mark.standalone_compatible -def test_spikegenerator_basic_sorted_with_sorted(): - """ - Basic test for `SpikeGeneratorGroup` with already sorted spike events. - """ - indices = np.array([3, 1, 2, 3, 1, 2, 1, 2, 3]) - times = np.array([1, 2, 2, 2, 3, 3, 4, 4, 4]) * ms - SG = SpikeGeneratorGroup(5, indices, times, sorted=True) - s_mon = SpikeMonitor(SG) - run(5 * ms) - _compare_spikes(5, indices, times, s_mon) - - -@pytest.mark.standalone_compatible -def test_spikegenerator_period(): - """ - Basic test for `SpikeGeneratorGroup`. - """ - indices = np.array([3, 2, 1, 1, 2, 3, 3, 2, 1]) - times = np.array([1, 4, 4, 3, 2, 4, 2, 3, 2]) * ms - SG = SpikeGeneratorGroup(5, indices, times, period=5 * ms) - - s_mon = SpikeMonitor(SG) - run(10 * ms) - for idx in range(5): - generator_spikes = sorted( - [(idx, time) for time in times[indices == idx]] - + [(idx, time + 5 * ms) for time in times[indices == idx]] - ) - recorded_spikes = sorted([(idx, time) for time in s_mon.t[s_mon.i == idx]]) - assert_allclose(generator_spikes, recorded_spikes) - - -@pytest.mark.codegen_independent -def test_spikegenerator_extreme_period(): - """ - Basic test for `SpikeGeneratorGroup`. - """ - indices = np.array([0, 1, 2]) - times = np.array([0, 1, 2]) * ms - SG = SpikeGeneratorGroup(5, indices, times, period=1e6 * second) - s_mon = SpikeMonitor(SG) - with catch_logs() as l: - run(10 * ms) - - assert_equal(s_mon.i, np.array([0, 1, 2])) - assert_allclose(s_mon.t, [0, 1, 2] * ms) - assert len(l) == 1 and l[0][1].endswith("spikegenerator_long_period") - - -@pytest.mark.standalone_compatible -def test_spikegenerator_period_rounding(): - # See discussion in PR #1042 - # The last spike will be considered to be in the time step *after* 1s, due - # to the way our rounding works. Although probably not what the user - # expects, this should therefore raise an error. 
In previous versions of - # Brian, this did not raise any error but silently discarded the spike. - with pytest.raises(ValueError): - SpikeGeneratorGroup( - 1, [0, 0, 0], [0 * ms, 0.9 * ms, 0.99999 * ms], period=1 * ms, dt=0.1 * ms - ) - # This should also raise a ValueError, since the last two spikes fall into - # the same bin - s = SpikeGeneratorGroup( - 1, [0, 0, 0], [0 * ms, 0.9 * ms, 0.96 * ms], period=1 * ms, dt=0.1 * ms - ) - net = Network(s) - with pytest.raises(BrianObjectException) as exc: - net.run(0 * ms) - assert exc_isinstance(exc, ValueError) - - -def test_spikegenerator_period_repeat(): - """ - Basic test for `SpikeGeneratorGroup`. - """ - indices = np.zeros(10) - times = arange(0, 1, 0.1) * ms - - rec = np.rec.fromarrays([times, indices], names=["t", "i"]) - rec.sort() - sorted_times = np.ascontiguousarray(rec.t) * 1000 - sorted_indices = np.ascontiguousarray(rec.i) - SG = SpikeGeneratorGroup(1, indices, times, period=1 * ms) - s_mon = SpikeMonitor(SG) - net = Network(SG, s_mon) - rate = PopulationRateMonitor(SG) - for idx in range(5): - net.run(1 * ms) - assert (idx + 1) * len(SG.spike_time) == s_mon.num_spikes - - -def _compare_spikes( - N, indices, times, recorded, start_time=0 * ms, end_time=1e100 * second -): - for idx in range(N): - generator_spikes = sorted([(idx, time) for time in times[indices == idx]]) - recorded_spikes = sorted( - [ - (idx, time) - for time in recorded.t[recorded.i == idx] - if time >= start_time and time < end_time - ] - ) - assert_allclose(generator_spikes, recorded_spikes) - - -@pytest.mark.standalone_compatible -@pytest.mark.multiple_runs -def test_spikegenerator_change_spikes(): - indices1 = np.array([3, 2, 1, 1, 2, 3, 3, 2, 1]) - times1 = np.array([1, 4, 4, 3, 2, 4, 2, 3, 2]) * ms - SG = SpikeGeneratorGroup(5, indices1, times1) - s_mon = SpikeMonitor(SG) - net = Network(SG, s_mon) - net.run(5 * ms) - - indices2 = np.array([3, 2, 1, 1, 2, 3, 3, 2, 1, 3, 3, 3, 1, 2]) - times2 = ( - np.array([1, 4, 4, 3, 2, 4, 2, 3, 2, 4.5, 4.7, 4.8, 4.5, 4.7]) * ms + 5 * ms - ) - - SG.set_spikes(indices2, times2) - net.run(5 * ms) - - indices3 = np.array([4, 1, 0]) - times3 = np.array([1, 3, 4]) * ms + 10 * ms - - SG.set_spikes(indices3, times3) - net.run(5 * ms) - device.build(direct_call=False, **device.build_options) - _compare_spikes(5, indices1, times1, s_mon, 0 * ms, 5 * ms) - _compare_spikes(5, indices2, times2, s_mon, 5 * ms, 10 * ms) - _compare_spikes(5, indices3, times3, s_mon, 10 * ms) - - -@pytest.mark.standalone_compatible -@pytest.mark.multiple_runs -def test_spikegenerator_change_period(): - """ - Basic test for `SpikeGeneratorGroup`. 
- """ - indices1 = np.array([3, 2, 1, 1, 2, 3, 3, 2, 1]) - times1 = np.array([1, 4, 4, 3, 2, 4, 2, 3, 2]) * ms - SG = SpikeGeneratorGroup(5, indices1, times1, period=5 * ms) - s_mon = SpikeMonitor(SG) - net = Network(SG, s_mon) - net.run(10 * ms) - - indices2 = np.array([3, 2, 1, 1, 2, 3, 3, 2, 1, 3, 3, 3, 1, 2]) - times2 = ( - np.array([1, 4, 4, 3, 2, 4, 2, 3, 2, 4.5, 4.7, 4.8, 4.5, 4.7]) * ms + 10 * ms - ) - - SG.set_spikes(indices2, times2) - net.run(10 * ms) # period should no longer be in effect - device.build(direct_call=False, **device.build_options) - - _compare_spikes( - 5, - np.hstack([indices1, indices1]), - np.hstack([times1, times1 + 5 * ms]), - s_mon, - 0 * ms, - 10 * ms, - ) - _compare_spikes(5, indices2, times2, s_mon, 10 * ms) - - -@pytest.mark.codegen_independent -def test_spikegenerator_incorrect_values(): - with pytest.raises(TypeError): - SpikeGeneratorGroup(0, [], [] * second) - # Floating point value for N - with pytest.raises(TypeError): - SpikeGeneratorGroup(1.5, [], [] * second) - # Negative index - with pytest.raises(ValueError): - SpikeGeneratorGroup(5, [0, 3, -1], [0, 1, 2] * ms) - # Too high index - with pytest.raises(ValueError): - SpikeGeneratorGroup(5, [0, 5, 1], [0, 1, 2] * ms) - # Negative time - with pytest.raises(ValueError): - SpikeGeneratorGroup(5, [0, 1, 2], [0, -1, 2] * ms) - - -@pytest.mark.codegen_independent -def test_spikegenerator_incorrect_period(): - """ - Test that you cannot provide incorrect period arguments or combine - inconsistent period and dt arguments. - """ - # Period is negative - with pytest.raises(ValueError): - SpikeGeneratorGroup(1, [], [] * second, period=-1 * ms) - - # Period is smaller than the highest spike time - with pytest.raises(ValueError): - SpikeGeneratorGroup(1, [0], [2] * ms, period=1 * ms) - # Period is not an integer multiple of dt - SG = SpikeGeneratorGroup(1, [], [] * second, period=1.25 * ms, dt=0.1 * ms) - net = Network(SG) - with pytest.raises(BrianObjectException) as exc: - net.run(0 * ms) - assert exc_isinstance(exc, NotImplementedError) - - SG = SpikeGeneratorGroup(1, [], [] * second, period=0.101 * ms, dt=0.1 * ms) - net = Network(SG) - with pytest.raises(BrianObjectException) as exc: - net.run(0 * ms) - assert exc_isinstance(exc, NotImplementedError) - - SG = SpikeGeneratorGroup(1, [], [] * second, period=3.333 * ms, dt=0.1 * ms) - net = Network(SG) - with pytest.raises(BrianObjectException) as exc: - net.run(0 * ms) - assert exc_isinstance(exc, NotImplementedError) - - # This should not raise an error (see #1041) - SG = SpikeGeneratorGroup(1, [], [] * ms, period=150 * ms, dt=0.1 * ms) - net = Network(SG) - net.run(0 * ms) - - # Period is smaller than dt - SG = SpikeGeneratorGroup(1, [], [] * second, period=1 * ms, dt=2 * ms) - net = Network(SG) - with pytest.raises(BrianObjectException) as exc: - net.run(0 * ms) - assert exc_isinstance(exc, ValueError) - - -def test_spikegenerator_rounding(): - # all spikes should fall into the first time bin - indices = np.arange(100) - times = np.linspace(0, 0.1, 100, endpoint=False) * ms - SG = SpikeGeneratorGroup(100, indices, times, dt=0.1 * ms) - mon = SpikeMonitor(SG) - net = Network(SG, mon) - net.run(0.1 * ms) - assert_equal(mon.count, np.ones(100)) - - # all spikes should fall in separate bins - dt = 0.1 * ms - indices = np.zeros(10000) - times = np.arange(10000) * dt - SG = SpikeGeneratorGroup(1, indices, times, dt=dt) - target = NeuronGroup( - 1, "count : 1", threshold="True", reset="count=0" - ) # set count to zero at every time step - syn = Synapses(SG, 
target, on_pre="count+=1") - syn.connect() - mon = StateMonitor(target, "count", record=0, when="end") - net = Network(SG, target, syn, mon) - # change the schedule so that resets are processed before synapses - net.schedule = ["start", "groups", "thresholds", "resets", "synapses", "end"] - net.run(10000 * dt) - assert_equal(mon[0].count, np.ones(10000)) - - -@pytest.mark.standalone_compatible -@pytest.mark.long -def test_spikegenerator_rounding_long(): - # all spikes should fall in separate bins - dt = 0.1 * ms - N = 1000000 - indices = np.zeros(N) - times = np.arange(N) * dt - SG = SpikeGeneratorGroup(1, indices, times, dt=dt) - target = NeuronGroup(1, "count : 1") - syn = Synapses(SG, target, on_pre="count+=1") - syn.connect() - spikes = SpikeMonitor(SG) - mon = StateMonitor(target, "count", record=0, when="end") - run(N * dt, report="text") - assert spikes.count[0] == N, f"expected {int(N)} spikes, got {int(spikes.count[0])}" - assert all(np.diff(mon[0].count[:]) == 1) - - -@pytest.mark.standalone_compatible -@pytest.mark.long -def test_spikegenerator_rounding_period(): - # all spikes should fall in separate bins - dt = 0.1 * ms - N = 100 - repeats = 10000 - indices = np.zeros(N) - times = np.arange(N) * dt - SG = SpikeGeneratorGroup(1, indices, times, dt=dt, period=N * dt) - target = NeuronGroup(1, "count : 1") - syn = Synapses(SG, target, on_pre="count+=1") - syn.connect() - spikes = SpikeMonitor(SG) - mon = StateMonitor(target, "count", record=0, when="end") - run(N * repeats * dt, report="text") - # print np.int_(np.round(spikes.t/dt)) - assert_equal(spikes.count[0], N * repeats) - assert all(np.diff(mon[0].count[:]) == 1) - - -@pytest.mark.codegen_independent -def test_spikegenerator_multiple_spikes_per_bin(): - # Multiple spikes per bin are of course fine if they don't belong to the - # same neuron - SG = SpikeGeneratorGroup(2, [0, 1], [0, 0.05] * ms, dt=0.1 * ms) - net = Network(SG) - net.run(0 * ms) - - # This should raise an error - SG = SpikeGeneratorGroup(2, [0, 0], [0, 0.05] * ms, dt=0.1 * ms) - net = Network(SG) - with pytest.raises(BrianObjectException) as exc: - net.run(0 * ms) - print(exc.value.__cause__) - assert exc_isinstance(exc, ValueError) - - # More complicated scenario where dt changes between runs - defaultclock.dt = 0.1 * ms - SG = SpikeGeneratorGroup(2, [0, 0], [0.05, 0.15] * ms) - net = Network(SG) - net.run(0 * ms) # all is fine - defaultclock.dt = 0.2 * ms # Now the two spikes fall into the same bin - with pytest.raises(BrianObjectException) as exc: - net.run(0 * ms) - assert exc_isinstance(exc, ValueError) - - -@pytest.mark.standalone_compatible -@pytest.mark.multiple_runs -def test_spikegenerator_multiple_runs(): - indices = np.zeros(5) - times = np.arange(5) * ms - spike_gen = SpikeGeneratorGroup(1, indices, times) # all good - spike_mon = SpikeMonitor(spike_gen) - run(5 * ms) - # Setting the same spike times again should not do anything, since they are - # before the start of the current simulation - spike_gen.set_spikes(indices, times) - # however, a warning should be raised - with catch_logs() as l: - run(5 * ms) - device.build(direct_call=False, **device.build_options) - assert len(l) == 1 and l[0][1].endswith("ignored_spikes") - assert spike_mon.num_spikes == 5 - - -def test_spikegenerator_restore(): - # Check whether SpikeGeneratorGroup works with store/restore - # See github issue #1084 - gen = SpikeGeneratorGroup(1, [0, 0, 0], [0, 1, 2] * ms) - mon = SpikeMonitor(gen) - store() - run(3 * ms) - assert_array_equal(mon.i, [0, 0, 0]) - 
assert_allclose(mon.t, [0, 1, 2] * ms) - restore() - run(3 * ms) - assert_array_equal(mon.i, [0, 0, 0]) - assert_allclose(mon.t, [0, 1, 2] * ms) - - -if __name__ == "__main__": - import time - - start = time.time() - - test_spikegenerator_connected() - test_spikegenerator_basic() - test_spikegenerator_basic_sorted() - test_spikegenerator_basic_sorted_with_sorted() - test_spikegenerator_period() - test_spikegenerator_period_rounding() - test_spikegenerator_extreme_period() - test_spikegenerator_period_repeat() - test_spikegenerator_change_spikes() - test_spikegenerator_change_period() - test_spikegenerator_incorrect_values() - test_spikegenerator_incorrect_period() - test_spikegenerator_rounding() - test_spikegenerator_rounding_long() - test_spikegenerator_rounding_period() - test_spikegenerator_multiple_spikes_per_bin() - test_spikegenerator_multiple_runs() - test_spikegenerator_restore() - print("Tests took", time.time() - start) diff --git a/brian2/tests/test_spikequeue.py b/brian2/tests/test_spikequeue.py deleted file mode 100644 index fccf9432f..000000000 --- a/brian2/tests/test_spikequeue.py +++ /dev/null @@ -1,65 +0,0 @@ -import numpy as np -import pytest -from numpy.testing import assert_equal - -from brian2.memory.dynamicarray import DynamicArray1D -from brian2.synapses.spikequeue import SpikeQueue -from brian2.units.stdunits import ms - - -def create_all_to_all(N, dt): - """ - Return a tuple containing `synapses` and `delays` in the form that is needed - for the `SpikeQueue` initializer. - Every synapse has a delay depending on the presynaptic neuron. - """ - data = np.repeat(np.arange(N, dtype=np.int32), N) - delays = DynamicArray1D(data.shape, dtype=np.float64) - delays[:] = data * dt - synapses = data - return synapses, delays - - -def create_one_to_one(N, dt): - """ - Return a tuple containing `synapses` and `delays` in the form that is needed - for the `SpikeQueue` initializer. - Every synapse has a delay depending on the presynaptic neuron. 
- """ - data = np.arange(N, dtype=np.int32) - delays = DynamicArray1D(data.shape, dtype=np.float64) - delays[:] = data * dt - synapses = data - return synapses, delays - - -@pytest.mark.codegen_independent -def test_spikequeue(): - N = 100 - dt = float(0.1 * ms) - synapses, delays = create_one_to_one(N, dt) - queue = SpikeQueue(source_start=0, source_end=N) - queue.prepare(delays[:], dt, synapses) - queue.push(np.arange(N, dtype=np.int32)) - for i in range(N): - assert_equal(queue.peek(), np.array([i])) - queue.advance() - for i in range(N): - assert_equal(queue.peek(), np.array([])) - queue.advance() - - synapses, delays = create_all_to_all(N, dt) - - queue = SpikeQueue(source_start=0, source_end=N) - queue.prepare(delays[:], dt, synapses) - queue.push(np.arange(N * N, dtype=np.int32)) - for i in range(N): - assert_equal(queue.peek(), i * N + np.arange(N)) - queue.advance() - for i in range(N): - assert_equal(queue.peek(), np.array([])) - queue.advance() - - -if __name__ == "__main__": - test_spikequeue() diff --git a/brian2/tests/test_stateupdaters.py b/brian2/tests/test_stateupdaters.py deleted file mode 100644 index e2f64d732..000000000 --- a/brian2/tests/test_stateupdaters.py +++ /dev/null @@ -1,988 +0,0 @@ -import logging -import re - -import pytest -from numpy.testing import assert_equal - -from brian2 import * -from brian2.core.variables import ArrayVariable, Constant, Variable -from brian2.stateupdaters.base import UnsupportedEquationsException -from brian2.tests.utils import assert_allclose, exc_isinstance -from brian2.utils.logger import catch_logs - - -@pytest.mark.codegen_independent -def test_explicit_stateupdater_parsing(): - """ - Test the parsing of explicit state updater descriptions. - """ - # These are valid descriptions and should not raise errors - updater = ExplicitStateUpdater("x_new = x + dt * f(x, t)") - updater(Equations("dv/dt = -v / tau : 1")) - updater = ExplicitStateUpdater( - """ - x2 = x + dt * f(x, t) - x_new = x2 - """ - ) - updater(Equations("dv/dt = -v / tau : 1")) - updater = ExplicitStateUpdater( - """ - x1 = g(x, t) * dW - x2 = x + dt * f(x, t) - x_new = x1 + x2 - """, - stochastic="multiplicative", - ) - updater(Equations("dv/dt = -v / tau + v * xi * tau**-.5: 1")) - - updater = ExplicitStateUpdater( - """ - x_support = x + dt*f(x, t) + dt**.5 * g(x, t) - g_support = g(x_support, t) - k = 1/(2*dt**.5)*(g_support - g(x, t))*(dW**2) - x_new = x + dt*f(x,t) + g(x, t) * dW + k - """, - stochastic="multiplicative", - ) - updater(Equations("dv/dt = -v / tau + v * xi * tau**-.5: 1")) - - # Examples of failed parsing - # No x_new = ... statement - with pytest.raises(SyntaxError): - ExplicitStateUpdater("x = x + dt * f(x, t)") - # Not an assigment - with pytest.raises(SyntaxError): - ExplicitStateUpdater( - """ - 2 * x - x_new = x + dt * f(x, t) - """ - ) - - # doesn't separate into stochastic and non-stochastic part - updater = ExplicitStateUpdater("x_new = x + dt * f(x, t) * g(x, t) * dW") - with pytest.raises(ValueError): - updater(Equations("")) - - -@pytest.mark.codegen_independent -def test_non_autonomous_equations(): - # Check that non-autonmous equations are handled correctly in multi-step - # updates - updater = ExplicitStateUpdater("x_new = f(x, t + 0.5*dt)") - update_step = updater(Equations("dv/dt = t : 1")) # Not a valid equation but... 
- # very crude test - assert "0.5*dt" in update_step - - -@pytest.mark.codegen_independent -def test_str_repr(): - """ - Assure that __str__ and __repr__ do not raise errors - """ - for integrator in [linear, euler, rk2, rk4]: - assert len(str(integrator)) - assert len(repr(integrator)) - - -@pytest.mark.codegen_independent -def test_multiple_noise_variables_basic(): - # Very basic test, only to make sure that stochastic state updaters handle - # multiple noise variables at all - eqs = Equations( - """ - dv/dt = -v / (10*ms) + xi_1 * ms ** -.5 : 1 - dw/dt = -w / (10*ms) + xi_2 * ms ** -.5 : 1 - """ - ) - for method in [euler, heun, milstein]: - code = method(eqs, {}) - assert "xi_1" in code - assert "xi_2" in code - - -def test_multiple_noise_variables_extended(): - # Some actual simulations with multiple noise variables - eqs = """ - dx/dt = y : 1 - dy/dt = - 1*ms**-1*y - 40*ms**-2*x : Hz - """ - all_eqs_noise = [ - """ - dx/dt = y : 1 - dy/dt = noise_factor*ms**-1.5*xi_1 + noise_factor*ms**-1.5*xi_2 - - 1*ms**-1*y - 40*ms**-2*x : Hz - """, - """ - dx/dt = y + noise_factor*ms**-0.5*xi_1: 1 - dy/dt = noise_factor*ms**-1.5*xi_2 - - 1*ms**-1*y - 40*ms**-2*x : Hz - """, - ] - G = NeuronGroup(2, eqs, method="euler") - G.x = [0.5, 1] - G.y = [0, 0.5] * Hz - mon = StateMonitor(G, ["x", "y"], record=True) - net = Network(G, mon) - net.run(10 * ms) - no_noise_x, no_noise_y = mon.x[:], mon.y[:] - - for eqs_noise in all_eqs_noise: - for method_name, method in [("euler", euler), ("heun", heun)]: - with catch_logs("WARNING"): - G = NeuronGroup(2, eqs_noise, method=method) - G.x = [0.5, 1] - G.y = [0, 0.5] * Hz - mon = StateMonitor(G, ["x", "y"], record=True) - net = Network(G, mon) - # We run it deterministically, but still we'd detect major errors (e.g. - # non-stochastic terms that are added twice, see #330 - net.run(10 * ms, namespace={"noise_factor": 0.0}) - assert_allclose( - mon.x[:], - no_noise_x, - err_msg=f"Method {method_name} gave incorrect results", - ) - assert_allclose( - mon.y[:], - no_noise_y, - err_msg=f"Method {method_name} gave incorrect results", - ) - - -def test_multiple_noise_variables_deterministic_noise(fake_randn_randn_fixture): - all_eqs = [ - """ - dx/dt = y : 1 - dy/dt = -y / (10*ms) + dt**-.5*0.5*ms**-1.5 + dt**-.5*0.5*ms**-1.5: Hz - """, - """ - dx/dt = y + dt**-.5*0.5*ms**-0.5: 1 - dy/dt = -y / (10*ms) + dt**-.5*0.5 * ms**-1.5 : Hz - """, - ] - all_eqs_noise = [ - """ - dx/dt = y : 1 - dy/dt = -y / (10*ms) + xi_1 * ms**-1.5 + xi_2 * ms**-1.5: Hz - """, - """ - dx/dt = y + xi_1*ms**-0.5: 1 - dy/dt = -y / (10*ms) + xi_2 * ms**-1.5 : Hz - """, - ] - for eqs, eqs_noise in zip(all_eqs, all_eqs_noise): - G = NeuronGroup(2, eqs, method="euler") - G.x = [5, 17] - G.y = [25, 5] * Hz - mon = StateMonitor(G, ["x", "y"], record=True) - net = Network(G, mon) - net.run(10 * ms) - no_noise_x, no_noise_y = mon.x[:], mon.y[:] - - for method_name, method in [("euler", euler), ("heun", heun)]: - with catch_logs("WARNING"): - G = NeuronGroup(2, eqs_noise, method=method) - G.x = [5, 17] - G.y = [25, 5] * Hz - mon = StateMonitor(G, ["x", "y"], record=True) - net = Network(G, mon) - net.run(10 * ms) - assert_allclose( - mon.x[:], - no_noise_x, - err_msg=f"Method {method_name} gave incorrect results", - ) - assert_allclose( - mon.y[:], - no_noise_y, - err_msg=f"Method {method_name} gave incorrect results", - ) - - -@pytest.mark.codegen_independent -def test_multiplicative_noise(): - # Noise is not multiplicative (constant over time step) - ta = TimedArray([0, 1], dt=defaultclock.dt * 10) - Eq 
= Equations("dv/dt = ta(t)*xi*(5*ms)**-0.5 :1") - group = NeuronGroup(1, Eq, method="euler") - net = Network(group) - net.run(0 * ms) # no error - - # Noise is multiplicative (multiplied with time-varying variable) - Eq1 = Equations("dv/dt = v*xi*(5*ms)**-0.5 :1") - group1 = NeuronGroup(1, Eq1, method="euler") - net1 = Network(group1) - with pytest.raises(BrianObjectException) as exc: - net1.run(0 * ms) - assert exc_isinstance(exc, UnsupportedEquationsException) - - # Noise is multiplicative (multiplied with time) - Eq2 = Equations("dv/dt = (t/ms)*xi*(5*ms)**-0.5 :1") - group2 = NeuronGroup(1, Eq2, method="euler") - net2 = Network(group2) - with pytest.raises(BrianObjectException) as exc: - net2.run(0 * ms) - assert exc_isinstance(exc, UnsupportedEquationsException) - - # Noise is multiplicative (multiplied with time-varying variable) - Eq3 = Equations( - """ - dv/dt = w*xi*(5*ms)**-0.5 :1 - dw/dt = -w/(10*ms) : 1 - """ - ) - group3 = NeuronGroup(1, Eq3, method="euler") - net3 = Network(group3) - with pytest.raises(BrianObjectException) as exc: - net3.run(0 * ms) - assert exc_isinstance(exc, UnsupportedEquationsException) - - # One of the equations has multiplicative noise - Eq4 = Equations( - """ - dv/dt = xi_1*(5*ms)**-0.5 : 1 - dw/dt = (t/ms)*xi_2*(5*ms)**-0.5 :1 - """ - ) - group4 = NeuronGroup(1, Eq4, method="euler") - net4 = Network(group4) - with pytest.raises(BrianObjectException) as exc: - net4.run(0 * ms) - assert exc_isinstance(exc, UnsupportedEquationsException) - - # One of the equations has multiplicative noise - Eq5 = Equations( - """ - dv/dt = xi_1*(5*ms)**-0.5 : 1 - dw/dt = v*xi_2*(5*ms)**-0.5 :1 - """ - ) - group5 = NeuronGroup(1, Eq5, method="euler") - net5 = Network(group4) - with pytest.raises(BrianObjectException) as exc: - net5.run(0 * ms) - assert exc_isinstance(exc, UnsupportedEquationsException) - - -def test_pure_noise_deterministic(fake_randn_randn_fixture): - sigma = 3.0 - eqs = Equations("dx/dt = sigma*xi/sqrt(ms) : 1") - dt = 0.1 * ms - for method in ["euler", "heun", "milstein"]: - G = NeuronGroup(1, eqs, dt=dt, method=method) - run(10 * dt) - assert_allclose( - G.x, - sqrt(dt) * sigma * 0.5 / sqrt(1 * ms) * 10, - err_msg=f"method {method} did not give the expected result", - ) - - -@pytest.mark.codegen_independent -def test_temporary_variables(): - """ - Make sure that the code does the distinction between temporary variables - in the state updater description and external variables used in the - equations. - """ - # Use a variable name that is used in the state updater description - k_2 = 5 - eqs = Equations("dv/dt = -(v + k_2)/(10*ms) : 1") - converted = rk4(eqs) - - # Use a non-problematic name - k_var = 5 - eqs = Equations("dv/dt = -(v + k_var)/(10*ms) : 1") - converted2 = rk4(eqs) - - # Make sure that the two formulations result in the same code - assert converted == converted2.replace("k_var", "k_2") - - -@pytest.mark.codegen_independent -def test_temporary_variables2(): - """ - Make sure that the code does the distinction between temporary variables - in the state updater description and external variables used in the - equations. 
- """ - tau = 10 * ms - # Use a variable name that is used in the state updater description - k = 5 - eqs = Equations("dv/dt = -v/tau + k*xi*tau**-0.5: 1") - converted = milstein(eqs) - - # Use a non-problematic name - k_var = 5 - eqs = Equations("dv/dt = -v/tau + k_var*xi*tau**-0.5: 1") - converted2 = milstein(eqs) - - # Make sure that the two formulations result in the same code - assert converted == converted2.replace("k_var", "k") - - -@pytest.mark.codegen_independent -def test_integrator_code(): - """ - Check whether the returned abstract code is as expected. - """ - # A very simple example where the abstract code should always look the same - eqs = Equations("dv/dt = -v / (1 * second) : 1") - - # Only test very basic stuff (expected number of lines and last line) - for integrator, lines in zip([linear, euler, rk2, rk4], [2, 2, 3, 6]): - code_lines = integrator(eqs).split("\n") - err_msg = ( - f"Returned code for integrator {integrator.__class__.__name__} had" - f" {len(code_lines)} lines instead of {int(lines)}" - ) - assert len(code_lines) == lines, err_msg - assert code_lines[-1] == "v = _v" - - # Make sure that it isn't a problem to use 'x', 'f' and 'g' as variable - # names, even though they are also used in state updater descriptions. - # The resulting code should be identical when replacing x by x0 (and ..._x by - # ..._x0) - for varname in ["x", "f", "g"]: - # We use a very similar names here to avoid slightly re-arranged - # expressions due to alphabetical sorting of terms in - # multiplications, etc. - eqs_v = Equations(f"d{varname}0/dt = -{varname}0 / (1 * second) : 1") - eqs_var = Equations(f"d{varname}/dt = -{varname} / (1 * second) : 1") - for integrator in [linear, euler, rk2, rk4]: - code_v = integrator(eqs_v) - code_var = integrator(eqs_var) - # Re-substitute the variable names in the output - code_var = re.sub(rf"\b{varname}\b", f"{varname}0", code_var) - code_var = re.sub(rf"\b(\w*)_{varname}\b", rf"\1_{varname}0", code_var) - assert code_var == code_v, f"'{code_var}' does not match '{code_v}'" - - -@pytest.mark.codegen_independent -def test_integrator_code2(): - """ - Test integration for a simple model with several state variables. 
- """ - eqs = Equations( - """ - dv/dt=(ge+gi-v)/tau : volt - dge/dt=-ge/taue : volt - dgi/dt=-gi/taui : volt - """ - ) - euler_integration = euler(eqs) - lines = sorted(euler_integration.split("\n")) - # Do a very basic check that the right variables are used in every line - for varname, line in zip(["_ge", "_gi", "_v", "ge", "gi", "v"], lines): - assert line.startswith( - f"{varname} = " - ), f'line "{line}" does not start with {varname}' - for variables, line in zip( - [ - ["dt", "ge", "taue"], - ["dt", "gi", "taui"], - ["dt", "ge", "gi", "v", "tau"], - ["_ge"], - ["_gi"], - ["_v"], - ], - lines, - ): - rhs = line.split("=")[1] - for variable in variables: - assert variable in rhs, f'{variable} not in RHS: "{rhs}"' - - -@pytest.mark.codegen_independent -def test_illegal_calls(): - eqs = Equations("dv/dt = -v / (10*ms) : 1") - clock = Clock(dt=0.1 * ms) - variables = { - "v": ArrayVariable( - name="name", - size=10, - owner=None, - device=None, - dtype=np.float64, - constant=False, - ), - "t": clock.variables["t"], - "dt": clock.variables["dt"], - } - with pytest.raises(TypeError): - StateUpdateMethod.apply_stateupdater(eqs, variables, object()) - with pytest.raises(TypeError): - StateUpdateMethod.apply_stateupdater( - eqs, variables, group_name="my_name", method=object() - ) - with pytest.raises(TypeError): - StateUpdateMethod.apply_stateupdater(eqs, variables, [object(), "euler"]) - with pytest.raises(TypeError): - StateUpdateMethod.apply_stateupdater( - eqs, variables, group_name="my_name", method=[object(), "euler"] - ) - - -def check_integration(eqs, variables, can_integrate): - # can_integrate maps integrators to True/False/None - # True/False means that the integrator should/should not integrate the equations - # None means that it *might* integrate the equations (only needed for the - # exact integration, since it can depend on the sympy version) - for integrator, able in can_integrate.items(): - try: - integrator(eqs, variables) - if able is False: - raise AssertionError( - "Should not be able to integrate these " - f"equations (equations: '{eqs}') with " - f"integrator {integrator.__class__.__name__}" - ) - except UnsupportedEquationsException: - if able is True: - raise AssertionError( - "Should be able to integrate these " - f"equations (equations: '{eqs}') with " - f"integrator {integrator.__class__.__name__}" - ) - - -@pytest.mark.codegen_independent -def test_priority(): - updater = ExplicitStateUpdater("x_new = x + dt * f(x, t)") - # Equations that work for the state updater - eqs = Equations("dv/dt = -v / (10*ms) : 1") - clock = Clock(dt=0.1 * ms) - variables = { - "v": ArrayVariable( - name="name", - size=10, - owner=None, - device=None, - dtype=np.float64, - constant=False, - ), - "w": ArrayVariable( - name="name", - size=10, - owner=None, - device=None, - dtype=np.float64, - constant=False, - ), - "t": clock.variables["t"], - "dt": clock.variables["dt"], - } - updater(eqs, variables) # should not raise an error - - # External parameter in the coefficient, linear integration should work - param = 1 - eqs = Equations("dv/dt = -param * v / (10*ms) : 1") - updater(eqs, variables) # should not raise an error - can_integrate = { - linear: True, - euler: True, - exponential_euler: True, - rk2: True, - rk4: True, - heun: True, - milstein: True, - } - - check_integration(eqs, variables, can_integrate) - - # Constant equation, should work for all except linear (see #1010) - param = 1 - eqs = Equations( - """dv/dt = 10*Hz : 1 - dw/dt = -v/(10*ms) : 1""" - ) - updater(eqs, 
variables) # should not raise an error - can_integrate = { - linear: None, - euler: True, - exponential_euler: True, - rk2: True, - rk4: True, - heun: True, - milstein: True, - } - - check_integration(eqs, variables, can_integrate) - - # Equations resulting in complex linear solution for older versions of sympy - eqs = Equations( - """ - dv/dt = (ge+gi-(v+49*mV))/(20*ms) : volt - dge/dt = -ge/(5*ms) : volt - dgi/dt = Dgi/(5*ms) : volt - dDgi/dt = ((-2./5) * Dgi - (1./5**2)*gi)/(10*ms) : volt - """ - ) - can_integrate = { - linear: None, - euler: True, - exponential_euler: True, - rk2: True, - rk4: True, - heun: True, - milstein: True, - } - check_integration(eqs, variables, can_integrate) - - # Equation with additive noise - eqs = Equations("dv/dt = -v / (10*ms) + xi/(10*ms)**.5 : 1") - with pytest.raises(UnsupportedEquationsException): - updater(eqs, variables) - - can_integrate = { - linear: False, - euler: True, - exponential_euler: False, - rk2: False, - rk4: False, - heun: True, - milstein: True, - } - - check_integration(eqs, variables, can_integrate) - - # Equation with multiplicative noise - eqs = Equations("dv/dt = -v / (10*ms) + v*xi/(10*ms)**.5 : 1") - with pytest.raises(UnsupportedEquationsException): - updater(eqs, variables) - - can_integrate = { - linear: False, - euler: False, - exponential_euler: False, - rk2: False, - rk4: False, - heun: True, - milstein: True, - } - - check_integration(eqs, variables, can_integrate) - - -@pytest.mark.codegen_independent -def test_registration(): - """ - Test state updater registration. - """ - # Save state before tests - before = dict(StateUpdateMethod.stateupdaters) - - lazy_updater = ExplicitStateUpdater("x_new = x") - StateUpdateMethod.register("lazy", lazy_updater) - - # Trying to register again - with pytest.raises(ValueError): - StateUpdateMethod.register("lazy", lazy_updater) - - # Trying to register something that is not a state updater - with pytest.raises(ValueError): - StateUpdateMethod.register("foo", "just a string") - - # Trying to register with an invalid index - with pytest.raises(TypeError): - StateUpdateMethod.register("foo", lazy_updater, index="not an index") - - # reset to state before the test - StateUpdateMethod.stateupdaters = before - - -@pytest.mark.codegen_independent -def test_determination(): - """ - Test the determination of suitable state updaters. - """ - # To save some typing - apply_stateupdater = StateUpdateMethod.apply_stateupdater - - eqs = Equations("dv/dt = -v / (10*ms) : 1") - # Just make sure that state updaters know about the two state variables - variables = {"v": Variable(name="v"), "w": Variable(name="w")} - - # all methods should work for these equations. 
- # First, specify them explicitly (using the object) - for integrator in ( - linear, - euler, - exponential_euler, # TODO: Removed "independent" here due to the issue in sympy 0.7.4 - rk2, - rk4, - heun, - milstein, - ): - with catch_logs() as logs: - returned = apply_stateupdater(eqs, variables, method=integrator) - assert ( - len(logs) == 0 - ), f"Got {len(logs)} unexpected warnings: {str([l[2] for l in logs])}" - - # Equation with multiplicative noise, only milstein and heun should work - eqs = Equations("dv/dt = -v / (10*ms) + v*xi*second**-.5: 1") - for integrator in (linear, independent, euler, exponential_euler, rk2, rk4): - with pytest.raises(UnsupportedEquationsException): - apply_stateupdater(eqs, variables, integrator) - - for integrator in (heun, milstein): - with catch_logs() as logs: - returned = apply_stateupdater(eqs, variables, method=integrator) - assert ( - len(logs) == 0 - ), f"Got {len(logs)} unexpected warnings: {str([l[2] for l in logs])}" - - # Arbitrary functions (converting equations into abstract code) should - # always work - my_stateupdater = lambda eqs, vars, options: "x_new = x" - with catch_logs() as logs: - returned = apply_stateupdater(eqs, variables, method=my_stateupdater) - # No warning here - assert len(logs) == 0 - - # Specification with names - eqs = Equations("dv/dt = -v / (10*ms) : 1") - for name, integrator in [ - ("exact", exact), - ("linear", linear), - ("euler", euler), - # ('independent', independent), #TODO: Removed "independent" here due to the issue in sympy 0.7.4 - ("exponential_euler", exponential_euler), - ("rk2", rk2), - ("rk4", rk4), - ("heun", heun), - ("milstein", milstein), - ]: - with catch_logs() as logs: - returned = apply_stateupdater(eqs, variables, method=name) - # No warning here - assert len(logs) == 0 - - # Now all except heun and milstein should refuse to work - eqs = Equations("dv/dt = -v / (10*ms) + v*xi*second**-.5: 1") - for name in [ - "linear", - "exact", - "independent", - "euler", - "exponential_euler", - "rk2", - "rk4", - ]: - with pytest.raises(UnsupportedEquationsException): - apply_stateupdater(eqs, variables, method=name) - - # milstein should work - with catch_logs() as logs: - apply_stateupdater(eqs, variables, method="milstein") - assert len(logs) == 0 - - # heun should work - with catch_logs() as logs: - apply_stateupdater(eqs, variables, method="heun") - assert len(logs) == 0 - - # non-existing name - with pytest.raises(ValueError): - apply_stateupdater(eqs, variables, method="does_not_exist") - - # Automatic state updater choice should return linear for linear equations, - # euler for non-linear, non-stochastic equations and equations with - # additive noise, heun for equations with multiplicative noise - # Because it is somewhat fragile, the "independent" state updater is not - # included in this list - all_methods = ["linear", "exact", "exponential_euler", "euler", "heun", "milstein"] - eqs = Equations("dv/dt = -v / (10*ms) : 1") - with catch_logs(log_level=logging.INFO) as logs: - apply_stateupdater(eqs, variables, all_methods) - assert len(logs) == 1 - assert ("linear" in logs[0][2]) or ("exact" in logs[0][2]) - - # This is conditionally linear - eqs = Equations( - """dv/dt = -(v + w**2)/ (10*ms) : 1 - dw/dt = -w/ (10*ms) : 1""" - ) - with catch_logs(log_level=logging.INFO) as logs: - apply_stateupdater(eqs, variables, all_methods) - assert len(logs) == 1 - assert "exponential_euler" in logs[0][2] - - # # Do not test for now - # eqs = Equations('dv/dt = sin(t) / (10*ms) : 1') - # assert 
apply_stateupdater(eqs, variables) is independent - - eqs = Equations("dv/dt = -sqrt(v) / (10*ms) : 1") - with catch_logs(log_level=logging.INFO) as logs: - apply_stateupdater(eqs, variables, all_methods) - assert len(logs) == 1 - assert "'euler'" in logs[0][2] - - eqs = Equations("dv/dt = -v / (10*ms) + 0.1*second**-.5*xi: 1") - with catch_logs(log_level=logging.INFO) as logs: - apply_stateupdater(eqs, variables, all_methods) - assert len(logs) == 1 - assert "'euler'" in logs[0][2] - - eqs = Equations("dv/dt = -v / (10*ms) + v*0.1*second**-.5*xi: 1") - with catch_logs(log_level=logging.INFO) as logs: - apply_stateupdater(eqs, variables, all_methods) - assert len(logs) == 1 - assert "'heun'" in logs[0][2] - - -@pytest.mark.standalone_compatible -def test_subexpressions_basic(): - """ - Make sure that the integration of a (non-stochastic) differential equation - does not depend on whether it's formulated using subexpressions. - """ - # no subexpression - eqs1 = "dv/dt = (-v + sin(2*pi*100*Hz*t)) / (10*ms) : 1" - # same with subexpression - eqs2 = """dv/dt = I / (10*ms) : 1 - I = -v + sin(2*pi*100*Hz*t): 1""" - method = "euler" - G1 = NeuronGroup(1, eqs1, method=method) - G1.v = 1 - G2 = NeuronGroup(1, eqs2, method=method) - G2.v = 1 - mon1 = StateMonitor(G1, "v", record=True) - mon2 = StateMonitor(G2, "v", record=True) - run(10 * ms) - assert_equal(mon1.v, mon2.v, f"Results for method {method} differed!") - - -def test_subexpressions(): - """ - Make sure that the integration of a (non-stochastic) differential equation - does not depend on whether it's formulated using subexpressions. - """ - # no subexpression - eqs1 = "dv/dt = (-v + sin(2*pi*100*Hz*t)) / (10*ms) : 1" - # same with subexpression - eqs2 = """dv/dt = I / (10*ms) : 1 - I = -v + sin(2*pi*100*Hz*t): 1""" - - methods = [ - "exponential_euler", - "rk2", - "rk4", - ] # euler is tested in test_subexpressions_basic - for method in methods: - G1 = NeuronGroup(1, eqs1, method=method) - G1.v = 1 - G2 = NeuronGroup(1, eqs2, method=method) - G2.v = 1 - mon1 = StateMonitor(G1, "v", record=True) - mon2 = StateMonitor(G2, "v", record=True) - net = Network(G1, mon1, G2, mon2) - net.run(10 * ms) - assert_equal(mon1.v, mon2.v, f"Results for method {method} differed!") - - -@pytest.mark.codegen_independent -def test_locally_constant_check(): - default_dt = defaultclock.dt - # The linear state update can handle additive time-dependent functions - # (e.g. 
a TimedArray) but only if it can be safely assumed that the function - # is constant over a single time check - ta0 = TimedArray(np.array([1]), dt=default_dt) # ok - ta1 = TimedArray(np.array([1]), dt=2 * default_dt) # ok - ta2 = TimedArray(np.array([1]), dt=default_dt / 2) # not ok - ta3 = TimedArray(np.array([1]), dt=default_dt * 1.5) # not ok - - for ta_func, ok in zip([ta0, ta1, ta2, ta3], [True, True, False, False]): - # additive - G = NeuronGroup( - 1, - "dv/dt = -v/(10*ms) + ta(t)*Hz : 1", - method="exact", - namespace={"ta": ta_func}, - ) - net = Network(G) - if ok: - # This should work - net.run(0 * ms) - else: - # This should not - with catch_logs(): - with pytest.raises(BrianObjectException) as exc: - net.run(0 * ms) - assert exc.errisinstance(UnsupportedEquationsException) - - # multiplicative - G = NeuronGroup( - 1, "dv/dt = -v*ta(t)/(10*ms) : 1", method="exact", namespace={"ta": ta_func} - ) - net = Network(G) - if ok: - # This should work - net.run(0 * ms) - else: - # This should not - with catch_logs(): - with pytest.raises(BrianObjectException) as exc: - net.run(0 * ms) - assert exc.errisinstance(UnsupportedEquationsException) - - # If the argument is more than just "t", we cannot guarantee that it is - # actually locally constant - G = NeuronGroup( - 1, "dv/dt = -v*ta(t/2.0)/(10*ms) : 1", method="exact", namespace={"ta": ta0} - ) - net = Network(G) - with pytest.raises(BrianObjectException) as exc: - net.run(0 * ms) - assert exc_isinstance(exc, UnsupportedEquationsException) - - # Arbitrary functions are not constant over a time step - G = NeuronGroup(1, "dv/dt = -v/(10*ms) + sin(2*pi*100*Hz*t)*Hz : 1", method="exact") - net = Network(G) - with pytest.raises(BrianObjectException) as exc: - net.run(0 * ms) - assert exc_isinstance(exc, UnsupportedEquationsException) - - # Stateful functions aren't either - G = NeuronGroup(1, "dv/dt = -v/(10*ms) + rand()*Hz : 1", method="exact") - net = Network(G) - with pytest.raises(BrianObjectException) as exc: - net.run(0 * ms) - assert exc_isinstance(exc, UnsupportedEquationsException) - - # Neither is "t" itself - G = NeuronGroup(1, "dv/dt = -v/(10*ms) + t/second**2 : 1", method="exact") - net = Network(G) - with pytest.raises(BrianObjectException) as exc: - net.run(0 * ms) - assert exc_isinstance(exc, UnsupportedEquationsException) - - # But if the argument is not referring to t, all should be well - G = NeuronGroup( - 1, "dv/dt = -v/(10*ms) + sin(2*pi*100*Hz*5*second)*Hz : 1", method="exact" - ) - net = Network(G) - net.run(0 * ms) - - -def test_refractory(): - # Compare integration with and without the addition of refractoriness -- - # note that the cell here is not spiking, so it should never be in the - # refractory period and therefore the results should be exactly identical - # with and without (unless refractory) - eqs_base = "dv/dt = -v/(10*ms) : 1" - for method in [ - "linear", - "exact", - "independent", - "euler", - "exponential_euler", - "rk2", - "rk4", - ]: - G_no_ref = NeuronGroup(10, eqs_base, method=method) - G_no_ref.v = "(i+1)/11." - G_ref = NeuronGroup( - 10, f"{eqs_base}(unless refractory)", refractory=1 * ms, method=method - ) - G_ref.v = "(i+1)/11." - net = Network(G_ref, G_no_ref) - net.run(10 * ms) - assert_allclose( - G_no_ref.v[:], - G_ref.v[:], - err_msg="Results with and without refractoriness differ for method %s." 
- % method, - ) - - -def test_refractory_stochastic(fake_randn_randn_fixture): - eqs_base = "dv/dt = -v/(10*ms) + second**-.5*xi : 1" - - for method in ["euler", "heun", "milstein"]: - G_no_ref = NeuronGroup(10, eqs_base, method=method) - G_no_ref.v = "(i+1)/11." - G_ref = NeuronGroup( - 10, f"{eqs_base} (unless refractory)", refractory=1 * ms, method=method - ) - G_ref.v = "(i+1)/11." - net = Network(G_ref, G_no_ref) - net.run(10 * ms) - assert_allclose( - G_no_ref.v[:], - G_ref.v[:], - err_msg="Results with and without refractoriness differ for method %s." - % method, - ) - - -@pytest.mark.standalone_compatible -def test_check_for_invalid_values_linear_integrator(): - # A differential equation that cannot be solved by the linear - # integrator should return nan values to warn the user, and not silently - # return incorrect values. See discussion on - # https://github.com/brian-team/brian2/issues/626 - a = 0.0 / ms - b = 1.0 / ms - c = -0.5 / ms - d = -0.1 / ms - eqs = """ - dx/dt = a * x + b * y : 1 - dy/dt = c * x + d * y : 1 - """ - G = NeuronGroup( - 1, - eqs, - threshold="x > 100", - reset="x = 0", - method="exact", - method_options={"simplify": False}, - ) - G.x = 1 - BrianLogger._log_messages.clear() # because the log message is set to be shown only once - with catch_logs() as clog: - try: - run(1 * ms) - # this check allows for the possibility that we improve the linear - # integrator in the future so that it can handle this equation - if numpy.isnan(G.x[0]): - assert "invalid_values" in repr(clog) - else: - assert G.x[0] != 0 - except BrianObjectException as exc: - assert isinstance(exc.__cause__, UnsupportedEquationsException) - - -if __name__ == "__main__": - import time - - from brian2 import prefs - - start = time.time() - - test_determination() - test_explicit_stateupdater_parsing() - test_non_autonomous_equations() - test_str_repr() - test_multiplicative_noise() - test_multiple_noise_variables_basic() - test_multiple_noise_variables_extended() - test_temporary_variables() - test_temporary_variables2() - test_integrator_code() - test_integrator_code2() - test_illegal_calls() - test_priority() - test_registration() - test_subexpressions() - test_locally_constant_check() - test_refractory() - # # Need the fake random number generator from tests/conftest.py - # test_refractory_stochastic() - # test_multiple_noise_variables_deterministic_noise() - # test_pure_noise_deterministic() - test_check_for_invalid_values_linear_integrator() - print("Tests took", time.time() - start) diff --git a/brian2/tests/test_subgroup.py b/brian2/tests/test_subgroup.py deleted file mode 100644 index c3e99664b..000000000 --- a/brian2/tests/test_subgroup.py +++ /dev/null @@ -1,926 +0,0 @@ -import pytest -from numpy.testing import assert_array_equal, assert_equal - -from brian2 import * -from brian2.core.network import schedule_propagation_offset -from brian2.devices.device import reinit_and_delete -from brian2.tests.utils import assert_allclose -from brian2.units.fundamentalunits import DIMENSIONLESS -from brian2.utils.logger import catch_logs - - -@pytest.mark.codegen_independent -def test_str_repr(): - """ - Test the string representation of a subgroup. - """ - G = NeuronGroup(10, "v:1") - SG = G[5:8] - # very basic test, only make sure no error is raised - assert len(str(SG)) - assert len(repr(SG)) - - -def test_state_variables(): - """ - Test the setting and accessing of state variables in subgroups. 
- """ - G = NeuronGroup(10, "v : volt") - SG = G[4:9] - with pytest.raises(DimensionMismatchError): - SG.__setattr__("v", -70) - SG.v_ = float(-80 * mV) - assert_allclose(G.v, np.array([0, 0, 0, 0, -80, -80, -80, -80, -80, 0]) * mV) - assert_allclose(SG.v, np.array([-80, -80, -80, -80, -80]) * mV) - assert_allclose( - G.v_, np.array([0, 0, 0, 0, -80, -80, -80, -80, -80, 0]) * float(mV) - ) - assert_allclose(SG.v_, np.array([-80, -80, -80, -80, -80]) * float(mV)) - # You should also be able to set variables with a string - SG.v = "v + i*mV" - assert_allclose(SG.v[0], -80 * mV) - assert_allclose(SG.v[4], -76 * mV) - assert_allclose(G.v[4:9], -80 * mV + np.arange(5) * mV) - - # Calculating with state variables should work too - assert all(G.v[4:9] - SG.v == 0) - - # And in-place modification should work as well - SG.v += 10 * mV - assert_allclose(G.v[4:9], -70 * mV + np.arange(5) * mV) - SG.v *= 2 - assert_allclose(G.v[4:9], 2 * (-70 * mV + np.arange(5) * mV)) - # with unit checking - with pytest.raises(DimensionMismatchError): - SG.v.__iadd__(3 * second) - with pytest.raises(DimensionMismatchError): - SG.v.__iadd__(3) - with pytest.raises(DimensionMismatchError): - SG.v.__imul__(3 * second) - - # Indexing with subgroups - assert_equal(G.v[SG], SG.v[:]) - - -@pytest.mark.standalone_compatible -def test_state_variables_simple(): - G = NeuronGroup( - 10, - """ - a : 1 - b : 1 - c : 1 - d : 1 - """, - ) - SG = G[3:7] - SG.a = 1 - SG.a["i == 0"] = 2 - SG.b = "i" - SG.b["i == 3"] = "i * 2" - SG.c = np.arange(3, 7) - SG.d[1:2] = 4 - SG.d[2:4] = [1, 2] - run(0 * ms) - assert_equal(G.a[:], [0, 0, 0, 2, 1, 1, 1, 0, 0, 0]) - assert_equal(G.b[:], [0, 0, 0, 0, 1, 2, 6, 0, 0, 0]) - assert_equal(G.c[:], [0, 0, 0, 3, 4, 5, 6, 0, 0, 0]) - assert_equal(G.d[:], [0, 0, 0, 0, 4, 1, 2, 0, 0, 0]) - - -def test_state_variables_string_indices(): - """ - Test accessing subgroups with string indices. 
- """ - G = NeuronGroup(10, "v : volt") - SG = G[4:9] - assert len(SG.v["i>3"]) == 1 - - G.v = np.arange(10) * mV - assert len(SG.v["v>7.5*mV"]) == 1 - - # Combined string indexing and assignment - SG.v["i > 3"] = "i*10*mV" - - assert_allclose(G.v[:], [0, 1, 2, 3, 4, 5, 6, 7, 40, 9] * mV) - - -@pytest.mark.codegen_independent -def test_state_variables_group_as_index(): - G = NeuronGroup(10, "v : 1") - SG = G[4:9] - G.v[SG] = 1 - assert_equal(G.v[:], np.array([0, 0, 0, 0, 1, 1, 1, 1, 1, 0])) - G.v = 1 - G.v[SG] = "2*v" - assert_equal(G.v[:], np.array([1, 1, 1, 1, 2, 2, 2, 2, 2, 1])) - - -@pytest.mark.codegen_independent -def test_state_variables_group_as_index_problematic(): - G = NeuronGroup(10, "v : 1") - SG = G[4:9] - G.v = 1 - tests = [("i", 1), ("N", 1), ("N + i", 2), ("v", 0)] - for value, n_warnings in tests: - with catch_logs() as l: - G.v.__setitem__(SG, value) - assert ( - len(l) == n_warnings - ), f"expected {int(n_warnings)}, got {len(l)} warnings" - assert all( - [entry[1].endswith("ambiguous_string_expression") for entry in l] - ) - - -@pytest.mark.standalone_compatible -def test_variableview_calculations(): - # Check that you can directly calculate with "variable views" - G = NeuronGroup( - 10, - """ - x : 1 - y : volt - idx : integer - """, - ) - G.x = np.arange(10) - G.y = np.arange(10)[::-1] * mV - G.idx = np.arange(10, dtype=int) - SG = G[3:8] - - assert_allclose(SG.x * SG.y, np.arange(3, 8) * np.arange(6, 1, -1) * mV) - assert_allclose(-SG.x, -np.arange(3, 8)) - assert_allclose(-SG.y, -np.arange(6, 1, -1) * mV) - - assert_allclose(3 * SG.x, 3 * np.arange(3, 8)) - assert_allclose(3 * SG.y, 3 * np.arange(6, 1, -1) * mV) - assert_allclose(SG.x * 3, 3 * np.arange(3, 8)) - assert_allclose(SG.y * 3, 3 * np.arange(6, 1, -1) * mV) - assert_allclose(SG.x / 2.0, np.arange(3, 8) / 2.0) - assert_allclose(SG.y / 2, np.arange(6, 1, -1) * mV / 2) - assert_equal(SG.idx % 2, np.arange(3, 8, dtype=int) % 2) - assert_allclose(SG.x + 2, 2 + np.arange(3, 8)) - assert_allclose(SG.y + 2 * mV, 2 * mV + np.arange(6, 1, -1) * mV) - assert_allclose(2 + SG.x, 2 + np.arange(3, 8)) - assert_allclose(2 * mV + SG.y, 2 * mV + np.arange(6, 1, -1) * mV) - assert_allclose(SG.x - 2, np.arange(3, 8) - 2) - assert_allclose(SG.y - 2 * mV, np.arange(6, 1, -1) * mV - 2 * mV) - assert_allclose(2 - SG.x, 2 - np.arange(3, 8)) - assert_allclose(2 * mV - SG.y, 2 * mV - np.arange(6, 1, -1) * mV) - assert_allclose(SG.x**2, np.arange(3, 8) ** 2) - assert_allclose(SG.y**2, (np.arange(6, 1, -1) * mV) ** 2) - assert_allclose(2**SG.x, 2 ** np.arange(3, 8)) - - # incorrect units - with pytest.raises(DimensionMismatchError): - SG.x + SG.y - with pytest.raises(DimensionMismatchError): - SG.x[:] + SG.y - with pytest.raises(DimensionMismatchError): - SG.x + SG.y[:] - with pytest.raises(DimensionMismatchError): - SG.x + 3 * mV - with pytest.raises(DimensionMismatchError): - 3 * mV + SG.x - with pytest.raises(DimensionMismatchError): - SG.y + 3 - with pytest.raises(DimensionMismatchError): - 3 + SG.y - with pytest.raises(TypeError): - 2**SG.y # raising to a power with units - - -@pytest.mark.standalone_compatible -def test_variableview_properties(): - G = NeuronGroup( - 10, - """ - x : 1 - y : volt - idx : integer - """, - ) - # The below properties should not require access to the values - G.x = "rand()" - G.y = "rand()*mV" - G.idx = "int(rand()*10)" - SG = G[3:8] - - assert have_same_dimensions(SG.x.unit, DIMENSIONLESS) - assert have_same_dimensions(SG.y.unit, volt) - assert have_same_dimensions(SG.idx.unit, DIMENSIONLESS) - # 
See github issue #1555 - assert SG.x.shape == SG.y.shape == SG.idx.shape == (5,) - assert SG.x.ndim == SG.y.ndim == SG.idx.ndim == 1 - assert SG.x.dtype == SG.y.dtype == prefs.core.default_float_dtype - assert SG.idx.dtype == np.int32 - - -@pytest.mark.standalone_compatible -def test_state_monitor(): - G = NeuronGroup(10, "v : volt") - G.v = np.arange(10) * volt - SG = G[5:] - mon_all = StateMonitor(SG, "v", record=True) - mon_0 = StateMonitor(SG, "v", record=0) - run(defaultclock.dt) - - assert_allclose(mon_0[0].v, mon_all[0].v) - assert_allclose(mon_0[0].v, np.array([5]) * volt) - assert_allclose(mon_all.v.flatten(), np.arange(5, 10) * volt) - - with pytest.raises(IndexError): - mon_all[5] - - -def test_shared_variable(): - """Make sure that shared variables work with subgroups""" - G = NeuronGroup(10, "v : volt (shared)") - G.v = 1 * volt - SG = G[5:] - assert SG.v == 1 * volt - - -@pytest.mark.standalone_compatible -def test_synapse_creation(): - G1 = NeuronGroup(10, "") - G2 = NeuronGroup(20, "") - SG1 = G1[:5] - SG2 = G2[10:] - S = Synapses(SG1, SG2) - S.connect(i=2, j=2) # Should correspond to (2, 12) - S.connect("i==2 and j==5") # Should correspond to (2, 15) - - run(0 * ms) # for standalone - - # Internally, the "real" neuron indices should be used - assert_equal(S._synaptic_pre[:], np.array([2, 2])) - assert_equal(S._synaptic_post[:], np.array([12, 15])) - # For the user, the subgroup-relative indices should be presented - assert_equal(S.i[:], np.array([2, 2])) - assert_equal(S.j[:], np.array([2, 5])) - # N_incoming and N_outgoing should also be correct - assert all(S.N_outgoing[2, :] == 2) - assert all(S.N_incoming[:, 2] == 1) - assert all(S.N_incoming[:, 5] == 1) - - -@pytest.mark.standalone_compatible -def test_synapse_creation_state_vars(): - G1 = NeuronGroup(10, "v : 1") - G2 = NeuronGroup(20, "v : 1") - G1.v = "i" - G2.v = "10 + i" - SG1 = G1[:5] - SG2 = G2[10:] - - # connect based on pre-/postsynaptic state variables - S2 = Synapses(SG1, SG2, "w:1") - S2.connect("v_pre > 2") - - S3 = Synapses(SG1, SG2, "w:1") - S3.connect("v_post < 25") - - S4 = Synapses(SG2, SG1, "w:1") - S4.connect("v_post > 2") - - S5 = Synapses(SG2, SG1, "w:1") - S5.connect("v_pre < 25") - - run(0 * ms) # for standalone - - assert len(S2) == 2 * len(SG2), str(len(S2)) - assert all(S2.v_pre[:] > 2) - assert len(S3) == 5 * len(SG1), f"{len(S3)} != {5 * len(SG1)} " - assert all(S3.v_post[:] < 25) - - assert len(S4) == 2 * len(SG2), str(len(S4)) - assert all(S4.v_post[:] > 2) - assert len(S5) == 5 * len(SG1), f"{len(53)} != {5 * len(SG1)} " - assert all(S5.v_pre[:] < 25) - - -@pytest.mark.standalone_compatible -def test_synapse_creation_generator(): - G1 = NeuronGroup(10, "v:1") - G2 = NeuronGroup(20, "v:1") - G1.v = "i" - G2.v = "10 + i" - SG1 = G1[:5] - SG2 = G2[10:] - S = Synapses(SG1, SG2, "w:1") - S.connect(j="i*2 + k for k in range(2)") # diverging connections - - # connect based on pre-/postsynaptic state variables - S2 = Synapses(SG1, SG2, "w:1") - S2.connect(j="k for k in range(N_post) if v_pre > 2") - - S3 = Synapses(SG1, SG2, "w:1") - S3.connect(j="k for k in range(N_post) if v_post < 25") - - S4 = Synapses(SG2, SG1, "w:1") - S4.connect(j="k for k in range(N_post) if v_post > 2") - - S5 = Synapses(SG2, SG1, "w:1") - S5.connect(j="k for k in range(N_post) if v_pre < 25") - - run(0 * ms) # for standalone - - # Internally, the "real" neuron indices should be used - assert_equal(S._synaptic_pre[:], np.arange(5).repeat(2)) - assert_equal(S._synaptic_post[:], np.arange(10) + 10) - # For the user, the 
subgroup-relative indices should be presented - assert_equal(S.i[:], np.arange(5).repeat(2)) - assert_equal(S.j[:], np.arange(10)) - - # N_incoming and N_outgoing should also be correct - assert all(S.N_outgoing[:] == 2) - assert all(S.N_incoming[:] == 1) - - assert len(S2) == 2 * len(SG2), str(len(S2)) - assert all(S2.v_pre[:] > 2) - assert len(S3) == 5 * len(SG1), f"{len(S3)} != {5 * len(SG1)} " - assert all(S3.v_post[:] < 25) - - assert len(S4) == 2 * len(SG2), str(len(S4)) - assert all(S4.v_post[:] > 2) - assert len(S5) == 5 * len(SG1), f"{len(S5)} != {5 * len(SG1)} " - assert all(S5.v_pre[:] < 25) - - -@pytest.mark.standalone_compatible -def test_synapse_creation_generator_multiple_synapses(): - G1 = NeuronGroup(10, "v:1") - G2 = NeuronGroup(20, "v:1") - G1.v = "i" - G2.v = "10 + i" - SG1 = G1[:5] - SG2 = G2[10:] - S1 = Synapses(SG1, SG2) - S1.connect(j="k for k in range(N_post)", n="i") - - S2 = Synapses(SG1, SG2) - S2.connect(j="k for k in range(N_post)", n="j") - - S3 = Synapses(SG2, SG1) - S3.connect(j="k for k in range(N_post)", n="i") - - S4 = Synapses(SG2, SG1) - S4.connect(j="k for k in range(N_post)", n="j") - - S5 = Synapses(SG1, SG2) - S5.connect(j="k for k in range(N_post)", n="i+j") - - S6 = Synapses(SG2, SG1) - S6.connect(j="k for k in range(N_post)", n="i+j") - - S7 = Synapses(SG1, SG2) - S7.connect(j="k for k in range(N_post)", n="int(v_pre>2)*2") - - S8 = Synapses(SG2, SG1) - S8.connect(j="k for k in range(N_post)", n="int(v_post>2)*2") - - S9 = Synapses(SG1, SG2) - S9.connect(j="k for k in range(N_post)", n="int(v_post>22)*2") - - S10 = Synapses(SG2, SG1) - S10.connect(j="k for k in range(N_post)", n="int(v_pre>22)*2") - - run(0 * ms) # for standalone - - # straightforward loop instead of doing something clever... - for source in range(len(SG1)): - assert_equal(S1.j[source, :], np.arange(len(SG2)).repeat(source)) - assert_equal(S2.j[source, :], np.arange(len(SG2)).repeat(np.arange(len(SG2)))) - assert_equal(S3.i[:, source], np.arange(len(SG2)).repeat(np.arange(len(SG2)))) - assert_equal(S4.i[:, source], np.arange(len(SG2)).repeat(source)) - assert_equal( - S5.j[source, :], np.arange(len(SG2)).repeat(np.arange(len(SG2)) + source) - ) - assert_equal( - S6.i[:, source], np.arange(len(SG2)).repeat(np.arange(len(SG2)) + source) - ) - if source > 2: - assert_equal(S7.j[source, :], np.arange(len(SG2)).repeat(2)) - assert_equal(S8.i[:, source], np.arange(len(SG2)).repeat(2)) - else: - assert len(S7.j[source, :]) == 0 - assert len(S8.i[:, source]) == 0 - assert_equal(S9.j[source, :], np.arange(3, len(SG2)).repeat(2)) - assert_equal(S10.i[:, source], np.arange(3, len(SG2)).repeat(2)) - - -@pytest.mark.standalone_compatible -def test_synapse_creation_generator_complex_ranges(): - G1 = NeuronGroup(10, "v:1") - G2 = NeuronGroup(20, "v:1") - G1.v = "i" - G2.v = "10 + i" - SG1 = G1[:5] - SG2 = G2[10:] - S = Synapses(SG1, SG2) - S.connect(j="i+k for k in range(N_post-i)") # Connect to all j>i - - # connect based on pre-/postsynaptic state variables - S2 = Synapses(SG1, SG2) - S2.connect(j="k for k in range(N_post * int(v_pre > 2))") - - # connect based on pre-/postsynaptic state variables - S3 = Synapses(SG2, SG1) - S3.connect(j="k for k in range(N_post * int(v_pre > 22))") - - run(0 * ms) # for standalone - - for syn_source in range(5): - # Internally, the "real" neuron indices should be used - assert_equal( - S._synaptic_post[syn_source, :], - 10 + syn_source + np.arange(10 - syn_source), - ) - # For the user, the subgroup-relative indices should be presented - 
assert_equal(S.j[syn_source, :], syn_source + np.arange(10 - syn_source)) - - assert len(S2) == 2 * len(SG2), str(len(S2)) - assert all(S2.v_pre[:] > 2) - assert len(S3) == 7 * len(SG1), str(len(S3)) - assert all(S3.v_pre[:] > 22) - - -@pytest.mark.standalone_compatible -def test_synapse_creation_generator_random(): - G1 = NeuronGroup(10, "v:1") - G2 = NeuronGroup(20, "v:1") - G1.v = "i" - G2.v = "10 + i" - SG1 = G1[:5] - SG2 = G2[10:] - - # connect based on pre-/postsynaptic state variables - S2 = Synapses(SG1, SG2) - S2.connect(j="k for k in sample(N_post, p=1.0*int(v_pre > 2))") - - S3 = Synapses(SG2, SG1) - S3.connect(j="k for k in sample(N_post, p=1.0*int(v_pre > 22))") - - run(0 * ms) # for standalone - - assert len(S2) == 2 * len(SG2), str(len(S2)) - assert all(S2.v_pre[:] > 2) - assert len(S3) == 7 * len(SG1), str(len(S3)) - assert all(S3.v_pre[:] > 22) - - -def test_synapse_access(): - G1 = NeuronGroup(10, "v:1") - G1.v = "i" - G2 = NeuronGroup(20, "v:1") - G2.v = "i" - SG1 = G1[:5] - SG2 = G2[10:] - S = Synapses(SG1, SG2, "w:1") - S.connect(True) - S.w["j == 0"] = 5 - assert all(S.w["j==0"] == 5) - S.w[2, 2] = 7 - assert all(S.w["i==2 and j==2"] == 7) - S.w = "2*j" - assert all(S.w[:, 1] == 2) - - assert len(S.w[:, 10]) == 0 - assert len(S.w["j==10"]) == 0 - - # Test referencing pre- and postsynaptic variables - assert_equal(S.w[2:, :], S.w["v_pre >= 2"]) - assert_equal(S.w[:, :5], S.w["v_post < 15"]) - S.w = "v_post" - assert_equal(S.w[:], S.j[:] + 10) - S.w = "v_post + v_pre" - assert_equal(S.w[:], S.j[:] + 10 + S.i[:]) - - # Test using subgroups as indices - assert len(S) == len(S.w[SG1, SG2]) - assert_equal(S.w[SG1, 1], S.w[:, 1]) - assert_equal(S.w[1, SG2], S.w[1, :]) - assert len(S.w[SG1, 10]) == 0 - - -def test_synapses_access_subgroups(): - G1 = NeuronGroup(5, "x:1") - G2 = NeuronGroup(10, "y:1") - SG1 = G1[2:5] - SG2 = G2[4:9] - S = Synapses(G1, G2, "w:1") - S.connect() - S.w[SG1, SG2] = 1 - assert_equal(S.w["(i>=2 and i<5) and (j>=4 and j<9)"], 1) - assert_equal(S.w["not ((i>=2 and i<5) and (j>=4 and j<9))"], 0) - S.w = 0 - S.w[SG1, :] = 1 - assert_equal(S.w["i>=2 and i<5"], 1) - assert_equal(S.w["not (i>=2 and i<5)"], 0) - S.w = 0 - S.w[:, SG2] = 1 - assert_equal(S.w["j>=4 and j<9"], 1) - assert_equal(S.w["not (j>=4 and j<9)"], 0) - - -@pytest.mark.codegen_independent -def test_synapses_access_subgroups_problematic(): - G1 = NeuronGroup(5, "x:1") - G2 = NeuronGroup(10, "y:1") - SG1 = G1[2:5] - SG2 = G2[4:9] - S = Synapses(G1, G2, "w:1") - S.connect() - - # Note that "j" is not ambiguous, because the equivalent in the target group - # is called "i" (this previously raised a warning) - tests = [ - ((SG1, slice(None)), "i", 1), - ((SG1, slice(None)), "i + N_pre", 2), - ((SG1, slice(None)), "N_pre", 1), - ((slice(None), SG2), "j", 0), - ((slice(None), SG2), "N_post", 1), - ((slice(None), SG2), "N", 1), - ((SG1, SG2), "i", 1), - ((SG1, SG2), "i + j", 1), - ((SG1, SG2), "N_pre", 1), - ((SG1, SG2), "j", 0), - ((SG1, SG2), "N_post", 1), - ((SG1, SG2), "N", 1), - # These should not raise a warning - ((SG1, SG2), "w", 0), - ((SG1, SG2), "x_pre", 0), - ((SG1, SG2), "y_post", 0), - ((SG1, SG2), "y", 0), - ] - for item, value, n_warnings in tests: - with catch_logs() as l: - S.w.__setitem__(item, value) - assert ( - len(l) == n_warnings - ), f"expected {int(n_warnings)}, got {len(l)} warnings" - assert all( - [entry[1].endswith("ambiguous_string_expression") for entry in l] - ) - - -@pytest.mark.standalone_compatible -def test_subgroup_summed_variable(): - # Check in particular 
that only neurons targeted are reset to 0 (see github issue #925) - source = NeuronGroup(1, "") - target = NeuronGroup(5, "Iin : 1") - target.Iin = 10 - target1 = target[1:2] - target2 = target[3:] - - syn1 = Synapses(source, target1, "Iin_post = 5 : 1 (summed)") - syn1.connect(True) - syn2 = Synapses(source, target2, "Iin_post = 1 : 1 (summed)") - syn2.connect(True) - - run(2 * defaultclock.dt) - - assert_array_equal(target.Iin, [10, 5, 10, 1, 1]) - - -def test_subexpression_references(): - """ - Assure that subexpressions in targeted groups are handled correctly. - """ - G = NeuronGroup( - 10, - """ - v : 1 - v2 = 2*v : 1 - """, - ) - G.v = np.arange(10) - SG1 = G[:5] - SG2 = G[5:] - - S1 = Synapses( - SG1, - SG2, - """ - w : 1 - u = v2_post + 1 : 1 - x = v2_pre + 1 : 1 - """, - ) - S1.connect("i==(5-1-j)") - assert_equal(S1.i[:], np.arange(5)) - assert_equal(S1.j[:], np.arange(5)[::-1]) - assert_equal(S1.u[:], np.arange(10)[:-6:-1] * 2 + 1) - assert_equal(S1.x[:], np.arange(5) * 2 + 1) - - S2 = Synapses( - G, - SG2, - """ - w : 1 - u = v2_post + 1 : 1 - x = v2_pre + 1 : 1 - """, - ) - S2.connect("i==(5-1-j)") - assert_equal(S2.i[:], np.arange(5)) - assert_equal(S2.j[:], np.arange(5)[::-1]) - assert_equal(S2.u[:], np.arange(10)[:-6:-1] * 2 + 1) - assert_equal(S2.x[:], np.arange(5) * 2 + 1) - - S3 = Synapses( - SG1, - G, - """ - w : 1 - u = v2_post + 1 : 1 - x = v2_pre + 1 : 1 - """, - ) - S3.connect("i==(10-1-j)") - assert_equal(S3.i[:], np.arange(5)) - assert_equal(S3.j[:], np.arange(10)[:-6:-1]) - assert_equal(S3.u[:], np.arange(10)[:-6:-1] * 2 + 1) - assert_equal(S3.x[:], np.arange(5) * 2 + 1) - - -def test_subexpression_no_references(): - """ - Assure that subexpressions are handled correctly, even - when the subgroups are created on-the-fly. 
- """ - G = NeuronGroup( - 10, - """ - v : 1 - v2 = 2*v : 1 - """, - ) - G.v = np.arange(10) - - assert_equal(G[5:].v2, np.arange(5, 10) * 2) - - S1 = Synapses( - G[:5], - G[5:], - """ - w : 1 - u = v2_post + 1 : 1 - x = v2_pre + 1 : 1 - """, - ) - S1.connect("i==(5-1-j)") - assert_equal(S1.i[:], np.arange(5)) - assert_equal(S1.j[:], np.arange(5)[::-1]) - assert_equal(S1.u[:], np.arange(10)[:-6:-1] * 2 + 1) - assert_equal(S1.x[:], np.arange(5) * 2 + 1) - - S2 = Synapses( - G, - G[5:], - """ - w : 1 - u = v2_post + 1 : 1 - x = v2_pre + 1 : 1 - """, - ) - S2.connect("i==(5-1-j)") - assert_equal(S2.i[:], np.arange(5)) - assert_equal(S2.j[:], np.arange(5)[::-1]) - assert_equal(S2.u[:], np.arange(10)[:-6:-1] * 2 + 1) - assert_equal(S2.x[:], np.arange(5) * 2 + 1) - - S3 = Synapses( - G[:5], - G, - """ - w : 1 - u = v2_post + 1 : 1 - x = v2_pre + 1 : 1 - """, - ) - S3.connect("i==(10-1-j)") - assert_equal(S3.i[:], np.arange(5)) - assert_equal(S3.j[:], np.arange(10)[:-6:-1]) - assert_equal(S3.u[:], np.arange(10)[:-6:-1] * 2 + 1) - assert_equal(S3.x[:], np.arange(5) * 2 + 1) - - -@pytest.mark.standalone_compatible -def test_synaptic_propagation(): - G1 = NeuronGroup(10, "v:1", threshold="v>1", reset="v=0") - G1.v["i%2==1"] = 1.1 # odd numbers should spike - G2 = NeuronGroup(20, "v:1") - SG1 = G1[1:6] - SG2 = G2[10:] - S = Synapses(SG1, SG2, on_pre="v+=1") - S.connect("i==j") - run(defaultclock.dt + schedule_propagation_offset()) - expected = np.zeros(len(G2)) - # Neurons 1, 3, 5 spiked and are connected to 10, 12, 14 - expected[[10, 12, 14]] = 1 - assert_equal(np.asarray(G2.v).flatten(), expected) - - -@pytest.mark.standalone_compatible -def test_synaptic_propagation_2(): - # This tests for the bug in github issue #461 - source = NeuronGroup(100, "", threshold="True") - sub_source = source[99:] - target = NeuronGroup(1, "v:1") - syn = Synapses(sub_source, target, on_pre="v+=1") - syn.connect() - run(defaultclock.dt + schedule_propagation_offset()) - assert target.v[0] == 1.0 - - -@pytest.mark.standalone_compatible -def test_run_regularly(): - # See github issue #922 - - group = NeuronGroup(10, "v: integer") - # Full group - group.run_regularly("v += 16") - # Subgroup with explicit reference - subgroup = group[:2] - subgroup.run_regularly("v += 8") - # Subgroup with explicit reference and reference for run_regularly operation - subgroup2 = group[2:4] - updater = subgroup2.run_regularly("v += 4") - # Subgroup without reference - group[4:6].run_regularly("v += 2") - # Subgroup without reference, with reference for run_regularly operation - updater2 = group[6:8].run_regularly("v += 1") - - run(defaultclock.dt) - assert_array_equal(group.v, [24, 24, 20, 20, 18, 18, 17, 17, 16, 16]) - - -@pytest.mark.standalone_compatible -def test_spike_monitor(): - G = NeuronGroup(10, "v:1", threshold="v>1", reset="v=0") - G.v[0] = 1.1 - G.v[2] = 1.1 - G.v[5] = 1.1 - SG = G[3:] - SG2 = G[:3] - s_mon = SpikeMonitor(G) - sub_s_mon = SpikeMonitor(SG) - sub_s_mon2 = SpikeMonitor(SG2) - run(defaultclock.dt) - assert_equal(s_mon.i, np.array([0, 2, 5])) - assert_equal(s_mon.t_, np.zeros(3)) - assert_equal(sub_s_mon.i, np.array([2])) - assert_equal(sub_s_mon.t_, np.zeros(1)) - assert_equal(sub_s_mon2.i, np.array([0, 2])) - assert_equal(sub_s_mon2.t_, np.zeros(2)) - expected = np.zeros(10, dtype=int) - expected[[0, 2, 5]] = 1 - assert_equal(s_mon.count, expected) - expected = np.zeros(7, dtype=int) - expected[[2]] = 1 - assert_equal(sub_s_mon.count, expected) - assert_equal(sub_s_mon2.count, np.array([1, 0, 1])) - - 
-@pytest.mark.codegen_independent -def test_wrong_indexing(): - G = NeuronGroup(10, "v:1") - with pytest.raises(TypeError): - G["string"] - - with pytest.raises(IndexError): - G[10] - with pytest.raises(IndexError): - G[10:] - with pytest.raises(IndexError): - G[::2] - with pytest.raises(IndexError): - G[3:2] - with pytest.raises(IndexError): - G[[5, 4, 3]] - with pytest.raises(IndexError): - G[[2, 4, 6]] - with pytest.raises(IndexError): - G[[-1, 0, 1]] - with pytest.raises(IndexError): - G[[9, 10, 11]] - with pytest.raises(IndexError): - G[[9, 10]] - with pytest.raises(IndexError): - G[[10, 11]] - with pytest.raises(TypeError): - G[[2.5, 3.5, 4.5]] - - -@pytest.mark.codegen_independent -def test_alternative_indexing(): - G = NeuronGroup(10, "v : integer") - G.v = "i" - assert_equal(G[-3:].v, np.array([7, 8, 9])) - assert_equal(G[3].v, np.array([3])) - assert_equal(G[[3, 4, 5]].v, np.array([3, 4, 5])) - - -def test_no_reference_1(): - """ - Using subgroups without keeping an explicit reference. Basic access. - """ - G = NeuronGroup(10, "v:1") - G.v = np.arange(10) - assert_equal(G[:5].v[:], G.v[:5]) - - -@pytest.mark.standalone_compatible -def test_no_reference_2(): - """ - Using subgroups without keeping an explicit reference. Monitors - """ - G = NeuronGroup(2, "v:1", threshold="v>1", reset="v=0") - G.v = [0, 1.1] - state_mon = StateMonitor(G[:1], "v", record=True) - spike_mon = SpikeMonitor(G[1:]) - rate_mon = PopulationRateMonitor(G[:2]) - run(2 * defaultclock.dt) - assert_equal(state_mon[0].v[:], np.zeros(2)) - assert_equal(spike_mon.i[:], np.array([0])) - assert_equal(spike_mon.t[:], np.array([0]) * second) - assert_equal(rate_mon.rate[:], np.array([0.5, 0]) / defaultclock.dt) - - -@pytest.mark.standalone_compatible -def test_no_reference_3(): - """ - Using subgroups without keeping an explicit reference. Monitors - """ - G = NeuronGroup(2, "v:1", threshold="v>1", reset="v=0") - G.v = [1.1, 0] - S = Synapses(G[:1], G[1:], on_pre="v+=1") - S.connect() - run(defaultclock.dt + schedule_propagation_offset()) - assert_equal(G.v[:], np.array([0, 1])) - - -@pytest.mark.standalone_compatible -def test_no_reference_4(): - """ - Using subgroups without keeping an explicit reference. 
Synapses - """ - G1 = NeuronGroup(10, "v:1", threshold="v>1", reset="v=0") - G1.v["i%2==1"] = 1.1 # odd numbers should spike - G2 = NeuronGroup(20, "v:1") - S = Synapses(G1[1:6], G2[10:], on_pre="v+=1") - S.connect("i==j") - run(defaultclock.dt + schedule_propagation_offset()) - expected = np.zeros(len(G2)) - # Neurons 1, 3, 5 spiked and are connected to 10, 12, 14 - expected[[10, 12, 14]] = 1 - assert_equal(np.asarray(G2.v).flatten(), expected) - - -def test_recursive_subgroup(): - """ - Create a subgroup of a subgroup - """ - G = NeuronGroup(10, "v : 1") - G.v = "i" - SG = G[3:8] - SG2 = SG[2:4] - assert_equal(SG2.v[:], np.array([5, 6])) - assert_equal(SG2.v[:], SG.v[2:4]) - assert SG2.source.name == G.name - - -if __name__ == "__main__": - test_str_repr() - test_state_variables() - test_state_variables_simple() - test_state_variables_string_indices() - test_state_variables_group_as_index() - test_state_variables_group_as_index_problematic() - test_state_monitor() - test_shared_variable() - test_synapse_creation() - test_synapse_creation_state_vars() - test_synapse_creation_generator() - test_synapse_creation_generator_complex_ranges() - test_synapse_creation_generator_random() - test_synapse_creation_generator_multiple_synapses() - test_synapse_access() - test_synapses_access_subgroups() - test_synapses_access_subgroups_problematic() - test_subgroup_summed_variable() - test_subexpression_references() - test_subexpression_no_references() - test_synaptic_propagation() - test_synaptic_propagation_2() - test_run_regularly() - test_spike_monitor() - test_wrong_indexing() - test_no_reference_1() - test_no_reference_2() - test_no_reference_3() - test_no_reference_4() - test_recursive_subgroup() diff --git a/brian2/tests/test_synapses.py b/brian2/tests/test_synapses.py deleted file mode 100644 index 7e00063f9..000000000 --- a/brian2/tests/test_synapses.py +++ /dev/null @@ -1,3776 +0,0 @@ -import logging -import uuid - -import pytest -import sympy -from numpy.testing import assert_array_equal, assert_equal - -from brian2 import * -from brian2.codegen.generators import NumpyCodeGenerator -from brian2.codegen.permutation_analysis import ( - OrderDependenceError, - check_for_order_independence, -) -from brian2.codegen.translation import make_statements -from brian2.core.functions import DEFAULT_FUNCTIONS -from brian2.core.network import schedule_propagation_offset -from brian2.core.variables import ArrayVariable, Constant, variables_by_owner -from brian2.devices.cpp_standalone.device import CPPStandaloneDevice -from brian2.devices.device import all_devices, get_device, reinit_and_delete -from brian2.equations.equations import EquationError -from brian2.stateupdaters.base import UnsupportedEquationsException -from brian2.synapses.parse_synaptic_generator_syntax import parse_synapse_generator -from brian2.tests.utils import assert_allclose, exc_isinstance -from brian2.utils.logger import catch_logs -from brian2.utils.stringtools import deindent, get_identifiers, indent, word_substitute - - -def _compare(synapses, expected): - conn_matrix = np.zeros((len(synapses.source), len(synapses.target)), dtype=np.int32) - for _i, _j in zip(synapses.i[:], synapses.j[:]): - conn_matrix[_i, _j] += 1 - - assert_equal(conn_matrix, expected) - # also compare the correct numbers of incoming and outgoing synapses - incoming = conn_matrix.sum(axis=0) - outgoing = conn_matrix.sum(axis=1) - assert all( - synapses.N_outgoing[:] == outgoing[synapses.i[:]] - ), "N_outgoing returned an incorrect value" - assert_array_equal( - 
synapses.N_outgoing_pre, outgoing - ), "N_outgoing_pre returned an incorrect value" - assert all( - synapses.N_incoming[:] == incoming[synapses.j[:]] - ), "N_incoming returned an incorrect value" - assert_array_equal( - synapses.N_incoming_post, incoming - ), "N_incoming_post returned an incorrect value" - - # Compare the "synapse number" if it exists - if synapses.multisynaptic_index is not None: - # Build an array of synapse numbers by counting the number of times - # a source/target combination exists - synapse_numbers = np.zeros_like(synapses.i[:]) - numbers = {} - for _i, (source, target) in enumerate(zip(synapses.i[:], synapses.j[:])): - number = numbers.get((source, target), 0) - synapse_numbers[_i] = number - numbers[(source, target)] = number + 1 - assert all( - synapses.state(synapses.multisynaptic_index)[:] == synapse_numbers - ), "synapse_number returned an incorrect value" - - -@pytest.mark.codegen_independent -def test_creation(): - """ - A basic test that creating a Synapses object works. - """ - G = NeuronGroup(42, "v: 1", threshold="False") - S = Synapses(G, G, "w:1", on_pre="v+=w") - # We store weakref proxys, so we can't directly compare the objects - assert S.source.name == S.target.name == G.name - assert len(S) == 0 - S = Synapses(G, model="w:1", on_pre="v+=w") - assert S.source.name == S.target.name == G.name - - -@pytest.mark.codegen_independent -def test_creation_errors(): - G = NeuronGroup(42, "v: 1", threshold="False") - # Check that the old Synapses(..., connect=...) syntax raises an error - with pytest.raises(TypeError): - Synapses(G, G, "w:1", on_pre="v+=w", connect=True) - # Check that using pre and on_pre (resp. post/on_post) at the same time - # raises an error - with pytest.raises(TypeError): - Synapses(G, G, "w:1", pre="v+=w", on_pre="v+=w", connect=True) - with pytest.raises(TypeError): - Synapses(G, G, "w:1", post="v+=w", on_post="v+=w", connect=True) - - -@pytest.mark.codegen_independent -def test_connect_errors(): - G = NeuronGroup(42, "") - S = Synapses(G, G) - - # Not a boolean condition - with pytest.raises(TypeError): - S.connect("i*2") - - # Unit error - with pytest.raises(DimensionMismatchError): - S.connect("i > 3*mV") - - # Syntax error - with pytest.raises(SyntaxError): - S.connect("sin(3, 4) > 1") - - # Unit error in p argument - with pytest.raises(TypeError): - S.connect("1*mV") - - # Syntax error in p argument - with pytest.raises(SyntaxError): - S.connect(p="sin(3, 4)") - - -@pytest.mark.codegen_independent -def test_name_clashes(): - # Using identical names for synaptic and pre- or post-synaptic variables - # is confusing and should be forbidden - G1 = NeuronGroup(1, "a : 1") - G2 = NeuronGroup(1, "b : 1") - with pytest.raises(ValueError): - Synapses(G1, G2, "a : 1") - with pytest.raises(ValueError): - Synapses(G1, G2, "b : 1") - - # Using _pre or _post as variable names is confusing (even if it is non- - # ambiguous in unconnected NeuronGroups) - with pytest.raises(ValueError): - Synapses(G1, G2, "x_pre : 1") - with pytest.raises(ValueError): - Synapses(G1, G2, "x_post : 1") - with pytest.raises(ValueError): - Synapses(G1, G2, "x_pre = 1 : 1") - with pytest.raises(ValueError): - Synapses(G1, G2, "x_post = 1 : 1") - with pytest.raises(ValueError): - NeuronGroup(1, "x_pre : 1") - with pytest.raises(ValueError): - NeuronGroup(1, "x_post : 1") - with pytest.raises(ValueError): - NeuronGroup(1, "x_pre = 1 : 1") - with pytest.raises(ValueError): - NeuronGroup(1, "x_post = 1 : 1") - - # this should all be ok - Synapses(G1, G2, "c : 1") - 
Synapses(G1, G2, "a_syn : 1") - Synapses(G1, G2, "b_syn : 1") - - -@pytest.mark.standalone_compatible -def test_incoming_outgoing(): - """ - Test the count of outgoing/incoming synapses per neuron. - (It will be also automatically tested for all connection patterns that - use the above _compare function for testing) - """ - G1 = NeuronGroup(5, "") - G2 = NeuronGroup(5, "") - S = Synapses(G1, G2, "") - S.connect(i=[0, 0, 0, 1, 1, 2], j=[0, 1, 2, 1, 2, 3]) - run(0 * ms) # to make this work for standalone - # First source neuron has 3 outgoing synapses, the second 2, the third 1 - assert all(S.N_outgoing[0, :] == 3) - assert all(S.N_outgoing[1, :] == 2) - assert all(S.N_outgoing[2, :] == 1) - assert all(S.N_outgoing[3:, :] == 0) - assert_array_equal(S.N_outgoing_pre, [3, 2, 1, 0, 0]) - # First target neuron receives 1 input, the second+third each 2, the fourth receives 1 - assert all(S.N_incoming[:, 0] == 1) - assert all(S.N_incoming[:, 1] == 2) - assert all(S.N_incoming[:, 2] == 2) - assert all(S.N_incoming[:, 3] == 1) - assert all(S.N_incoming[:, 4:] == 0) - assert_array_equal(S.N_incoming_post, [1, 2, 2, 1, 0]) - - -@pytest.mark.standalone_compatible -def test_connection_arrays(): - """ - Test connecting synapses with explictly given arrays - """ - G = NeuronGroup(42, "") - G2 = NeuronGroup(17, "") - - # one-to-one - expected1 = np.eye(len(G2)) - S1 = Synapses(G2) - S1.connect(i=np.arange(len(G2)), j=np.arange(len(G2))) - - # full - expected2 = np.ones((len(G), len(G2))) - S2 = Synapses(G, G2) - X, Y = np.meshgrid(np.arange(len(G)), np.arange(len(G2))) - S2.connect(i=X.flatten(), j=Y.flatten()) - - # Multiple synapses - expected3 = np.zeros((len(G), len(G2))) - expected3[3, 3] = 2 - S3 = Synapses(G, G2) - S3.connect(i=[3, 3], j=[3, 3]) - - run(0 * ms) # for standalone - _compare(S1, expected1) - _compare(S2, expected2) - _compare(S3, expected3) - - # Incorrect usage - S = Synapses(G, G2) - with pytest.raises(TypeError): - S.connect(i=[1.1, 2.2], j=[1.1, 2.2]) - with pytest.raises(TypeError): - S.connect(i=[1, 2], j="string") - with pytest.raises(TypeError): - S.connect(i=[1, 2], j=[1, 2], n="i") - with pytest.raises(TypeError): - S.connect([1, 2]) - with pytest.raises(ValueError): - S.connect(i=[1, 2, 3], j=[1, 2]) - with pytest.raises(ValueError): - S.connect(i=np.ones((3, 3), dtype=np.int32), j=np.ones((3, 1), dtype=np.int32)) - with pytest.raises(IndexError): - S.connect(i=[41, 42], j=[0, 1]) # source index > max - with pytest.raises(IndexError): - S.connect(i=[0, 1], j=[16, 17]) # target index > max - with pytest.raises(IndexError): - S.connect(i=[0, -1], j=[0, 1]) # source index < 0 - with pytest.raises(IndexError): - S.connect(i=[0, 1], j=[0, -1]) # target index < 0 - with pytest.raises(ValueError): - S.connect("i==j", j=np.arange(10)) - with pytest.raises(TypeError): - S.connect("i==j", n=object()) - with pytest.raises(TypeError): - S.connect("i==j", p=object()) - with pytest.raises(TypeError): - S.connect(object()) - - -@pytest.mark.standalone_compatible -def test_connection_string_deterministic_full(): - G = NeuronGroup(17, "") - G2 = NeuronGroup(4, "") - - # Full connection - expected_full = np.ones((len(G), len(G2))) - - S1 = Synapses(G, G2, "") - S1.connect(True) - - S2 = Synapses(G, G2, "") - S2.connect("True") - - run(0 * ms) # for standalone - - _compare(S1, expected_full) - _compare(S2, expected_full) - - -@pytest.mark.standalone_compatible -def test_connection_string_deterministic_full_no_self(): - G = NeuronGroup(17, "v : 1") - G.v = "i" - G2 = NeuronGroup(4, "v : 1") - 
G2.v = "17 + i" - - # Full connection without self-connections - expected_no_self = np.ones((len(G), len(G))) - np.eye(len(G)) - - S1 = Synapses(G, G) - S1.connect("i != j") - - S2 = Synapses(G, G) - S2.connect("v_pre != v_post") - - S3 = Synapses(G, G) - S3.connect(condition="i != j") - - run(0 * ms) # for standalone - - _compare(S1, expected_no_self) - _compare(S2, expected_no_self) - _compare(S3, expected_no_self) - - -@pytest.mark.standalone_compatible -def test_connection_string_deterministic_full_one_to_one(): - G = NeuronGroup(17, "v : 1") - G.v = "i" - G2 = NeuronGroup(4, "v : 1") - G2.v = "17 + i" - - # One-to-one connectivity - expected_one_to_one = np.eye(len(G)) - - S1 = Synapses(G, G) - S1.connect("i == j") - - S2 = Synapses(G, G) - S2.connect("v_pre == v_post") - - S3 = Synapses( - G, - G, - """ - sub_1 = v_pre : 1 - sub_2 = v_post : 1 - w:1 - """, - ) - S3.connect("sub_1 == sub_2") - - S4 = Synapses(G, G) - S4.connect(j="i") - - run(0 * ms) # for standalone - - _compare(S1, expected_one_to_one) - _compare(S2, expected_one_to_one) - _compare(S3, expected_one_to_one) - _compare(S4, expected_one_to_one) - - -@pytest.mark.standalone_compatible -def test_connection_string_deterministic_full_custom(): - G = NeuronGroup(17, "") - G2 = NeuronGroup(4, "") - # Everything except for the upper [2, 2] quadrant - number = 2 - expected_custom = np.ones((len(G), len(G))) - expected_custom[:number, :number] = 0 - S1 = Synapses(G, G) - S1.connect("(i >= number) or (j >= number)") - - S2 = Synapses(G, G) - S2.connect( - "(i >= explicit_number) or (j >= explicit_number)", - namespace={"explicit_number": number}, - ) - - # check that this mistaken syntax raises an error - with pytest.raises(ValueError): - S2.connect("k for k in range(1)") - - # check that trying to connect to a neuron outside the range raises an error - if get_device() == all_devices["runtime"]: - with pytest.raises(BrianObjectException) as exc: - S2.connect(j="20") - assert exc_isinstance(exc, IndexError) - - run(0 * ms) # for standalone - - _compare(S1, expected_custom) - _compare(S2, expected_custom) - - -@pytest.mark.standalone_compatible -def test_connection_string_deterministic_multiple_and(): - # In Brian versions 2.1.0-2.1.2, this fails on the numpy target - # See github issue 900 - group = NeuronGroup(10, "") - synapses = Synapses(group, group) - synapses.connect("i>=5 and i<10 and j>=5") - run(0 * ms) # for standalone - assert len(synapses) == 25 - - -@pytest.mark.standalone_compatible -def test_connection_random_with_condition(): - G = NeuronGroup(4, "") - - S1 = Synapses(G, G) - S1.connect("i!=j", p=0.0) - - S2 = Synapses(G, G) - S2.connect("i!=j", p=1.0) - expected2 = np.ones((len(G), len(G))) - np.eye(len(G)) - - S3 = Synapses(G, G) - S3.connect("i>=2", p=0.0) - - S4 = Synapses(G, G) - S4.connect("i>=2", p=1.0) - expected4 = np.zeros((len(G), len(G))) - expected4[2, :] = 1 - expected4[3, :] = 1 - - S5 = Synapses(G, G) - S5.connect("j<2", p=0.0) - S6 = Synapses(G, G) - S6.connect("j<2", p=1.0) - expected6 = np.zeros((len(G), len(G))) - expected6[:, 0] = 1 - expected6[:, 1] = 1 - - with catch_logs() as _: # Ignore warnings about empty synapses - run(0 * ms) # for standalone - - assert len(S1) == 0 - _compare(S2, expected2) - assert len(S3) == 0 - _compare(S4, expected4) - assert len(S5) == 0 - _compare(S6, expected6) - - -@pytest.mark.standalone_compatible -@pytest.mark.long -def test_connection_random_with_condition_2(): - G = NeuronGroup(4, "") - - # Just checking that everything works in principle (we can't check 
the - # actual connections) - S7 = Synapses(G, G) - S7.connect("i!=j", p=0.01) - - S8 = Synapses(G, G) - S8.connect("i!=j", p=0.03) - - S9 = Synapses(G, G) - S9.connect("i!=j", p=0.3) - - S10 = Synapses(G, G) - S10.connect("i>=2", p=0.01) - - S11 = Synapses(G, G) - S11.connect("i>=2", p=0.03) - - S12 = Synapses(G, G) - S12.connect("i>=2", p=0.3) - - S13 = Synapses(G, G) - S13.connect("j>=2", p=0.01) - - S14 = Synapses(G, G) - S14.connect("j>=2", p=0.03) - - S15 = Synapses(G, G) - S15.connect("j>=2", p=0.3) - - S16 = Synapses(G, G) - S16.connect("i!=j", p="i*0.1") - - S17 = Synapses(G, G) - S17.connect("i!=j", p="j*0.1") - - # Forces the use of the "jump algorithm" - big_group = NeuronGroup(10000, "") - S18 = Synapses(big_group, big_group) - S18.connect("i != j", p=0.001) - - # See github issue #835 -- this failed when using numpy - S19 = Synapses(big_group, big_group) - S19.connect("i < int(N_post*0.5)", p=0.001) - - with catch_logs() as _: # Ignore warnings about empty synapses - run(0 * ms) # for standalone - - assert not any(S7.i == S7.j) - assert not any(S8.i == S8.j) - assert not any(S9.i == S9.j) - assert all(S10.i >= 2) - assert all(S11.i >= 2) - assert all(S12.i >= 2) - assert all(S13.j >= 2) - assert all(S14.j >= 2) - assert all(S15.j >= 2) - assert not any(S16.i == 0) - assert not any(S17.j == 0) - - -@pytest.mark.standalone_compatible -def test_connection_random_with_indices(): - """ - Test random connections. - """ - G = NeuronGroup(4, "") - G2 = NeuronGroup(7, "") - - S1 = Synapses(G, G2) - S1.connect(i=0, j=0, p=0.0) - expected1 = np.zeros((len(G), len(G2))) - - S2 = Synapses(G, G2) - S2.connect(i=0, j=0, p=1.0) - expected2 = np.zeros((len(G), len(G2))) - expected2[0, 0] = 1 - - S3 = Synapses(G, G2) - S3.connect(i=[0, 1], j=[0, 2], p=1.0) - expected3 = np.zeros((len(G), len(G2))) - expected3[0, 0] = 1 - expected3[1, 2] = 1 - - # Just checking that it works in principle - S4 = Synapses(G, G) - S4.connect(i=0, j=0, p=0.01) - S5 = Synapses(G, G) - S5.connect(i=[0, 1], j=[0, 2], p=0.01) - - S6 = Synapses(G, G) - S6.connect(i=0, j=0, p=0.03) - - S7 = Synapses(G, G) - S7.connect(i=[0, 1], j=[0, 2], p=0.03) - - S8 = Synapses(G, G) - S8.connect(i=0, j=0, p=0.3) - - S9 = Synapses(G, G) - S9.connect(i=[0, 1], j=[0, 2], p=0.3) - - with catch_logs() as _: # Ignore warnings about empty synapses - run(0 * ms) # for standalone - - _compare(S1, expected1) - _compare(S2, expected2) - _compare(S3, expected3) - assert 0 <= len(S4) <= 1 - assert 0 <= len(S5) <= 2 - assert 0 <= len(S6) <= 1 - assert 0 <= len(S7) <= 2 - assert 0 <= len(S8) <= 1 - assert 0 <= len(S9) <= 2 - - -@pytest.mark.standalone_compatible -def test_connection_random_without_condition(): - G = NeuronGroup( - 4, - """ - v: 1 - x : integer - """, - ) - G.x = "i" - G2 = NeuronGroup( - 7, - """ - v: 1 - y : 1 - """, - ) - G2.y = "1.0*i/N" - - S1 = Synapses(G, G2) - S1.connect(True, p=0.0) - - S2 = Synapses(G, G2) - S2.connect(True, p=1.0) - - # Just make sure using values between 0 and 1 work in principle - S3 = Synapses(G, G2) - S3.connect(True, p=0.3) - - # Use pre-/post-synaptic variables for "stochastic" connections that are - # actually deterministic - S4 = Synapses(G, G2) - S4.connect(True, p="int(x_pre==2)*1.0") - - # Use pre-/post-synaptic variables for "stochastic" connections that are - # actually deterministic - S5 = Synapses(G, G2) - S5.connect(True, p="int(x_pre==2 and y_post > 0.5)*1.0") - - with catch_logs() as _: # Ignore warnings about empty synapses - run(0 * ms) # for standalone - - _compare(S1, 
np.zeros((len(G), len(G2)))) - _compare(S2, np.ones((len(G), len(G2)))) - assert 0 <= len(S3) <= len(G) * len(G2) - assert len(S4) == 7 - assert_equal(S4.i, np.ones(7) * 2) - assert_equal(S4.j, np.arange(7)) - assert len(S5) == 3 - assert_equal(S5.i, np.ones(3) * 2) - assert_equal(S5.j, np.arange(3) + 4) - - -@pytest.mark.standalone_compatible -def test_connection_multiple_synapses(): - """ - Test multiple synapses per connection. - """ - G = NeuronGroup(42, "v: 1") - G.v = "i" - G2 = NeuronGroup(17, "v: 1") - G2.v = "i" - - S1 = Synapses(G, G2) - S1.connect(True, n=0) - - S2 = Synapses(G, G2) - S2.connect(True, n=2) - - S3 = Synapses(G, G2) - S3.connect(True, n="j") - - S4 = Synapses(G, G2) - S4.connect(True, n="i") - - S5 = Synapses(G, G2) - S5.connect(True, n="int(i>j)*2") - - S6 = Synapses(G, G2) - S6.connect(True, n="int(v_pre>v_post)*2") - - with catch_logs() as _: # Ignore warnings about empty synapses - run(0 * ms) # for standalone - - assert len(S1) == 0 - _compare(S2, 2 * np.ones((len(G), len(G2)))) - _compare(S3, np.arange(len(G2)).reshape(1, len(G2)).repeat(len(G), axis=0)) - - _compare(S4, np.arange(len(G)).reshape(len(G), 1).repeat(len(G2), axis=1)) - expected = np.zeros((len(G), len(G2)), dtype=np.int32) - for source in range(len(G)): - expected[source, :source] = 2 - _compare(S5, expected) - _compare(S6, expected) - - -def test_state_variable_assignment(): - """ - Assign values to state variables in various ways - """ - - G = NeuronGroup(10, "v: volt") - G.v = "i*mV" - S = Synapses(G, G, "w:volt") - S.connect(True) - - # with unit checking - assignment_expected = [ - (5 * mV, np.ones(100) * 5 * mV), - (7 * mV, np.ones(100) * 7 * mV), - (S.i[:] * mV, S.i[:] * np.ones(100) * mV), - ("5*mV", np.ones(100) * 5 * mV), - ("i*mV", np.ones(100) * S.i[:] * mV), - ("i*mV +j*mV", S.i[:] * mV + S.j[:] * mV), - # reference to pre- and postsynaptic state variables - ("v_pre", S.i[:] * mV), - ("v_post", S.j[:] * mV), - # ('i*mV + j*mV + k*mV', S.i[:]*mV + S.j[:]*mV + S.k[:]*mV) #not supported yet - ] - - for assignment, expected in assignment_expected: - S.w = 0 * volt - S.w = assignment - assert_allclose( - S.w[:], expected, err_msg="Assigning %r gave incorrect result" % assignment - ) - S.w = 0 * volt - S.w[:] = assignment - assert_allclose( - S.w[:], expected, err_msg="Assigning %r gave incorrect result" % assignment - ) - - # without unit checking - assignment_expected = [ - (5, np.ones(100) * 5 * volt), - (7, np.ones(100) * 7 * volt), - (S.i[:], S.i[:] * np.ones(100) * volt), - ("5", np.ones(100) * 5 * volt), - ("i", np.ones(100) * S.i[:] * volt), - ("i +j", S.i[:] * volt + S.j[:] * volt), - # ('i + j + k', S.i[:]*volt + S.j[:]*volt + S.k[:]*volt) #not supported yet - ] - - for assignment, expected in assignment_expected: - S.w = 0 * volt - S.w_ = assignment - assert_allclose( - S.w[:], expected, err_msg="Assigning %r gave incorrect result" % assignment - ) - S.w = 0 * volt - S.w_[:] = assignment - assert_allclose( - S.w[:], expected, err_msg="Assigning %r gave incorrect result" % assignment - ) - - -def test_state_variable_indexing(): - G1 = NeuronGroup(5, "v:volt") - G1.v = "i*mV" - G2 = NeuronGroup(7, "v:volt") - G2.v = "10*mV + i*mV" - S = Synapses(G1, G2, "w:1", multisynaptic_index="k") - S.connect(True, n=2) - S.w[:, :, 0] = "5*i + j" - S.w[:, :, 1] = "35 + 5*i + j" - - # Slicing - assert len(S.w[:]) == len(S.w[:, :]) == len(S.w[:, :, :]) == len(G1) * len(G2) * 2 - assert len(S.w[0:, 0:]) == len(S.w[0:, 0:, 0:]) == len(G1) * len(G2) * 2 - assert len(S.w[0::2, 0:]) == 3 * len(G2) 
* 2 - assert len(S.w[0, :]) == len(S.w[0, :, :]) == len(G2) * 2 - assert len(S.w[0:2, :]) == len(S.w[0:2, :, :]) == 2 * len(G2) * 2 - assert len(S.w[:2, :]) == len(S.w[:2, :, :]) == 2 * len(G2) * 2 - assert len(S.w[0:4:2, :]) == len(S.w[0:4:2, :, :]) == 2 * len(G2) * 2 - assert len(S.w[:4:2, :]) == len(S.w[:4:2, :, :]) == 2 * len(G2) * 2 - assert len(S.w[:, 0]) == len(S.w[:, 0, :]) == len(G1) * 2 - assert len(S.w[:, 0:2]) == len(S.w[:, 0:2, :]) == 2 * len(G1) * 2 - assert len(S.w[:, :2]) == len(S.w[:, :2, :]) == 2 * len(G1) * 2 - assert len(S.w[:, 0:4:2]) == len(S.w[:, 0:4:2, :]) == 2 * len(G1) * 2 - assert len(S.w[:, :4:2]) == len(S.w[:, :4:2, :]) == 2 * len(G1) * 2 - assert len(S.w[:, :, 0]) == len(G1) * len(G2) - assert len(S.w[:, :, 0:2]) == len(G1) * len(G2) * 2 - assert len(S.w[:, :, :2]) == len(G1) * len(G2) * 2 - assert len(S.w[:, :, 0:2:2]) == len(G1) * len(G2) - assert len(S.w[:, :, :2:2]) == len(G1) * len(G2) - - # 1d indexing is directly indexing synapses! - assert len(S.w[:]) == len(S.w[0:]) - assert len(S.w[[0, 1]]) == len(S.w[3:5]) == 2 - assert len(S.w[:]) == len(S.w[np.arange(len(G1) * len(G2) * 2)]) - assert S.w[3] == S.w[np.int32(3)] == S.w[np.int64(3)] # See issue #888 - - # Array-indexing (not yet supported for synapse index) - assert_equal(S.w[:, 0:3], S.w[:, [0, 1, 2]]) - assert_equal(S.w[:, 0:3], S.w[np.arange(len(G1)), [0, 1, 2]]) - - # string-based indexing - assert_equal(S.w[0:3, :], S.w["i<3"]) - assert_equal(S.w[:, 0:3], S.w["j<3"]) - assert_equal(S.w[:, :, 0], S.w["k == 0"]) - assert_equal(S.w[0:3, :], S.w["v_pre < 2.5*mV"]) - assert_equal(S.w[:, 0:3], S.w["v_post < 12.5*mV"]) - - # invalid indices - with pytest.raises(IndexError): - S.w.__getitem__((1, 2, 3, 4)) - with pytest.raises(IndexError): - S.w.__getitem__(object()) - with pytest.raises(IndexError): - S.w.__getitem__(1.5) - - -def test_indices(): - G = NeuronGroup(10, "v : 1") - S = Synapses(G, G, "") - S.connect() - G.v = "i" - - assert_equal(S.indices[:], np.arange(10 * 10)) - assert len(S.indices[5, :]) == 10 - assert_equal(S.indices["v_pre >=5"], S.indices[5:, :]) - assert_equal(S.indices["j >=5"], S.indices[:, 5:]) - - -def test_subexpression_references(): - """ - Assure that subexpressions in targeted groups are handled correctly. 
- """ - G = NeuronGroup( - 10, - """ - v : 1 - v2 = 2*v : 1 - """, - ) - G.v = np.arange(10) - S = Synapses( - G, - G, - """ - w : 1 - u = v2_post + 1 : 1 - x = v2_pre + 1 : 1 - """, - ) - S.connect("i==(10-1-j)") - assert_equal(S.u[:], np.arange(10)[::-1] * 2 + 1) - assert_equal(S.x[:], np.arange(10) * 2 + 1) - - -@pytest.mark.standalone_compatible -def test_constant_variable_subexpression_in_synapses(): - G = NeuronGroup(10, "") - S = Synapses( - G, - G, - """ - dv1/dt = -v1**2 / (10*ms) : 1 (clock-driven) - dv2/dt = -v_const**2 / (10*ms) : 1 (clock-driven) - dv3/dt = -v_var**2 / (10*ms) : 1 (clock-driven) - dv4/dt = -v_noflag**2 / (10*ms) : 1 (clock-driven) - v_const = v2 : 1 (constant over dt) - v_var = v3 : 1 - v_noflag = v4 : 1 - """, - method="rk2", - ) - S.connect(j="i") - S.v1 = "1.0*i/N" - S.v2 = "1.0*i/N" - S.v3 = "1.0*i/N" - S.v4 = "1.0*i/N" - - run(10 * ms) - # "variable over dt" subexpressions are directly inserted into the equation - assert_allclose(S.v3[:], S.v1[:]) - assert_allclose(S.v4[:], S.v1[:]) - # "constant over dt" subexpressions will keep a fixed value over the time - # step and therefore give a slightly different result for multi-step - # methods - assert np.sum((S.v2 - S.v1) ** 2) > 1e-10 - - -@pytest.mark.standalone_compatible -def test_nested_subexpression_references(): - """ - Assure that subexpressions in targeted groups are handled correctly. - """ - G = NeuronGroup( - 10, - """ - v : 1 - v2 = 2*v : 1 - v3 = 1.5*v2 : 1 - """, - threshold="v>=5", - ) - G2 = NeuronGroup(10, "v : 1") - G.v = np.arange(10) - S = Synapses(G, G2, on_pre="v_post += v3_pre") - S.connect(j="i") - run(defaultclock.dt) - assert_allclose(G2.v[:5], 0.0) - assert_allclose(G2.v[5:], (5 + np.arange(5)) * 3) - - -@pytest.mark.codegen_independent -def test_equations_unit_check(): - group = NeuronGroup(1, "v : volt", threshold="True") - syn = Synapses( - group, - group, - """ - sub1 = 3 : 1 - sub2 = sub1 + 1*mV : volt - """, - on_pre="v += sub2", - ) - syn.connect() - net = Network(group, syn) - with pytest.raises(BrianObjectException) as exc: - net.run(0 * ms) - assert exc_isinstance(exc, DimensionMismatchError) - - -def test_delay_specification(): - # By default delays are state variables (i.e. arrays), but if they are - # specified in the initializer, they are scalars. 
- G = NeuronGroup(10, "x : meter", threshold="False") - G.x = "i*mmeter" - # Array delay - S = Synapses(G, G, "w:1", on_pre="v+=w") - S.connect(j="i") - assert len(S.delay[:]) == len(G) - S.delay = "i*ms" - assert_allclose(S.delay[:], np.arange(len(G)) * ms) - velocity = 1 * meter / second - S.delay = "abs(x_pre - (N_post-j)*mmeter)/velocity" - assert_allclose(S.delay[:], abs(G.x - (10 - G.i) * mmeter) / velocity) - S.delay = 5 * ms - assert_allclose(S.delay[:], np.ones(len(G)) * 5 * ms) - # Setting delays without units - S.delay_ = float(7 * ms) - assert_allclose(S.delay[:], np.ones(len(G)) * 7 * ms) - - # Scalar delay - S = Synapses(G, G, "w:1", on_pre="v+=w", delay=5 * ms) - assert_allclose(S.delay[:], 5 * ms) - S.connect(j="i") - S.delay = "3*ms" - assert_allclose(S.delay[:], 3 * ms) - S.delay = 10 * ms - assert_allclose(S.delay[:], 10 * ms) - # Without units - S.delay_ = float(20 * ms) - assert_allclose(S.delay[:], 20 * ms) - - # Invalid arguments - with pytest.raises(DimensionMismatchError): - Synapses(G, G, "w:1", on_pre="v+=w", delay=5 * mV) - with pytest.raises(TypeError): - Synapses(G, G, "w:1", on_pre="v+=w", delay=object()) - with pytest.raises(ValueError): - Synapses(G, G, "w:1", delay=5 * ms) - with pytest.raises(ValueError): - Synapses(G, G, "w:1", on_pre="v+=w", delay={"post": 5 * ms}) - - -def test_delays_pathways(): - G = NeuronGroup(10, "x: meter", threshold="False") - G.x = "i*mmeter" - # Array delay - S = Synapses(G, G, "w:1", on_pre={"pre1": "v+=w", "pre2": "v+=w"}, on_post="v-=w") - S.connect(j="i") - assert len(S.pre1.delay[:]) == len(G) - assert len(S.pre2.delay[:]) == len(G) - assert len(S.post.delay[:]) == len(G) - S.pre1.delay = "i*ms" - S.pre2.delay = "j*ms" - velocity = 1 * meter / second - S.post.delay = "abs(x_pre - (N_post-j)*mmeter)/velocity" - assert_allclose(S.pre1.delay[:], np.arange(len(G)) * ms) - assert_allclose(S.pre2.delay[:], np.arange(len(G)) * ms) - assert_allclose(S.post.delay[:], abs(G.x - (10 - G.i) * mmeter) / velocity) - S.pre1.delay = 5 * ms - S.pre2.delay = 10 * ms - S.post.delay = 1 * ms - assert_allclose(S.pre1.delay[:], np.ones(len(G)) * 5 * ms) - assert_allclose(S.pre2.delay[:], np.ones(len(G)) * 10 * ms) - assert_allclose(S.post.delay[:], np.ones(len(G)) * 1 * ms) - # Indexing with strings - assert len(S.pre1.delay["j<5"]) == 5 - assert_allclose(S.pre1.delay["j<5"], 5 * ms) - # Indexing with 2d indices - assert len(S.post.delay[[3, 4], :]) == 2 - assert_allclose(S.post.delay[[3, 4], :], 1 * ms) - assert len(S.pre2.delay[:, 7]) == 1 - assert_allclose(S.pre2.delay[:, 7], 10 * ms) - assert len(S.pre1.delay[[1, 2], [1, 2]]) == 2 - assert_allclose(S.pre1.delay[[1, 2], [1, 2]], 5 * ms) - - # Scalar delay - S = Synapses( - G, - G, - "w:1", - on_pre={"pre1": "v+=w", "pre2": "v+=w"}, - on_post="v-=w", - delay={"pre1": 5 * ms, "post": 1 * ms}, - ) - assert_allclose(S.pre1.delay[:], 5 * ms) - assert_allclose(S.post.delay[:], 1 * ms) - S.connect(j="i") - assert len(S.pre2.delay[:]) == len(G) - S.pre1.delay = 10 * ms - assert_allclose(S.pre1.delay[:], 10 * ms) - S.post.delay = "3*ms" - assert_allclose(S.post.delay[:], 3 * ms) - - -def test_delays_pathways_subgroups(): - G = NeuronGroup(10, "x: meter", threshold="False") - G.x = "i*mmeter" - # Array delay - S = Synapses( - G[:5], G[5:], "w:1", on_pre={"pre1": "v+=w", "pre2": "v+=w"}, on_post="v-=w" - ) - S.connect(j="i") - assert len(S.pre1.delay[:]) == 5 - assert len(S.pre2.delay[:]) == 5 - assert len(S.post.delay[:]) == 5 - S.pre1.delay = "i*ms" - S.pre2.delay = "j*ms" - velocity = 1 * meter / 
second - S.post.delay = "abs(x_pre - (N_post-j)*mmeter)/velocity" - assert_allclose(S.pre1.delay[:], np.arange(5) * ms) - assert_allclose(S.pre2.delay[:], np.arange(5) * ms) - assert_allclose(S.post.delay[:], abs(G[:5].x - (5 - G[:5].i) * mmeter) / velocity) - S.pre1.delay = 5 * ms - S.pre2.delay = 10 * ms - S.post.delay = 1 * ms - assert_allclose(S.pre1.delay[:], np.ones(5) * 5 * ms) - assert_allclose(S.pre2.delay[:], np.ones(5) * 10 * ms) - assert_allclose(S.post.delay[:], np.ones(5) * 1 * ms) - - -@pytest.mark.codegen_independent -def test_pre_before_post(): - # The pre pathway should be executed before the post pathway - G = NeuronGroup( - 1, - """ - x : 1 - y : 1 - """, - threshold="True", - ) - S = Synapses(G, G, "", on_pre="x=1; y=1", on_post="x=2") - S.connect() - run(defaultclock.dt) - # Both pathways should have been executed, but post should have overriden - # the x value (because it was executed later) - assert G.x == 2 - assert G.y == 1 - - -@pytest.mark.standalone_compatible -def test_pre_post_simple(): - # Test that pre and post still work correctly - G1 = SpikeGeneratorGroup(1, [0], [1] * ms) - G2 = SpikeGeneratorGroup(1, [0], [2] * ms) - with catch_logs() as l: - S = Synapses( - G1, - G2, - """ - pre_value : 1 - post_value : 1 - """, - pre="pre_value +=1", - post="post_value +=2", - ) - S.connect() - syn_mon = StateMonitor(S, ["pre_value", "post_value"], record=[0], when="end") - run(3 * ms) - offset = schedule_propagation_offset() - assert_allclose(syn_mon.pre_value[0][syn_mon.t < 1 * ms + offset], 0) - assert_allclose(syn_mon.pre_value[0][syn_mon.t >= 1 * ms + offset], 1) - assert_allclose(syn_mon.post_value[0][syn_mon.t < 2 * ms + offset], 0) - assert_allclose(syn_mon.post_value[0][syn_mon.t >= 2 * ms + offset], 2) - - -@pytest.mark.standalone_compatible -def test_transmission_simple(): - source = SpikeGeneratorGroup(2, [0, 1], [2, 1] * ms) - target = NeuronGroup(2, "v : 1") - syn = Synapses(source, target, on_pre="v += 1") - syn.connect(j="i") - mon = StateMonitor(target, "v", record=True, when="end") - run(2.5 * ms) - offset = schedule_propagation_offset() - assert_allclose(mon[0].v[mon.t < 2 * ms + offset], 0.0) - assert_allclose(mon[0].v[mon.t >= 2 * ms + offset], 1.0) - assert_allclose(mon[1].v[mon.t < 1 * ms + offset], 0.0) - assert_allclose(mon[1].v[mon.t >= 1 * ms + offset], 1.0) - - -@pytest.mark.standalone_compatible -def test_transmission_custom_event(): - source = NeuronGroup( - 2, - "", - events={ - "custom": ( - "timestep(t,dt)>=timestep((2-i)*ms, dt) " - "and timestep(t,dt)<timestep((2-i)*ms + dt, dt)" - ) - }, - ) - target = NeuronGroup(2, "v : 1") - syn = Synapses(source, target, on_pre="v += 1", on_event="custom") - syn.connect(j="i") - mon = StateMonitor(target, "v", record=True, when="end") - run(2.5 * ms) - assert_allclose(mon[0].v[mon.t < 2 * ms], 0.0) - assert_allclose(mon[0].v[mon.t >= 2 * ms], 1.0) - assert_allclose(mon[1].v[mon.t < 1 * ms], 0.0) - assert_allclose(mon[1].v[mon.t >= 1 * ms], 1.0) - - -@pytest.mark.standalone_compatible -def test_transmission_custom_event_complex(): - # This was broken with release 2.6, entries in on_event where checked against pre/post - # instead of the pathway name - group = NeuronGroup(3, "v:1", threshold="v>1 and v<2", events={"over_2": "v>2"}) - group.v = [0, 1.5, 2.5] - - s = Synapses( - group, - group, - model="""a:integer - b:integer - c:integer - d:integer""", - on_event={ - "path_a": "spike", - "path_b": "over_2", - "path_c": "spike", - "path_d": "over_2", - }, - on_pre={"path_a": "a+=1", "path_b": "b+=1"}, - on_post={"path_c": "c+=1", "path_d": "d+=1"}, - ) - s.connect() - run(defaultclock.dt) - assert all(s.a[:] == [0, 0, 0, 1, 1, 1, 0, 0, 0]) - assert all(s.b[:] == [0, 0, 0, 0, 0, 0, 1, 1, 1]) - assert all(s.c[:] == [0, 1, 0, 0, 1, 0, 0, 1, 0]) - assert all(s.d[:] == [0, 0, 1, 0, 0, 1, 0, 0, 1]) - -
-@pytest.mark.codegen_independent -def test_invalid_custom_event(): - group1 = NeuronGroup( - 2, - "v : 1", - events={ - "custom": ( - "timestep(t,dt)>=timesteep((2-i)*ms,dt) " - "and timestep(t, dt)= 0.5 * ms + offset - defaultclock.dt / 2], 1) - assert_allclose(mon[1].v[mon.t < 1.5 * ms + offset - defaultclock.dt / 2], 0) - assert_allclose(mon[1].v[mon.t >= 1.5 * ms + offset - defaultclock.dt / 2], 1) - - -@pytest.mark.standalone_compatible -def test_transmission_scalar_delay_different_clocks(): - inp = SpikeGeneratorGroup( - 2, - [0, 1], - [0, 1] * ms, - dt=0.5 * ms, - # give the group a unique name to always - # get a 'fresh' warning - name="sg_%d" % uuid.uuid4(), - ) - target = NeuronGroup(2, "v:1", dt=0.1 * ms) - S = Synapses(inp, target, on_pre="v+=1", delay=0.5 * ms) - S.connect(j="i") - mon = StateMonitor(target, "v", record=True, when="end") - - if get_device() == all_devices["runtime"]: - # We should get a warning when using inconsistent dts - with catch_logs() as l: - run(2 * ms) - assert len(l) == 1, "expected a warning, got %d" % len(l) - assert l[0][1].endswith("synapses_dt_mismatch") - - run(0 * ms) - assert_allclose(mon[0].v[mon.t < 0.5 * ms], 0) - assert_allclose(mon[0].v[mon.t >= 0.5 * ms], 1) - assert_allclose(mon[1].v[mon.t < 1.5 * ms], 0) - assert_allclose(mon[1].v[mon.t >= 1.5 * ms], 1) - - -@pytest.mark.standalone_compatible -def test_transmission_boolean_variable(): - source = SpikeGeneratorGroup(4, [0, 1, 2, 3], [2, 1, 2, 1] * ms) - target = NeuronGroup(4, "v : 1") - syn = Synapses(source, target, "use : boolean (constant)", on_pre="v += int(use)") - syn.connect(j="i") - syn.use = "i<2" - mon = StateMonitor(target, "v", record=True, when="end") - run(2.5 * ms) - offset = schedule_propagation_offset() - assert_allclose(mon[0].v[mon.t < 2 * ms + offset], 0.0) - assert_allclose(mon[0].v[mon.t >= 2 * ms + offset], 1.0) - assert_allclose(mon[1].v[mon.t < 1 * ms + offset], 0.0) - assert_allclose(mon[1].v[mon.t >= 1 * ms + offset], 1.0) - assert_allclose(mon[2].v, 0.0) - assert_allclose(mon[3].v, 0.0) - - -@pytest.mark.codegen_independent -def test_clocks(): - """ - Make sure that a `Synapse` object uses the correct clocks. - """ - source_dt = 0.05 * ms - target_dt = 0.1 * ms - synapse_dt = 0.2 * ms - source = NeuronGroup(1, "v:1", dt=source_dt, threshold="False") - target = NeuronGroup(1, "v:1", dt=target_dt, threshold="False") - synapse = Synapses( - source, target, "w:1", on_pre="v+=1", on_post="v+=1", dt=synapse_dt - ) - synapse.connect() - - assert synapse.pre.clock is source.clock - assert synapse.post.clock is target.clock - assert synapse.pre._clock.dt == source_dt - assert synapse.post._clock.dt == target_dt - assert synapse._clock.dt == synapse_dt - - -def test_equations_with_clocks(): - """ - Make sure that dt of a `Synapse` object is correctly resolved. 
- """ - source_dt = 0.1 * ms - synapse_dt = 1 * ms - source_target = NeuronGroup(1, "v:1", dt=source_dt, threshold="False") - synapse = Synapses( - source_target, - source_target, - "dw/dt = 1/ms : 1 (clock-driven)", - dt=synapse_dt, - method="euler", - ) - synapse.connect() - synapse.w = 0 - run(1 * ms) - - assert synapse.w[0] == 1 - - -def test_changed_dt_spikes_in_queue(): - defaultclock.dt = 0.5 * ms - G1 = NeuronGroup(1, "v:1", threshold="v>1", reset="v=0") - G1.v = 1.1 - G2 = NeuronGroup(10, "v:1", threshold="v>1", reset="v=0") - S = Synapses(G1, G2, on_pre="v+=1.1") - S.connect(True) - S.delay = "j*ms" - mon = SpikeMonitor(G2) - net = Network(G1, G2, S, mon) - net.run(5 * ms) - defaultclock.dt = 1 * ms - net.run(3 * ms) - defaultclock.dt = 0.1 * ms - net.run(2 * ms) - # Spikes should have delays of 0, 1, 2, ... ms and always - # trigger a spike one dt later - expected = [ - 0.5, - 1.5, - 2.5, - 3.5, - 4.5, # dt=0.5ms - 6, - 7, - 8, # dt = 1ms - 8.1, - 9.1, # dt=0.1ms - ] * ms - assert_allclose(mon.t[:], expected) - - -@pytest.mark.codegen_independent -def test_no_synapses(): - # Synaptic pathway but no synapses - G1 = NeuronGroup(1, "", threshold="True") - G2 = NeuronGroup(1, "v:1") - S = Synapses(G1, G2, on_pre="v+=1") - net = Network(G1, G2, S) - with pytest.raises(BrianObjectException) as exc: - net.run(0 * ms) - assert exc_isinstance(exc, TypeError) - - -@pytest.mark.codegen_independent -def test_no_synapses_variable_write(): - # Synaptic pathway but no synapses - G1 = NeuronGroup(1, "", threshold="True") - G2 = NeuronGroup(1, "v:1") - S = Synapses(G1, G2, "w : 1", on_pre="v+=w") - # Setting synaptic variables before calling connect is not allowed - with pytest.raises(TypeError): - setattr(S, "w", 1) - with pytest.raises(TypeError): - setattr(S, "delay", 1 * ms) - - -@pytest.mark.standalone_compatible -def test_summed_variable(): - source = NeuronGroup(2, "v : volt", threshold="v>1*volt", reset="v=0*volt") - source.v = 1.1 * volt # will spike immediately - target = NeuronGroup(2, "v : volt") - S = Synapses( - source, - target, - """ - w : volt - x : volt - v_post = 2*x : volt (summed) - """, - on_pre="x+=w", - multisynaptic_index="k", - ) - S.connect("i==j", n=2) - S.w["k == 0"] = "i*volt" - S.w["k == 1"] = "(i + 0.5)*volt" - net = Network(source, target, S) - net.run(1 * ms) - - # v of the target should be the sum of the two weights - assert_allclose(target.v, np.array([1.0, 5.0]) * volt) - - -@pytest.mark.standalone_compatible -def test_summed_variable_pre_and_post(): - G1 = NeuronGroup( - 4, - """ - neuron_var : 1 - syn_sum : 1 - neuron_sum : 1 - """, - ) - G1.neuron_var = "i" - G2 = NeuronGroup( - 4, - """ - neuron_var : 1 - syn_sum : 1 - neuron_sum : 1 - """, - ) - G2.neuron_var = "i+4" - - synapses = Synapses( - G1, - G2, - """ - syn_var : 1 - neuron_sum_pre = neuron_var_post : 1 (summed) - syn_sum_pre = syn_var : 1 (summed) - neuron_sum_post = neuron_var_pre : 1 (summed) - syn_sum_post = syn_var : 1 (summed) - """, - ) - # The first three cells in G1 connect to the first cell in G2 - # The remaining three cells of G2 all connect to the last cell of G1 - synapses.connect(i=[0, 1, 2, 3, 3, 3], j=[0, 0, 0, 1, 2, 3]) - synapses.syn_var = [0, 1, 2, 3, 4, 5] - - run(defaultclock.dt) - assert_allclose(G1.syn_sum[:], [0, 1, 2, 12]) - assert_allclose(G1.neuron_sum[:], [4, 4, 4, 18]) - assert_allclose(G2.syn_sum[:], [3, 3, 4, 5]) - assert_allclose(G2.neuron_sum[:], [3, 3, 3, 3]) - - -@pytest.mark.standalone_compatible -def test_summed_variable_differing_group_size(): - G1 = 
NeuronGroup(2, "var : 1", name="G1") - G2 = NeuronGroup(10, "var : 1", name="G2") - G2.var[:5] = 1 - G2.var[5:] = 10 - syn1 = Synapses( - G1, - G2, - """ - syn_var : 1 - var_pre = syn_var + var_post : 1 (summed) - """, - ) - syn1.connect(i=0, j=[0, 1, 2, 3, 4]) - syn1.connect(i=1, j=[5, 6, 7, 8, 9]) - syn1.syn_var = np.arange(10) - # The same in the other direction - G3 = NeuronGroup(10, "var : 1", name="G3") - G4 = NeuronGroup(2, "var : 1", name="G4") - G3.var[:5] = 1 - G3.var[5:] = 10 - syn2 = Synapses( - G3, - G4, - """ - syn_var : 1 - var_post = syn_var + var_pre : 1 (summed) - """, - ) - syn2.connect(i=[0, 1, 2, 3, 4], j=0) - syn2.connect(i=[5, 6, 7, 8, 9], j=1) - syn2.syn_var = np.arange(10) - - run(defaultclock.dt) - - assert_allclose(G1.var[0], 5 * 1 + 0 + 1 + 2 + 3 + 4) - assert_allclose(G1.var[1], 5 * 10 + 5 + 6 + 7 + 8 + 9) - - assert_allclose(G4.var[0], 5 * 1 + 0 + 1 + 2 + 3 + 4) - assert_allclose(G4.var[1], 5 * 10 + 5 + 6 + 7 + 8 + 9) - - -def test_summed_variable_errors(): - G = NeuronGroup( - 10, - """ - dv/dt = -v / (10*ms) : volt - sub = 2*v : volt - p : volt - """, - threshold="False", - reset="", - ) - - # Using the (summed) flag for a differential equation or a parameter - with pytest.raises(ValueError): - Synapses(G, G, """dp_post/dt = -p_post / (10*ms) : volt (summed)""") - with pytest.raises(ValueError): - Synapses(G, G, """p_post : volt (summed)""") - - # Using the (summed) flag for a variable name without _pre or _post suffix - with pytest.raises(ValueError): - Synapses(G, G, """p = 3*volt : volt (summed)""") - # Using the name of a variable that does not exist - with pytest.raises(ValueError): - Synapses(G, G, """q_post = 3*volt : volt (summed)""") - - # Target equation is not a parameter - with pytest.raises(ValueError): - Synapses(G, G, """sub_post = 3*volt : volt (summed)""") - with pytest.raises(ValueError): - Synapses(G, G, """v_post = 3*volt : volt (summed)""") - - # Unit mismatch between synapses and target - with pytest.raises(DimensionMismatchError): - Synapses(G, G, """p_post = 3*second : second (summed)""") - - # Two summed variable equations targetting the same variable - with pytest.raises(ValueError): - Synapses( - G, - G, - """ - p_post = 3*volt : volt (summed) - p_pre = 3*volt : volt (summed) - """, - ) - - # Summed variable referring to an event-driven variable - with pytest.raises(EquationError) as ex: - Synapses( - G, - G, - """ - ds/dt = -s/(3*ms) : volt (event-driven) - p_post = s : volt (summed) - """, - on_pre="s += 1*mV", - ) - assert "'p_post'" in str(ex.value) and "'s'" in str(ex.value) - - # Indirect dependency - with pytest.raises(EquationError) as ex: - Synapses( - G, - G, - """ - ds/dt = -s/(3*ms) : volt (event-driven) - x = s : volt - y = x : volt - p_post = y : 1 (summed) - """, - on_pre="s += 1*mV", - ) - assert "'p_post'" in str(ex.value) and "'s'" in str(ex.value) - assert "'x'" in str(ex.value) and "'y'" in str(ex.value) - - with pytest.raises(BrianObjectException) as ex: - S = Synapses( - G, - G, - """ - y : siemens - p_post = y : volt (summed) - """, - ) - run(0 * ms) - - assert isinstance(ex.value.__cause__, DimensionMismatchError) - - -@pytest.mark.codegen_independent -def test_multiple_summed_variables(): - # See github issue #766 - source = NeuronGroup(1, "") - target = NeuronGroup(10, "v : 1") - syn1 = Synapses(source, target, "v_post = 1 : 1 (summed)") - syn1.connect() - syn2 = Synapses(source, target, "v_post = 1 : 1 (summed)") - syn2.connect() - net = Network(collect()) - with pytest.raises(NotImplementedError): - 
net.run(0 * ms) - - -@pytest.mark.standalone_compatible -def test_summed_variables_subgroups(): - source = NeuronGroup(1, "") - target = NeuronGroup(10, "v : 1") - subgroup1 = target[:6] - subgroup2 = target[6:] - syn1 = Synapses(source, subgroup1, "v_post = 1 : 1 (summed)") - syn1.connect(n=2) - syn2 = Synapses(source, subgroup2, "v_post = 1 : 1 (summed)") - syn2.connect() - run(defaultclock.dt) - assert_allclose(target.v[:6], 2 * np.ones(6)) - assert_allclose(target.v[6:], 1 * np.ones(4)) - - -@pytest.mark.codegen_independent -def test_summed_variables_overlapping_subgroups(): - # See github issue #766 - source = NeuronGroup(1, "") - target = NeuronGroup(10, "v : 1") - # overlapping subgroups - subgroup1 = target[:7] - subgroup2 = target[6:] - syn1 = Synapses(source, subgroup1, "v_post = 1 : 1 (summed)") - syn1.connect(n=2) - syn2 = Synapses(source, subgroup2, "v_post = 1 : 1 (summed)") - syn2.connect() - net = Network(collect()) - with pytest.raises(NotImplementedError): - net.run(0 * ms) - - -@pytest.mark.codegen_independent -def test_summed_variables_linked_variables(): - source = NeuronGroup(1, "") - target1 = NeuronGroup(10, "v : 1") - target2 = NeuronGroup(10, "v : 1 (linked)") - target2.v = linked_var(target1.v) - # Seemingly independent targets, but the variable is the same - syn1 = Synapses(source, target1, "v_post = 1 : 1 (summed)") - syn1.connect() - syn2 = Synapses(source, target2, "v_post = 1 : 1 (summed)") - syn2.connect() - net = Network(collect()) - with pytest.raises(NotImplementedError): - net.run(0 * ms) - - -def test_scalar_parameter_access(): - G = NeuronGroup( - 10, - """ - v : 1 - scalar : Hz (shared) - """, - threshold="False", - ) - S = Synapses( - G, - G, - """ - w : 1 - s : Hz (shared) - number : 1 (shared) - """, - on_pre="v+=w*number", - ) - S.connect() - - # Try setting a scalar variable - S.s = 100 * Hz - assert_allclose(S.s[:], 100 * Hz) - S.s[:] = 200 * Hz - assert_allclose(S.s[:], 200 * Hz) - S.s = "s - 50*Hz + number*Hz" - assert_allclose(S.s[:], 150 * Hz) - S.s[:] = "50*Hz" - assert_allclose(S.s[:], 50 * Hz) - - # Set a postsynaptic scalar variable - S.scalar_post = 100 * Hz - assert_allclose(G.scalar[:], 100 * Hz) - S.scalar_post[:] = 100 * Hz - assert_allclose(G.scalar[:], 100 * Hz) - - # Check the second method of accessing that works - assert_allclose(np.asanyarray(S.s), 50 * Hz) - - # Check error messages - with pytest.raises(IndexError): - S.s[0] - with pytest.raises(IndexError): - S.s[1] - with pytest.raises(IndexError): - S.s[0:1] - with pytest.raises(IndexError): - S.s["i>5"] - - with pytest.raises(ValueError): - S.s.set_item(slice(None), [0, 1] * Hz) - with pytest.raises(IndexError): - S.s.set_item(0, 100 * Hz) - with pytest.raises(IndexError): - S.s.set_item(1, 100 * Hz) - with pytest.raises(IndexError): - S.s.set_item("i>5", 100 * Hz) - - -def test_scalar_subexpression(): - G = NeuronGroup( - 10, - """ - v : 1 - number : 1 (shared) - """, - threshold="False", - ) - S = Synapses( - G, - G, - """ - s : 1 (shared) - sub = number_post + s : 1 (shared) - """, - on_pre="v+=s", - ) - S.connect() - S.s = 100 - G.number = 50 - assert S.sub[:] == 150 - - with pytest.raises(SyntaxError): - Synapses( - G, - G, - """ - s : 1 (shared) - sub = v_post + s : 1 (shared) - """, - on_pre="v+=s", - ) - - -@pytest.mark.standalone_compatible -def test_sim_with_scalar_variable(): - inp = SpikeGeneratorGroup(2, [0, 1], [0, 0] * ms) - out = NeuronGroup(2, "v : 1") - syn = Synapses( - inp, - out, - """ - w : 1 - s : 1 (shared) - """, - on_pre="v += s + w", - ) - 
syn.connect(j="i") - syn.w = [1, 2] - syn.s = 5 - run(2 * defaultclock.dt) - assert_allclose(out.v[:], [6, 7]) - - -@pytest.mark.standalone_compatible -def test_sim_with_scalar_subexpression(): - inp = SpikeGeneratorGroup(2, [0, 1], [0, 0] * ms) - out = NeuronGroup(2, "v : 1") - syn = Synapses( - inp, - out, - """ - w : 1 - s = 5 : 1 (shared) - """, - on_pre="v += s + w", - ) - syn.connect(j="i") - syn.w = [1, 2] - run(2 * defaultclock.dt) - assert_allclose(out.v[:], [6, 7]) - - -@pytest.mark.standalone_compatible -def test_sim_with_constant_subexpression(): - inp = SpikeGeneratorGroup(2, [0, 1], [0, 0] * ms) - out = NeuronGroup(2, "v : 1") - syn = Synapses( - inp, - out, - """ - w : 1 - s = 5 : 1 (constant over dt) - """, - on_pre="v += s + w", - ) - syn.connect(j="i") - syn.w = [1, 2] - run(2 * defaultclock.dt) - assert_allclose(out.v[:], [6, 7]) - - -@pytest.mark.standalone_compatible -def test_external_variables(): - # Make sure that external variables are correctly resolved - source = SpikeGeneratorGroup(1, [0], [0] * ms) - target = NeuronGroup(1, "v:1") - w_var = 1 - amplitude = 2 - syn = Synapses(source, target, "w=w_var : 1", on_pre="v+=amplitude*w") - syn.connect() - run(defaultclock.dt) - assert target.v[0] == 2 - - -@pytest.mark.standalone_compatible -def test_event_driven(): - # Fake example, where the synapse is actually not changing the state of the - # postsynaptic neuron, the pre- and post spiketrains are regular spike - # trains with different rates - pre = NeuronGroup( - 2, - """ - dv/dt = rate : 1 - rate : Hz - """, - threshold="v>1", - reset="v=0", - ) - pre.rate = [1000, 1500] * Hz - post = NeuronGroup( - 2, - """ - dv/dt = rate : 1 - rate : Hz - """, - threshold="v>1", - reset="v=0", - ) - post.rate = [1100, 1400] * Hz - # event-driven formulation - taupre = 20 * ms - taupost = taupre - gmax = 0.01 - dApre = 0.01 - dApost = -dApre * taupre / taupost * 1.05 - dApost *= gmax - dApre *= gmax - # event-driven - S1 = Synapses( - pre, - post, - """ - w : 1 - dApre/dt = -Apre/taupre : 1 (event-driven) - dApost/dt = -Apost/taupost : 1 (event-driven) - """, - on_pre=""" - Apre += dApre - w = clip(w+Apost, 0, gmax) - """, - on_post=""" - Apost += dApost - w = clip(w+Apre, 0, gmax) - """, - ) - S1.connect(j="i") - # not event-driven - S2 = Synapses( - pre, - post, - """ - w : 1 - Apre : 1 - Apost : 1 - lastupdate : second - """, - on_pre=""" - Apre=Apre*exp((lastupdate-t)/taupre)+dApre - Apost=Apost*exp((lastupdate-t)/taupost) - w = clip(w+Apost, 0, gmax) - lastupdate = t - """, - on_post=""" - Apre=Apre*exp((lastupdate-t)/taupre) - Apost=Apost*exp((lastupdate-t)/taupost) +dApost - w = clip(w+Apre, 0, gmax) - lastupdate = t - """, - ) - S2.connect(j="i") - S1.w = 0.5 * gmax - S2.w = 0.5 * gmax - run(25 * ms) - # The two formulations should yield identical results - assert_allclose(S1.w[:], S2.w[:]) - - -@pytest.mark.codegen_independent -def test_event_driven_dependency_checks(): - dummy = NeuronGroup(1, "", threshold="False", reset="") - - # Dependency on parameter - syn = Synapses( - dummy, - dummy, - """ - da/dt = (a-b) / (5*ms): 1 (event-driven) - b : 1""", - on_pre="b+=1", - ) - syn.connect() - - # Dependency on parameter via subexpression - syn2 = Synapses( - dummy, - dummy, - """ - da/dt = (a-b) / (5*ms): 1 (event-driven) - b = c : 1 - c : 1""", - on_pre="c+=1", - ) - syn2.connect() - run(0 * ms) - - -@pytest.mark.codegen_independent -def test_event_driven_dependency_error(): - stim = SpikeGeneratorGroup(1, [0], [0] * ms, period=5 * ms) - syn = Synapses( - stim, - stim, - 
""" - da/dt = -a / (5*ms) : 1 (event-driven) - db/dt = -b / (5*ms) : 1 (event-driven) - dc/dt = a*b / (5*ms) : 1 (event-driven)""", - on_pre="a+=1", - ) - syn.connect() - net = Network(collect()) - with pytest.raises(BrianObjectException) as exc: - net.run(0 * ms) - assert exc_isinstance(exc, UnsupportedEquationsException) - - -@pytest.mark.codegen_independent -def test_event_driven_dependency_error2(): - stim = SpikeGeneratorGroup(1, [0], [0] * ms, period=5 * ms) - tau = 5 * ms - with pytest.raises(EquationError) as exc: - syn = Synapses( - stim, - stim, - """ - da/dt = -a / (5*ms) : 1 (clock-driven) - db/dt = -b / (5*ms) : 1 (clock-driven) - dc/dt = a*b / (5*ms) : 1 (event-driven) - """, - on_pre="a+=1", - ) - assert "'c'" in str(exc.value) and ( - "'a'" in str(exc.value) or "'b'" in str(exc.value) - ) - - # Indirect dependency - with pytest.raises(EquationError) as exc: - syn = Synapses( - stim, - stim, - """ - da/dt = -a / (5*ms) : 1 (clock-driven) - b = a : 1 - dc/dt = b / (5*ms) : 1 (event-driven) - """, - on_pre="a+=1", - ) - assert ( - "'c'" in str(exc.value) and "'a'" in str(exc.value) and "'b'" in str(exc.value) - ) - - -@pytest.mark.codegen_independent -def test_event_driven_dependency_error3(): - P = NeuronGroup(10, "dv/dt = -v/(10*ms) : volt") - with pytest.raises(EquationError) as ex: - Synapses( - P, - P, - """ - ds/dt = -s/(3*ms) : 1 (event-driven) - df/dt = f*s/(5*ms) : 1 (clock-driven) - """, - on_pre="s += 1", - ) - assert "'s'" in str(ex.value) and "'f'" in str(ex.value) - - # Indirect dependency - with pytest.raises(EquationError) as ex: - Synapses( - P, - P, - """ - ds/dt = -s/(3*ms) : 1 (event-driven) - x = s : 1 - y = x : 1 - df/dt = f*y/(5*ms) : 1 (clock-driven) - """, - on_pre="s += 1", - ) - assert "'s'" in str(ex.value) and "'f'" in str(ex.value) - assert "'x'" in str(ex.value) and "'y'" in str(ex.value) - - -@pytest.mark.codegen_independent -def test_repr(): - G = NeuronGroup(1, "v: volt", threshold="False") - S = Synapses( - G, - G, - """ - w : 1 - dApre/dt = -Apre/taupre : 1 (event-driven) - dApost/dt = -Apost/taupost : 1 (event-driven) - """, - on_pre=""" - Apre += dApre - w = clip(w+Apost, 0, gmax) - """, - on_post=""" - Apost += dApost - w = clip(w+Apre, 0, gmax) - """, - ) - # Test that string/LaTeX representations do not raise errors - for func in [str, repr, sympy.latex]: - assert len(func(S.equations)) - - -@pytest.mark.codegen_independent -def test_pre_post_variables(): - G = NeuronGroup(10, "v : 1", threshold="False") - G2 = NeuronGroup( - 10, - """ - v : 1 - w : 1 - """, - threshold="False", - ) - S = Synapses(G, G2, "x : 1") - # Check for the most important variables - for var in [ - "v_pre", - "v", - "v_post", - "w", - "w_post", - "x", - "N_pre", - "N_post", - "N_incoming", - "N_outgoing", - "i", - "j", - "t", - "dt", - ]: - assert var in S.variables - # Check that postsynaptic variables without suffix refer to the correct - # variable - assert S.variables["v"] is S.variables["v_post"] - assert S.variables["w"] is S.variables["w_post"] - - # Check that internal pre-/post-synaptic variables are not accessible - assert "_spikespace_pre" not in S.variables - assert "_spikespace" not in S.variables - assert "_spikespace_post" not in S.variables - - -@pytest.mark.codegen_independent -def test_variables_by_owner(): - # Test the `variables_by_owner` convenience function - G = NeuronGroup(10, "v : 1") - G2 = NeuronGroup( - 10, - """ - v : 1 - w : 1 - """, - ) - S = Synapses(G, G2, "x : 1") - - # Check that the variables returned as owned by the pre/post 
groups are the - # variables stored in the respective groups. We only compare the `Variable` - # objects, as the names may be different (e.g. ``v_post`` vs. ``v``) - G_variables = { - key: value for key, value in G.variables.items() if value.owner.name == G.name - } # exclude dt - G2_variables = { - key: value for key, value in G2.variables.items() if value.owner.name == G2.name - } - assert set(G_variables.values()) == set(variables_by_owner(S.variables, G).values()) - assert set(G2_variables.values()) == set( - variables_by_owner(S.variables, G2).values() - ) - assert len(set(variables_by_owner(S.variables, S)) & set(G_variables.values())) == 0 - assert ( - len(set(variables_by_owner(S.variables, S)) & set(G2_variables.values())) == 0 - ) - # Just test a few examples for synaptic variables - assert all( - varname in variables_by_owner(S.variables, S) - for varname in ["x", "N", "N_incoming", "N_outgoing"] - ) - - -@pytest.mark.codegen_independent -def check_permutation_code(code): - from collections import defaultdict - - vars = get_identifiers(code) - indices = defaultdict(lambda: "_idx") - for var in vars: - if var.endswith("_syn"): - indices[var] = "_idx" - elif var.endswith("_pre"): - indices[var] = "_presynaptic_idx" - elif var.endswith("_post"): - indices[var] = "_postsynaptic_idx" - elif var.endswith("_const"): - indices[var] = "0" - variables = dict() - variables.update(DEFAULT_FUNCTIONS) - for var in indices: - if var.endswith("_const"): - variables[var] = Constant(var, 42, owner=device) - else: - variables[var] = ArrayVariable(var, None, 10, device) - variables["_presynaptic_idx"] = ArrayVariable(var, None, 10, device) - variables["_postsynaptic_idx"] = ArrayVariable(var, None, 10, device) - scalar_statements, vector_statements = make_statements(code, variables, float64) - check_for_order_independence(vector_statements, variables, indices) - - -def numerically_check_permutation_code(code): - # numerically checks that a code block used in the test below is permutation-independent by creating a - # presynaptic and postsynaptic group of 3 neurons each, and a full connectivity matrix between them, then - # repeatedly filling in random values for each of the variables, and checking for several random shuffles of - # the synapse order that the result doesn't depend on it. This is a sort of test of the test itself, to make - # sure we didn't accidentally assign a good/bad example to the wrong class. 
- code = deindent(code) - from collections import defaultdict - - vars = get_identifiers(code) - indices = defaultdict(lambda: "_idx") - vals = {} - for var in vars: - if var.endswith("_syn"): - indices[var] = "_idx" - vals[var] = zeros(9) - elif var.endswith("_pre"): - indices[var] = "_presynaptic_idx" - vals[var] = zeros(3) - elif var.endswith("_post"): - indices[var] = "_postsynaptic_idx" - vals[var] = zeros(3) - elif var.endswith("_shared"): - indices[var] = "0" - vals[var] = zeros(1) - elif var.endswith("_const"): - indices[var] = "0" - vals[var] = 42 - subs = { - var: var + "[" + idx + "]" - for var, idx in indices.items() - if not var.endswith("_const") - } - code = word_substitute(code, subs) - code = f""" -from numpy import * -from numpy.random import rand, randn -for _idx in shuffled_indices: - _presynaptic_idx = presyn[_idx] - _postsynaptic_idx = postsyn[_idx] -{indent(code)} - """ - ns = vals.copy() - ns["shuffled_indices"] = arange(9) - ns["presyn"] = arange(9) % 3 - ns["postsyn"] = arange(9) / 3 - for _ in range(10): - origvals = {} - for k, v in vals.items(): - if not k.endswith("_const"): - v[:] = randn(len(v)) - origvals[k] = v.copy() - exec(code, ns) - endvals = {} - for k, v in vals.items(): - endvals[k] = copy(v) - for _ in range(10): - for k, v in vals.items(): - if not k.endswith("_const"): - v[:] = origvals[k] - shuffle(ns["shuffled_indices"]) - exec(code, ns) - for k, v in vals.items(): - try: - assert_allclose(v, endvals[k]) - except AssertionError: - raise OrderDependenceError() - - -SANITY_CHECK_PERMUTATION_ANALYSIS_EXAMPLE = False - -permutation_analysis_good_examples = [ - "v_post += w_syn", - "v_post *= w_syn", - "v_post = v_post + w_syn", - "v_post = v_post * w_syn", - "v_post = w_syn * v_post", - "v_post += 1", - "v_post = 1", - "v_post = c_const", - "v_post = x_shared", - "v_post += v_post # NOT_UFUNC_AT_VECTORISABLE", - "v_post += c_const", - "v_post += x_shared", - #'v_post += w_syn*v_post', # this is a hard one (it is good for w*v but bad for w+v) - "v_post += sin(-v_post) # NOT_UFUNC_AT_VECTORISABLE", - "v_post += u_post", - "v_post += w_syn*v_pre", - "v_post += sin(-v_post) # NOT_UFUNC_AT_VECTORISABLE", - "v_post -= sin(v_post) # NOT_UFUNC_AT_VECTORISABLE", - "v_post += v_pre", - "v_pre += v_post", - "v_pre += c_const", - "v_pre += x_shared", - "w_syn = v_pre", - "w_syn = a_syn", - "w_syn += a_syn", - "w_syn *= a_syn", - "w_syn -= a_syn", - "w_syn /= a_syn", - "w_syn += 1", - "w_syn += c_const", - "w_syn += x_shared", - "w_syn *= 2", - "w_syn *= c_const", - "w_syn *= x_shared", - """ - w_syn = a_syn - a_syn += 1 - """, - """ - w_syn = a_syn - a_syn += c_const - """, - """ - w_syn = a_syn - a_syn += x_shared - """, - "v_post *= 2", - "v_post *= w_syn", - """ - v_pre = 0 - w_syn = v_pre - """, - """ - v_pre = c_const - w_syn = v_pre - """, - """ - v_pre = x_shared - w_syn = v_pre - """, - """ - ge_syn += w_syn - Apre_syn += 3 - w_syn = clip(w_syn + Apost_syn, 0, 10) - """, - """ - ge_syn += w_syn - Apre_syn += c_const - w_syn = clip(w_syn + Apost_syn, 0, 10) - """, - """ - ge_syn += w_syn - Apre_syn += x_shared - w_syn = clip(w_syn + Apost_syn, 0, 10) - """, - """ - a_syn = v_pre - v_post += a_syn - """, - """ - v_post += v_post # NOT_UFUNC_AT_VECTORISABLE - v_post += v_post - """, - """ - v_post += 1 - x = v_post - """, -] - -permutation_analysis_bad_examples = [ - "v_pre = w_syn", - "v_post = v_pre", - "v_post = w_syn", - "v_post += w_syn+v_post", - "v_post += rand()", # rand() has state, and therefore this is order dependent - """ - a_syn = v_post - 
v_post += w_syn - """, - """ - x = w_syn - v_pre = x - """, - """ - x = v_pre - v_post = x - """, - """ - v_post += v_pre - v_pre += v_post - """, - """ - b_syn = v_post - v_post += a_syn - """, - """ - v_post += w_syn - u_post += v_post - """, - """ - v_post += 1 - w_syn = v_post - """, -] - - -@pytest.mark.codegen_independent -def test_permutation_analysis(): - # Examples that should work - for example in permutation_analysis_good_examples: - if SANITY_CHECK_PERMUTATION_ANALYSIS_EXAMPLE: - try: - numerically_check_permutation_code(example) - except OrderDependenceError: - raise AssertionError( - "Test unexpectedly raised a numerical " - "OrderDependenceError on these " - "statements:\n" + example - ) - try: - check_permutation_code(example) - except OrderDependenceError: - raise AssertionError( - "Test unexpectedly raised an " - "OrderDependenceError on these " - "statements:\n" + example - ) - - for example in permutation_analysis_bad_examples: - if SANITY_CHECK_PERMUTATION_ANALYSIS_EXAMPLE: - try: - with pytest.raises(OrderDependenceError): - numerically_check_permutation_code(example) - except AssertionError: - raise AssertionError( - "Order dependence not raised numerically for example: " + example - ) - try: - with pytest.raises(OrderDependenceError): - check_permutation_code(example) - except AssertionError: - raise AssertionError("Order dependence not raised for example: " + example) - - -@pytest.mark.standalone_compatible -def test_vectorisation(): - source = NeuronGroup(10, "v : 1", threshold="v>1") - target = NeuronGroup( - 10, - """ - x : 1 - y : 1 - """, - ) - syn = Synapses( - source, - target, - "w_syn : 1", - on_pre=""" - v_pre += w_syn - x_post = y_post - """, - ) - syn.connect() - syn.w_syn = 1 - source.v["i<5"] = 2 - target.y = "i" - run(defaultclock.dt) - assert_allclose(source.v[:5], 12) - assert_allclose(source.v[5:], 0) - assert_allclose(target.x[:], target.y[:]) - - -@pytest.mark.standalone_compatible -def test_vectorisation_STDP_like(): - # Test the use of pre- and post-synaptic traces that are stored in the - # pre/post group instead of in the synapses - w_max = 10 - neurons = NeuronGroup( - 6, - """ - dv/dt = rate : 1 - ge : 1 - rate : Hz - dA/dt = -A/(1*ms) : 1 - """, - threshold="v>1", - reset="v=0", - ) - # Note that the synapse does not actually increase the target v, we want - # to have simple control about when neurons spike. Also, we separate the - # "depression" and "facilitation" completely. The example also uses - # subgroups, which should complicate things further. 
- # This test should try to capture the spirit of indexing in such a use case, - # it simply compares the results to fixed pre-calculated values - syn = Synapses( - neurons[:3], - neurons[3:], - """ - w_dep : 1 - w_fac : 1 - """, - on_pre=""" - ge_post += w_dep - w_fac - A_pre += 1 - w_dep = clip(w_dep + A_post, 0, w_max) - """, - on_post=""" - A_post += 1 - w_fac = clip(w_fac + A_pre, 0, w_max) - """, - ) - syn.connect() - neurons.rate = 1000 * Hz - neurons.v = "abs(3-i)*0.1 + 0.7" - run(2 * ms) - # Make sure that this test is invariant to synapse order - indices = np.argsort( - np.array(list(zip(syn.i[:], syn.j[:])), dtype=[("i", "=j") - summed_conn.x = "i" - run(defaultclock.dt) - assert_array_equal(conn.w[:], [10, 10, 9, 7, 4]) - - -@pytest.mark.codegen_independent -def test_synapse_generator_syntax(): - parsed = parse_synapse_generator("k for k in sample(1, N, p=p) if abs(i-k)<10") - assert parsed["element"] == "k" - assert parsed["inner_variable"] == "k" - assert parsed["iterator_func"] == "sample" - assert parsed["iterator_kwds"]["low"] == "1" - assert parsed["iterator_kwds"]["high"] == "N" - assert parsed["iterator_kwds"]["step"] == "1" - assert parsed["iterator_kwds"]["p"] == "p" - assert parsed["iterator_kwds"]["size"] is None - assert parsed["iterator_kwds"]["sample_size"] == "random" - assert parsed["if_expression"] == "abs(i - k) < 10" - parsed = parse_synapse_generator("k for k in sample(N, size=5) if abs(i-k)<10") - assert parsed["element"] == "k" - assert parsed["inner_variable"] == "k" - assert parsed["iterator_func"] == "sample" - assert parsed["iterator_kwds"]["low"] == "0" - assert parsed["iterator_kwds"]["high"] == "N" - assert parsed["iterator_kwds"]["step"] == "1" - assert parsed["iterator_kwds"]["p"] is None - assert parsed["iterator_kwds"]["size"] == "5" - assert parsed["iterator_kwds"]["sample_size"] == "fixed" - assert parsed["if_expression"] == "abs(i - k) < 10" - parsed = parse_synapse_generator("k+1 for k in range(i-100, i+100, 2)") - assert parsed["element"] == "k + 1" - assert parsed["inner_variable"] == "k" - assert parsed["iterator_func"] == "range" - assert parsed["iterator_kwds"]["low"] == "i - 100" - assert parsed["iterator_kwds"]["high"] == "i + 100" - assert parsed["iterator_kwds"]["step"] == "2" - assert parsed["if_expression"] == "True" - with pytest.raises(SyntaxError): - parse_synapse_generator("mad rubbish") - with pytest.raises(SyntaxError): - parse_synapse_generator("k+1") - with pytest.raises(SyntaxError): - parse_synapse_generator("k for k in range()") - with pytest.raises(SyntaxError): - parse_synapse_generator("k for k in range(1,2,3,4)") - with pytest.raises(SyntaxError): - parse_synapse_generator("k for k in range(1,2,3) if ") - with pytest.raises(SyntaxError): - parse_synapse_generator("k[1:3] for k in range(1,2,3)") - with pytest.raises(SyntaxError): - parse_synapse_generator("k for k in x") - with pytest.raises(SyntaxError): - parse_synapse_generator("k for k in x[1:5]") - with pytest.raises(SyntaxError): - parse_synapse_generator("k for k in sample()") - with pytest.raises(SyntaxError): - parse_synapse_generator("k for k in sample(N, p=0.1, size=5)") - with pytest.raises(SyntaxError): - parse_synapse_generator("k for k in sample(N, q=0.1)") - - -def test_synapse_generator_out_of_range(): - G = NeuronGroup(16, "v : 1") - G2 = NeuronGroup(4, "v : 1") - G2.v = "16 + i" - - S1 = Synapses(G, G2, "") - with pytest.raises(BrianObjectException) as exc: - S1.connect(j="k for k in range(0, N_post*2)") - exc.errisinstance(IndexError) - - # This 
should be fine - S2 = Synapses(G, G, "") - S2.connect(j="i+k for k in range(0, 5) if i <= N_post-5") - expected = np.zeros((len(G), len(G))) - expected[np.triu_indices(len(G))] = 1 - expected[np.triu_indices(len(G), 5)] = 0 - expected[len(G) - 4 :, :] = 0 - _compare(S2, expected) - - # This should be fine (see #1037) - S2 = Synapses(G, G, "") - S2.connect(j="i+k for k in range(0, 5) if i <= N_post-5 and rand() <= 1") - _compare(S2, expected) - - # This could in principle be fine, but we cannot test the condition without - # accessing the post-synaptic variable outside of its range. By analyzing - # the post-synaptic condition, we could find out that the value of this - # variable is actually irrelevant, but that makes things too complicated. - S3 = Synapses(G, G, "") - with pytest.raises(BrianObjectException) as exc: - S3.connect(j="i+k for k in range(0, 5) if i <= N_post-5 and v_post >= 0") - assert exc_isinstance(exc, IndexError) - assert "outside allowed range" in str(exc.value.__cause__) - - -@pytest.mark.standalone_compatible -def test_synapse_generator_deterministic(): - # Same as "test_connection_string_deterministic" but using the generator - # syntax - G = NeuronGroup(16, "v : 1") - G.v = "i" - G2 = NeuronGroup(4, "v : 1") - G2.v = "16 + i" - - # Full connection - expected_full = np.ones((len(G), len(G2))) - - S1 = Synapses(G, G2) - S1.connect(j="k for k in range(N_post)") - - # Full connection without self-connections - expected_no_self = np.ones((len(G), len(G))) - np.eye(len(G)) - - S2 = Synapses(G, G) - S2.connect(j="k for k in range(N_post) if k != i") - - S3 = Synapses(G, G) - # slightly confusing with j on the RHS, but it should work... - S3.connect(j="k for k in range(N_post) if j != i") - - S4 = Synapses(G, G) - S4.connect(j="k for k in range(N_post) if v_post != v_pre") - - # One-to-one connectivity - expected_one_to_one = np.eye(len(G)) - - S5 = Synapses(G, G) - S5.connect(j="k for k in range(N_post) if k == i") # inefficient - - S6 = Synapses(G, G) - # slightly confusing with j on the RHS, but it should work... - S6.connect(j="k for k in range(N_post) if j == i") # inefficient - - S7 = Synapses(G, G) - S7.connect(j="k for k in range(N_post) if v_pre == v_post") # inefficient - - S8 = Synapses(G, G) - S8.connect(j="i for _ in range(1)") # efficient - - S9 = Synapses(G, G) - S9.connect(j="i") # short form of the above - - with catch_logs() as _: # Ignore warnings about empty synapses - run(0 * ms) # for standalone - - _compare(S1, expected_full) - _compare(S2, expected_no_self) - _compare(S3, expected_no_self) - _compare(S4, expected_no_self) - _compare(S5, expected_one_to_one) - _compare(S6, expected_one_to_one) - _compare(S7, expected_one_to_one) - _compare(S8, expected_one_to_one) - _compare(S9, expected_one_to_one) - - -@pytest.mark.standalone_compatible -def test_synapse_generator_deterministic_over_postsynaptic(): - # Same as "test_connection_string_deterministic" but using the generator - # syntax and iterating over post-synaptic variables - G = NeuronGroup(16, "v : 1") - G.v = "i" - G2 = NeuronGroup(4, "v : 1") - G2.v = "16 + i" - - # Full connection - expected_full = np.ones((len(G), len(G2))) - - S1 = Synapses(G, G2) - S1.connect(i="k for k in range(N_pre)") - - # Full connection without self-connections - expected_no_self = np.ones((len(G), len(G))) - np.eye(len(G)) - - S2 = Synapses(G, G) - S2.connect(i="k for k in range(N_pre) if k != j") - - S3 = Synapses(G, G) - # slightly confusing with i on the RHS, but it should work... 
- S3.connect(i="k for k in range(N_pre) if i != j") - - S4 = Synapses(G, G) - S4.connect(j="k for k in range(N_pre) if v_pre != v_post") - - # One-to-one connectivity - expected_one_to_one = np.eye(len(G)) - - S5 = Synapses(G, G) - S5.connect(i="k for k in range(N_pre) if k == j") # inefficient - - S6 = Synapses(G, G) - # slightly confusing with j on the RHS, but it should work... - S6.connect(i="k for k in range(N_pre) if i == j") # inefficient - - S7 = Synapses(G, G) - S7.connect(i="k for k in range(N_pre) if v_pre == v_post") # inefficient - - S8 = Synapses(G, G) - S8.connect(i="j for _ in range(1)") # efficient - - S9 = Synapses(G, G) - S9.connect(i="j") # short form of the above - - with catch_logs() as _: # Ignore warnings about empty synapses - run(0 * ms) # for standalone - - _compare(S1, expected_full) - _compare(S2, expected_no_self) - _compare(S3, expected_no_self) - _compare(S4, expected_no_self) - _compare(S5, expected_one_to_one) - _compare(S6, expected_one_to_one) - _compare(S7, expected_one_to_one) - _compare(S8, expected_one_to_one) - _compare(S9, expected_one_to_one) - - -@pytest.mark.standalone_compatible -@pytest.mark.long -def test_synapse_generator_deterministic_2(): - # Same as "test_connection_string_deterministic" but using the generator - # syntax - G = NeuronGroup(16, "") - G2 = NeuronGroup(4, "") - # A few more tests of deterministic connections where the generator syntax - # is particularly useful - - # Ring structure - S10 = Synapses(G, G) - S10.connect(j="(i + (-1)**k) % N_post for k in range(2)") - expected_ring = np.zeros((len(G), len(G)), dtype=np.int32) - expected_ring[np.arange(15), np.arange(15) + 1] = 1 # Next cell - expected_ring[np.arange(1, 16), np.arange(15)] = 1 # Previous cell - expected_ring[[0, 15], [15, 0]] = 1 # wrap around the ring - - # Diverging connection pattern - S11 = Synapses(G2, G) - S11.connect(j="i*4 + k for k in range(4)") - expected_diverging = np.zeros((len(G2), len(G)), dtype=np.int32) - for source in range(4): - expected_diverging[source, np.arange(4) + source * 4] = 1 - - # Diverging connection pattern within population (no self-connections) - S11b = Synapses(G2, G2) - S11b.connect(j="k for k in range(i-3, i+4) if i!=k", skip_if_invalid=True) - expected_diverging_b = np.zeros((len(G2), len(G2)), dtype=np.int32) - for source in range(len(G2)): - expected_diverging_b[ - source, np.clip(np.arange(-3, 4) + source, 0, len(G2) - 1) - ] = 1 - expected_diverging_b[source, source] = 0 - - # Converging connection pattern - S12 = Synapses(G, G2) - S12.connect(j="int(i/4)") - expected_converging = np.zeros((len(G), len(G2)), dtype=np.int32) - for target in range(4): - expected_converging[np.arange(4) + target * 4, target] = 1 - - # skip if invalid - S13 = Synapses(G2, G2) - S13.connect(j="i+(-1)**k for k in range(2)", skip_if_invalid=True) - expected_offdiagonal = np.zeros((len(G2), len(G2)), dtype=np.int32) - expected_offdiagonal[np.arange(len(G2) - 1), np.arange(len(G2) - 1) + 1] = 1 - expected_offdiagonal[np.arange(len(G2) - 1) + 1, np.arange(len(G2) - 1)] = 1 - - # Converging connection pattern with restriction - S14 = Synapses(G, G2) - S14.connect(j="int(i/4) if i % 2 == 0") - expected_converging_restricted = np.zeros((len(G), len(G2)), dtype=np.int32) - for target in range(4): - expected_converging_restricted[np.arange(4, step=2) + target * 4, target] = 1 - - # Connecting to post indices >= source index - expected_diagonal = np.zeros((len(G), len(G)), dtype=np.int32) - expected_diagonal[np.triu_indices(len(G))] = 1 - S15 = 
Synapses(G, G) - S15.connect(j="i + k for k in range(0, N_post-i)") - - S15b = Synapses(G, G) - S15b.connect(j="i + k for k in range(0, N_post)", skip_if_invalid=True) - - S15c = Synapses(G, G) - S15c.connect(j="i + k for k in range(0, N_post) if j < N_post") - - S15d = Synapses(G, G) - S15d.connect(j="i + k for k in range(0, N_post) if i + k < N_post") - - with catch_logs() as _: # Ignore warnings about empty synapses - run(0 * ms) # for standalone - - _compare(S10, expected_ring) - _compare(S11, expected_diverging) - _compare(S11b, expected_diverging_b) - _compare(S12, expected_converging) - _compare(S13, expected_offdiagonal) - _compare(S14, expected_converging_restricted) - _compare(S15, expected_diagonal) - _compare(S15b, expected_diagonal) - _compare(S15c, expected_diagonal) - _compare(S15d, expected_diagonal) - - -@pytest.mark.standalone_compatible -def test_synapse_generator_random(): - # The same tests as test_connection_random_without_condition, but using - # the generator syntax - G = NeuronGroup(4, "x : integer") - G.x = "i" - G2 = NeuronGroup(7, "") - - S1 = Synapses(G, G2) - S1.connect(j="k for k in sample(N_post, p=0)") - - S2 = Synapses(G, G2) - S2.connect(j="k for k in sample(N_post, p=1)") - - # Just make sure using values between 0 and 1 work in principle - S3 = Synapses(G, G2) - S3.connect(j="k for k in sample(N_post, p=0.3)") - - # Use pre-/post-synaptic variables for "stochastic" connections that are - # actually deterministic - S4 = Synapses(G, G2) - S4.connect(j="k for k in sample(N_post, p=int(x_pre==2)*1.0)") - - with catch_logs() as _: # Ignore warnings about empty synapses - run(0 * ms) # for standalone - - assert len(S1) == 0 - _compare(S2, np.ones((len(G), len(G2)))) - assert 0 <= len(S3) <= len(G) * len(G2) - assert len(S4) == 7 - assert_equal(S4.i, np.ones(7) * 2) - assert_equal(S4.j, np.arange(7)) - - -@pytest.mark.standalone_compatible -def test_synapse_generator_random_over_postsynaptic(): - # The same tests as test_connection_random_without_condition, but using - # the generator syntax and iterating over post-synaptic neurons - G = NeuronGroup(4, "") - G2 = NeuronGroup(7, "y : 1") - G2.y = "i" - - S1 = Synapses(G, G2) - S1.connect(i="k for k in sample(N_pre, p=0)") - - S2 = Synapses(G, G2) - S2.connect(i="k for k in sample(N_pre, p=1)") - - # Just make sure using values between 0 and 1 work in principle - S3 = Synapses(G, G2) - S3.connect(i="k for k in sample(N_pre, p=0.3)") - - # Use pre-/post-synaptic variables for "stochastic" connections that are - # actually deterministic - S4 = Synapses(G, G2) - S4.connect(i="k for k in sample(N_pre, p=int(y_post==2)*1.0)") - - with catch_logs() as _: # Ignore warnings about empty synapses - run(0 * ms) # for standalone - - assert len(S1) == 0 - _compare(S2, np.ones((len(G), len(G2)))) - assert 0 <= len(S3) <= len(G) * len(G2) - assert len(S4) == 4 - assert_equal(S4.i, np.arange(4)) - assert_equal(S4.j, np.ones(4) * 2) - - -@pytest.mark.standalone_compatible -def test_synapse_generator_random_positive_steps(): - # Test generator with sampling from stepped ranges (e.g. 
all even numbers) - G = NeuronGroup(4, "x : integer") - G.x = "i" - G2 = NeuronGroup(7, "") - - S1 = Synapses(G, G2) - S1.connect(j="k for k in sample(2, N_post, 2, p=0)") - - S2 = Synapses(G, G2) - S2.connect(j="k for k in sample(2, N_post, 2, p=1)") - - # Just make sure using values between 0 and 1 work in principle (note that - # 0.25 is the cutoff between the general method and the "jump method", so - # we test a value above and below - S3 = Synapses(G, G2) - S3.connect(j="k for k in sample(2, N_post, 2, p=0.2)") - - S3b = Synapses(G, G2) - S3b.connect(j="k for k in sample(2, N_post, 2, p=0.3)") - - # Use pre-/post-synaptic variables for "stochastic" connections that are - # actually deterministic - S4 = Synapses(G, G2) - S4.connect(j="k for k in sample(2, N_post, 2, p=int(x_pre==2)*1.0)") - - with catch_logs() as _: # Ignore warnings about empty synapses - run(0 * ms) # for standalone - - assert len(S1) == 0 - S2_comp = np.zeros((len(G), len(G2))) - S2_comp[:, 2::2] = 1 - _compare(S2, S2_comp) - assert 0 <= len(S3) <= len(G) * 3 - assert all(S3.j[:] % 2 == 0) - assert all(S3.j >= 2) - assert 0 <= len(S3b) <= len(G) * 3 - assert all(S3b.j[:] % 2 == 0) - assert all(S3b.j >= 2) - assert len(S4) == 3 - assert_equal(S4.i, np.ones(3) * 2) - assert_equal(S4.j, np.arange(2, 7, 2)) - - -@pytest.mark.standalone_compatible -def test_synapse_generator_random_negative_steps(): - # Test generator with sampling from stepped ranges (e.g. all even numbers) - # going backwards - G = NeuronGroup(4, "x : integer") - G.x = "i" - G2 = NeuronGroup(7, "") - - S1 = Synapses(G, G2) - S1.connect(j="k for k in sample(N_post-1, 0, -2, p=0)") - - S2 = Synapses(G, G2) - S2.connect(j="k for k in sample(N_post-1, 0, -2, p=1)") - - # Just make sure using values between 0 and 1 work in principle (note that - # 0.25 is the cutoff between the general method and the "jump method", so - # we test a value above and below - S3 = Synapses(G, G2) - S3.connect(j="k for k in sample(N_post-1, 0, -2, p=0.2)") - - S3b = Synapses(G, G2) - S3b.connect(j="k for k in sample(N_post-1, 0, -2, p=0.3)") - - # Use pre-/post-synaptic variables for "stochastic" connections that are - # actually deterministic - S4 = Synapses(G, G2) - S4.connect(j="k for k in sample(N_post-1, 0, -2, p=int(x_pre==2)*1.0)") - - with catch_logs() as _: # Ignore warnings about empty synapses - run(0 * ms) # for standalone - - assert len(S1) == 0 - S2_comp = np.zeros((len(G), len(G2))) - S2_comp[:, 2::2] = 1 - _compare(S2, S2_comp) - assert 0 <= len(S3) <= len(G) * 3 - assert all(S3.j[:] % 2 == 0) - assert all(S3.j >= 2) - assert 0 <= len(S3b) <= len(G) * 3 - assert all(S3b.j[:] % 2 == 0) - assert all(S3b.j >= 2) - assert len(S4) == 3 - assert_array_equal(S4.i, np.ones(3) * 2) - assert_array_equal(S4.j, [6, 4, 2]) - - -@pytest.mark.standalone_compatible -def test_synapse_generator_fixed_random(): - # Random samples with fixed size - G = NeuronGroup(4, "x : integer") - G.x = "i" - G2 = NeuronGroup(7, "") - - S1 = Synapses(G, G2) - S1.connect(j="k for k in sample(N_post, size=0)") - - S2 = Synapses(G, G2) - S2.connect(j="k for k in sample(N_post, size=N_post)") - - S3 = Synapses(G, G2) - S3.connect(j="k for k in sample(N_post, size=3)") - - # Use pre-/post-synaptic variables for "stochastic" connections that are - # actually deterministic - S4 = Synapses(G, G2) - S4.connect(j="k for k in sample(N_post, size=int(x_pre==2)*N_post)") - - with catch_logs() as _: # Ignore warnings about empty synapses - run(0 * ms) # for standalone - - assert len(S1) == 0 - _compare(S2, 
np.ones((len(G), len(G2)))) - # Each neuron should have 3 outgoing connections - assert_array_equal(S3.N_outgoing_pre, np.ones(4) * 3) - # Synapses should be sorted and unique - for source_idx in range(4): - assert len(set(S3.j[source_idx, :])) == 3 - assert all(S3.j[source_idx, :] == sorted(S3.j[source_idx, :])) - assert len(S4) == 7 - assert_equal(S4.i, np.ones(7) * 2) - assert_equal(S4.j, np.arange(7)) - - -@pytest.mark.standalone_compatible -def test_synapse_generator_fixed_random_over_postsynaptic(): - # Random samples with fixed size, iterating over post-synaptic neurons - G = NeuronGroup(4, "") - G2 = NeuronGroup(7, "y : integer") - G2.y = "i" - - S1 = Synapses(G, G2) - S1.connect(i="k for k in sample(N_pre, size=0)") - - S2 = Synapses(G, G2) - S2.connect(i="k for k in sample(N_pre, size=N_pre)") - - S3 = Synapses(G, G2) - S3.connect(i="k for k in sample(N_pre, size=3)") - - # Use pre-/post-synaptic variables for "stochastic" connections that are - # actually deterministic - S4 = Synapses(G, G2) - S4.connect(i="k for k in sample(N_pre, size=int(y_post==2)*N_pre)") - - with catch_logs() as _: # Ignore warnings about empty synapses - run(0 * ms) # for standalone - - assert len(S1) == 0 - _compare(S2, np.ones((len(G), len(G2)))) - # Each neuron should have 3 incoming connections - assert_array_equal(S3.N_incoming_post, np.ones(7) * 3) - # Synapses should be sorted and unique - for target_idx in range(7): - assert len(set(S3.i[:, target_idx])) == 3 - assert all(S3.i[:, target_idx] == sorted(S3.i[:, target_idx])) - assert len(S4) == 4 - assert_equal(S4.j, np.ones(4) * 2) - assert_equal(S4.i, np.arange(4)) - - -@pytest.mark.standalone_compatible -def test_synapse_generator_fixed_random_positive_steps(): - # Test generator with fixed-size sampling from stepped ranges (e.g. all - # even numbers) - G = NeuronGroup(4, "x : integer") - G.x = "i" - G2 = NeuronGroup(7, "") - - S1 = Synapses(G, G2) - S1.connect(j="k for k in sample(2, N_post, 2, size=0)") - - S2 = Synapses(G, G2) - S2.connect(j="k for k in sample(2, N_post, 2, size=3)") - - # Just make sure using values between 0 and 1 work in principle - S3 = Synapses(G, G2) - S3.connect(j="k for k in sample(2, N_post, 2, size=2)") - - # Use pre-/post-synaptic variables for "stochastic" connections that are - # actually deterministic - S4 = Synapses(G, G2) - S4.connect(j="k for k in sample(2, N_post, 2, size=int(x_pre==2)*3)") - - with catch_logs() as _: # Ignore warnings about empty synapses - run(0 * ms) # for standalone - - assert len(S1) == 0 - S2_comp = np.zeros((len(G), len(G2))) - S2_comp[:, 2::2] = 1 - _compare(S2, S2_comp) - assert len(S3) == len(G) * 2 - assert all(S3.N_outgoing_pre == 2) - assert all(S3.j[:] % 2 == 0) - assert all(S3.j >= 2) - assert all([len(S3.j[x, :]) == len(set(S3.j[x, :])) for x in range(len(G))]) - assert len(S4) == 3 - assert_equal(S4.i, np.ones(3) * 2) - assert_equal(S4.j, np.arange(2, 7, 2)) - - -@pytest.mark.standalone_compatible -def test_synapse_generator_fixed_random_negative_steps(): - # Test generator with fixed-size sampling from stepped ranges (e.g. 
all - # even numbers) going backwards - G = NeuronGroup(4, "x : integer") - G.x = "i" - G2 = NeuronGroup(7, "") - - S1 = Synapses(G, G2) - S1.connect(j="k for k in sample(N_post-1, 0, -2, size=0)") - - S2 = Synapses(G, G2) - S2.connect(j="k for k in sample(N_post-1, 0, -2, size=3)") - - # Just make sure using intermediate values between 0 and 1 work in principle - S3 = Synapses(G, G2) - S3.connect(j="k for k in sample(N_post-1, 0, -2, size=2)") - - # Use pre-/post-synaptic variables for "stochastic" connections that are - # actually deterministic - S4 = Synapses(G, G2, "w:1") - S4.connect(j="k for k in sample(N_post-1, 0, -2, size=int(x_pre==2)*3)") - - with catch_logs() as _: # Ignore warnings about empty synapses - run(0 * ms) # for standalone - - assert len(S1) == 0 - S2_comp = np.zeros((len(G), len(G2))) - S2_comp[:, 2::2] = 1 - _compare(S2, S2_comp) - assert len(S3) == len(G) * 2 - assert all(S3.N_outgoing_pre == 2) - assert all(S3.j[:] % 2 == 0) - assert all(S3.j >= 2) - assert all([len(S3.j[x, :]) == len(set(S3.j[x, :])) for x in range(len(G))]) - assert len(S4) == 3 - assert_equal(S4.i, np.ones(3) * 2) - assert_equal(S4.j, np.arange(6, 0, -2)) - - -@pytest.mark.standalone_compatible -def test_synapse_generator_fixed_random_error1(): - G = NeuronGroup(5, "") - G2 = NeuronGroup(7, "") - S = Synapses(G, G2) - with pytest.raises((BrianObjectException, IndexError, RuntimeError)): - # Won't work for i=4 - S.connect(j="k for k in sample(N_post, size=i+4)") - run(0 * ms) # for standalone - - -@pytest.mark.standalone_compatible -def test_synapse_generator_fixed_random_error2(): - G = NeuronGroup(5, "") - G2 = NeuronGroup(7, "") - S = Synapses(G, G2) - with pytest.raises((BrianObjectException, IndexError, RuntimeError)): - # Won't work for i=4 - S.connect(j="k for k in sample(N_post, size=3-i)") - run(0 * ms) # for standalone - - -@pytest.mark.standalone_compatible -def test_synapse_generator_fixed_random_skip_if_invalid(): - G = NeuronGroup(5, "") - G2 = NeuronGroup(7, "") - S1 = Synapses(G, G2) - S2 = Synapses(G, G2) - # > N_post for i=4 - S1.connect(j="k for k in sample(N_post, size=i+4)", skip_if_invalid=True) - # < 0 for i=4 - S2.connect(j="k for k in sample(N_post, size=3-i)", skip_if_invalid=True) - run(0 * ms) # for standalone - assert_array_equal(S1.N_outgoing_pre, [4, 5, 6, 7, 7]) - assert_array_equal(S2.N_outgoing_pre, [3, 2, 1, 0, 0]) - - -@pytest.mark.standalone_compatible -def test_synapse_generator_random_with_condition(): - G = NeuronGroup(4, "") - - S1 = Synapses(G, G) - S1.connect(j="k for k in sample(N_post, p=0) if i != k") - - S2 = Synapses(G, G) - S2.connect(j="k for k in sample(N_post, p=1) if i != k") - expected2 = np.ones((len(G), len(G))) - np.eye(len(G)) - - S3 = Synapses(G, G) - S3.connect(j="k for k in sample(N_post, p=0) if i >= 2") - - S4 = Synapses(G, G) - S4.connect(j="k for k in sample(N_post, p=1.0) if i >= 2") - expected4 = np.zeros((len(G), len(G))) - expected4[2, :] = 1 - expected4[3, :] = 1 - - S5 = Synapses(G, G) - S5.connect(j="k for k in sample(N_post, p=0) if j < 2") # inefficient - - S6 = Synapses(G, G) - S6.connect(j="k for k in sample(2, p=0)") # better - - S7 = Synapses(G, G) - expected7 = np.zeros((len(G), len(G))) - expected7[:, 0] = 1 - expected7[:, 1] = 1 - S7.connect(j="k for k in sample(N_post, p=1.0) if j < 2") # inefficient - - S8 = Synapses(G, G) - S8.connect(j="k for k in sample(2, p=1.0)") # better - - with catch_logs() as _: # Ignore warnings about empty synapses - run(0 * ms) # for standalone - - assert len(S1) == 0 - _compare(S2, 
expected2) - assert len(S3) == 0 - _compare(S4, expected4) - assert len(S5) == 0 - assert len(S6) == 0 - _compare(S7, expected7) - _compare(S8, expected7) - - -@pytest.mark.standalone_compatible -@pytest.mark.long -def test_synapse_generator_random_with_condition_2(): - G = NeuronGroup(4, "") - - # Just checking that everything works in principle (we can't check the - # actual connections) - S9 = Synapses(G, G) - S9.connect(j="k for k in sample(N_post, p=0.001) if i != k") - - S10 = Synapses(G, G) - S10.connect(j="k for k in sample(N_post, p=0.03) if i != k") - - S11 = Synapses(G, G) - S11.connect(j="k for k in sample(N_post, p=0.1) if i != k") - - S12 = Synapses(G, G) - S12.connect(j="k for k in sample(N_post, p=0.9) if i != k") - - S13 = Synapses(G, G) - S13.connect(j="k for k in sample(N_post, p=0.001) if i >= 2") - - S14 = Synapses(G, G) - S14.connect(j="k for k in sample(N_post, p=0.03) if i >= 2") - - S15 = Synapses(G, G) - S15.connect(j="k for k in sample(N_post, p=0.1) if i >= 2") - - S16 = Synapses(G, G) - S16.connect(j="k for k in sample(N_post, p=0.9) if i >= 2") - - S17 = Synapses(G, G) - S17.connect(j="k for k in sample(N_post, p=0.001) if j < 2") - - S18 = Synapses(G, G) - S18.connect(j="k for k in sample(N_post, p=0.03) if j < 2") - - S19 = Synapses(G, G) - S19.connect(j="k for k in sample(N_post, p=0.1) if j < 2") - - S20 = Synapses(G, G) - S20.connect(j="k for k in sample(N_post, p=0.9) if j < 2") - - S21 = Synapses(G, G) - S21.connect(j="k for k in sample(2, p=0.001)") - - S22 = Synapses(G, G) - S22.connect(j="k for k in sample(2, p=0.03)") - - S23 = Synapses(G, G) - S23.connect(j="k for k in sample(2, p=0.1)") - - S24 = Synapses(G, G) - S24.connect(j="k for k in sample(2, p=0.9)") - - # Some more tests specific to the generator syntax - S25 = Synapses(G, G) - S25.connect(j="i+1 for _ in sample(1, p=0.5) if i < N_post-1") - - S26 = Synapses(G, G) - S26.connect(j="i+k for k in sample(N_post-i, p=0.5)") - - with catch_logs() as _: # Ignore warnings about empty synapses - run(0 * ms) # for standalone - - assert not any(S9.i == S9.j) - assert 0 <= len(S9) <= len(G) * (len(G) - 1) - assert not any(S10.i == S10.j) - assert 0 <= len(S10) <= len(G) * (len(G) - 1) - assert not any(S11.i == S11.j) - assert 0 <= len(S11) <= len(G) * (len(G) - 1) - assert not any(S12.i == S12.j) - assert 0 <= len(S12) <= len(G) * (len(G) - 1) - assert all(S13.i[:] >= 2) - assert 0 <= len(S13) <= len(G) * (len(G) - 1) - assert all(S14.i[:] >= 2) - assert 0 <= len(S14) <= len(G) * (len(G) - 1) - assert all(S15.i[:] >= 2) - assert 0 <= len(S15) <= len(G) * (len(G) - 1) - assert all(S16.i[:] >= 2) - assert 0 <= len(S16) <= len(G) * (len(G) - 1) - assert all(S17.j[:] < 2) - assert 0 <= len(S17) <= len(G) * (len(G) - 1) - assert all(S18.j[:] < 2) - assert 0 <= len(S18) <= len(G) * (len(G) - 1) - assert all(S19.j[:] < 2) - assert 0 <= len(S19) <= len(G) * (len(G) - 1) - assert all(S20.j[:] < 2) - assert 0 <= len(S20) <= len(G) * (len(G) - 1) - assert all(S21.j[:] < 2) - assert 0 <= len(S21) <= len(G) * (len(G) - 1) - assert all(S22.j[:] < 2) - assert 0 <= len(S22) <= len(G) * (len(G) - 1) - assert all(S23.j[:] < 2) - assert 0 <= len(S23) <= len(G) * (len(G) - 1) - assert all(S24.j[:] < 2) - assert 0 <= len(S24) <= len(G) * (len(G) - 1) - assert 0 <= len(S25) <= len(G) - assert_equal(S25.j[:], S25.i[:] + 1) - assert 0 <= len(S26) <= (1 + len(G)) * (len(G) / 2) - assert all(S26.j[:] >= S26.i[:]) - - -@pytest.mark.standalone_compatible -def test_synapses_refractory(): - source = NeuronGroup(10, "", 
threshold="True") - target = NeuronGroup( - 10, - "dv/dt = 0/second : 1 (unless refractory)", - threshold="i>=5", - refractory=defaultclock.dt, - ) - S = Synapses(source, target, on_pre="v += 1") - S.connect(j="i") - run(defaultclock.dt + schedule_propagation_offset()) - assert_allclose(target.v[:5], 1) - assert_allclose(target.v[5:], 0) - - -@pytest.mark.standalone_compatible -def test_synapses_refractory_rand(): - source = NeuronGroup(10, "", threshold="True") - target = NeuronGroup( - 10, - "dv/dt = 0/second : 1 (unless refractory)", - threshold="i>=5", - refractory=defaultclock.dt, - ) - S = Synapses(source, target, on_pre="v += rand()") - S.connect(j="i") - with catch_logs() as _: - # Currently, rand() is a stateful function (we do not make use of - # _vectorisation_idx yet to make random numbers completely - # reproducible), which will lead to a warning, since the result depends - # on the order of execution. - run(defaultclock.dt + schedule_propagation_offset()) - assert all(target.v[:5] > 0) - assert_allclose(target.v[5:], 0) - - -@pytest.mark.codegen_independent -def test_synapse_generator_range_noint(): - # arguments to `range` should only be integers (issue #781) - G = NeuronGroup(42, "") - S = Synapses(G, G) - msg = ( - r"The '{}' argument of the range function was .+, but it needs to be an" - r" integer\." - ) - with pytest.raises(TypeError, match=msg.format("high")): - S.connect(j="k for k in range(42.0)") - with pytest.raises(TypeError, match=msg.format("low")): - S.connect(j="k for k in range(0.0, 42)") - with pytest.raises(TypeError, match=msg.format("high")): - S.connect(j="k for k in range(0, 42.0)") - with pytest.raises(TypeError, match=msg.format("step")): - S.connect(j="k for k in range(0, 42, 1.0)") - with pytest.raises(TypeError, match=msg.format("low")): - S.connect(j="k for k in range(True, 42)") - with pytest.raises(TypeError, match=msg.format("high")): - S.connect(j="k for k in range(0, True)") - with pytest.raises(TypeError, match=msg.format("step")): - S.connect(j="k for k in range(0, 42, True)") - - -@pytest.mark.codegen_independent -def test_missing_lastupdate_error_syn_pathway(): - G = NeuronGroup(1, "v : 1", threshold="False") - S = Synapses(G, G, on_pre="v += exp(-lastupdate/dt)") - S.connect() - with pytest.raises(BrianObjectException) as exc: - run(0 * ms) - assert exc_isinstance(exc, KeyError) - assert "lastupdate = t" in str(exc.value.__cause__) - assert "lastupdate : second" in str(exc.value.__cause__) - - -@pytest.mark.codegen_independent -def test_missing_lastupdate_error_run_regularly(): - G = NeuronGroup(1, "v : 1") - S = Synapses(G, G) - S.connect() - S.run_regularly("v += exp(-lastupdate/dt") - with pytest.raises(BrianObjectException) as exc: - run(0 * ms) - assert exc_isinstance(exc, KeyError) - assert "lastupdate = t" in str(exc.value.__cause__) - assert "lastupdate : second" in str(exc.value.__cause__) - - -@pytest.mark.codegen_independent -def test_synaptic_subgroups(): - source = NeuronGroup(5, "") - target = NeuronGroup(3, "") - syn = Synapses(source, target) - syn.connect() - assert len(syn) == 15 - - from_3 = syn[3, :] - assert len(from_3) == 3 - assert all(syn.i[from_3] == 3) - assert_array_equal(syn.j[from_3], np.arange(3)) - - to_2 = syn[:, 2] - assert len(to_2) == 5 - assert all(syn.j[to_2] == 2) - assert_array_equal(syn.i[to_2], np.arange(5)) - - mixed = syn[1:3, :2] - assert len(mixed) == 4 - connections = {(i, j) for i, j in zip(syn.i[mixed], syn.j[mixed])} - assert connections == {(1, 0), (1, 1), (2, 0), (2, 1)} - - 
-@pytest.mark.codegen_independent -def test_incorrect_connect_N_incoming_outgoing(): - # See github issue #1227 - source = NeuronGroup(5, "") - target = NeuronGroup(3, "") - syn = Synapses(source, target) - - with pytest.raises(ValueError) as ex: - syn.connect("N_incoming < 5") - assert "N_incoming" in str(ex) - - with pytest.raises(ValueError) as ex: - syn.connect("N_outgoing < 5") - assert "N_outgoing" in str(ex) - - -@pytest.mark.codegen_independent -def test_setting_from_weight_matrix(): - # fully connected weight matrix - # weights[source_index, target_index] - weights = np.array([[1, 2, 3], [4, 5, 6]]) - - source = NeuronGroup(2, "") - target = NeuronGroup(3, "") - - syn = Synapses(source, target, "w : 1") - syn.connect() - syn.w[:] = weights.flatten() - - for (i, j), w in np.ndenumerate(weights): - assert all(syn.w[i, j] == weights[i, j]) - - -if __name__ == "__main__": - SANITY_CHECK_PERMUTATION_ANALYSIS_EXAMPLE = True - # prefs.codegen.target = 'numpy' - # prefs._backup() - import time - - from _pytest.outcomes import Skipped - - from brian2 import prefs - - start = time.time() - - test_creation() - test_name_clashes() - test_incoming_outgoing() - test_connection_string_deterministic_full() - test_connection_string_deterministic_full_no_self() - test_connection_string_deterministic_full_one_to_one() - test_connection_string_deterministic_full_custom() - test_connection_string_deterministic_multiple_and() - test_connection_random_with_condition() - test_connection_random_with_condition_2() - test_connection_random_without_condition() - test_connection_random_with_indices() - test_connection_multiple_synapses() - test_connection_arrays() - reinit_and_delete() - test_state_variable_assignment() - test_state_variable_indexing() - test_indices() - test_subexpression_references() - test_nested_subexpression_references() - test_constant_variable_subexpression_in_synapses() - test_equations_unit_check() - test_delay_specification() - test_delays_pathways() - test_delays_pathways_subgroups() - test_pre_before_post() - test_pre_post_simple() - test_transmission_simple() - test_transmission_custom_event() - test_invalid_custom_event() - test_transmission() - test_transmission_all_to_one_heterogeneous_delays() - test_transmission_one_to_all_heterogeneous_delays() - test_transmission_scalar_delay() - test_transmission_scalar_delay_different_clocks() - test_transmission_boolean_variable() - test_clocks() - test_changed_dt_spikes_in_queue() - test_no_synapses() - test_no_synapses_variable_write() - test_summed_variable() - test_summed_variable_pre_and_post() - test_summed_variable_differing_group_size() - test_summed_variable_errors() - test_multiple_summed_variables() - test_summed_variables_subgroups() - test_summed_variables_overlapping_subgroups() - test_summed_variables_linked_variables() - test_scalar_parameter_access() - test_scalar_subexpression() - test_sim_with_scalar_variable() - test_sim_with_scalar_subexpression() - test_sim_with_constant_subexpression() - test_external_variables() - test_event_driven() - test_event_driven_dependency_error() - test_event_driven_dependency_error2() - test_event_driven_dependency_error3() - test_repr() - test_pre_post_variables() - test_variables_by_owner() - test_permutation_analysis() - test_vectorisation() - test_vectorisation_STDP_like() - test_synaptic_equations() - test_synapse_with_run_regularly() - test_synapses_to_synapses() - test_synapses_to_synapses_statevar_access() - test_synapses_to_synapses_different_sizes() - 
test_synapses_to_synapses_summed_variable() - try: - test_ufunc_at_vectorisation() - test_fallback_loop_and_stateless_func() - except Skipped: - print("Skipping numpy-only test") - test_synapse_generator_syntax() - test_synapse_generator_out_of_range() - test_synapse_generator_deterministic() - test_synapse_generator_deterministic_2() - test_synapse_generator_random() - test_synapse_generator_random_with_condition() - test_synapse_generator_random_with_condition_2() - test_synapses_refractory() - test_synapses_refractory_rand() - test_synapse_generator_range_noint() - test_missing_lastupdate_error_syn_pathway() - test_missing_lastupdate_error_run_regularly() - test_synaptic_subgroups() - test_incorrect_connect_N_incoming_outgoing() - test_setting_from_weight_matrix() - print("Tests took", time.time() - start) diff --git a/brian2/tests/test_thresholder.py b/brian2/tests/test_thresholder.py deleted file mode 100644 index 4e8a8ee2c..000000000 --- a/brian2/tests/test_thresholder.py +++ /dev/null @@ -1,28 +0,0 @@ -import pytest -from numpy.testing import assert_equal - -from brian2 import * -from brian2.devices.device import reinit_and_delete - - -@pytest.mark.standalone_compatible -def test_simple_threshold(): - G = NeuronGroup(4, "v : 1", threshold="v > 1") - G.v = [1.5, 0, 3, -1] - s_mon = SpikeMonitor(G) - run(defaultclock.dt) - assert_equal(s_mon.count, np.array([1, 0, 1, 0])) - - -@pytest.mark.standalone_compatible -def test_scalar_threshold(): - c = 2 - G = NeuronGroup(4, "", threshold="c > 1") - s_mon = SpikeMonitor(G) - run(defaultclock.dt) - assert_equal(s_mon.count, np.array([1, 1, 1, 1])) - - -if __name__ == "__main__": - test_simple_threshold() - test_scalar_threshold() diff --git a/brian2/tests/test_timedarray.py b/brian2/tests/test_timedarray.py deleted file mode 100644 index 02d5a2731..000000000 --- a/brian2/tests/test_timedarray.py +++ /dev/null @@ -1,138 +0,0 @@ -import pytest - -from brian2 import * -from brian2.devices.device import reinit_and_delete -from brian2.tests.utils import assert_allclose -from brian2.utils.caching import _hashable - - -@pytest.mark.codegen_independent -def test_timedarray_direct_use(): - ta = TimedArray(np.linspace(0, 10, 11), 1 * ms) - assert ta(-1 * ms) == 0 - assert ta(5 * ms) == 5 - assert ta(10 * ms) == 10 - assert ta(15 * ms) == 10 - ta = TimedArray(np.linspace(0, 10, 11) * amp, 1 * ms) - assert ta(-1 * ms) == 0 * amp - assert ta(5 * ms) == 5 * amp - assert ta(10 * ms) == 10 * amp - assert ta(15 * ms) == 10 * amp - ta2d = TimedArray((np.linspace(0, 11, 12) * amp).reshape(4, 3), 1 * ms) - assert ta2d(-1 * ms, 0) == 0 * amp - assert ta2d(0 * ms, 0) == 0 * amp - assert ta2d(0 * ms, 1) == 1 * amp - assert ta2d(1 * ms, 1) == 4 * amp - assert_allclose(ta2d(1 * ms, [0, 1, 2]), [3, 4, 5] * amp) - assert_allclose(ta2d(15 * ms, [0, 1, 2]), [9, 10, 11] * amp) - - -@pytest.mark.standalone_compatible -def test_timedarray_semantics(): - # Make sure that timed arrays are interpreted as specifying the values - # between t and t+dt (not between t-dt/2 and t+dt/2 as in Brian1) - ta = TimedArray(array([0, 1]), dt=0.4 * ms) - G = NeuronGroup(1, "value = ta(t) : 1", dt=0.1 * ms) - mon = StateMonitor(G, "value", record=0) - run(0.8 * ms) - assert_allclose(mon[0].value, [0, 0, 0, 0, 1, 1, 1, 1]) - assert_allclose(mon[0].value, ta(mon.t)) - - -@pytest.mark.standalone_compatible -def test_timedarray_no_units(): - ta = TimedArray(np.arange(10), dt=0.1 * ms) - G = NeuronGroup(1, "value = ta(t) + 1: 1", dt=0.1 * ms) - mon = StateMonitor(G, "value", record=True, dt=0.1 * 
ms) - run(1.1 * ms) - assert_allclose(mon[0].value_, np.clip(np.arange(len(mon[0].t)), 0, 9) + 1) - - -@pytest.mark.standalone_compatible -def test_timedarray_with_units(): - ta = TimedArray(np.arange(10) * amp, dt=0.1 * ms) - G = NeuronGroup(1, "value = ta(t) + 2*nA: amp", dt=0.1 * ms) - mon = StateMonitor(G, "value", record=True, dt=0.1 * ms) - run(1.1 * ms) - assert_allclose( - mon[0].value, np.clip(np.arange(len(mon[0].t)), 0, 9) * amp + 2 * nA - ) - - -@pytest.mark.standalone_compatible -def test_timedarray_2d(): - # 4 time steps, 3 neurons - ta2d = TimedArray(np.arange(12).reshape(4, 3), dt=0.1 * ms) - G = NeuronGroup(3, "value = ta2d(t, i) + 1: 1", dt=0.1 * ms) - mon = StateMonitor(G, "value", record=True, dt=0.1 * ms) - run(0.5 * ms) - assert_allclose(mon[0].value_, np.array([0, 3, 6, 9, 9]) + 1) - assert_allclose(mon[1].value_, np.array([1, 4, 7, 10, 10]) + 1) - assert_allclose(mon[2].value_, np.array([2, 5, 8, 11, 11]) + 1) - - -@pytest.mark.codegen_independent -def test_timedarray_incorrect_use(): - ta = TimedArray(np.linspace(0, 10, 11), 1 * ms) - ta2d = TimedArray((np.linspace(0, 11, 12) * amp).reshape(4, 3), 1 * ms) - G = NeuronGroup(3, "I : amp") - with pytest.raises(ValueError): - setattr(G, "I", "ta2d(t)*amp") - with pytest.raises(ValueError): - setattr(G, "I", "ta(t, i)*amp") - with pytest.raises(ValueError): - setattr(G, "I", "ta()*amp") - with pytest.raises(ValueError): - setattr(G, "I", "ta*amp") - - -@pytest.mark.standalone_compatible -def test_timedarray_no_upsampling(): - # Test a TimedArray where no upsampling is necessary because the monitor's - # dt is bigger than the TimedArray's - ta = TimedArray(np.arange(10), dt=0.01 * ms) - G = NeuronGroup(1, "value = ta(t): 1", dt=0.1 * ms) - mon = StateMonitor(G, "value", record=True, dt=1 * ms) - run(2.1 * ms) - assert_allclose(mon[0].value, [0, 9, 9]) - - -# @pytest.mark.standalone_compatible # see FIXME comment below -def test_long_timedarray(): - """ - Use a very long timedarray (with a big dt), where the upsampling can lead - to integer overflow. 
- """ - ta = TimedArray(np.arange(16385), dt=1 * second) - G = NeuronGroup(1, "value = ta(t) : 1") - mon = StateMonitor(G, "value", record=True) - net = Network(G, mon) - # We'll start the simulation close to the critical boundary - # FIXME: setting the time like this does not work for standalone - net.t_ = float(16384 * second - 5 * ms) - net.run(10 * ms) - assert_allclose(mon[0].value[mon.t < 16384 * second], 16383) - assert_allclose(mon[0].value[mon.t >= 16384 * second], 16384) - - -def test_timedarray_repeated_use(): - # Check that recreating a TimedArray with different values does work - # correctly (no issues with caching) - values = np.array([[1, 2, 3], [2, 4, 6]]) - for run_idx in range(2): - ta = TimedArray(values[run_idx], dt=defaultclock.dt, name="ta") - G = NeuronGroup(1, "dx/dt = ta(t)/dt : 1", name="G") - run(3 * defaultclock.dt) - assert G.x[0] == 6 * (run_idx + 1) - - -if __name__ == "__main__": - test_timedarray_direct_use() - test_timedarray_semantics() - test_timedarray_no_units() - test_timedarray_with_units() - test_timedarray_2d() - test_timedarray_incorrect_use() - test_timedarray_no_upsampling() - test_long_timedarray() - test_timedarray_repeated_use() diff --git a/brian2/tests/test_units.py b/brian2/tests/test_units.py deleted file mode 100644 index a6ed5b71a..000000000 --- a/brian2/tests/test_units.py +++ /dev/null @@ -1,1579 +0,0 @@ -import itertools -import pickle -import warnings - -import numpy as np -import pytest -from numpy.testing import assert_equal - -import brian2 -from brian2.core.preferences import prefs -from brian2.tests.utils import assert_allclose -from brian2.units.allunits import * -from brian2.units.fundamentalunits import ( - DIMENSIONLESS, - UFUNCS_DIMENSIONLESS, - UFUNCS_DIMENSIONLESS_TWOARGS, - UFUNCS_INTEGERS, - UFUNCS_LOGICAL, - DimensionMismatchError, - Quantity, - Unit, - check_units, - fail_for_dimension_mismatch, - get_dimensions, - get_or_create_dimension, - get_unit, - have_same_dimensions, - in_unit, - is_dimensionless, - is_scalar_type, -) -from brian2.units.stdunits import Hz, cm, kHz, mM, ms, mV, nA, nS - -# To work around an issue in matplotlib 1.3.1 (see -# https://github.com/matplotlib/matplotlib/pull/2591), we make `ravel` -# return a unitless array and emit a warning explaining the issue. 
-use_matplotlib_units_fix = False -try: - import matplotlib - - if matplotlib.__version__ == "1.3.1": - use_matplotlib_units_fix = True -except ImportError: - pass - - -def assert_quantity(q, values, unit): - assert isinstance(q, Quantity) or ( - have_same_dimensions(unit, 1) - and (values.shape == () or isinstance(q, np.ndarray)) - ), q - assert_allclose(np.asarray(q), values) - assert have_same_dimensions( - q, unit - ), f"Dimension mismatch: ({get_dimensions(q)}) ({get_dimensions(unit)})" - - -@pytest.mark.codegen_independent -def test_construction(): - """Test the construction of quantity objects""" - q = 500 * ms - assert_quantity(q, 0.5, second) - q = np.float64(500) * ms - assert_quantity(q, 0.5, second) - q = np.array(500) * ms - assert_quantity(q, 0.5, second) - q = np.array([500, 1000]) * ms - assert_quantity(q, np.array([0.5, 1]), second) - q = Quantity(500) - assert_quantity(q, 500, 1) - q = Quantity(500, dim=second.dim) - assert_quantity(q, 500, second) - q = Quantity([0.5, 1], dim=second.dim) - assert_quantity(q, np.array([0.5, 1]), second) - q = Quantity(np.array([0.5, 1]), dim=second.dim) - assert_quantity(q, np.array([0.5, 1]), second) - q = Quantity([500 * ms, 1 * second]) - assert_quantity(q, np.array([0.5, 1]), second) - q = Quantity.with_dimensions(np.array([0.5, 1]), second=1) - assert_quantity(q, np.array([0.5, 1]), second) - q = [0.5, 1] * second - assert_quantity(q, np.array([0.5, 1]), second) - - # dimensionless quantities - q = Quantity([1, 2, 3]) - assert_quantity(q, np.array([1, 2, 3]), Unit(1)) - q = Quantity(np.array([1, 2, 3])) - assert_quantity(q, np.array([1, 2, 3]), Unit(1)) - q = Quantity([]) - assert_quantity(q, np.array([]), Unit(1)) - - # copying/referencing a quantity - q1 = Quantity.with_dimensions(np.array([0.5, 1]), second=1) - q2 = Quantity(q1) # no copy - assert_quantity(q2, np.asarray(q1), q1) - q2[0] = 3 * second - assert_equal(q1[0], 3 * second) - - q1 = Quantity.with_dimensions(np.array([0.5, 1]), second=1) - q2 = Quantity(q1, copy=True) # copy - assert_quantity(q2, np.asarray(q1), q1) - q2[0] = 3 * second - assert_equal(q1[0], 0.5 * second) - - # Illegal constructor calls - with pytest.raises(TypeError): - Quantity([500 * ms, 1]) - with pytest.raises(TypeError): - Quantity(["some", "nonsense"]) - with pytest.raises(DimensionMismatchError): - Quantity([500 * ms, 1 * volt]) - - -@pytest.mark.codegen_independent -def test_get_dimensions(): - """ - Test various ways of getting/comparing the dimensions of a quantity. 
- """ - q = 500 * ms - assert get_dimensions(q) is get_or_create_dimension(q.dimensions._dims) - assert get_dimensions(q) is q.dimensions - assert q.has_same_dimensions(3 * second) - dims = q.dimensions - assert_equal(dims.get_dimension("time"), 1.0) - assert_equal(dims.get_dimension("length"), 0) - - assert get_dimensions(5) is DIMENSIONLESS - assert get_dimensions(5.0) is DIMENSIONLESS - assert get_dimensions(np.array(5, dtype=np.int32)) is DIMENSIONLESS - assert get_dimensions(np.array(5.0)) is DIMENSIONLESS - assert get_dimensions(np.float32(5.0)) is DIMENSIONLESS - assert get_dimensions(np.float64(5.0)) is DIMENSIONLESS - assert is_scalar_type(5) - assert is_scalar_type(5.0) - assert is_scalar_type(np.array(5, dtype=np.int32)) - assert is_scalar_type(np.array(5.0)) - assert is_scalar_type(np.float32(5.0)) - assert is_scalar_type(np.float64(5.0)) - with pytest.raises(TypeError): - get_dimensions("a string") - # wrong number of indices - with pytest.raises(TypeError): - get_or_create_dimension([1, 2, 3, 4, 5, 6]) - # not a sequence - with pytest.raises(TypeError): - get_or_create_dimension(42) - - -@pytest.mark.codegen_independent -def test_display(): - """ - Test displaying a quantity in different units - """ - assert_equal(in_unit(3 * volt, mvolt), "3000. mV") - assert_equal(in_unit(10 * mV, ohm * amp), "0.01 ohm A") - with pytest.raises(DimensionMismatchError): - in_unit(10 * nS, ohm) - - # A bit artificial... - assert_equal(in_unit(10.0, Unit(10.0, scale=1)), "1.0") - - -@pytest.mark.codegen_independent -def test_scale(): - # Check that unit scaling is implemented correctly - from brian2.core.namespace import DEFAULT_UNITS - - siprefixes = { - "y": 1e-24, - "z": 1e-21, - "a": 1e-18, - "f": 1e-15, - "p": 1e-12, - "n": 1e-9, - "u": 1e-6, - "m": 1e-3, - "": 1.0, - "k": 1e3, - "M": 1e6, - "G": 1e9, - "T": 1e12, - "P": 1e15, - "E": 1e18, - "Z": 1e21, - "Y": 1e24, - } - for prefix in siprefixes: - if prefix in ["c", "d", "da", "h"]: - continue - scaled_unit = DEFAULT_UNITS[f"{prefix}meter"] - assert_allclose(float(scaled_unit), siprefixes[prefix]) - assert_allclose(5 * scaled_unit / meter, 5 * siprefixes[prefix]) - scaled_unit = DEFAULT_UNITS[f"{prefix}meter2"] - assert_allclose(float(scaled_unit), siprefixes[prefix] ** 2) - assert_allclose(5 * scaled_unit / meter2, 5 * siprefixes[prefix] ** 2) - scaled_unit = DEFAULT_UNITS[f"{prefix}meter3"] - assert_allclose(float(scaled_unit), siprefixes[prefix] ** 3) - assert_allclose(5 * scaled_unit / meter3, 5 * siprefixes[prefix] ** 3) - # liter, gram, and molar are special, they are not base units with a - # value of one, even though they do not have any prefix - for unit, factor in [ - ("liter", 1e-3), - ("litre", 1e-3), - ("gram", 1e-3), - ("gramme", 1e-3), - ("molar", 1e3), - ]: - base_unit = DEFAULT_UNITS[unit] - scaled_unit = DEFAULT_UNITS[prefix + unit] - assert_allclose(float(scaled_unit), siprefixes[prefix] * factor) - assert_allclose(5 * scaled_unit / base_unit, 5 * siprefixes[prefix]) - - -@pytest.mark.codegen_independent -def test_pickling(): - """ - Test pickling of units. 
- """ - for q in [ - 500 * mV, - 500 * mV / mV, - np.arange(10) * mV, - np.arange(12).reshape(4, 3) * mV / ms, - ]: - pickled = pickle.dumps(q) - unpickled = pickle.loads(pickled) - assert isinstance(unpickled, type(q)) - assert have_same_dimensions(unpickled, q) - assert_equal(unpickled, q) - - -@pytest.mark.codegen_independent -def test_dimension_singletons(): - # Make sure that Dimension objects are singletons, even when pickled - volt_dim = get_or_create_dimension((2, 1, -3, -1, 0, 0, 0)) - assert volt.dim is volt_dim - import pickle - - pickled_dim = pickle.dumps(volt_dim) - unpickled_dim = pickle.loads(pickled_dim) - assert unpickled_dim is volt_dim - assert unpickled_dim is volt.dim - - -@pytest.mark.codegen_independent -def test_str_repr(): - """ - Test that str representations do not raise any errors and that repr - fullfills eval(repr(x)) == x. Also test generating LaTeX representations via sympy. - """ - import sympy - from numpy import array # necessary for evaluating repr - - units_which_should_exist = [ - metre, - meter, - kilogram, - kilogramme, - second, - amp, - kelvin, - mole, - candle, - radian, - steradian, - hertz, - newton, - pascal, - joule, - watt, - coulomb, - volt, - farad, - ohm, - siemens, - weber, - tesla, - henry, - lumen, - lux, - becquerel, - gray, - sievert, - katal, - gram, - gramme, - molar, - liter, - litre, - ] - - # scaled versions of all these units should exist (we just check farad as an example) - some_scaled_units = [ - Yfarad, - Zfarad, - Efarad, - Pfarad, - Tfarad, - Gfarad, - Mfarad, - kfarad, - hfarad, - dafarad, - dfarad, - cfarad, - mfarad, - ufarad, - nfarad, - pfarad, - ffarad, - afarad, - zfarad, - yfarad, - ] - - # some powered units - powered_units = [cmetre2, Yfarad3] - - # Combined units - complex_units = [ - (kgram * metre2) / (amp * second3), - 5 * (kgram * metre2) / (amp * second3), - metre * second**-1, - 10 * metre * second**-1, - array([1, 2, 3]) * kmetre / second, - np.ones(3) * nS / cm**2, - # Made-up unit: - Unit( - 1, - dim=get_or_create_dimension(length=5, time=2), - dispname="O", - latexname=r"\Omega", - ), - 8000 * umetre**3, - [0.0001, 10000] * umetre**3, - 1 / metre, - 1 / (coulomb * metre**2), - Unit(1) / second, - 3.0 * mM, - 5 * mole / liter, - 7 * liter / meter3, - 1 / second**2, - volt**-2, - (volt**2) ** -1, - (1 / second) / meter, - 1 / (1 / second), - ] - - unitless = [second / second, 5 * second / second, Unit(1)] - - for u in itertools.chain( - units_which_should_exist, - some_scaled_units, - powered_units, - complex_units, - unitless, - ): - assert len(str(u)) > 0 - if not is_dimensionless(u): - assert len(sympy.latex(u)) - assert get_dimensions(eval(repr(u))) == get_dimensions(u) - assert_allclose(eval(repr(u)), u) - - for ar in [np.arange(10000) * mV, np.arange(100).reshape(10, 10) * mV]: - latex_str = sympy.latex(ar) - assert 0 < len(latex_str) < 1000 # arbitrary threshold, but see #1425 - - # test the `DIMENSIONLESS` object - assert str(DIMENSIONLESS) == "1" - assert repr(DIMENSIONLESS) == "Dimension()" - - # test DimensionMismatchError (only that it works without raising an error - for error in [ - DimensionMismatchError("A description"), - DimensionMismatchError("A description", DIMENSIONLESS), - DimensionMismatchError("A description", DIMENSIONLESS, second.dim), - ]: - assert len(str(error)) - assert len(repr(error)) - - -@pytest.mark.codegen_independent -def test_format_quantity(): - # Avoid that the default f-string (or .format call) discards units when used without - # a format spec - q = 0.5 * ms - 
assert f"{q}" == f"{q!s}" == str(q) - assert f"{q:g}" == f"{float(q)}" - - -@pytest.mark.codegen_independent -def test_slicing(): - # Slicing and indexing, setting items - quantity = np.reshape(np.arange(6), (2, 3)) * mV - assert_equal(quantity[:], quantity) - assert_equal(quantity[0], np.asarray(quantity)[0] * volt) - assert_equal(quantity[0:1], np.asarray(quantity)[0:1] * volt) - assert_equal(quantity[0, 1], np.asarray(quantity)[0, 1] * volt) - assert_equal(quantity[0:1, 1:], np.asarray(quantity)[0:1, 1:] * volt) - bool_matrix = np.array([[True, False, False], [False, False, True]]) - assert_equal(quantity[bool_matrix], np.asarray(quantity)[bool_matrix] * volt) - - -@pytest.mark.codegen_independent -def test_setting(): - quantity = np.reshape(np.arange(6), (2, 3)) * mV - quantity[0, 1] = 10 * mV - assert quantity[0, 1] == 10 * mV - quantity[:, 1] = 20 * mV - assert np.all(quantity[:, 1] == 20 * mV) - quantity[1, :] = np.ones((1, 3)) * volt - assert np.all(quantity[1, :] == 1 * volt) - # Setting to zero should work without units as well - quantity[1, 2] = 0 - assert quantity[1, 2] == 0 * mV - - def set_to_value(key, value): - quantity[key] = value - - with pytest.raises(DimensionMismatchError): - set_to_value(0, 1) - with pytest.raises(DimensionMismatchError): - set_to_value(0, 1 * second) - with pytest.raises(DimensionMismatchError): - set_to_value((slice(2), slice(3)), np.ones((2, 3))) - - -@pytest.mark.codegen_independent -def test_multiplication_division(): - quantities = [3 * mV, np.array([1, 2]) * mV, np.ones((3, 3)) * mV] - q2 = 5 * second - - for q in quantities: - # Scalars and array scalars - assert_quantity(q / 3, np.asarray(q) / 3, volt) - assert_quantity(3 / q, 3 / np.asarray(q), 1 / volt) - assert_quantity(q * 3, np.asarray(q) * 3, volt) - assert_quantity(3 * q, 3 * np.asarray(q), volt) - assert_quantity(q / np.float64(3), np.asarray(q) / 3, volt) - assert_quantity(np.float64(3) / q, 3 / np.asarray(q), 1 / volt) - assert_quantity(q * np.float64(3), np.asarray(q) * 3, volt) - assert_quantity(np.float64(3) * q, 3 * np.asarray(q), volt) - assert_quantity(q / np.array(3), np.asarray(q) / 3, volt) - assert_quantity(np.array(3) / q, 3 / np.asarray(q), 1 / volt) - assert_quantity(q * np.array(3), np.asarray(q) * 3, volt) - assert_quantity(np.array(3) * q, 3 * np.asarray(q), volt) - - # (unitless) arrays - assert_quantity(q / np.array([3]), np.asarray(q) / 3, volt) - assert_quantity(np.array([3]) / q, 3 / np.asarray(q), 1 / volt) - assert_quantity(q * np.array([3]), np.asarray(q) * 3, volt) - assert_quantity(np.array([3]) * q, 3 * np.asarray(q), volt) - - # arrays with units - assert_quantity(q / q, np.asarray(q) / np.asarray(q), 1) - assert_quantity(q * q, np.asarray(q) ** 2, volt**2) - assert_quantity(q / q2, np.asarray(q) / np.asarray(q2), volt / second) - assert_quantity(q2 / q, np.asarray(q2) / np.asarray(q), second / volt) - assert_quantity(q * q2, np.asarray(q) * np.asarray(q2), volt * second) - - # using unsupported objects should fail - with pytest.raises(TypeError): - q / "string" - with pytest.raises(TypeError): - "string" / q - with pytest.raises(TypeError): - "string" * q - with pytest.raises(TypeError): - q * "string" - - -@pytest.mark.codegen_independent -def test_addition_subtraction(): - quantities = [3 * mV, np.array([1, 2]) * mV, np.ones((3, 3)) * mV] - q2 = 5 * volt - - for q in quantities: - # arrays with units - assert_quantity(q + q, np.asarray(q) + np.asarray(q), volt) - assert_quantity(q - q, 0, volt) - assert_quantity(q + q2, np.asarray(q) + np.asarray(q2), 
volt) - assert_quantity(q2 + q, np.asarray(q2) + np.asarray(q), volt) - assert_quantity(q - q2, np.asarray(q) - np.asarray(q2), volt) - assert_quantity(q2 - q, np.asarray(q2) - np.asarray(q), volt) - - # mismatching units - with pytest.raises(DimensionMismatchError): - q + 5 * second - with pytest.raises(DimensionMismatchError): - 5 * second + q - with pytest.raises(DimensionMismatchError): - q - 5 * second - with pytest.raises(DimensionMismatchError): - 5 * second - q - - # scalar - with pytest.raises(DimensionMismatchError): - q + 5 - with pytest.raises(DimensionMismatchError): - 5 + q - with pytest.raises(DimensionMismatchError): - q + np.float64(5) - with pytest.raises(DimensionMismatchError): - np.float64(5) + q - with pytest.raises(DimensionMismatchError): - q - 5 - with pytest.raises(DimensionMismatchError): - 5 - q - with pytest.raises(DimensionMismatchError): - q - np.float64(5) - with pytest.raises(DimensionMismatchError): - np.float64(5) - q - - # unitless array - with pytest.raises(DimensionMismatchError): - q + np.array([5]) - with pytest.raises(DimensionMismatchError): - np.array([5]) + q - with pytest.raises(DimensionMismatchError): - q + np.array([5], dtype=np.float64) - with pytest.raises(DimensionMismatchError): - np.array([5], dtype=np.float64) + q - with pytest.raises(DimensionMismatchError): - q - np.array([5]) - with pytest.raises(DimensionMismatchError): - np.array([5]) - q - with pytest.raises(DimensionMismatchError): - q - np.array([5], dtype=np.float64) - with pytest.raises(DimensionMismatchError): - np.array([5], dtype=np.float64) - q - - # Check that operations with 0 work - assert_quantity(q + 0, np.asarray(q), volt) - assert_quantity(0 + q, np.asarray(q), volt) - assert_quantity(q - 0, np.asarray(q), volt) - assert_quantity(0 - q, -np.asarray(q), volt) - assert_quantity(q + np.float64(0), np.asarray(q), volt) - assert_quantity(np.float64(0) + q, np.asarray(q), volt) - assert_quantity(q - np.float64(0), np.asarray(q), volt) - assert_quantity(np.float64(0) - q, -np.asarray(q), volt) - - # using unsupported objects should fail - with pytest.raises(TypeError): - "string" + q - with pytest.raises(TypeError): - q + "string" - with pytest.raises(TypeError): - q - "string" - with pytest.raises(TypeError): - "string" - q - - -@pytest.mark.codegen_independent -def test_unary_operations(): - from operator import neg, pos - - for op in [neg, pos]: - for x in [2, np.array([2]), np.array([1, 2])]: - assert_quantity(op(x * kilogram), op(x), kilogram) - - -@pytest.mark.codegen_independent -def test_binary_operations(): - """Test whether binary operations work when they should and raise - DimensionMismatchErrors when they should. - Does not test for the actual result. 
- """ - from operator import add, eq, ge, gt, le, lt, ne, sub - - def assert_operations_work(a, b): - try: - # Test python builtins - tryops = [add, sub, lt, le, gt, ge, eq, ne] - for op in tryops: - op(a, b) - op(b, a) - - # Test equivalent numpy functions - numpy_funcs = [ - np.add, - np.subtract, - np.less, - np.less_equal, - np.greater, - np.greater_equal, - np.equal, - np.not_equal, - np.maximum, - np.minimum, - ] - for numpy_func in numpy_funcs: - numpy_func(a, b) - numpy_func(b, a) - except DimensionMismatchError as ex: - raise AssertionError(f"Operation raised unexpected exception: {ex}") - - def assert_operations_do_not_work(a, b): - # Test python builtins - tryops = [add, sub, lt, le, gt, ge, eq, ne] - for op in tryops: - with pytest.raises(DimensionMismatchError): - op(a, b) - with pytest.raises(DimensionMismatchError): - op(b, a) - - # Test equivalent numpy functions - numpy_funcs = [ - np.add, - np.subtract, - np.less, - np.less_equal, - np.greater, - np.greater_equal, - np.equal, - np.not_equal, - np.maximum, - np.minimum, - ] - for numpy_func in numpy_funcs: - with pytest.raises(DimensionMismatchError): - numpy_func(a, b) - with pytest.raises(DimensionMismatchError): - numpy_func(b, a) - - # - # Check that consistent units work - # - - # unit arrays - a = 1 * kilogram - for b in [2 * kilogram, np.array([2]) * kilogram, np.array([1, 2]) * kilogram]: - assert_operations_work(a, b) - - # dimensionless units and scalars - a = 1 - for b in [ - 2 * kilogram / kilogram, - np.array([2]) * kilogram / kilogram, - np.array([1, 2]) * kilogram / kilogram, - ]: - assert_operations_work(a, b) - - # dimensionless units and unitless arrays - a = np.array([1]) - for b in [ - 2 * kilogram / kilogram, - np.array([2]) * kilogram / kilogram, - np.array([1, 2]) * kilogram / kilogram, - ]: - assert_operations_work(a, b) - - # - # Check that inconsistent units do not work - # - - # unit arrays - a = np.array([1]) * second - for b in [2 * kilogram, np.array([2]) * kilogram, np.array([1, 2]) * kilogram]: - assert_operations_do_not_work(a, b) - - # unitless array - a = np.array([1]) - for b in [2 * kilogram, np.array([2]) * kilogram, np.array([1, 2]) * kilogram]: - assert_operations_do_not_work(a, b) - - # scalar - a = 1 - for b in [2 * kilogram, np.array([2]) * kilogram, np.array([1, 2]) * kilogram]: - assert_operations_do_not_work(a, b) - - # Check that comparisons with inf/-inf always work - values = [ - 2 * kilogram / kilogram, - 2 * kilogram, - np.array([2]) * kilogram, - np.array([1, 2]) * kilogram, - ] - for value in values: - assert np.all(value < np.inf) - assert np.all(np.inf > value) - assert np.all(value <= np.inf) - assert np.all(np.inf >= value) - assert np.all(value != np.inf) - assert np.all(np.inf != value) - assert np.all(value >= -np.inf) - assert np.all(-np.inf <= value) - assert np.all(value > -np.inf) - assert np.all(-np.inf < value) - - -@pytest.mark.codegen_independent -def test_power(): - """ - Test raising quantities to a power. - """ - values = [2 * kilogram, np.array([2]) * kilogram, np.array([1, 2]) * kilogram] - for value in values: - assert_quantity(value**3, np.asarray(value) ** 3, kilogram**3) - # Test raising to a dimensionless quantity - assert_quantity(value ** (3 * volt / volt), np.asarray(value) ** 3, kilogram**3) - with pytest.raises(DimensionMismatchError): - # FIXME: Not that if float(exponent) is a special value such as 1 or 2 - # numpy will actually use a ufunc such as identity or square, which will - # not raise a DimensionMismatchError. 
This is a limitation of the current - # implementation. - value ** (2 * mV) - with pytest.raises(TypeError): - value ** np.array([2, 3]) - - -@pytest.mark.codegen_independent -def test_inplace_operations(): - q = np.arange(10) * volt - q_orig = q.copy() - q_ref = q - - q *= 2 - assert np.array_equal(q, 2 * q_orig) - assert np.array_equal(q_ref, q) - q /= 2 - assert np.array_equal(q, q_orig) - assert np.array_equal(q_ref, q) - q += 1 * volt - assert np.array_equal(q, q_orig + 1 * volt) - assert np.array_equal(q_ref, q) - q -= 1 * volt - assert np.array_equal(q, q_orig) - assert np.array_equal(q_ref, q) - q **= 2 - assert np.array_equal(q, q_orig**2) - assert np.array_equal(q_ref, q) - q **= 0.5 - assert np.array_equal(q, q_orig) - assert np.array_equal(q_ref, q) - - def illegal_add(q2): - q = np.arange(10) * volt - q += q2 - - with pytest.raises(DimensionMismatchError): - illegal_add(1 * second) - with pytest.raises(DimensionMismatchError): - illegal_add(1) - - def illegal_sub(q2): - q = np.arange(10) * volt - q -= q2 - - with pytest.raises(DimensionMismatchError): - illegal_add(1 * second) - with pytest.raises(DimensionMismatchError): - illegal_add(1) - - def illegal_pow(q2): - q = np.arange(10) * volt - q **= q2 - - with pytest.raises(DimensionMismatchError): - illegal_pow(1 * mV) - with pytest.raises(TypeError): - illegal_pow(np.arange(10)) - - # inplace operations with unsupported objects should fail - for inplace_op in [ - q.__iadd__, - q.__isub__, - q.__imul__, - q.__itruediv__, - q.__ifloordiv__, - q.__imod__, - q.__ipow__, - ]: - try: - result = inplace_op("string") - # if it doesn't fail with an error, it should return NotImplemented - assert result == NotImplemented - except TypeError: - pass # raised on numpy >= 0.10 - - # make sure that inplace operations do not work on units/dimensions at all - for inplace_op in [ - volt.__iadd__, - volt.__isub__, - volt.__imul__, - volt.__itruediv__, - volt.__ifloordiv__, - volt.__imod__, - volt.__ipow__, - ]: - with pytest.raises(TypeError): - inplace_op(volt) - for inplace_op in [ - volt.dimensions.__imul__, - volt.dimensions.__itruediv__, - volt.dimensions.__ipow__, - ]: - with pytest.raises(TypeError): - inplace_op(volt.dimensions) - - -@pytest.mark.codegen_independent -def test_unit_discarding_functions(): - """ - Test functions that discard units. - """ - from brian2.units.unitsafefunctions import ones_like, zeros_like - - values = [3 * mV, np.array([1, 2]) * mV, np.arange(12).reshape(3, 4) * mV] - for value in values: - assert_equal(np.sign(value), np.sign(np.asarray(value))) - assert_equal(zeros_like(value), np.zeros_like(np.asarray(value))) - assert_equal(ones_like(value), np.ones_like(np.asarray(value))) - # Calling non-zero on a 0d array is deprecated, don't test it: - if value.ndim > 0: - assert_equal(np.nonzero(value), np.nonzero(np.asarray(value))) - - -@pytest.mark.codegen_independent -def test_unitsafe_functions(): - """ - Test the unitsafe functions wrapping their numpy counterparts. 
- """ - from brian2.units.unitsafefunctions import ( - arccos, - arccosh, - arcsin, - arcsinh, - arctan, - arctanh, - cos, - cosh, - exp, - log, - sin, - sinh, - tan, - tanh, - ) - - # All functions with their numpy counterparts - funcs = [ - (sin, np.sin), - (sinh, np.sinh), - (arcsin, np.arcsin), - (arcsinh, np.arcsinh), - (cos, np.cos), - (cosh, np.cosh), - (arccos, np.arccos), - (arccosh, np.arccosh), - (tan, np.tan), - (tanh, np.tanh), - (arctan, np.arctan), - (arctanh, np.arctanh), - (log, np.log), - (exp, np.exp), - ] - - unitless_values = [ - 3 * mV / mV, - np.array([1, 2]) * mV / mV, - np.ones((3, 3)) * mV / mV, - ] - numpy_values = [3, np.array([1, 2]), np.ones((3, 3))] - unit_values = [3 * mV, np.array([1, 2]) * mV, np.ones((3, 3)) * mV] - - for func, np_func in funcs: - # make sure these functions raise errors when run on values with dimensions - for val in unit_values: - with pytest.raises(DimensionMismatchError): - func(val) - - # make sure the functions are equivalent to their numpy counterparts - # when run on unitless values while ignoring warnings about invalid - # values or divisions by zero - with warnings.catch_warnings(): - warnings.simplefilter("ignore") - - for val in unitless_values: - assert_equal(func(val), np_func(val)) - - for val in numpy_values: - assert_equal(func(val), np_func(val)) - - -@pytest.mark.codegen_independent -def test_special_case_numpy_functions(): - """ - Test a couple of functions/methods that need special treatment. - """ - from brian2.units.unitsafefunctions import diagonal, dot, ravel, trace, where - - quadratic_matrix = np.reshape(np.arange(9), (3, 3)) * mV - - # Temporarily suppress warnings related to the matplotlib 1.3 bug - with warnings.catch_warnings(): - warnings.simplefilter("ignore") - # Check that function and method do the same thing - assert_equal(ravel(quadratic_matrix), quadratic_matrix.ravel()) - # Check that function gives the same result as on unitless arrays - assert_equal( - np.asarray(ravel(quadratic_matrix)), ravel(np.asarray(quadratic_matrix)) - ) - # Check that the function gives the same results as the original numpy - # function - assert_equal( - np.ravel(np.asarray(quadratic_matrix)), ravel(np.asarray(quadratic_matrix)) - ) - - # Do the same checks for diagonal, trace and dot - assert_equal(diagonal(quadratic_matrix), quadratic_matrix.diagonal()) - assert_equal( - np.asarray(diagonal(quadratic_matrix)), diagonal(np.asarray(quadratic_matrix)) - ) - assert_equal( - np.diagonal(np.asarray(quadratic_matrix)), - diagonal(np.asarray(quadratic_matrix)), - ) - - assert_equal(trace(quadratic_matrix), quadratic_matrix.trace()) - assert_equal( - np.asarray(trace(quadratic_matrix)), trace(np.asarray(quadratic_matrix)) - ) - assert_equal( - np.trace(np.asarray(quadratic_matrix)), trace(np.asarray(quadratic_matrix)) - ) - - assert_equal( - dot(quadratic_matrix, quadratic_matrix), quadratic_matrix.dot(quadratic_matrix) - ) - assert_equal( - np.asarray(dot(quadratic_matrix, quadratic_matrix)), - dot(np.asarray(quadratic_matrix), np.asarray(quadratic_matrix)), - ) - assert_equal( - np.dot(np.asarray(quadratic_matrix), np.asarray(quadratic_matrix)), - dot(np.asarray(quadratic_matrix), np.asarray(quadratic_matrix)), - ) - - assert_equal( - np.asarray(quadratic_matrix.prod()), np.asarray(quadratic_matrix).prod() - ) - assert_equal( - np.asarray(quadratic_matrix.prod(axis=0)), - np.asarray(quadratic_matrix).prod(axis=0), - ) - - # Check for correct units - if use_matplotlib_units_fix: - assert have_same_dimensions(1, 
ravel(quadratic_matrix)) - else: - assert have_same_dimensions(quadratic_matrix, ravel(quadratic_matrix)) - assert have_same_dimensions(quadratic_matrix, trace(quadratic_matrix)) - assert have_same_dimensions(quadratic_matrix, diagonal(quadratic_matrix)) - assert have_same_dimensions( - quadratic_matrix[0] ** 2, dot(quadratic_matrix, quadratic_matrix) - ) - assert have_same_dimensions( - quadratic_matrix.prod(axis=0), quadratic_matrix[0] ** quadratic_matrix.shape[0] - ) - - # check the where function - # pure numpy array - cond = [True, False, False] - ar1 = np.array([1, 2, 3]) - ar2 = np.array([4, 5, 6]) - assert_equal(np.where(cond), where(cond)) - assert_equal(np.where(cond, ar1, ar2), where(cond, ar1, ar2)) - - # dimensionless quantity - assert_equal( - np.where(cond, ar1, ar2), np.asarray(where(cond, ar1 * mV / mV, ar2 * mV / mV)) - ) - - # quantity with dimensions - ar1 = ar1 * mV - ar2 = ar2 * mV - assert_equal( - np.where(cond, np.asarray(ar1), np.asarray(ar2)), - np.asarray(where(cond, ar1, ar2)), - ) - - # Check some error cases - with pytest.raises(ValueError): - where(cond, ar1) - with pytest.raises(TypeError): - where(cond, ar1, ar1, ar2) - with pytest.raises(DimensionMismatchError): - where(cond, ar1, ar1 / ms) - - # Check setasflat (for numpy < 1.7) - if hasattr(Quantity, "setasflat"): - a = np.arange(10) * mV - b = np.ones(10).reshape(5, 2) * volt - c = np.ones(10).reshape(5, 2) * second - with pytest.raises(DimensionMismatchError): - a.setasflat(c) - a.setasflat(b) - assert_equal(a.flatten(), b.flatten()) - - # Check cumprod - a = np.arange(1, 10) * mV / mV - assert_equal(a.cumprod(), np.asarray(a).cumprod()) - with pytest.raises(TypeError): - (np.arange(1, 5) * mV).cumprod() - - -# Functions that should not change units -@pytest.mark.codegen_independent -def test_numpy_functions_same_dimensions(): - values = [np.array([1, 2]), np.ones((3, 3))] - units = [volt, second, siemens, mV, kHz] - - from brian2.units.unitsafefunctions import ptp - - # numpy functions - keep_dim_funcs = [ - np.abs, - np.cumsum, - np.max, - np.mean, - np.min, - np.negative, - ptp, - np.round, - np.squeeze, - np.std, - np.sum, - np.transpose, - ] - - for value, unit in itertools.product(values, units): - q_ar = value * unit - for func in keep_dim_funcs: - test_ar = func(q_ar) - if not get_dimensions(test_ar) is q_ar.dim: - raise AssertionError( - f"'{func.__name__}' failed on {q_ar!r} -- dim was " - f"{q_ar.dim}, is now {get_dimensions(test_ar)}." - ) - - # Python builtins should work on one-dimensional arrays - value = np.arange(5) - builtins = [abs, max, min, sum] - for unit in units: - q_ar = value * unit - for func in builtins: - test_ar = func(q_ar) - if not get_dimensions(test_ar) is q_ar.dim: - raise AssertionError( - f"'{func.__name__}' failed on {q_ar!r} -- dim " - f"was {q_ar.dim}, is now " - f"{get_dimensions(test_ar)}" - ) - - -@pytest.mark.codegen_independent -def test_numpy_functions_indices(): - """ - Check numpy functions that return indices. 
- """ - values = [np.array([-4, 3, -2, 1, 0]), np.ones((3, 3)), np.array([17])] - units = [volt, second, siemens, mV, kHz] - - # numpy functions - keep_dim_funcs = [np.argmin, np.argmax, np.argsort, np.nonzero] - - for value, unit in itertools.product(values, units): - q_ar = value * unit - for func in keep_dim_funcs: - test_ar = func(q_ar) - # Compare it to the result on the same value without units - comparison_ar = func(value) - assert_equal( - test_ar, - comparison_ar, - ( - "function %s returned an incorrect result when used on quantities " - % func.__name__ - ), - ) - - -@pytest.mark.codegen_independent -def test_numpy_functions_dimensionless(): - """ - Test that numpy functions that should work on dimensionless quantities only - work dimensionless arrays and return the correct result. - """ - unitless_values = [3, np.array([-4, 3, -1, 2]), np.ones((3, 3))] - unit_values = [3 * mV, np.array([-4, 3, -1, 2]) * mV, np.ones((3, 3)) * mV] - with warnings.catch_warnings(): - # ignore division by 0 warnings - warnings.simplefilter("ignore", RuntimeWarning) - for value in unitless_values: - for ufunc in UFUNCS_DIMENSIONLESS: - result_unitless = eval(f"np.{ufunc}(value)") - result_array = eval(f"np.{ufunc}(np.array(value))") - assert isinstance( - result_unitless, (np.ndarray, np.number) - ) and not isinstance(result_unitless, Quantity) - assert_equal(result_unitless, result_array) - for ufunc in UFUNCS_DIMENSIONLESS_TWOARGS: - result_unitless = eval(f"np.{ufunc}(value, value)") - result_array = eval(f"np.{ufunc}(np.array(value), np.array(value))") - assert isinstance( - result_unitless, (np.ndarray, np.number) - ) and not isinstance(result_unitless, Quantity) - assert_equal(result_unitless, result_array) - - for value, unitless_value in zip(unit_values, unitless_values): - for ufunc in UFUNCS_DIMENSIONLESS: - with pytest.raises(DimensionMismatchError): - eval(f"np.{ufunc}(value)", globals(), {"value": value}) - for ufunc in UFUNCS_DIMENSIONLESS_TWOARGS: - with pytest.raises(DimensionMismatchError): - eval( - f"np.{ufunc}(value1, value2)", - globals(), - {"value1": value, "value2": unitless_value}, - ) - with pytest.raises(DimensionMismatchError): - eval( - f"np.{ufunc}(value2, value1)", - globals(), - {"value1": value, "value2": unitless_value}, - ) - with pytest.raises(DimensionMismatchError): - eval(f"np.{ufunc}(value, value)", globals(), {"value": value}) - - -@pytest.mark.codegen_independent -def test_numpy_functions_change_dimensions(): - """ - Test some numpy functions that change the dimensions of the quantity. - """ - unit_values = [np.array([1, 2]) * mV, np.ones((3, 3)) * 2 * mV] - for value in unit_values: - assert_quantity(np.var(value), np.var(np.array(value)), volt**2) - assert_quantity(np.square(value), np.square(np.array(value)), volt**2) - assert_quantity(np.sqrt(value), np.sqrt(np.array(value)), volt**0.5) - assert_quantity( - np.reciprocal(value), np.reciprocal(np.array(value)), 1.0 / volt - ) - - -@pytest.mark.codegen_independent -def test_numpy_functions_matmul(): - """ - Check support for matmul and the ``@`` operator. 
- """ - no_units_eye = np.eye(3) - with_units_eye = no_units_eye * Mohm - matrix_no_units = np.arange(9).reshape((3, 3)) - matrix_units = matrix_no_units * nA - - # First operand with units - assert_allclose(no_units_eye @ matrix_units, matrix_units) - assert have_same_dimensions(no_units_eye @ matrix_units, matrix_units) - assert_allclose(np.matmul(no_units_eye, matrix_units), matrix_units) - assert have_same_dimensions(np.matmul(no_units_eye, matrix_units), matrix_units) - - # Second operand with units - assert_allclose(with_units_eye @ matrix_no_units, matrix_no_units * Mohm) - assert have_same_dimensions( - with_units_eye @ matrix_no_units, matrix_no_units * Mohm - ) - assert_allclose(np.matmul(with_units_eye, matrix_no_units), matrix_no_units * Mohm) - assert have_same_dimensions( - np.matmul(with_units_eye, matrix_no_units), matrix_no_units * Mohm - ) - - # Both operands with units - assert_allclose( - with_units_eye @ matrix_units, no_units_eye @ matrix_no_units * nA * Mohm - ) - assert have_same_dimensions(with_units_eye @ matrix_units, nA * Mohm) - assert_allclose( - np.matmul(with_units_eye, matrix_units), - np.matmul(no_units_eye, matrix_no_units) * nA * Mohm, - ) - assert have_same_dimensions(np.matmul(with_units_eye, matrix_units), nA * Mohm) - - -@pytest.mark.codegen_independent -def test_numpy_functions_typeerror(): - """ - Assures that certain numpy functions raise a TypeError when called on - quantities. - """ - unitless_values = [ - 3 * mV / mV, - np.array([1, 2]) * mV / mV, - np.ones((3, 3)) * mV / mV, - ] - unit_values = [3 * mV, np.array([1, 2]) * mV, np.ones((3, 3)) * mV] - for value in unitless_values + unit_values: - for ufunc in UFUNCS_INTEGERS: - if ufunc == "invert": - # only takes one argument - with pytest.raises(TypeError): - eval(f"np.{ufunc}(value)", globals(), {"value": value}) - else: - with pytest.raises(TypeError): - eval(f"np.{ufunc}(value, value)", globals(), {"value": value}) - - -@pytest.mark.codegen_independent -def test_numpy_functions_logical(): - """ - Assure that logical numpy functions work on all quantities and return - unitless boolean arrays. 
- """ - unit_values1 = [3 * mV, np.array([1, 2]) * mV, np.ones((3, 3)) * mV] - unit_values2 = [3 * second, np.array([1, 2]) * second, np.ones((3, 3)) * second] - for ufunc in UFUNCS_LOGICAL: - for value1, value2 in zip(unit_values1, unit_values2): - try: - # one argument - result_units = eval(f"np.{ufunc}(value1)") - result_array = eval(f"np.{ufunc}(np.array(value1))") - except (ValueError, TypeError): - # two arguments - result_units = eval(f"np.{ufunc}(value1, value2)") - result_array = eval(f"np.{ufunc}(np.array(value1), np.array(value2))") - assert not isinstance(result_units, Quantity) - assert_equal(result_units, result_array) - - -@pytest.mark.codegen_independent -def test_arange_linspace(): - # For dimensionless values, the unit-safe functions should give the same results - assert_equal(brian2.arange(5), np.arange(5)) - assert_equal(brian2.arange(1, 5), np.arange(1, 5)) - assert_equal(brian2.arange(10, step=2), np.arange(10, step=2)) - assert_equal(brian2.arange(0, 5, 0.5), np.arange(0, 5, 0.5)) - assert_equal(brian2.linspace(0, 1), np.linspace(0, 1)) - assert_equal(brian2.linspace(0, 1, 10), np.linspace(0, 1, 10)) - - # Make sure units are checked - with pytest.raises(DimensionMismatchError): - brian2.arange(1 * mV, 5) - with pytest.raises(DimensionMismatchError): - brian2.arange(1 * mV, 5 * mV) - with pytest.raises(DimensionMismatchError): - brian2.arange(1, 5 * mV) - with pytest.raises(DimensionMismatchError): - brian2.arange(1 * mV, 5 * ms) - with pytest.raises(DimensionMismatchError): - brian2.arange(1 * mV, 5 * mV, step=1 * ms) - with pytest.raises(DimensionMismatchError): - brian2.arange(1 * ms, 5 * mV) - - # Check correct functioning with units - assert_quantity( - brian2.arange(5 * mV, step=1 * mV), float(mV) * np.arange(5, step=1), mV - ) - assert_quantity( - brian2.arange(1 * mV, 5 * mV, 1 * mV), float(mV) * np.arange(1, 5, 1), mV - ) - assert_quantity(brian2.linspace(1 * mV, 2 * mV), float(mV) * np.linspace(1, 2), mV) - - # Check errors for arange with incorrect numbers of arguments/duplicate arguments - with pytest.raises(TypeError): - brian2.arange() - with pytest.raises(TypeError): - brian2.arange(0, 5, 1, 0) - with pytest.raises(TypeError): - brian2.arange(0, stop=1) - with pytest.raises(TypeError): - brian2.arange(0, 5, stop=1) - with pytest.raises(TypeError): - brian2.arange(0, 5, start=1) - with pytest.raises(TypeError): - brian2.arange(0, 5, 1, start=1) - with pytest.raises(TypeError): - brian2.arange(0, 5, 1, stop=2) - with pytest.raises(TypeError): - brian2.arange(0, 5, 1, step=2) - - -@pytest.mark.codegen_independent -def test_list(): - """ - Test converting to and from a list. - """ - values = [3 * mV, np.array([1, 2]) * mV, np.arange(12).reshape(4, 3) * mV] - for value in values: - l = value.tolist() - from_list = Quantity(l) - assert have_same_dimensions(from_list, value) - assert_equal(from_list, value) - - -@pytest.mark.codegen_independent -def test_check_units(): - """ - Test the check_units decorator - """ - - @check_units(v=volt) - def a_function(v, x): - """ - v has to have units of volt, x can have any (or no) unit. 
- """ - pass - - # Try correct units - a_function(3 * mV, 5 * second) - a_function(5 * volt, "something") - a_function([1, 2, 3] * volt, None) - # lists that can be converted should also work - a_function([1 * volt, 2 * volt, 3 * volt], None) - # Strings and None are also allowed to pass - a_function("a string", None) - a_function(None, None) - - # Try incorrect units - with pytest.raises(DimensionMismatchError): - a_function(5 * second, None) - with pytest.raises(DimensionMismatchError): - a_function(5, None) - with pytest.raises(TypeError): - a_function(object(), None) - with pytest.raises(TypeError): - a_function([1, 2 * volt, 3], None) - - @check_units(result=second) - def b_function(return_second): - """ - Return a value in seconds if return_second is True, otherwise return - a value in volt. - """ - if return_second: - return 5 * second - else: - return 3 * volt - - # Should work (returns second) - b_function(True) - # Should fail (returns volt) - with pytest.raises(DimensionMismatchError): - b_function(False) - - @check_units(a=bool, b=1, result=bool) - def c_function(a, b): - if a: - return b > 0 - else: - return b - - assert c_function(True, 1) - assert not c_function(True, -1) - with pytest.raises(TypeError): - c_function(1, 1) - with pytest.raises(TypeError): - c_function(1 * mV, 1) - with pytest.raises(TypeError): - c_function(False, 1) - - -@pytest.mark.codegen_independent -def test_get_unit(): - """ - Test get_unit - """ - values = [ - (volt.dim, volt), - (mV.dim, volt), - ((amp / metre**2).dim, amp / metre**2), - ] - for dim, expected_unit in values: - unit = get_unit(dim) - assert isinstance(unit, Unit) - assert unit == expected_unit - assert float(unit) == 1.0 - - -@pytest.mark.codegen_independent -def test_get_best_unit(): - # get_best_unit should not check all values for long arrays, since it is - # a function used for display purposes only. Instead, only the first and - # last few values should matter (see github issue #966) - long_ar = np.ones(10000) * siemens - long_ar[:10] = 1 * nS - long_ar[-10:] = 2 * nS - values = [ - (np.arange(10) * mV, mV), - ([0.001, 0.002, 0.003] * second, ms), - (long_ar, nS), - ] - for ar, expected_unit in values: - assert ar.get_best_unit() is expected_unit - assert str(expected_unit) in ar.in_best_unit() - - -@pytest.mark.codegen_independent -def test_switching_off_unit_checks(): - """ - Check switching off unit checks (used for external functions). - """ - import brian2.units.fundamentalunits as fundamentalunits - - x = 3 * second - y = 5 * volt - with pytest.raises(DimensionMismatchError): - x + y - fundamentalunits.unit_checking = False - # Now it should work - assert np.asarray(x + y) == np.array(8) - assert have_same_dimensions(x, y) - assert x.has_same_dimensions(y) - fundamentalunits.unit_checking = True - - -@pytest.mark.codegen_independent -def test_fail_for_dimension_mismatch(): - """ - Test the fail_for_dimension_mismatch function. 
- """ - # examples that should not raise an error - dim1, dim2 = fail_for_dimension_mismatch(3) - assert dim1 is DIMENSIONLESS - assert dim2 is DIMENSIONLESS - dim1, dim2 = fail_for_dimension_mismatch(3 * volt / volt) - assert dim1 is DIMENSIONLESS - assert dim2 is DIMENSIONLESS - dim1, dim2 = fail_for_dimension_mismatch(3 * volt / volt, 7) - assert dim1 is DIMENSIONLESS - assert dim2 is DIMENSIONLESS - dim1, dim2 = fail_for_dimension_mismatch(3 * volt, 5 * volt) - assert dim1 is volt.dim - assert dim2 is volt.dim - - # examples that should raise an error - with pytest.raises(DimensionMismatchError): - fail_for_dimension_mismatch(6 * volt) - with pytest.raises(DimensionMismatchError): - fail_for_dimension_mismatch(6 * volt, 5 * second) - - -@pytest.mark.codegen_independent -def test_deepcopy(): - d = {"x": 1 * second} - from copy import deepcopy - - d_copy = deepcopy(d) - assert d_copy["x"] == 1 * second - d_copy["x"] += 1 * second - assert d_copy["x"] == 2 * second - assert d["x"] == 1 * second - - -@pytest.mark.codegen_independent -def test_inplace_on_scalars(): - # We want "copy semantics" for in-place operations on scalar quantities - # in the same way as for Python scalars - for scalar in [3 * mV, 3 * mV / mV]: - scalar_reference = scalar - scalar_copy = Quantity(scalar, copy=True) - scalar += scalar_copy - assert_equal(scalar_copy, scalar_reference) - scalar *= 1.5 - assert_equal(scalar_copy, scalar_reference) - scalar /= 2 - assert_equal(scalar_copy, scalar_reference) - - # also check that it worked correctly for the scalar itself - assert_allclose(scalar, (scalar_copy + scalar_copy) * 1.5 / 2) - - # For arrays, it should use reference semantics - for vector in [[3] * mV, [3] * mV / mV]: - vector_reference = vector - vector_copy = Quantity(vector, copy=True) - vector += vector_copy - assert_equal(vector, vector_reference) - vector *= 1.5 - assert_equal(vector, vector_reference) - vector /= 2 - assert_equal(vector, vector_reference) - - # also check that it worked correctly for the vector itself - assert_allclose(vector, (vector_copy + vector_copy) * 1.5 / 2) - - -def test_units_vs_quantities(): - # Unit objects should stay Unit objects under certain operations - # (important e.g. in the unit definition of Equations, where only units but - # not quantities are allowed) - assert isinstance(meter**2, Unit) - assert isinstance(meter**-1, Unit) - assert isinstance(meter**0.5, Unit) - assert isinstance(meter / second, Unit) - assert isinstance(amp / meter**2, Unit) - assert isinstance(1 / meter, Unit) - assert isinstance(1.0 / meter, Unit) - - # Using the unconventional type(x) == y since we want to test that - # e.g. 
meter**2 stays a Unit and does not become a Quantity however Unit - # inherits from Quantity and therefore both would pass the isinstance test - assert type(2 / meter) == Quantity - assert type(2 * meter) == Quantity - assert type(meter + meter) == Quantity - assert type(meter - meter) == Quantity - - -@pytest.mark.codegen_independent -def test_all_units_list(): - from brian2.units.allunits import all_units - - assert meter in all_units - assert volt in all_units - assert cm in all_units - assert Hz in all_units - assert all(isinstance(u, Unit) for u in all_units) - - -@pytest.mark.codegen_independent -def test_constants(): - import brian2.units.constants as constants - - # Check that the expected names exist and have the correct dimensions - assert constants.avogadro_constant.dim == (1 / mole).dim - assert constants.boltzmann_constant.dim == (joule / kelvin).dim - assert constants.electric_constant.dim == (farad / meter).dim - assert constants.electron_mass.dim == kilogram.dim - assert constants.elementary_charge.dim == coulomb.dim - assert constants.faraday_constant.dim == (coulomb / mole).dim - assert constants.gas_constant.dim == (joule / mole / kelvin).dim - assert constants.magnetic_constant.dim == (newton / amp2).dim - assert constants.molar_mass_constant.dim == (kilogram / mole).dim - assert constants.zero_celsius.dim == kelvin.dim - - # Check the consistency between a few constants - assert_allclose( - constants.gas_constant, - constants.avogadro_constant * constants.boltzmann_constant, - ) - assert_allclose( - constants.faraday_constant, - constants.avogadro_constant * constants.elementary_charge, - ) - - -if __name__ == "__main__": - test_construction() - test_get_dimensions() - test_display() - test_scale() - test_power() - test_pickling() - test_str_repr() - test_slicing() - test_setting() - test_multiplication_division() - test_addition_subtraction() - test_unary_operations() - test_binary_operations() - test_inplace_operations() - test_unit_discarding_functions() - test_unitsafe_functions() - test_special_case_numpy_functions() - test_numpy_functions_same_dimensions() - test_numpy_functions_indices() - test_numpy_functions_dimensionless() - test_numpy_functions_change_dimensions() - test_numpy_functions_typeerror() - test_numpy_functions_logical() - test_arange_linspace() - test_list() - test_check_units() - test_get_unit() - test_get_best_unit() - test_switching_off_unit_checks() - test_fail_for_dimension_mismatch() - test_deepcopy() - test_inplace_on_scalars() - test_units_vs_quantities() - test_all_units_list() - test_constants() diff --git a/brian2/tests/test_utils.py b/brian2/tests/test_utils.py deleted file mode 100644 index a3fbb0c33..000000000 --- a/brian2/tests/test_utils.py +++ /dev/null @@ -1,39 +0,0 @@ -import builtins - -import pytest - -from brian2.utils.environment import running_from_ipython -from brian2.utils.stringtools import SpellChecker - - -@pytest.mark.codegen_independent -def test_environment(): - """ - Test information about the environment we are running under. 
- """ - if hasattr(builtins, "__IPYTHON__"): - testing_under_ipython = True - del builtins.__IPYTHON__ - else: - testing_under_ipython = False - - assert not running_from_ipython() - builtins.__IPYTHON__ = True - assert running_from_ipython() - - if not testing_under_ipython: - del builtins.__IPYTHON__ - - -@pytest.mark.codegen_independent -def test_spell_check(): - checker = SpellChecker(["vm", "alpha", "beta"]) - assert checker.suggest("Vm") == {"vm"} - assert checker.suggest("alphas") == {"alpha"} - assert checker.suggest("bta") == {"beta"} - assert checker.suggest("gamma") == set() - - -if __name__ == "__main__": - test_environment() - test_spell_check() diff --git a/brian2/tests/test_variables.py b/brian2/tests/test_variables.py deleted file mode 100644 index 142e27c4d..000000000 --- a/brian2/tests/test_variables.py +++ /dev/null @@ -1,80 +0,0 @@ -""" -Some basic tests for the `Variable` system -""" - -from collections import namedtuple - -import numpy as np -import pytest - -from brian2.core.preferences import prefs -from brian2.core.variables import * -from brian2.units.allunits import second -from brian2.units.fundamentalunits import Unit - - -@pytest.mark.codegen_independent -def test_construction_errors(): - # Boolean variable that isn't dimensionless - with pytest.raises(ValueError): - Variable(name="name", dimensions=second.dim, dtype=bool) - - # Dynamic array variable that is constant but not constant in size - with pytest.raises(ValueError): - DynamicArrayVariable( - name="name", - owner=None, - size=0, - device=None, - constant=True, - needs_reference_update=True, - ) - - -@pytest.mark.codegen_independent -def test_str_repr(): - # Basic test that the str/repr methods work - FakeGroup = namedtuple("G", ["name"]) - group = FakeGroup(name="groupname") - variables = [ - Variable(name="name", dimensions=second.dim), - Constant(name="name", dimensions=second.dim, value=1.0), - AuxiliaryVariable(name="name", dimensions=second.dim), - ArrayVariable( - name="name", dimensions=second.dim, owner=None, size=10, device=None - ), - DynamicArrayVariable( - name="name", dimensions=second.dim, owner=None, size=0, device=None - ), - Subexpression( - name="sub", dimensions=second.dim, expr="a+b", owner=group, device=None - ), - ] - for var in variables: - assert len(str(var)) - # The repr value should contain the name of the class - assert len(repr(var)) and var.__class__.__name__ in repr(var) - - -@pytest.mark.codegen_independent -def test_dtype_str(): - FakeGroup = namedtuple("G", ["name"]) - group = FakeGroup(name="groupname") - for d in ["int32", "int64", "float32", "float64", "bool", "int", "float"]: - nd = np.dtype(d) - for var in [ - Constant(name="name", value=np.zeros(1, dtype=nd)[0]), - AuxiliaryVariable(name="name", dtype=nd), - ArrayVariable(name="name", owner=None, size=10, device=None, dtype=nd), - DynamicArrayVariable( - name="name", owner=None, dtype=nd, size=0, device=None - ), - Subexpression(name="sub", expr="a+b", owner=group, device=None, dtype=nd), - ]: - assert var.dtype_str.startswith(d) - - -if __name__ == "__main__": - test_construction_errors() - test_str_repr() - test_dtype_str()