diff --git a/models/eprop_synapse.h b/models/eprop_synapse.h
index 5509019296..b072fcb222 100644
--- a/models/eprop_synapse.h
+++ b/models/eprop_synapse.h
@@ -481,7 +481,7 @@ inline void
eprop_synapse< targetidentifierT >::delete_optimizer()
{
delete optimizer_;
- // do not set to nullptr to allow detection of double deletion
+ optimizer_ = nullptr;
}
template < typename targetidentifierT >
diff --git a/models/eprop_synapse_bsshslm_2020.h b/models/eprop_synapse_bsshslm_2020.h
index 3ac9144827..7a655f8f95 100644
--- a/models/eprop_synapse_bsshslm_2020.h
+++ b/models/eprop_synapse_bsshslm_2020.h
@@ -511,7 +511,7 @@ inline void
eprop_synapse_bsshslm_2020< targetidentifierT >::delete_optimizer()
{
delete optimizer_;
- // do not set to nullptr to allow detection of double deletion
+ optimizer_ = nullptr;
}
template < typename targetidentifierT >
diff --git a/nestkernel/connection_manager.cpp b/nestkernel/connection_manager.cpp
index 0924fb73e2..4eac92411d 100644
--- a/nestkernel/connection_manager.cpp
+++ b/nestkernel/connection_manager.cpp
@@ -1131,6 +1131,9 @@ nest::ConnectionManager::get_connections( const DictionaryDatum& params )
// as this may involve sorting connections by source node IDs.
if ( connections_have_changed() )
{
+ // TODO fix-3489: Not entirely sure whether the next two calls are needed here,
+ // but removing both caused failures, so they are kept for now.
+
// We need to update min_delay because it is used by check_wfr_use() below
// to set secondary event data size.
update_delay_extrema_();
@@ -1139,12 +1142,6 @@ nest::ConnectionManager::get_connections( const DictionaryDatum& params )
// needs to be called before update_connection_infrastructure since
// it resizes coefficient arrays for secondary events
kernel().node_manager.check_wfr_use();
-
-#pragma omp parallel
- {
- const size_t tid = kernel().vp_manager.get_thread_id();
- kernel().simulation_manager.update_connection_infrastructure( tid );
- }
}
// We check, whether a synapse model is given. If not, we will iterate all.
diff --git a/nestkernel/simulation_manager.cpp b/nestkernel/simulation_manager.cpp
index 9f23d7f89b..9bfeaf0178 100644
--- a/nestkernel/simulation_manager.cpp
+++ b/nestkernel/simulation_manager.cpp
@@ -544,7 +544,13 @@ nest::SimulationManager::prepare()
// it resizes coefficient arrays for secondary events
kernel().node_manager.check_wfr_use();
- if ( kernel().node_manager.have_nodes_changed() or kernel().connection_manager.connections_have_changed() )
+ // Only update connection infrastructure if we have not simulated yet. Otherwise,
+ // there might still be spikes in the event delivery data structures destined for
+ // connections that have been deleted. See #3489.
+ // We must do this here once before the first simulation round because WFR otherwise
+ // cannot do its pre-step in the first time slice.
+ if ( not simulated_
+ and ( kernel().node_manager.have_nodes_changed() or kernel().connection_manager.connections_have_changed() ) )
{
#pragma omp parallel
{
@@ -1028,14 +1034,19 @@ nest::SimulationManager::update_()
node->decay_synaptic_elements_vacant();
}
- // after structural plasticity has created and deleted
- // connections, update the connection infrastructure; implies
- // complete removal of presynaptic part and reconstruction
- // from postsynaptic data
- update_connection_infrastructure( tid );
-
} // of structural plasticity
+ // If either the user or structural plasticity has modified the basis of connectivity,
+ // we need to re-build the connection infrastructure. At this point, all spikes from
+ // the previous time slice have been delivered, so we do not risk transmission via
+ // deleted connections. We don't need to do this in slice_ == 0, because prepare()
+ // has already taken care of it.
+ if ( slice_ > 0 and from_step_ == 0
+ and ( kernel().node_manager.have_nodes_changed() or kernel().connection_manager.connections_have_changed() ) )
+ {
+ update_connection_infrastructure( tid );
+ }
+
sw_update_.start();
const SparseNodeArray& thread_local_nodes = kernel().node_manager.get_local_nodes( tid );
diff --git a/nestkernel/sp_manager.cpp b/nestkernel/sp_manager.cpp
index 4705b7de64..3a1c358097 100644
--- a/nestkernel/sp_manager.cpp
+++ b/nestkernel/sp_manager.cpp
@@ -242,15 +242,6 @@ SPManager::disconnect( NodeCollectionPTR sources,
DictionaryDatum& conn_spec,
DictionaryDatum& syn_spec )
{
- if ( kernel().connection_manager.connections_have_changed() )
- {
-#pragma omp parallel
- {
- const size_t tid = kernel().vp_manager.get_thread_id();
- kernel().simulation_manager.update_connection_infrastructure( tid );
- }
- }
-
BipartiteConnBuilder* cb = nullptr;
conn_spec->clear_access_flags();
syn_spec->clear_access_flags();
diff --git a/testsuite/mpitests/test_connect_array_all_to_all_mpi.sli b/testsuite/mpitests/test_connect_array_all_to_all_mpi.sli
index 376d5d7ca7..53565c238d 100644
--- a/testsuite/mpitests/test_connect_array_all_to_all_mpi.sli
+++ b/testsuite/mpitests/test_connect_array_all_to_all_mpi.sli
@@ -37,6 +37,10 @@
SeeAlso: Connect
*/
+% NOTE: Test should be re-designed because the current design can lead to different numbers of GetConnections calls
+% on different ranks if N is not divisible by all process numbers.
+
+
(unittest) run
/unittest using
@@ -45,7 +49,7 @@ skip_if_not_threaded
[2 3] % check for 2, 3 processes
{
-/N 10 def % number of neurons in each population
+/N 12 def % number of neurons in each population
ResetKernel
@@ -72,6 +76,7 @@ ResetKernel
sources targets conn_dict syn_dict Connect % connects source to target
+
/pass true def % error control flag
/n_loc 0 def % used to check n. of neurons belonging to this MPI process
targets % loop through all target neurons
diff --git a/testsuite/mpitests/test_connect_array_fixed_indegree_mpi.sli b/testsuite/mpitests/test_connect_array_fixed_indegree_mpi.sli
index bd1c6f1998..709c89c816 100644
--- a/testsuite/mpitests/test_connect_array_fixed_indegree_mpi.sli
+++ b/testsuite/mpitests/test_connect_array_fixed_indegree_mpi.sli
@@ -37,6 +37,10 @@
SeeAlso: Connect
*/
+
+% NOTE: Test should be re-designed because the current design can lead to different numbers of GetConnections calls
+% on different ranks if N is not divisible by all process numbers.
+
(unittest) run
/unittest using
@@ -45,7 +49,7 @@ skip_if_not_threaded
[2 3] % check for 2, 3 processes
{
-/N 20 def % number of neurons in each population
+/N 24 def % number of neurons in each population
/K 5 def % number of connections per neuron
ResetKernel
diff --git a/testsuite/mpitests/test_connect_array_fixed_outdegree_mpi.sli b/testsuite/mpitests/test_connect_array_fixed_outdegree_mpi.sli
index 2f75219a38..2e4ae74005 100644
--- a/testsuite/mpitests/test_connect_array_fixed_outdegree_mpi.sli
+++ b/testsuite/mpitests/test_connect_array_fixed_outdegree_mpi.sli
@@ -37,6 +37,10 @@
SeeAlso: Connect
*/
+% NOTE: Test should be re-designed because the current design can lead to different numbers of GetConnections calls
+% on different ranks if N is not divisible by all process numbers.
+
+
(unittest) run
/unittest using
@@ -45,7 +49,7 @@ skip_if_not_threaded
[1 2 3] % check for 1, 2, 3 processes
{
- /N 20 def % number of neurons in each population
+ /N 24 def % number of neurons in each population
/K 5 def % number of connections per neuron
ResetKernel
diff --git a/testsuite/pytests/sli2py_regressions/test_issue_2629.py b/testsuite/pytests/sli2py_regressions/test_issue_2629.py
index dfc14e6b2d..6e968602d5 100644
--- a/testsuite/pytests/sli2py_regressions/test_issue_2629.py
+++ b/testsuite/pytests/sli2py_regressions/test_issue_2629.py
@@ -65,13 +65,10 @@ def test_dump_layer_connections_target_1(tmp_path, network):
fname_1 = tmp_path / "conns_1.txt"
nest.DumpLayerConnections(src_layer, tgt_layer_1, "static_synapse", fname_1)
- expected_dump_1 = [
- "1 3 1 1 0 0",
- "1 4 1 1 0.5 0",
- "2 3 1 1 -0.5 0",
- "2 4 1 1 0 0",
- ]
- actual_dump_1 = fname_1.read_text().splitlines()
+
+ # Ordering of lines is irrelevant, therefore compare sorted lists
+ expected_dump_1 = sorted(["1 3 1 1 0 0", "1 4 1 1 0.5 0", "2 3 1 1 -0.5 0", "2 4 1 1 0 0"])
+ actual_dump_1 = sorted(fname_1.read_text().splitlines())
assert actual_dump_1 == expected_dump_1
@@ -82,9 +79,8 @@ def test_dump_layer_connections_target_2(tmp_path, network):
fname_2 = tmp_path / "conns_2.txt"
nest.DumpLayerConnections(src_layer, tgt_layer_2, "static_synapse", fname_2)
- expected_dump_2 = [
- "1 5 1 1 0 0",
- "2 6 1 1 0 0",
- ]
- actual_dump_2 = fname_2.read_text().splitlines()
+
+ # Ordering of lines is irrelevant, therefore compare sorted lists
+ expected_dump_2 = sorted(["1 5 1 1 0 0", "2 6 1 1 0 0"])
+ actual_dump_2 = sorted(fname_2.read_text().splitlines())
assert actual_dump_2 == expected_dump_2
diff --git a/testsuite/pytests/test_connect_after_simulate.py b/testsuite/pytests/test_connect_after_simulate.py
index 99a9685373..b7e47b6aa0 100644
--- a/testsuite/pytests/test_connect_after_simulate.py
+++ b/testsuite/pytests/test_connect_after_simulate.py
@@ -29,6 +29,7 @@
import nest
import pytest
+from testutil import synapse_counts
@pytest.fixture(autouse=True)
@@ -48,11 +49,15 @@ def test_connections_before_after_simulate(use_compressed_spikes):
neurons = nest.Create("iaf_psc_alpha", 10)
nest.Connect(neurons, neurons, conn_spec={"rule": "fixed_indegree", "indegree": 2})
- connections_before = nest.GetConnections()
+ # SynapseCollections are considered equal if they contain the same (source, target, target_thread, syn_id)
+ # tuples, irrespective of order. We do not consider port number, because it can change due to compression.
+ # Therefore, we need to compare explicitly. Since Simulate may invalidate SynapseCollections, we need to
+ # extract data right before calling simulate.
+ connections_before = synapse_counts(nest.GetConnections())
nest.Simulate(1.0)
- connections_after = nest.GetConnections()
+ connections_after = synapse_counts(nest.GetConnections())
assert connections_before == connections_after
diff --git a/testsuite/pytests/test_get_connections.py b/testsuite/pytests/test_get_connections.py
index 06e67138e2..637940f430 100644
--- a/testsuite/pytests/test_get_connections.py
+++ b/testsuite/pytests/test_get_connections.py
@@ -31,6 +31,7 @@
import pandas as pd
import pandas.testing as pdtest
import pytest
+from testutil import expected_synapse_counts, synapse_counts
@pytest.fixture(autouse=True)
@@ -60,15 +61,10 @@ def test_get_connections_with_node_collection_step():
nodes = nest.Create("iaf_psc_alpha", 3)
nest.Connect(nodes, nodes)
- conns = nest.GetConnections(nodes[::2])
- actual_sources = conns.get("source")
- actual_targets = conns.get("target")
-
- expected_sources = [1, 1, 1, 3, 3, 3]
- expected_targets = [1, 2, 3, 1, 2, 3]
+ actual = synapse_counts(nest.GetConnections(nodes[::2]), criteria=["source", "target"])
+ expected = expected_synapse_counts(sources=[1, 1, 1, 3, 3, 3], targets=[1, 2, 3, 1, 2, 3])
- assert actual_sources == expected_sources
- assert actual_targets == expected_targets
+ assert actual == expected
def test_get_connections_with_sliced_node_collection():
@@ -77,11 +73,10 @@ def test_get_connections_with_sliced_node_collection():
nodes = nest.Create("iaf_psc_alpha", 11)
nest.Connect(nodes, nodes)
- conns = nest.GetConnections(nodes[1:9:3])
- actual_sources = conns.get("source")
+ actual = synapse_counts(nest.GetConnections(nodes[1:9:3]), criteria=["source"])
+ expected = expected_synapse_counts(sources=[2] * 11 + [5] * 11 + [8] * 11)
- expected_sources = [2] * 11 + [5] * 11 + [8] * 11
- assert actual_sources == expected_sources
+ assert actual == expected
def test_get_connections_with_sliced_node_collection_2():
@@ -91,11 +86,10 @@ def test_get_connections_with_sliced_node_collection_2():
nest.Connect(nodes, nodes)
# ([ 2 3 4 ] + [ 8 9 10 11 ])[::3] -> [2 8 11]
- conns = nest.GetConnections((nodes[1:4] + nodes[7:])[::3])
- actual_sources = conns.get("source")
+ actual = synapse_counts(nest.GetConnections((nodes[1:4] + nodes[7:])[::3]), criteria=["source"])
+ expected = expected_synapse_counts(sources=[2] * 11 + [8] * 11 + [11] * 11)
- expected_sources = [2] * 11 + [8] * 11 + [11] * 11
- assert actual_sources == expected_sources
+ assert actual == expected
def test_get_connections_bad_source_raises():
@@ -111,11 +105,17 @@ def test_get_connections_bad_source_raises():
def test_get_connections_correct_table_with_node_collection_step():
"""
Test that ``GetConnections`` table from ``NodeCollection`` sliced in step match expectations.
+
+ This test is for internal testing also of the synapse sorting process. It therefore requires
+ that the connection infrastructure has been updated before connections are read out.
"""
nodes = nest.Create("iaf_psc_alpha", 3)
nest.Connect(nodes, nodes)
+ # Force synapse sorting
+ nest.Simulate(0.1)
+
conns = nest.GetConnections(nodes[::2])
actual_row = [
conns.get("source")[3],
@@ -134,11 +134,17 @@ def test_get_connections_correct_table_with_node_collection_step():
def test_get_connections_correct_table_with_node_collection_index():
"""
Test that ``GetConnections`` table from ``NodeCollection`` index match expectations.
+
+ This test is for internal testing also of the synapse sorting process. It therefore requires
+ that the connection infrastructure has been updated before connections are read out.
"""
nodes = nest.Create("iaf_psc_alpha", 3)
nest.Connect(nodes, nodes)
+ # Force synapse sorting
+ nest.Simulate(0.1)
+
actual_conns = pd.DataFrame(
nest.GetConnections(nodes[0]).get(["source", "target", "target_thread", "synapse_id", "port"])
)
diff --git a/testsuite/pytests/test_issue_3489.py b/testsuite/pytests/test_issue_3489.py
new file mode 100644
index 0000000000..e256d47289
--- /dev/null
+++ b/testsuite/pytests/test_issue_3489.py
@@ -0,0 +1,55 @@
+# -*- coding: utf-8 -*-
+#
+# test_issue_3489.py
+#
+# This file is part of NEST.
+#
+# Copyright (C) 2004 The NEST Initiative
+#
+# NEST is free software: you can redistribute it and/or modify
+# it under the terms of the GNU General Public License as published by
+# the Free Software Foundation, either version 2 of the License, or
+# (at your option) any later version.
+#
+# NEST is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with NEST. If not, see <http://www.gnu.org/licenses/>.
+
+import nest
+import pytest
+
+
+@pytest.mark.parametrize("compressed", [True, False])
+@pytest.mark.parametrize("simtime", [10.9, 11, 11.1])
+def test_spike_delivered_before_connection_removed(compressed, simtime):
+ """
+ Test that spikes are properly transmitted before the connection infrastructure
+ is updated.
+
+ Simtime 10.9 means that Simulate() terminates before the test spike is delivered.
+ Since we then step in 0.1 ms steps, we ensure that connection updates happen only
+ after spikes have been delivered at slice boundaries.
+ """
+
+ nest.ResetKernel()
+ nest.use_compressed_spikes = compressed
+
+ nodes_e = nest.Create("parrot_neuron", 3)
+ sg = nest.Create("spike_generator", params={"spike_times": [9.7]})
+
+ nest.Connect(sg, nodes_e[1]) # only neuron with GID 2 gets input
+ nest.Connect(nodes_e[:2], nodes_e[2]) # neurons with GIDs 1 and 2 connect to 3
+
+ # Neuron 2 spikes at 10.7, spike is stored, but not yet delivered
+ # If we simulated here until 11.1, the spike would be delivered here and all is fine
+ nest.Simulate(simtime)
+
+ nest.Disconnect(nodes_e[0], nodes_e[2])
+
+ while nest.biological_time < 11.1:
+ nest.Simulate(0.1)  # This call will trigger delivery of the spike, but now the connection is gone -> segfault
diff --git a/testsuite/pytests/utilities/testutil.py b/testsuite/pytests/utilities/testutil.py
index 54a9e0dcdc..ec89438690 100644
--- a/testsuite/pytests/utilities/testutil.py
+++ b/testsuite/pytests/utilities/testutil.py
@@ -19,6 +19,7 @@
# You should have received a copy of the GNU General Public License
+# along with NEST. If not, see <http://www.gnu.org/licenses/>.
+import collections
import dataclasses
import sys
@@ -113,3 +114,34 @@ def default(d=attr):
def use_simulation(cls):
# If `mark` receives one argument that is a class, it decorates that arg.
return pytest.mark.simulation(cls, "")
+
+
+def synapse_counts(syn_collection, criteria=("source", "target", "target_thread", "synapse_id")):
+ """
+ Returns Counter object representing synapse collection.
+
+ This is useful for comparing whether two synapse collections contain the same connections
+ irrespective of ordering using source, target, target thread and synapse id as criteria.
+ By default, the "port" is ignored, since it is an implementation detail and affected by
+ synapse sorting.
+ """
+
+ return collections.Counter(zip(*syn_collection.get(criteria).values()))
+
+
+def expected_synapse_counts(sources, targets=None, target_threads=None, syn_ids=None, ports=None):
+ """
+ From given lists of sources, targets and optionally target threads, synapse ids or ports
+ create a Counter object that can be compared to the object returned by synapse_counts.
+ """
+
+ n = len(sources)
+ data = [sources]
+ for c in [targets, target_threads, syn_ids, ports]:
+ if c is None:
+ continue
+
+ assert len(c) == n
+ data.append(c)
+
+ return collections.Counter(zip(*data))