1 change: 1 addition & 0 deletions .gitlab-ci.yml
@@ -49,6 +49,7 @@ spack_setup:
.gpu_node:
variables:
bb5_constraint: volta
bb5_ntasks: 16
.test_neuron:
extends: [.ctest]
stage: test_neuron
1 change: 1 addition & 0 deletions coreneuron/apps/main1.cpp
@@ -470,6 +470,7 @@ static void* load_dynamic_mpi(const std::string& libname) {
extern "C" void mk_mech_init(int argc, char** argv) {
// reset all parameters to their default values
corenrn_param.reset();

// read command line parameters and parameter config files
corenrn_param.parse(argc, argv);

11 changes: 11 additions & 0 deletions coreneuron/io/core2nrn_data_return.cpp
@@ -170,6 +170,14 @@ static void core2nrn_corepointer(int tid, NrnThreadMembList* tml) {
*/
static void core2nrn_tqueue(NrnThread&);

/** @brief Callback to clear NEURON thread queues.
In particular, bin queues need to be initialized to the current time
before transferring events.
*/
extern "C" {
void (*core2nrn_clear_queues_)(double t);
}

/** @brief All activated WATCH statements need activation on NEURON side.
*/
// vector in unpermuted Memb_list index order of vector of
@@ -200,6 +208,9 @@ void core2nrn_data_return() {
if (!nrn2core_type_return_) {
return;
}

(*core2nrn_clear_queues_)(nrn_threads[0]._t); // all threads at same time

for (int tid = 0; tid < nrn_nthread; ++tid) {
size_t n = 0;
double* data = nullptr;
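Side note on the callback added above: core2nrn_clear_queues_ is an extern "C" function pointer that the NEURON side fills in, and which core2nrn_data_return invokes before transferring events back. A minimal, self-contained sketch of that registration pattern follows; every name other than the pattern itself is a placeholder, not NEURON's actual API.

```cpp
#include <cstdio>

// "library" side: an extern "C" function pointer that the host assigns.
extern "C" {
void (*clear_queues_cb)(double t) = nullptr;
}

// "host" side: the implementation registered into the pointer.
extern "C" void host_clear_queues(double t) {
    std::printf("clear thread queues, re-init bin queues at t = %g\n", t);
}

int main() {
    clear_queues_cb = host_clear_queues;  // registration, e.g. at library load
    if (clear_queues_cb) {
        (*clear_queues_cb)(0.025);        // library invokes it before the transfer
    }
    return 0;
}
```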
2 changes: 2 additions & 0 deletions coreneuron/io/nrn2core_data_init.cpp
@@ -51,6 +51,8 @@ void direct_mode_initialize() {
dt2thread(-1.);
nrn_thread_table_check();

clear_event_queue();

// Reproduce present NEURON WATCH activation
// Start from nothing active.
watch_activate_clear();
5 changes: 4 additions & 1 deletion coreneuron/io/nrn_setup.cpp
@@ -180,7 +180,7 @@ void nrn_read_filesdat(int& ngrp, int*& grp, const char* filesdat) {
FILE* fp = fopen(filesdat, "r");

if (!fp) {
nrn_fatal_error("No input file with nrnthreads, exiting...");
nrn_fatal_error("No input file ( %s ) with nrnthreads, exiting...", filesdat);
}

char version[256];
@@ -710,6 +710,9 @@ void nrn_cleanup_ion_map() {

void nrn_cleanup() {
clear_event_queue(); // delete left-over TQItem
for (auto psi: gid2in) {
delete psi.second;
}
gid2in.clear();
gid2out.clear();

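Note on the nrn_cleanup change above: gid2in maps gids to heap-allocated objects, so clear() alone would drop the pointers and leak the objects. A small stand-alone sketch of the idiom, with placeholder types rather than CoreNEURON's:

```cpp
#include <map>

struct InputPreSynLike {   // placeholder for the mapped type
    int gid{};
};

int main() {
    std::map<int, InputPreSynLike*> gid2in;
    gid2in[1] = new InputPreSynLike{1};
    gid2in[7] = new InputPreSynLike{7};

    // delete each owned value before clear(); clear() alone would leak them
    for (auto psi: gid2in) {
        delete psi.second;
    }
    gid2in.clear();
    return 0;
}
```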
7 changes: 4 additions & 3 deletions coreneuron/mpi/lib/mpispike.cpp
@@ -24,7 +24,7 @@ extern MPI_Comm nrnmpi_comm;

static int np;
static int* displs{nullptr};
static int* byteovfl; /* for the compressed transfer method */
static int* byteovfl{nullptr}; /* for the compressed transfer method */
static MPI_Datatype spike_type;

static void* emalloc(size_t size) {
@@ -175,7 +175,7 @@ The allgather sends the first part of the buf and the allgatherv buffer
sends any overflow.
*/
int nrnmpi_spike_exchange_compressed_impl(int localgid_size,
unsigned char* spfixin_ovfl,
unsigned char*& spfixin_ovfl,
int send_nspike,
int* nin,
int ovfl_capacity,
@@ -187,9 +187,10 @@ int nrnmpi_spike_exchange_compressed_impl(int localgid_size,
np = nrnmpi_numprocs_;
displs = (int*) emalloc(np * sizeof(int));
displs[0] = 0;
}
if (!byteovfl) {
byteovfl = (int*) emalloc(np * sizeof(int));
}

MPI_Allgather(
spikeout_fixed, ag_send_size, MPI_BYTE, spikein_fixed, ag_send_size, MPI_BYTE, nrnmpi_comm);
int novfl = 0;
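Note on the signature change above: passing spfixin_ovfl as unsigned char*& (rather than unsigned char*) lets the exchange routine grow the overflow buffer while the caller keeps seeing the new allocation, and byteovfl now gets its own null check so it is allocated even when displs already exists. A generic sketch of the pass-pointer-by-reference idiom, with illustrative names only:

```cpp
#include <cstdlib>
#include <cstring>

// Grow a caller-owned buffer in place; the caller's pointer is updated
// because it is passed by reference. (Error handling omitted for brevity.)
static void ensure_capacity(unsigned char*& buf, size_t& capacity, size_t needed) {
    if (needed > capacity) {
        buf = static_cast<unsigned char*>(std::realloc(buf, needed));
        capacity = needed;
    }
}

int main() {
    size_t capacity = 64;
    auto* ovfl = static_cast<unsigned char*>(std::malloc(capacity));
    ensure_capacity(ovfl, capacity, 256);  // ovfl now points at the larger block
    std::memset(ovfl, 0, capacity);
    std::free(ovfl);
    return 0;
}
```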
7 changes: 7 additions & 0 deletions coreneuron/mpi/lib/nrnmpi.cpp
@@ -35,6 +35,12 @@ static void nrn_fatal_error(const char* msg) {
}

nrnmpi_init_ret_t nrnmpi_init_impl(int* pargc, char*** pargv, bool is_quiet) {
// Execute at most once per launch. Avoid memory leak.
static bool executed = false;
if (executed) {
return {nrnmpi_numprocs_, nrnmpi_myid_};
}

nrnmpi_under_nrncontrol_ = true;

if (!nrnmpi_initialized_impl()) {
@@ -62,6 +68,7 @@ nrnmpi_init_ret_t nrnmpi_init_impl(int* pargc, char*** pargv, bool is_quiet) {
#endif
}

executed = true;
return {nrnmpi_numprocs_, nrnmpi_myid_};
}

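Note on the guard added to nrnmpi_init_impl above: it is the usual run-at-most-once idiom, returning the cached rank/size on repeated calls instead of re-initializing (and leaking). A generic stand-alone sketch, with placeholder names:

```cpp
#include <iostream>

struct InitResult {          // placeholder for nrnmpi_init_ret_t
    int numprocs;
    int myid;
};

static InitResult init_once() {
    static bool executed = false;
    static InitResult cached{1, 0};
    if (executed) {
        return cached;       // later calls are no-ops
    }
    cached = InitResult{4, 0};  // stand-in for the real MPI initialization
    executed = true;
    return cached;
}

int main() {
    std::cout << init_once().numprocs << "\n";
    std::cout << init_once().numprocs << "\n";  // reuses the cached result
    return 0;
}
```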
2 changes: 1 addition & 1 deletion coreneuron/mpi/nrnmpidec.h
@@ -37,7 +37,7 @@ extern mpi_function<cnrn_make_integral_constant_t(nrnmpi_write_file_impl)> nrnmp
extern "C" int nrnmpi_spike_exchange_impl(int* nin, NRNMPI_Spike* spikeout, int icapacity, NRNMPI_Spike** spikein, int& ovfl, int nout, NRNMPI_Spikebuf* spbufout, NRNMPI_Spikebuf* spbufin);
extern mpi_function<cnrn_make_integral_constant_t(nrnmpi_spike_exchange_impl)>
nrnmpi_spike_exchange;
extern "C" int nrnmpi_spike_exchange_compressed_impl(int, unsigned char*, int, int*, int, unsigned char*, int, unsigned char*, int& ovfl);
extern "C" int nrnmpi_spike_exchange_compressed_impl(int, unsigned char*&, int, int*, int, unsigned char*, int, unsigned char*, int& ovfl);
extern mpi_function<cnrn_make_integral_constant_t(nrnmpi_spike_exchange_compressed_impl)>
nrnmpi_spike_exchange_compressed;
extern "C" int nrnmpi_int_allmax_impl(int i);
11 changes: 10 additions & 1 deletion coreneuron/network/multisend_setup.cpp
@@ -224,7 +224,7 @@ void TarList::alloc() {

// for two phase

static nrnran123_State* ranstate;
static nrnran123_State* ranstate{nullptr};

static void random_init(int i) {
if (!ranstate) {
@@ -236,6 +236,14 @@ static unsigned int get_random() {
return nrnran123_ipick(ranstate);
}

// Avoid warnings if the global index is changed on subsequent psolve.
static void random_delete() {
if (ranstate) {
nrnran123_deletestream(ranstate);
ranstate = nullptr;
}
}

static int iran(int i1, int i2) {
// discrete uniform random integer from i1 to i2 inclusive. Must
// work if i1 == i2
@@ -575,6 +583,7 @@ static std::vector<int> setup_target_lists(bool use_phase2) {
phase2organize(tl);
}
}
random_delete();
}

// For clarity, use the all2allv_int style of information flow
8 changes: 6 additions & 2 deletions coreneuron/network/netcvode.cpp
@@ -135,6 +135,10 @@ void NetCvodeThreadData::interthread_send(double td, DiscreteEvent* db, NrnThrea
inter_thread_events_.emplace_back(InterThreadEvent{db, td});
}

void interthread_enqueue(NrnThread* nt) {
net_cvode_instance->p[nt->id].enqueue(net_cvode_instance, nt);
}

void NetCvodeThreadData::enqueue(NetCvode* nc, NrnThread* nt) {
std::lock_guard<OMP_Mutex> lock(mut);
for (const auto& ite: inter_thread_events_) {
@@ -229,14 +233,14 @@ void NetCvode::clear_events() {
d.unreffed_event_cnt_ = 0;
d.inter_thread_events_.clear();
d.tqe_->nshift_ = -1;
d.tqe_->shift_bin(nrn_threads->_t);
d.tqe_->shift_bin(nrn_threads->_t - 0.5 * nrn_threads->_dt);
Collaborator Author: There is a similar statement in the following function I did not change. A specific test would have to be written to see if there could be an issue with file mode demonstrating the need for an equivalent change.

Collaborator Author: The corresponding change was made in NetCvode::init_events. In retrospect, it is likely that BinQ is not supported for --restore since there is no mention of shift_bin in the nrn_checkpoint files. (Analogous to shift_bin in nrn/src/nrniv/bbsavestate.cpp)

}
}

void NetCvode::init_events() {
for (int i = 0; i < nrn_nthread; ++i) {
p[i].tqe_->nshift_ = -1;
p[i].tqe_->shift_bin(nrn_threads->_t);
p[i].tqe_->shift_bin(nrn_threads->_t - 0.5 * nrn_threads->_dt);
}

for (int tid = 0; tid < nrn_nthread; ++tid) { // can be done in parallel
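One plausible illustration of why the bin queue is now shifted to t - 0.5*dt rather than t (this is an integer-binning analogue, not CoreNEURON's exact BinQ arithmetic): an event transferred at exactly the current time can land in bin -1 after floating-point roundoff when the shift equals t, but stays in bin 0 when the shift is half a step earlier.

```cpp
#include <cmath>
#include <cstdio>

// Illustrative bin index only; not CoreNEURON's BinQ code.
static int bin_index(double tevent, double tshift, double dt) {
    return static_cast<int>(std::floor((tevent - tshift) / dt));
}

int main() {
    double dt = 0.025, t = 1.0;
    double tevent = std::nextafter(t, 0.0);  // t minus one ulp, mimicking roundoff
    std::printf("shift at t        -> bin %d\n", bin_index(tevent, t, dt));
    std::printf("shift at t - dt/2 -> bin %d\n", bin_index(tevent, t - 0.5 * dt, dt));
    return 0;
}
```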
1 change: 1 addition & 0 deletions coreneuron/network/netcvode.hpp
@@ -37,6 +37,7 @@ class DiscreteEvent;
class NetCvode;

extern NetCvode* net_cvode_instance;
extern void interthread_enqueue(NrnThread*);

struct InterThreadEvent {
DiscreteEvent* de_;
13 changes: 12 additions & 1 deletion coreneuron/network/netpar.cpp
@@ -286,7 +286,10 @@ void nrn_spike_exchange_init() {
t_exchange_ = t;
dt1_ = rev_dt;
usable_mindelay_ = floor(mindelay_ * dt1_ + 1e-9) * dt;
assert(usable_mindelay_ >= dt && (usable_mindelay_ * dt1_) < 255);
if (usable_mindelay_ * dt1_ >= 255.) {
usable_mindelay_ = 255. / dt1_;
}
assert(usable_mindelay_ >= dt && (usable_mindelay_ * dt1_) <= 255.);
} else {
#if nrn_spikebuf_size > 0
if (spbufout) {
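Worked example of the clamp added above: usable_mindelay_, measured in dt steps, has to stay within 255, presumably because the fixed-size/compressed spike exchange encodes the delivery offset in a single byte; instead of tripping the assert, overly long mindelays are now clamped. Values below are illustrative only:

```cpp
#include <cassert>
#include <cmath>
#include <cstdio>

int main() {
    double dt = 0.025;
    double dt1 = 1.0 / dt;
    double mindelay = 10.0;  // ms; 10 / 0.025 = 400 steps would overflow a byte

    double usable_mindelay = std::floor(mindelay * dt1 + 1e-9) * dt;
    if (usable_mindelay * dt1 >= 255.) {
        usable_mindelay = 255. / dt1;  // clamp: 255 steps = 6.375 ms here
    }
    assert(usable_mindelay >= dt && usable_mindelay * dt1 <= 255.);
    std::printf("usable_mindelay = %g ms (%g steps)\n",
                usable_mindelay, usable_mindelay * dt1);
    return 0;
}
```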
@@ -366,6 +369,7 @@ void nrn_spike_exchange(NrnThread* nt) {
ps->send(spikein[i].spiketime, net_cvode_instance, nt);
}
}
nrn_multithread_job(interthread_enqueue);
Collaborator Author: I'm not sure this change is needed. But it can't hurt.

wt1_ = nrn_wtime() - wt;
}

@@ -482,6 +486,12 @@ void nrn_spike_exchange_compressed(NrnThread* nt) {
}
}
}
// In case of multiple threads, some of the above ps->send events put
// NetCon events into interthread buffers. Some of those may need
// to be delivered early enough that the interthread buffers must be
// transferred to the thread event queues before the next dqueue_bin
// while loop in deliver_net_events. So enqueue now...
nrn_multithread_job(interthread_enqueue);
t_exchange_ = nrn_threads->_t;
wt1_ = nrn_wtime() - wt;
}
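To make the comment above concrete: each thread's NetCvodeThreadData keeps a mutex-protected buffer that other threads append to with interthread_send, and enqueue (called via nrn_multithread_job(interthread_enqueue)) drains that buffer onto the owning thread's event queue. A simplified stand-alone sketch, with placeholder types rather than CoreNEURON's:

```cpp
#include <functional>
#include <mutex>
#include <queue>
#include <vector>

struct Event {               // placeholder for InterThreadEvent
    double t;
};

struct ThreadData {          // placeholder for NetCvodeThreadData
    std::mutex mut;
    std::vector<Event> inter_thread_events;  // appended to by other threads
    std::priority_queue<double, std::vector<double>, std::greater<double>> tqe;

    void interthread_send(double td) {
        std::lock_guard<std::mutex> lock(mut);
        inter_thread_events.push_back(Event{td});
    }
    void enqueue() {         // run by the owning thread before delivery
        std::lock_guard<std::mutex> lock(mut);
        for (const auto& ev: inter_thread_events) {
            tqe.push(ev.t);
        }
        inter_thread_events.clear();
    }
};

int main() {
    ThreadData d;
    d.interthread_send(1.5);  // e.g. from a ps->send on another thread
    d.enqueue();              // drain before the deliver_net_events loop runs
    return 0;
}
```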
@@ -606,6 +616,7 @@ void BBS_netpar_solve(double tstop) {
}

nrn_timeout(timeout_);
nrn_multithread_job(interthread_enqueue);
ncs2nrn_integrate(tstop * (1. + 1e-11));
nrn_spike_exchange(nrn_threads);
nrn_timeout(0);