Support for using file transfer mode via coreneuron embedded module #927

Merged · 5 commits · Jan 18, 2021

5 changes: 5 additions & 0 deletions share/lib/python/neuron/coreneuron.py
@@ -2,6 +2,7 @@
# Properties settable by user
enable = False # Use CoreNEURON when calling ParallelContext.psolve(tstop).
gpu = False # Activate GPU computation.
file_mode = False # Run via file transfer mode instead of in-memory transfer
cell_permute = 1 # 0 no permutation; 1 optimize node adjacency
# 2 optimize parent node adjacency (only for gpu = True)
warp_balance = 0 # Number of warps to balance. (0 no balance)
@@ -38,8 +39,12 @@ def nrncore_arg(tstop):
if not enable:
return arg

# note that this name is also used in C++ file nrncore_write.cpp
CORENRN_DATA_DIR = "corenrn_data"

# args derived from user properties
arg += ' --gpu' if gpu else ''
arg += ' --datpath %s' % CORENRN_DATA_DIR if file_mode else ''
arg += ' --tstop %g' % tstop
arg += (' --cell-permute %d' % cell_permute) if cell_permute > 0 else ''
arg += (' --nwarp %d' % warp_balance) if warp_balance > 0 else ''
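
For orientation, here is a minimal usage sketch of the new file_mode flag as seen from user code; the cell setup is omitted and the example argument string is an illustrative assumption based on nrncore_arg() above, not captured output.

# Minimal sketch (assumes a model has already been built and stdrun is loaded):
from neuron import h, coreneuron

pc = h.ParallelContext()
coreneuron.enable = True      # route ParallelContext.psolve() through CoreNEURON
coreneuron.file_mode = True   # transfer the model via ./corenrn_data on disk

h.stdinit()
pc.psolve(h.tstop)

# With these settings nrncore_arg(h.tstop) yields roughly:
#   "... --datpath corenrn_data --tstop 100 --cell-permute 1"

When file_mode is False the --datpath argument is omitted and the model is passed to CoreNEURON in memory, as before.
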
50 changes: 43 additions & 7 deletions src/nrniv/nrncore_write.cpp
@@ -126,6 +126,8 @@ extern size_t nrncore_netpar_bytes();
extern short* nrn_is_artificial_;

int (*nrnpy_nrncore_enable_value_p_)();
int (*nrnpy_nrncore_file_mode_value_p_)();

char* (*nrnpy_nrncore_arg_p_)(double tstop);

CellGroup* cellgroups_;
@@ -140,23 +142,37 @@ bool corenrn_direct;
static size_t part1();
static void part2(const char*);

/// dump neuron model to given directory path
size_t write_corenrn_model(const std::string& path) {


// accessible from ParallelContext.total_bytes()
size_t nrnbbcore_write() {
// if writing to disk then in-memory mode is false
corenrn_direct = false;

// make sure model is ready to transfer
model_ready();
const std::string& path = get_write_path();

size_t rankbytes = part1(); // can arrange to be just before part2
// directory to write model
create_dir_path(path);

// calculate size of the model
size_t rankbytes = part1();

// mechanism and global variables
write_memb_mech_types(get_filename(path, "bbcore_mech.dat").c_str());
write_globals(get_filename(path, "globals.dat").c_str());

// write main model data
part2(path.c_str());

return rankbytes;
}

// accessible from ParallelContext.total_bytes()
size_t nrnbbcore_write() {
const std::string& path = get_write_path();
return write_corenrn_model(path);
}

static size_t part1() {
size_t rankbytes = 0;
if (!bbcore_dparam_size) {
@@ -282,13 +298,29 @@ int nrncore_is_enabled() {
return 0;
}

/** Return value of neuron.coreneuron.file_mode flag */
int nrncore_is_file_mode() {
if (nrnpy_nrncore_file_mode_value_p_) {
int result = (*nrnpy_nrncore_file_mode_value_p_)();
return result;
}
return 0;
}

/** Run coreneuron with arg string from neuron.coreneuron.nrncore_arg(tstop)
* Return 0 on success
*/
int nrncore_psolve(double tstop) {
*/
int nrncore_psolve(double tstop, int file_mode) {
if (nrnpy_nrncore_arg_p_) {
char* arg = (*nrnpy_nrncore_arg_p_)(tstop);
if (arg) {
// if file mode is requested then write the model to a directory;
// note that the CORENRN_DATA_DIR name is also used in the Python
// module coreneuron.py
if (file_mode) {
const char* CORENRN_DATA_DIR = "corenrn_data";
write_corenrn_model(CORENRN_DATA_DIR);
}
nrncore_run(arg);
// data return nt._t so copy to t
t = nrn_threads[0]._t;
@@ -309,6 +341,10 @@ int nrncore_is_enabled() {
return 0;
}

int nrncore_is_file_mode() {
return 0;
}

int nrncore_psolve(double tstop) {
return 0;
}
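
The refactor above factors the disk-dump logic into write_corenrn_model(), which both nrnbbcore_write() and the new file-mode branch of nrncore_psolve() now call. As a reminder of the pre-existing explicit-dump workflow that ends up in the same function, here is a rough sketch; the pc.nrnbbcore_write(...) entry point and the directory name are assumptions for illustration (get_write_path() reads the directory from its first argument) and are not introduced by this diff.

# Rough sketch of an explicit model dump (illustrative only):
from neuron import h
h.load_file("stdrun.hoc")

pc = h.ParallelContext()
# ... create sections, assign gids, add NetCons ...

h.cvode.cache_efficient(1)          # CoreNEURON-compatible data layout
h.stdinit()
pc.nrnbbcore_write("corenrn_data")  # writes bbcore_mech.dat, globals.dat, ...
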
2 changes: 1 addition & 1 deletion src/nrniv/nrncore_write.h
@@ -11,7 +11,7 @@ extern void nrncore_netpar_cellgroups_helper(CellGroup*);

int nrncore_run(const char* arg);
int nrncore_is_enabled();
int nrncore_psolve(double tstop);
int nrncore_psolve(double tstop, int file_mode);

#if defined(__cplusplus)
}
26 changes: 16 additions & 10 deletions src/nrniv/nrncore_write/io/nrncore_io.cpp
@@ -26,20 +26,26 @@ extern void (*nrnthread_v_transfer_)(NrnThread*);
int chkpnt;
const char *bbcore_write_version = "1.4"; // Generalize *_gap.dat to allow transfer of any range variable

/// create directory with given path
void create_dir_path(const std::string& path) {
// only one rank needs to create directory
if (nrnmpi_myid == 0) {
if (!isDirExist(path)) {
if (!makePath(path)) {
hoc_execerror(path.c_str(), "directory did not exist and makePath for it failed");
}
}
}
// the rest of the ranks should wait before continuing the simulation
#ifdef NRNMPI
nrnmpi_barrier();
#endif
}

std::string get_write_path(){
std::string path("."); // default path
if (ifarg(1)) {
path = hoc_gargstr(1);
if (nrnmpi_myid == 0) {
if (!isDirExist(path)) {
if (!makePath(path)) {
hoc_execerror(path.c_str(), "directory did not exist and makePath for it failed");
}
}
}
#ifdef NRNMPI
nrnmpi_barrier();
#endif
}
return path;
}
1 change: 1 addition & 0 deletions src/nrniv/nrncore_write/io/nrncore_io.h
@@ -9,6 +9,7 @@ class NrnThread;
union Datum;
class NrnMappingInfo;

void create_dir_path(const std::string& path);
std::string get_write_path();
std::string get_filename(const std::string &path, std::string file_name);

31 changes: 25 additions & 6 deletions src/nrnpython/nrnpy_hoc.cpp
@@ -2939,17 +2939,25 @@ static void sectionlist_helper_(void* sl, Object* args) {
}
}

/** value of neuron.coreneuron.enable as 0, 1 (-1 if error)
* TODO: seems like this could be generalized so that
* additional cases would require less code.
*/
/// value of neuron.coreneuron.enable as 0, 1 (-1 if error)
extern int (*nrnpy_nrncore_enable_value_p_)();
static int nrncore_enable_value() {

/// value of neuron.coreneuron.file_mode as 0, 1 (-1 if error)
extern int (*nrnpy_nrncore_file_mode_value_p_)();

/*
 * Helper function to inspect the value of an int/boolean option
 * under the coreneuron module.
*
* \todo : seems like this could be generalized so that
* additional cases would require less code.
*/
static int get_nrncore_opt_value(const char* option) {
PyObject* modules = PyImport_GetModuleDict();
if (modules) {
PyObject* module = PyDict_GetItemString(modules, "neuron.coreneuron");
if (module) {
PyObject* val = PyObject_GetAttrString(module, "enable");
PyObject* val = PyObject_GetAttrString(module, option);
if (val) {
long enable = PyLong_AsLong(val);
Py_DECREF(val);
Expand All @@ -2966,6 +2974,16 @@ static int nrncore_enable_value() {
return 0;
}

/// return value of neuron.coreneuron.enable
static int nrncore_enable_value() {
return get_nrncore_opt_value("enable");
}

/// return value of neuron.coreneuron.file_mode
static int nrncore_file_mode_value() {
return get_nrncore_opt_value("file_mode");
}

/** Gets the python string returned by neuron.coreneuron.nrncore_arg(tstop)
return a strdup() copy of the string which should be free when the caller
finishes with it. Return NULL if error or bool(neuron.coreneuron.enable)
@@ -3013,6 +3031,7 @@ myPyMODINIT_FUNC nrnpy_hoc() {
nrnpy_decref = nrnpy_decref_;
nrnpy_nrncore_arg_p_ = nrncore_arg;
nrnpy_nrncore_enable_value_p_ = nrncore_enable_value;
nrnpy_nrncore_file_mode_value_p_ = nrncore_file_mode_value;
nrnpy_object_to_double_ = object_to_double_;
nrnpy_rvp_rxd_to_callable = rvp_rxd_to_callable_;
PyLockGIL lock;
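
To make the Python/C++ contract concrete, the following is a rough Python rendering of what get_nrncore_opt_value() does through the C API (PyImport_GetModuleDict, PyObject_GetAttrString, PyLong_AsLong); it is an illustration, not code from this PR.

# Illustrative Python equivalent of the C++ helper:
import sys

def get_nrncore_opt_value(option):
    module = sys.modules.get("neuron.coreneuron")
    if module is None:
        return 0                                # module not imported: option off
    try:
        return int(getattr(module, option))     # e.g. "enable" or "file_mode"
    except (AttributeError, TypeError, ValueError):
        return -1                               # error value, as in the docstring
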
6 changes: 4 additions & 2 deletions src/parallel/ocbbs.cpp
@@ -74,7 +74,8 @@ extern "C" {
extern int nrncore_run(const char*);
extern bool nrn_trajectory_request_per_time_step_;
extern int nrncore_is_enabled();
extern int nrncore_psolve(double tstop);
extern int nrncore_is_file_mode();
extern int nrncore_psolve(double tstop, int file_mode);
}

class OcBBS : public BBS , public Resource {
@@ -666,8 +667,9 @@ static double psolve(void* v) {
OcBBS* bbs = (OcBBS*)v;
double tstop = chkarg(1, t, 1e9);
int enabled = nrncore_is_enabled();
int file_mode = nrncore_is_file_mode();
if (enabled == 1) {
nrncore_psolve(tstop);
nrncore_psolve(tstop, file_mode);
}else if (enabled == 0) {
// Classic case
bbs->netpar_solve(tstop);
18 changes: 14 additions & 4 deletions test/CMakeLists.txt
@@ -119,7 +119,7 @@ if(NRN_ENABLE_PYTHON AND PYTEST_FOUND)
endif()
if (NRN_ENABLE_MPI)
# Launching mpi executable with full path can mangle different python versions and libraries(see issue #894)
# Therefore we extract the executable name and create a dedicated target, because add_test($MPIEXEC_NAME) would re-append the full path.
# Therefore we extract the executable name and create a dedicated target, because add_test($MPIEXEC_NAME) would re-append the full path.
get_filename_component(MPIEXEC_NAME ${MPIEXEC} NAME)
add_custom_target(test_subworld_mpiexec_name
COMMAND
@@ -152,6 +152,10 @@ if(NRN_ENABLE_PYTHON AND PYTEST_FOUND)
add_test(NAME coreneuron_spikes_py COMMAND ${PYTHON_EXECUTABLE}
${PROJECT_SOURCE_DIR}/test/coreneuron/test_spikes.py
WORKING_DIRECTORY ${PROJECT_BINARY_DIR}/test/mod)
add_test(NAME coreneuron_spikes_file_mode_py COMMAND ${PYTHON_EXECUTABLE}
${PROJECT_SOURCE_DIR}/test/coreneuron/test_spikes.py
file_mode
WORKING_DIRECTORY ${PROJECT_BINARY_DIR}/test/mod)
add_test(NAME coreneuron_fornetcon_py COMMAND ${PYTHON_EXECUTABLE}
${PROJECT_SOURCE_DIR}/test/coreneuron/test_fornetcon.py
WORKING_DIRECTORY ${PROJECT_BINARY_DIR}/test/mod)
@@ -165,18 +169,24 @@ if(NRN_ENABLE_PYTHON AND PYTEST_FOUND)
${PROJECT_SOURCE_DIR}/test/gjtests/test_natrans.py
WORKING_DIRECTORY ${PROJECT_BINARY_DIR}/test/mod)
list(APPEND TESTS coreneuron_direct_py coreneuron_direct_hoc
coreneuron_spikes_py coreneuron_fornetcon_py
coreneuron_spikes_py coreneuron_spikes_file_mode_py coreneuron_fornetcon_py
coreneuron_datareturn_py coreneuron_test_units_py
coreneuron_test_natrans_py)
if(NRN_ENABLE_MPI)
find_python_module(mpi4py)
if(mpi4py_FOUND)
add_test(
NAME coreneuron_mpi_spikes_py COMMAND ${MPIEXEC} -np 2 ${PYTHON_EXECUTABLE}
NAME coreneuron_spikes_mpi_py COMMAND ${MPIEXEC} -np 2 ${PYTHON_EXECUTABLE}
${PROJECT_SOURCE_DIR}/test/coreneuron/test_spikes.py mpi4py
WORKING_DIRECTORY ${PROJECT_BINARY_DIR}/test/mod)
list(APPEND TESTS coreneuron_mpi_spikes_py)
list(APPEND TESTS coreneuron_spikes_mpi_py)
endif()
# using -pyexe to work around #894
add_test(
NAME coreneuron_spikes_mpi_file_mode_py COMMAND ${MPIEXEC} -np 2 ${CMAKE_HOST_SYSTEM_PROCESSOR}/special
-python -pyexe ${PYTHON_EXECUTABLE} ${PROJECT_SOURCE_DIR}/test/coreneuron/test_spikes.py nrnmpi_init file_mode
WORKING_DIRECTORY ${PROJECT_BINARY_DIR}/test/mod)
list(APPEND TESTS coreneuron_spikes_mpi_file_mode_py)
endif()
endif()
endif()
39 changes: 25 additions & 14 deletions test/coreneuron/test_spikes.py
@@ -1,11 +1,20 @@
import pytest
import sys

from neuron import h, gui

def test_spikes(use_mpi4py=False):
def test_spikes(use_mpi4py=False, use_nrnmpi_init=False, file_mode=False):
# mpi4py needs to be imported before importing h
if use_mpi4py:
from mpi4py import MPI
from neuron import h, gui
# without mpi4py we need to call nrnmpi_init explicitly
elif use_nrnmpi_init:
from neuron import h, gui
h.nrnmpi_init()
# otherwise serial execution
else:
from neuron import h, gui

h('''create soma''')
h.soma.L=5.6419
h.soma.diam=5.6419
@@ -26,47 +26,49 @@ def test_spikes(use_mpi4py=False):
h.cvode.cache_efficient(1)

pc = h.ParallelContext()

pc.set_gid2node(pc.id()+1, pc.id())
myobj = h.NetCon(h.soma(0.5)._ref_v, None, sec=h.soma)
pc.cell(pc.id()+1, myobj)


# NEURON spikes run
# NEURON run
nrn_spike_t = h.Vector()
nrn_spike_gids = h.Vector()
pc.spike_record(-1, nrn_spike_t, nrn_spike_gids)

h.run()

nrn_spike_t = nrn_spike_t.to_python()
nrn_spike_gids = nrn_spike_gids.to_python()

# CORENEURON spike_record(-1) / spike_record(gidlist):
# CORENEURON run
from neuron import coreneuron
coreneuron.enable = True
coreneuron.file_mode = file_mode
coreneuron.verbose = 0
h.stdinit()
corenrn_all_spike_t = h.Vector()
corenrn_all_spike_gids = h.Vector()

pc.spike_record( -1 if pc.id() == 0 else (pc.id()),
                 corenrn_all_spike_t,
                 corenrn_all_spike_gids )

[Review comment from a project member] By removing this we are no longer testing the 2nd case, where we iterate over PreSyns and match up vectors.

pc.spike_record(-1, corenrn_all_spike_t, corenrn_all_spike_gids )
pc.psolve(h.tstop)

corenrn_all_spike_t = corenrn_all_spike_t.to_python()
corenrn_all_spike_gids = corenrn_all_spike_gids.to_python()


# check spikes match
assert(len(nrn_spike_t)) # check we've actually got spikes
assert(len(nrn_spike_t) == len(nrn_spike_gids)); # matching no. of gids
assert(nrn_spike_t == corenrn_all_spike_t)
assert(nrn_spike_gids == corenrn_all_spike_gids)

h.quit()


if __name__ == "__main__":
test_spikes('mpi4py' in sys.argv)
# simple CLI argument handling
mpi4py_option = 'mpi4py' in sys.argv
file_mode_option = 'file_mode' in sys.argv
nrnmpi_init_option = 'nrnmpi_init' in sys.argv

test_spikes(mpi4py_option, nrnmpi_init_option, file_mode_option)
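
For completeness, the test variants registered in test/CMakeLists.txt map onto this dispatch as sketched below; the direct function call is illustrative (it assumes the test directory is on the Python path, and the script calls h.quit() at the end, so run one variant per process), and the command lines simply restate the CMake invocations above.

# Illustrative direct invocation (serial, file transfer mode):
from test_spikes import test_spikes
test_spikes(use_mpi4py=False, use_nrnmpi_init=False, file_mode=True)

# CLI equivalents registered by CMake:
#   python test_spikes.py                        # serial, in-memory transfer
#   python test_spikes.py file_mode              # serial, file transfer mode
#   mpiexec -np 2 python test_spikes.py mpi4py   # MPI through mpi4py
#   mpiexec -np 2 special -python -pyexe <python> test_spikes.py nrnmpi_init file_mode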