Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

References for 'https://github.com/BlueBrain/nmodl/pull/1260'. #18

Closed
wants to merge 5 commits into from
Closed
Show file tree
Hide file tree
Changes from 1 commit
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
10 changes: 7 additions & 3 deletions hodgkin_huxley/neuron/hodhux.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -447,7 +447,11 @@ namespace neuron {
}


inline double nrn_current_hodhux(size_t id, hodhux_Instance& inst, hodhux_NodeData& node_data, double v) {
inline double nrn_current_hodhux(_nrn_model_sorted_token const& _sorted_token, NrnThread* _nt, Memb_list* _ml_arg, int _type, size_t id, hodhux_Instance& inst, hodhux_NodeData& node_data, double v) {
_nrn_mechanism_cache_range _lmr{_sorted_token, *_nt, *_ml_arg, _type};
auto* const _ml = &_lmr;
auto* _thread = _ml_arg->_thread;
auto* _ppvar = _ml_arg->pdata[id];
double current = 0.0;
inst.ina[id] = inst.gnabar[id] * inst.m[id] * inst.m[id] * inst.m[id] * inst.h[id] * (v - inst.ena[id]);
inst.ik[id] = inst.gkbar[id] * inst.n[id] * inst.n[id] * inst.n[id] * inst.n[id] * (v - inst.ek[id]);
Expand All @@ -472,10 +476,10 @@ namespace neuron {
double v = node_data.node_voltages[node_id];
inst.ena[id] = (*inst.ion_ena[id]);
inst.ek[id] = (*inst.ion_ek[id]);
double I1 = nrn_current_hodhux(id, inst, node_data, v+0.001);
double I1 = nrn_current_hodhux(_sorted_token, _nt, _ml_arg, _type, id, inst, node_data, v+0.001);
double dina = inst.ina[id];
double dik = inst.ik[id];
double I0 = nrn_current_hodhux(id, inst, node_data, v);
double I0 = nrn_current_hodhux(_sorted_token, _nt, _ml_arg, _type, id, inst, node_data, v);
double rhs = I0;
double g = (I1-I0)/0.001;
(*inst.ion_dinadv[id]) += (dina-inst.ina[id])/0.001;
Expand Down
10 changes: 7 additions & 3 deletions net_receive/neuron/snapsyn.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -220,7 +220,11 @@ namespace neuron {
}


inline double nrn_current_SnapSyn(size_t id, SnapSyn_Instance& inst, SnapSyn_NodeData& node_data, double v) {
inline double nrn_current_SnapSyn(_nrn_model_sorted_token const& _sorted_token, NrnThread* _nt, Memb_list* _ml_arg, int _type, size_t id, SnapSyn_Instance& inst, SnapSyn_NodeData& node_data, double v) {
_nrn_mechanism_cache_range _lmr{_sorted_token, *_nt, *_ml_arg, _type};
auto* const _ml = &_lmr;
auto* _thread = _ml_arg->_thread;
auto* _ppvar = _ml_arg->pdata[id];
double current = 0.0;
inst.i[id] = inst.g[id] * (v - inst.e[id]);
current += inst.i[id];
Expand All @@ -239,8 +243,8 @@ namespace neuron {
for (int id = 0; id < nodecount; id++) {
int node_id = node_data.nodeindices[id];
double v = node_data.node_voltages[node_id];
double I1 = nrn_current_SnapSyn(id, inst, node_data, v+0.001);
double I0 = nrn_current_SnapSyn(id, inst, node_data, v);
double I1 = nrn_current_SnapSyn(_sorted_token, _nt, _ml_arg, _type, id, inst, node_data, v+0.001);
double I0 = nrn_current_SnapSyn(_sorted_token, _nt, _ml_arg, _type, id, inst, node_data, v);
double rhs = I0;
double g = (I1-I0)/0.001;
double mfactor = 1.e2/(*inst.node_area[id]);
Expand Down
26 changes: 26 additions & 0 deletions nonspecific_current/coreneuron/leonhard.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -210,6 +210,29 @@ namespace coreneuron {
}


inline int func_leonhard(int id, int pnodecount, leonhard_Instance* inst, double* data, const Datum* indexes, ThreadDatum* thread, NrnThread* nt, double v);
inline int func_with_v_leonhard(int id, int pnodecount, leonhard_Instance* inst, double* data, const Datum* indexes, ThreadDatum* thread, NrnThread* nt, double v, double arg_v);
inline int func_with_other_leonhard(int id, int pnodecount, leonhard_Instance* inst, double* data, const Datum* indexes, ThreadDatum* thread, NrnThread* nt, double v, double q);


inline int func_leonhard(int id, int pnodecount, leonhard_Instance* inst, double* data, const Datum* indexes, ThreadDatum* thread, NrnThread* nt, double v) {
    // Generated from FUNCTION func() in the MOD file: the body is empty,
    // so the call always reports the default result (0).
    return 0;
}


inline int func_with_v_leonhard(int id, int pnodecount, leonhard_Instance* inst, double* data, const Datum* indexes, ThreadDatum* thread, NrnThread* nt, double v, double arg_v) {
    // Generated from FUNCTION func_with_v() in the MOD file: empty body,
    // `arg_v` is accepted but unused, and the default result (0) is returned.
    return 0;
}


inline int func_with_other_leonhard(int id, int pnodecount, leonhard_Instance* inst, double* data, const Datum* indexes, ThreadDatum* thread, NrnThread* nt, double v, double q) {
    // Generated from FUNCTION func_with_other() in the MOD file: empty body,
    // the extra argument `q` is accepted but unused; returns the default 0.
    return 0;
}


/** initialize channel */
void nrn_init_leonhard(NrnThread* nt, Memb_list* ml, int type) {
int nodecount = ml->nodecount;
Expand Down Expand Up @@ -239,6 +262,9 @@ namespace coreneuron {

inline double nrn_current_leonhard(int id, int pnodecount, leonhard_Instance* inst, double* data, const Datum* indexes, ThreadDatum* thread, NrnThread* nt, double v) {
double current = 0.0;
func_leonhard(id, pnodecount, inst, data, indexes, thread, nt, v);
func_with_v_leonhard(id, pnodecount, inst, data, indexes, thread, nt, v, v);
func_with_other_leonhard(id, pnodecount, inst, data, indexes, thread, nt, v, inst->c[id]);
inst->il[id] = inst->c[id] * (v - 1.5);
current += inst->il[id];
return current;
Expand Down
147 changes: 144 additions & 3 deletions nonspecific_current/neuron/leonhard.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -147,6 +147,9 @@ namespace neuron {
hoc_retpushx(1.);
}
/* Mechanism procedures and functions */
inline int func_leonhard(_nrn_mechanism_cache_range* _ml, leonhard_Instance& inst, size_t id, Datum* _ppvar, Datum* _thread, NrnThread* _nt);
inline int func_with_v_leonhard(_nrn_mechanism_cache_range* _ml, leonhard_Instance& inst, size_t id, Datum* _ppvar, Datum* _thread, NrnThread* _nt, double v);
inline int func_with_other_leonhard(_nrn_mechanism_cache_range* _ml, leonhard_Instance& inst, size_t id, Datum* _ppvar, Datum* _thread, NrnThread* _nt, double q);


/** connect global (scalar) variables to hoc -- */
Expand All @@ -162,15 +165,146 @@ namespace neuron {


/* declaration of user functions */
static void _hoc_func(void);
static void _hoc_func_with_v(void);
static void _hoc_func_with_other(void);
static double _npy_func(Prop*);
static double _npy_func_with_v(Prop*);
static double _npy_func_with_other(Prop*);


/* connect user functions to hoc names */
// Table binding HOC-visible names (mechanism-suffixed) to their C wrapper
// functions; the {0, 0} entry terminates the list.
static VoidFunc hoc_intfunc[] = {
{"setdata_leonhard", _hoc_setdata},
{"func_leonhard", _hoc_func},
{"func_with_v_leonhard", _hoc_func_with_v},
{"func_with_other_leonhard", _hoc_func_with_other},
{0, 0}
};
// Table for Python direct calls: names are unsuffixed (unlike hoc_intfunc
// above) and each maps to a Prop*-taking wrapper.
// NOTE(review): this table has no {0, 0} terminator entry, unlike
// hoc_intfunc — confirm the consumer relies on the array length instead.
static NPyDirectMechFunc npy_direct_func_proc[] = {
{"func", _npy_func},
{"func_with_v", _npy_func_with_v},
{"func_with_other", _npy_func_with_other},
};
static void _hoc_func(void) {
    // HOC wrapper for func(): resolve the currently selected mechanism
    // instance (if any) and forward the call, then push 1.0 back to HOC.
    Prop* _local_prop = _prop_id ? _extcall_prop : nullptr;
    _nrn_mechanism_cache_instance _ml_real{_local_prop};
    auto* const _ml = &_ml_real;
    size_t const id{};
    Datum* _ppvar = _local_prop ? _nrn_mechanism_access_dparam(_local_prop) : nullptr;
    Datum* _thread = _extcall_thread.data();
    NrnThread* _nt = nrn_threads;
    auto inst = make_instance_leonhard(_ml_real);
    double _r = 1.;
    func_leonhard(_ml, inst, id, _ppvar, _thread, _nt);
    hoc_retpushx(_r);
}
static double _npy_func(Prop* _prop) {
    // Python direct-call wrapper for func(): operate on the given Prop and
    // return 1.0 (func_leonhard's int result is not propagated).
    _nrn_mechanism_cache_instance _ml_real{_prop};
    auto* const _ml = &_ml_real;
    size_t const id{};
    Datum* _ppvar = _nrn_mechanism_access_dparam(_prop);
    Datum* _thread = _extcall_thread.data();
    NrnThread* _nt = nrn_threads;
    auto inst = make_instance_leonhard(_ml_real);
    double _r = 1.;
    func_leonhard(_ml, inst, id, _ppvar, _thread, _nt);
    return _r;
}
static void _hoc_func_with_v(void) {
    // HOC wrapper for func_with_v(): forward the first HOC argument
    // (*getarg(1)) as the explicit voltage parameter, then push 1.0.
    Prop* _local_prop = _prop_id ? _extcall_prop : nullptr;
    _nrn_mechanism_cache_instance _ml_real{_local_prop};
    auto* const _ml = &_ml_real;
    size_t const id{};
    Datum* _ppvar = _local_prop ? _nrn_mechanism_access_dparam(_local_prop) : nullptr;
    Datum* _thread = _extcall_thread.data();
    NrnThread* _nt = nrn_threads;
    auto inst = make_instance_leonhard(_ml_real);
    double _r = 1.;
    func_with_v_leonhard(_ml, inst, id, _ppvar, _thread, _nt, *getarg(1));
    hoc_retpushx(_r);
}
static double _npy_func_with_v(Prop* _prop) {
    // Python direct-call wrapper for func_with_v(): forwards the first
    // argument (*getarg(1)) and returns 1.0.
    _nrn_mechanism_cache_instance _ml_real{_prop};
    auto* const _ml = &_ml_real;
    size_t const id{};
    Datum* _ppvar = _nrn_mechanism_access_dparam(_prop);
    Datum* _thread = _extcall_thread.data();
    NrnThread* _nt = nrn_threads;
    auto inst = make_instance_leonhard(_ml_real);
    double _r = 1.;
    func_with_v_leonhard(_ml, inst, id, _ppvar, _thread, _nt, *getarg(1));
    return _r;
}
static void _hoc_func_with_other(void) {
    // HOC wrapper for func_with_other(): forward the first HOC argument
    // (*getarg(1)) as the extra parameter `q`, then push 1.0.
    Prop* _local_prop = _prop_id ? _extcall_prop : nullptr;
    _nrn_mechanism_cache_instance _ml_real{_local_prop};
    auto* const _ml = &_ml_real;
    size_t const id{};
    Datum* _ppvar = _local_prop ? _nrn_mechanism_access_dparam(_local_prop) : nullptr;
    Datum* _thread = _extcall_thread.data();
    NrnThread* _nt = nrn_threads;
    auto inst = make_instance_leonhard(_ml_real);
    double _r = 1.;
    func_with_other_leonhard(_ml, inst, id, _ppvar, _thread, _nt, *getarg(1));
    hoc_retpushx(_r);
}
static double _npy_func_with_other(Prop* _prop) {
    // Python direct-call wrapper for func_with_other(): forwards the first
    // argument (*getarg(1)) as `q` and returns 1.0.
    _nrn_mechanism_cache_instance _ml_real{_prop};
    auto* const _ml = &_ml_real;
    size_t const id{};
    Datum* _ppvar = _nrn_mechanism_access_dparam(_prop);
    Datum* _thread = _extcall_thread.data();
    NrnThread* _nt = nrn_threads;
    auto inst = make_instance_leonhard(_ml_real);
    double _r = 1.;
    func_with_other_leonhard(_ml, inst, id, _ppvar, _thread, _nt, *getarg(1));
    return _r;
}


inline int func_leonhard(_nrn_mechanism_cache_range* _ml, leonhard_Instance& inst, size_t id, Datum* _ppvar, Datum* _thread, NrnThread* _nt) {
    // Generated from FUNCTION func() in the MOD file: empty body, returns 0.
    int ret_func = 0;
    // The codegen materializes the local voltage `v` from the instance but
    // the function body never uses it; mark it so the read does not trip
    // -Wunused-variable while keeping the generated structure intact.
    [[maybe_unused]] auto v = inst.v_unused[id];
    return ret_func;
}


inline int func_with_v_leonhard(_nrn_mechanism_cache_range* _ml, leonhard_Instance& inst, size_t id, Datum* _ppvar, Datum* _thread, NrnThread* _nt, double v) {
    // Generated from FUNCTION func_with_v() in the MOD file: `v` is passed
    // explicitly by the caller; the body is empty and returns the default 0.
    return 0;
}


inline int func_with_other_leonhard(_nrn_mechanism_cache_range* _ml, leonhard_Instance& inst, size_t id, Datum* _ppvar, Datum* _thread, NrnThread* _nt, double q) {
    // Generated from FUNCTION func_with_other() in the MOD file: the extra
    // argument `q` is unused by the empty body; returns 0.
    int ret_func_with_other = 0;
    // The codegen reads the local voltage `v` from the instance but never
    // uses it; mark it to silence -Wunused-variable without changing the
    // generated shape.
    [[maybe_unused]] auto v = inst.v_unused[id];
    return ret_func_with_other;
}


void nrn_init_leonhard(_nrn_model_sorted_token const& _sorted_token, NrnThread* _nt, Memb_list* _ml_arg, int _type) {
Expand All @@ -190,8 +324,15 @@ namespace neuron {
}


inline double nrn_current_leonhard(size_t id, leonhard_Instance& inst, leonhard_NodeData& node_data, double v) {
inline double nrn_current_leonhard(_nrn_model_sorted_token const& _sorted_token, NrnThread* _nt, Memb_list* _ml_arg, int _type, size_t id, leonhard_Instance& inst, leonhard_NodeData& node_data, double v) {
[Review comment — Collaborator (Author)] Setting up `_lmr` should happen outside the loop: `nrn_current_*` is only ever called from the loop body of `nrn_cur_*`, so constructing the cache range here repeats that work on every iteration. It would be better to construct it once in the caller and pass it in.

_nrn_mechanism_cache_range _lmr{_sorted_token, *_nt, *_ml_arg, _type};
auto* const _ml = &_lmr;
auto* _thread = _ml_arg->_thread;
auto* _ppvar = _ml_arg->pdata[id];
double current = 0.0;
func_leonhard(_ml, inst, id, _ppvar, _thread, _nt);
[Review comment — Collaborator (Author)] A sensible set of arguments to pass into `nrn_current_leonhard` would be `_ml, inst, id, _ppvar, _thread, _nt` (plus mechanism-specific values), rather than rebuilding them inside the function.

func_with_v_leonhard(_ml, inst, id, _ppvar, _thread, _nt, v);
func_with_other_leonhard(_ml, inst, id, _ppvar, _thread, _nt, inst.c[id]);
inst.il[id] = inst.c[id] * (v - 1.5);
current += inst.il[id];
return current;
Expand All @@ -209,8 +350,8 @@ namespace neuron {
for (int id = 0; id < nodecount; id++) {
int node_id = node_data.nodeindices[id];
double v = node_data.node_voltages[node_id];
double I1 = nrn_current_leonhard(id, inst, node_data, v+0.001);
double I0 = nrn_current_leonhard(id, inst, node_data, v);
double I1 = nrn_current_leonhard(_sorted_token, _nt, _ml_arg, _type, id, inst, node_data, v+0.001);
double I0 = nrn_current_leonhard(_sorted_token, _nt, _ml_arg, _type, id, inst, node_data, v);
double rhs = I0;
double g = (I1-I0)/0.001;
node_data.node_rhs[node_id] -= rhs;
Expand Down