Supplementary Material \[.5cm] Firing rate response of neocortical pyramidal neurons in the fluctuation-driven regime
\newpage
<sec:exp-details>
For each cell, we quantified its membrane properties and the quality of the electrical access. We present those data here and look for relations between them.
./figures/experimental_details.pdf
\clearpage
import sys
sys.path.append('/home/yann/work/python_library/')
from my_graph import set_plot
import matplotlib.pylab as plt
import numpy as np
sys.path.append('../experimental_data/')
from dataset_structure import load_params_of_dataset
CELL_INDEX, RS, ILEAK, RM, CM, TM, RECORDING_LENGTH, PST_NATAL,\
N_SPIKES, DURATION = np.load('../experimental_data/dataset.npy')
INTEREST = [RS, ILEAK, RM, TM, RECORDING_LENGTH, PST_NATAL]
INTEREST_LABEL = [r'$R_\mathrm{S}$ (M$\Omega$)',\
r'$I_\mathrm{leak}$ (pA)', '$R_\mathrm{m}$ (M$\Omega$)',\
r'$\tau_\mathrm{m}^0$ (ms)', '$T_\mathrm{rec}$ (min)',\
'P (day)']
INTEREST_LABEL2 = ['Access Resistance', 'Seal Quality',\
'Membrane Resistance', 'Membrane Time Constant',\
'Full Recording Time', 'Post-Natal Day']
LABEL = ['RS', 'ILEAK', 'RM', 'TM', 'RECORDING_LENGTH', 'PST_NATAL']
for i in range(len(INTEREST)):
    fig, ax = plt.subplots(1, 1, figsize=(4,3))
    plt.subplots_adjust(bottom=.4, left=.3)
    ax.hist(INTEREST[i], bins=15, color='grey', lw=2)
    set_plot(ax, ylabel='cell count', xlabel=INTEREST_LABEL[i])
    ax.annotate(INTEREST_LABEL2[i],
                (0.3,0.05), xycoords='figure fraction', fontsize=17)
    fig.savefig('../figures/'+LABEL[i]+'.svg', format='svg', transparent=True)
plt.show()
import sys
sys.path.append('/home/yann/work/python_library/')
from my_graph import set_plot
import matplotlib.pylab as plt
import numpy as np
from scipy.stats.stats import pearsonr
sys.path.append('../experimental_data/')
from dataset_structure import load_params_of_dataset
CELL_INDEX, RS, ILEAK, RM, CM, TM, RECORDING_LENGTH, PST_NATAL,\
N_SPIKES, DURATION = np.load('../experimental_data/dataset.npy')
INTEREST = [RS, ILEAK, RM, TM, RECORDING_LENGTH, PST_NATAL]
INTEREST_LABEL = [r'$R_\mathrm{S}$ (M$\Omega$)',\
r'$I_\mathrm{leak}$ (pA)', '$R_\mathrm{m}$ (M$\Omega$)',\
r'$\tau_\mathrm{m}^0$ (ms)', '$T_\mathrm{rec}$ (min)',\
'post-natal day']
fig, AX = plt.subplots(len(INTEREST), len(INTEREST)-1, figsize=(15,20))
plt.subplots_adjust(wspace=.3, hspace=.5)
AX = AX.reshape((len(INTEREST), len(INTEREST)-1))  # ensure 2D indexing
for i in range(len(INTEREST)):
    for j in range(i):
        x = INTEREST[j]
        xth = np.linspace(x.min(), x.max())
        y = INTEREST[i]
        AX[i,j].plot(x, y, 'kD', ms=5)
        cc, pp = pearsonr(x, y)
        AX[i,j].plot(xth, np.polyval(np.polyfit(x, y, 1), xth), 'k-')
        AX[i,j].annotate('c='+str(np.round(cc,1))+', p='+'%.1e' % pp,
                         (0.1,1.03), xycoords='axes fraction', fontsize=14)
        if j==0:
            ylabel = INTEREST_LABEL[i]
        else:
            ylabel = ''
        if i==len(INTEREST)-1:
            xlabel = INTEREST_LABEL[j]
        else:
            xlabel = ''
        set_plot(AX[i,j], xlabel=xlabel, ylabel=ylabel)
# blank the unused upper-triangular panels
for i in range(len(INTEREST)):
    for j in range(i, len(INTEREST)-1):
        AX[i,j].axis('off')
fig.savefig('../figures/experimental_correlations.svg', format='svg', transparent=True)
plt.show()
import svgutils.transform as sg
fig = sg.SVGFigure("8.5cm", "10.5cm")
fig1 = sg.fromfile('../figures/experimental_correlations.svg')
fig2 = sg.fromfile('../figures/RS.svg')
fig3 = sg.fromfile('../figures/ILEAK.svg')
fig4 = sg.fromfile('../figures/RECORDING_LENGTH.svg')
fig6 = sg.fromfile('../figures/RM.svg')
fig5 = sg.fromfile('../figures/TM.svg')
fig7 = sg.fromfile('../figures/PST_NATAL.svg')
txt1 = sg.TextElement(0,50, "G", size=11, weight='bold')
txt2 = sg.TextElement(70,10, "A", size=11, weight='bold')
txt3 = sg.TextElement(140,10, "B", size=11, weight='bold')
txt4 = sg.TextElement(210,10, "C", size=11, weight='bold')
txt5 = sg.TextElement(140,83, "D", size=11, weight='bold')
txt6 = sg.TextElement(210,83, "E", size=11, weight='bold')
txt7 = sg.TextElement(210,157, "F", size=11, weight='bold')
# position the panels, then append plots and labels to the figure
plot1 = fig1.getroot();plot1.moveto(-15, -60, scale=.315)
plot2 = fig2.getroot();plot2.moveto(60, 10, scale=.3)
plot3 = fig3.getroot();plot3.moveto(130, 10, scale=.3)
plot4 = fig4.getroot();plot4.moveto(200, 10, scale=.3)
plot5 = fig5.getroot();plot5.moveto(130, 80, scale=.3)
plot6 = fig6.getroot();plot6.moveto(200, 80, scale=.3)
plot7 = fig7.getroot();plot7.moveto(200, 150, scale=.3)
fig.append([plot4, plot3, plot2, plot6, plot5, plot7, plot1])
fig.append([txt1, txt2, txt3, txt4, txt5, txt6, txt7])
# save generated SVG files
fig.save("../figures/experimental_details.svg")
import os
os.system('inkscape --export-pdf=../figures/experimental_details.pdf ../figures/experimental_details.svg')
os.system('eog ../figures/experimental_details.svg')
<sec:single-comp-supp>
\clearpage
import sys
sys.path.append('/home/yann/work/python_library/')
from electrophy import IC_membrane_test as IC
sys.path.append('../experimental_data/')
import dataset_structure as DATA
import numpy as np
RESIDUAL_LIST, CELL_LIST = [], []
Tm_factor = 6
for i in DATA.CELL_LIST[np.concatenate([np.arange(28), np.arange(29,len(DATA.CELL_LIST))])]:
    exec('from cell'+str(i)+' import cell'+str(i))
    exec('cell = cell'+str(i)+'.cell_params')
    exp, time, t, data, params = IC.load(cell['ROOT_FOLDER']+cell['IC_datafile'])
    exp, time, t, data, params, Rm, El, Cm, t_fit, v_fit,\
        RmS, CmS, Ra, RmD, CmD, v_fit_2comp, mean_v_response, mean_i = \
        IC.analyze(exp, time, t, data, params)
    Tm = Rm*Cm*1e-3  # membrane time constant
    dt = t[1]-t[0]
    DI = np.abs(np.diff(mean_i[5:])).max()  # pA, pulse amplitude
    # find where the pulse starts
    i1 = np.where(np.abs(np.diff(mean_i[5:]))>.6*DI)[0]
    it = np.arange(i1[0], min([i1[0]+int(Tm_factor*Tm/dt), len(t_fit)-1]))
    residual = np.abs((mean_v_response[it]-v_fit[it])/(v_fit[-1]-v_fit[0]))
    RESIDUAL_LIST.append(residual.sum()*dt/Tm/Tm_factor)
    CELL_LIST.append(i)
# cell28 has an AP in one trial and cannot be evaluated:
# we assign it the mean residual of the other cells
CELL_LIST.append(DATA.CELL_LIST[28])
RESIDUAL_LIST.append(np.array(RESIDUAL_LIST).mean())
CELL_LIST = np.array(CELL_LIST)
RESIDUAL_LIST = np.array(RESIDUAL_LIST)
isort = np.argsort(CELL_LIST)
np.save('../experimental_data/analyzed_data/residuals.npy',\
[CELL_LIST[isort], RESIDUAL_LIST[isort]])
import sys
sys.path.append('/home/yann/work/python_library/')
from electrophy import IC_membrane_test as IC
from my_graph import set_plot
sys.path.append('../experimental_data/')
import dataset_structure as DATA
import matplotlib.pylab as plt
import numpy as np
CELL_LIST, RESIDUAL_LIST = \
np.load('../experimental_data/analyzed_data/residuals.npy')
figA = plt.figure(figsize=(4,3))
plt.subplots_adjust(bottom=.25, left=.25)
ax = plt.subplot(111)
plt.hist(RESIDUAL_LIST, bins=15, color='grey')
set_plot(ax, xlabel='$C_{sc}$', ylabel='cell count')
Tm_factor = 10
imax = np.argmax(RESIDUAL_LIST)
imin = np.argmin(RESIDUAL_LIST)
FIG = []
for i in [DATA.CELL_LIST[imin], DATA.CELL_LIST[imax]]:
    f, (ax1, ax2, ax3) = plt.subplots(3, 1, figsize=(5,5))
    plt.subplots_adjust(bottom=.15, left=.25)
    FIG.append(f)
    exec('from cell'+str(i)+' import cell'+str(i))
    exec('cell = cell'+str(i)+'.cell_params')
    exp, time, t, data, params = IC.load(cell['ROOT_FOLDER']+cell['IC_datafile'])
    exp, time, t, data, params, Rm, El, Cm, t_fit, v_fit,\
        RmS, CmS, Ra, RmD, CmD, v_fit_2comp, mean_v_response, mean_i = \
        IC.analyze(exp, time, t, data, params)
    Tm = Rm*Cm*1e-3  # membrane time constant
    dt = t[1]-t[0]
    DI = np.abs(np.diff(mean_i[5:])).max()  # pA, pulse amplitude
    # find where the pulse starts
    i1 = np.where(np.abs(np.diff(mean_i[5:]))>.6*DI)[0]
    it = np.arange(i1[0], min([i1[0]+int(Tm_factor*Tm/dt), len(t_fit)-1]))
    residual = np.abs((mean_v_response[it]-v_fit[it])/(v_fit[-1]-v_fit[0]))
    ax1.plot(t_fit, mean_i[:len(t_fit)], 'k')
    set_plot(ax1, ylabel='I (pA)', spines=['left'])
    ax2.plot(t_fit, mean_v_response[:len(t_fit)], 'k')
    ax2.plot(t_fit[it], v_fit[it], 'r--', lw=3)
    set_plot(ax2, ylabel='$V_m$ (mV)', spines=['left'])
    ax3.plot(t_fit[it], residual, 'k-')
    ax3.plot(t_fit, 0*t_fit, 'k-')
    ax3.fill_between(t_fit[it], residual, 0*residual, color='grey')
    ax3.plot([10,10],[0,0.08],color='w')
    set_plot(ax3, ylabel='residual', xlabel='time (ms)')
figA.savefig('../figures/single_comp_accuracy_hist.svg', format='svg', transparent=True)
FIG[0].suptitle('best match')
FIG[0].savefig('../figures/single_comp_accuracy_best.svg', format='svg', transparent=True)
FIG[1].suptitle('worst match')
FIG[1].savefig('../figures/single_comp_accuracy_worst.svg', format='svg', transparent=True)
import svgutils.transform as sg
fig = sg.SVGFigure("12.5cm", "4.5cm")
# load matpotlib-generated figures
fig1 = sg.fromfile('../figures/single_comp_accuracy_hist.svg')
fig2 = sg.fromfile('../figures/single_comp_accuracy_best.svg')
fig3 = sg.fromfile('../figures/single_comp_accuracy_worst.svg')
# get the plot objects
plot1 = fig1.getroot();plot1.moveto(2, 20, scale=.5)
plot2 = fig2.getroot();plot2.moveto(160, 2, scale=.4)
plot3 = fig3.getroot();plot3.moveto(310, 2, scale=.4)
# add text labels
txt1 = sg.TextElement(0,20, "A", size=14, weight="bold")
txt2 = sg.TextElement(155,15, "B", size=14, weight="bold")
txt3 = sg.TextElement(305,15, "C", size=14, weight="bold")
# append plots and labels to figure
fig.append([plot1, plot2, plot3])
fig.append([txt1, txt2, txt3])
# save generated SVG files
fig.save("../figures/single_comp.svg")
import os
os.system('inkscape --export-pdf=../figures/single_comp.pdf ../figures/single_comp.svg')
os.system('eog ../figures/single_comp.svg')
# os.system('rm fig2.svg')
\newpage
<sec:fluctuation-control>
./figures/approx_allows_control_of_fluct.pdf
\clearpage
<sec:finite-sampling>
Three main components can be identified as contributing to the measured dispersion of the firing rate dependencies: 1) cellular heterogeneity, 2) experimental changes across experiments, and 3) finite sampling of the irregular firing process.
Because cellular heterogeneity is the biologically relevant phenomenon cite:Mejias2012 that we would like to evaluate, we try here to estimate the contribution of the finite sampling effect, so as to obtain an upper bound for the cellular heterogeneity (an upper bound because the contribution of the experimental bias is unknown).
Experimentally, we estimate the firing rate over a finite window of duration $T$.
The Poisson process has been shown to be a good model for the irregularity of the firing process REF?, so this calculation will take it as an assumption. Given an ideal frequency $\nu_\mathrm{id}$, the probability of observing $k$ spikes in the window, i.e. a rate $\nu_\mathrm{obs}=k/T$, is:
\begin{equation} P_{\nu_\mathrm{id}, T}\Big(\nu_\mathrm{obs} = \frac{k}{T}\Big) = \frac{\mathrm{e}^{-\nu_\mathrm{id} \, T}}{T} \cdot \frac{(\nu_\mathrm{id} \, T)^k}{k!} \label{eq-poisson-proba} \end{equation}
where the mean and standard deviation of the observed spike count are given by \( \langle k \rangle = \nu_\mathrm{id} \, T \) and \( \sqrt{\langle k^2 \rangle - \langle k \rangle^2} = \sqrt{\nu_\mathrm{id} \, T} \).
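As a quick numerical illustration of this finite-sampling dispersion (a minimal sketch of ours, not part of the original analysis; the rate and window values are arbitrary), one can draw Poisson counts and compare the empirical spread of $\nu_\mathrm{obs}$ with the $\sqrt{\nu_\mathrm{id}/T}$ prediction:
import numpy as np
nu_id, T, n_episodes = 5., 2., 10000  # rate (Hz), window (s), illustrative values
rng = np.random.RandomState(42)
k = rng.poisson(nu_id*T, size=n_episodes)  # spike counts over the window T
nu_obs = k/T                               # observed firing rates
print('empirical SD of nu_obs : %.3f Hz' % nu_obs.std())
print('predicted sqrt(nu_id/T): %.3f Hz' % np.sqrt(nu_id/T))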
Let's say that we study the firing rate as a function of a variable $x$, sampled over a set of points $\{x_i\}$, each presented for $S$ episodes (seeds) of duration $T$. The probability of observing the set of spike counts $\{k_i^s\}$ is then:
\begin{equation} P( \{ k_i^s \}) = \mathrm{e}^{-S \, T \, \sum_i \nu(x_i)} \times \prod_i \frac{(\nu(x_i) \, T)^{\sum_s k_i^s}}{\prod_s k_i^s!} \label{eq-poisson-set-proba} \end{equation}
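In practice, Equation \ref{eq-poisson-set-proba} is best evaluated in log space to avoid the factorials overflowing; a minimal sketch (the function name and array layout are ours, not those of the analysis code):
import numpy as np
from scipy.special import gammaln

def poisson_set_loglik(nu, k, T):
    # nu : rates nu(x_i), shape (n_points,)
    # k  : spike counts k_i^s, shape (n_points, n_seeds)
    # T  : episode duration
    S = k.shape[1]  # number of seeds per stimulation point
    return -S*T*nu.sum()\
        + np.sum(k.sum(axis=1)*np.log(nu*T))\
        - gammaln(k+1.).sum()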
We evaluated the response heterogeneity on the coefficients of the \textit{effective threshold}, so we need to translate the set of measurements into that representation.
So, for each type of protocol, we take the average behavior (in terms of phenomenological threshold) and convert it to a firing rate thanks to the average (
\clearpage
<sec:brunel-comp>
The situation of an Integrate and Fire neuron stimulated by delta-synapses is instructive because we benefit from a very accurate analytical approximation in our regime of interest cite:Amit1997. We will use this model to investigate the properties and limitations of the approximation for the firing rate.
We stimulate the neuron with a Poisson process of frequency $\nu_\mathrm{in}$. After the so-called diffusion approximation, we can transform the current input made of discrete synaptic events into a Gaussian white-noise current, which leads to:
\begin{equation} \hspace{-.4cm} \label{eq-cds-iaf-final}
\left\lbrace
\begin{split}
& \gamma(V) \, \longrightarrow \, \frac{V - E_\mathrm{L} - \mu_V}{\sqrt{2} \, \sigma_V} \\[.3cm]
& \nu_\mathrm{out} = \Big( \tau \, \sqrt{\pi} \int_{\gamma(V_\mathrm{reset})}^{\gamma(V_\mathrm{thre})} e^{x^2} \, \big(\textnormal{Erf}(x)+1\big) \, dx \Big)^{-1} \\[.3cm]
& P(V) = \frac{\sqrt{2} \, \nu_\mathrm{out} \, e^{-\gamma(V)^2}}{\sigma_V} \int_{\gamma\big(\max(V, V_\mathrm{reset})\big)}^{\gamma(V_\mathrm{thre})} e^{x^2} \, dx
\end{split}
\right.
\end{equation}
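For reference, the $\nu_\mathrm{out}$ expression above can be evaluated numerically in a few lines; this is a sketch of ours with arbitrary parameter values (note that $e^{x^2}$ overflows for thresholds many $\sigma_V$ away):
import numpy as np
from scipy.special import erf
from scipy.integrate import quad

def gamma_f(V, El, muV, sV):
    # reduced voltage variable of Equation (eq-cds-iaf-final)
    return (V-El-muV)/(np.sqrt(2.)*sV)

def nu_out(tau, El, muV, sV, Vreset, Vthre):
    # stationary rate of the white-noise-driven IaF neuron
    integral, _ = quad(lambda x: np.exp(x**2)*(erf(x)+1.),
                       gamma_f(Vreset, El, muV, sV),
                       gamma_f(Vthre, El, muV, sV))
    return 1./(tau*np.sqrt(np.pi)*integral)

# e.g. tau=20ms, mean depolarization 10mV, std 4mV:
print(nu_out(20e-3, -70e-3, 10e-3, 4e-3, -70e-3, -50e-3))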
In \ref{fig:fpt-wn-insight1}, we examine the difference between this analytical solution and our naive Gaussian approximation. We stimulate the neuron with presynaptic spike trains of increasing frequency $\nu_\mathrm{in}$ (for given
We observe that for low input (\ref{subfig:firing-traces}), the approximation underestimates the firing probability. This is a consequence of the boundary conditions: in the absence of such conditions, the solution for the membrane potential distribution would be our Gaussian approximation (dashed lines in \ref{subfig:membrane-pot-distrib}). But the IaF model imposes an absorbing boundary at the threshold, which depresses the probability density near $V_\mathrm{thre}$ and therefore lowers the firing probability.
The previous explanation does not consider any temporal dynamics[fn::even if this is a stationary solution, the fact of having a non-zero probability flux gives a meaning to the temporal dynamics at the population level]. When the firing rate becomes high enough, the mean time to drift toward the threshold becomes a limiting factor for the firing probability. This is what happens at high input: much of the probability content is made of elements transiting between the reset and the threshold (see in \ref{subfig:membrane-pot-distrib} how the distribution becomes much thicker at intermediate values). This limiting phenomenon is obviously not taken into account by the Gaussian approximation, so for this range of input the heuristic estimation overestimates the firing probability.
Therefore, very roughly, the value where the approximation works (in \ref{subfig:firing-traces}, where the Fokker-Planck solution crosses the heuristic approximation) corresponds to the point at which the bias due to the boundary conditions is compensated by the temporal inertia of the membrane distribution drifting toward the threshold.
So, depending on where we lie with respect to this point, we will need to lower or raise the threshold to make the heuristic template match.
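Both regimes can be checked directly by simulation; below is a minimal sketch of ours (parameters arbitrary; iaf_rate_mc and gaussian_heuristic are our illustrative names) comparing the empirical rate of a white-noise-driven IaF neuron with a naive Gaussian estimate, here taken as the suprathreshold probability mass converted into a rate (one common heuristic form; the exact template of the main text may differ):
import numpy as np
from scipy.special import erfc

def iaf_rate_mc(muV, sV, tau=20e-3, El=-70e-3, Vthre=-50e-3,
                Vreset=-70e-3, dt=1e-5, tmax=20., seed=0):
    # Euler-Maruyama simulation of an IaF neuron whose free membrane
    # potential would fluctuate around El+muV with standard deviation sV
    rng = np.random.RandomState(seed)
    v, n_spk = El+muV, 0
    noise = sV*np.sqrt(2.*dt/tau)  # OU noise increment
    for _ in range(int(tmax/dt)):
        v += dt/tau*(El+muV-v) + noise*rng.randn()
        if v>=Vthre:
            n_spk, v = n_spk+1, Vreset
    return n_spk/tmax

def gaussian_heuristic(muV, sV, tau=20e-3, El=-70e-3, Vthre=-50e-3):
    # suprathreshold probability mass, converted into a rate by 1/(2 tau)
    return erfc((Vthre-El-muV)/(np.sqrt(2.)*sV))/(2.*tau)

for muV in [5e-3, 10e-3, 15e-3]:  # low to high input
    print(muV, iaf_rate_mc(muV, 4e-3), gaussian_heuristic(muV, 4e-3))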
To investigate more quantitatively how the defined effective
threshold
In \ref{subfig:fpt-wn-varying-mean-var}, we fix the membrane time constant and vary
<sec:muV-sV>
We first investigate here the firing rate response as a function of the ($\mu_V$, $\sigma_V$) pair of fluctuation parameters.
Introducing a linear function of $\mu_V$ and $\sigma_V$ for the effective threshold:
\begin{equation} \label{eq-vthre-muV-sV} V_\mathrm{thre}^\mathrm{eff} = V_\mathrm{thre}^0 + P_{\mu_V} \, \frac{\mu_V - \mu_V^0}{\delta \mu_V^0} + P_{\sigma_V} \, \frac{\sigma_V - \sigma_V^0}{\delta \sigma_V^0} \end{equation}
Here, to obtain comparable quantities, we have arbitrarily normalized the dependency on
We next investigated the firing rate response of neocortical neurons as a function of the ($\mu_V$, $\sigma_V$) fluctuation variables.
An individual cellular behavior corresponds to a set of coefficients \(V_\mathrm{thre}^0\), $P_{\mu_V}$ and $P_{\sigma_V}$. We show in Figure 2F the histograms of those coefficients across the recorded pyramidal cell population.
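To make the fitted quantities concrete, here is a sketch of the phenomenological description as we understand it (a reimplementation of ours with placeholder normalization constants; the analysis relies on erfc_func and fitting_Vthre_then_Fout from template_and_fitting, whose exact forms may differ):
import numpy as np
from scipy.special import erfc

def template_rate(muV, sV, Tv, Vthre_eff):
    # firing-rate template: suprathreshold probability mass converted
    # into a rate by the autocorrelation time Tv (assumed erfc-based form)
    return erfc((Vthre_eff-muV)/(np.sqrt(2.)*sV))/(2.*Tv)

def vthre_eff_muV_sV(muV, sV, coeffs,
                     muV0=-60e-3, dmuV0=10e-3, sV0=4e-3, dsV0=6e-3):
    # linear effective threshold of Equation (eq-vthre-muV-sV);
    # the normalization constants above are illustrative placeholders
    V0, P_muV, P_sV = coeffs
    return V0 + P_muV*(muV-muV0)/dmuV0 + P_sV*(sV-sV0)/dsV0
The three coefficients are then obtained by least-squares fitting of the measured rates (e.g. with scipy.optimize).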
The first coefficient, \(V_\mathrm{thre}^0\), accounts for a mean threshold level: it represents the mean excitability of the neuron, and we will see in the next section that it can depend on other variables. The adaptive Exponential Integrate and Fire model with Regular Spiking features (AdExp-RS) shows a higher mean phenomenological threshold (see Figure 6); indeed, because of its finite sodium activation curve and its firing adaptation, it is less excitable than the IaF model. We see here that this mean excitability level (
The second coefficient, \(P_{\mu_V}\), represents the deviation from the behavior of Equation \ref{eq-template} in the dependency on $\mu_V$.
The third coefficient, $P_{\sigma_V}$, represents the deviation from the behavior of Equation \ref{eq-template} in the dependency on $\sigma_V$.
\newpage
<sec:Tv>
Because the firing rate is a temporal quantity, we expect a strong dependency of the firing on the temporal dynamics of the membrane potential fluctuations. It was shown in cite:Kuhn2004 that the firing rate can be greatly affected by the effective membrane time constant $\tau_\mathrm{m}^\mathrm{eff}$ for inputs leading to the same mean
Here, the dynamic-clamp technique plays a crucial role: it allows us to investigate values of the global autocorrelation time that lie below the resting membrane time constant
We used the expressions derived in the Methods to design a
stimulation keeping
We show in Figure 3A this relationship for three different models: the IaF model, the EIF model with a sharpness of \(k_a=2\,\mathrm{mV}\), and the inactivating leaky Integrate and Fire model (iLIF, cite:Platkiewicz2011).
As expected in a threshold-crossing situation, faster fluctuations lead to higher firing rates than slow fluctuations; we thus observe a decreasing relationship between the firing rate and the autocorrelation time.
For the analytical description, we found that introducing a linear dependency on $\tau_V^N$ into the effective threshold captured this behavior.
We introduce:
\begin{equation} \label{eq-Tv-shift} V_\mathrm{thre}^\mathrm{eff} = V_\mathrm{thre}^0 + P_{\tau_V^N} \, \frac{\tau_V^N - \tau_V^{N0}}{\delta \tau_V^{N0}} \end{equation}
where $P_{\tau_V^N}$ accounts for the threshold dependency induced by the behavior discussed above. The higher it is, the lower the
This expression provides a quantitative way to evaluate the sensitivity to the speed of the fluctuations. We thus investigated this sensitivity in several pyramidal neurons (Figure 3C). Strikingly, the mean behavior over the cells showed a remarkable sensitivity to the global autocorrelation time, much stronger than that of the IaF model.
As suggested by the theoretical models, this high sensitivity presumably results from the combination of 1) a high ability to track fast input, close to the IaF model cite:Naundorf2006 cite:Ilin2013, and 2) a mechanism that penalizes slowly fluctuating input: the inactivation of sodium channels (again revealed by the use of the dynamic-clamp technique, which allows us to produce fast membrane potential fluctuations,
\newpage
<sec:muG>
In neocortical neurons, the spike is produced by a sodium current abruptly activated by membrane depolarization. Under in vivo conditions, the somatic input conductance is greatly increased as a consequence of synaptic activity (see cite:Destexhe2003 for a review). Because the depolarization induced by the sodium current depends on the input conductance, an important question is how much the shunting of the sodium current reduces cellular excitability as the input conductance increases in the fluctuation-driven regime.
The minimal model exhibiting this feature is the Exponential Integrate and Fire (EIF) model. We can vary the sharpness of the spike initiation current from an infinitely sharp current (
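For concreteness, the EIF spike-initiation current takes the standard exponential form of cite:Fourcaud-Trocme2003; a minimal sketch (parameter values illustrative):
import numpy as np

def eif_spike_current(V, gL=10e-9, ka=2e-3, Vthre=-50e-3):
    # exponential spike-initiation current; ka -> 0 recovers the
    # infinitely sharp IaF mechanism
    return gL*ka*np.exp((V-Vthre)/ka)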
In cite:Platkiewicz2010, in the context of their threshold equation, the authors proposed a way to account for this decreased excitability. We found that the mathematical expression they proposed:
\begin{equation} \label{eq-muG-shift} V_\mathrm{thre}^\mathrm{eff} = V_\mathrm{thre}^0 + P_G \cdot \log \left( \frac{\mu_G}{g_\mathrm{L}} \right) \end{equation}
was a good way to account for the dependency on the input conductance in our phenomenological threshold when introduced into the template Equation \ref{eq-template} (note that $P_G$ is different from
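Combining the linear terms of Equations \ref{eq-vthre-muV-sV} and \ref{eq-Tv-shift} with this logarithmic conductance shift yields a four-variable effective threshold; a sketch of ours of what final_threshold_func in template_and_fitting presumably computes (the normalization constants are placeholders):
import numpy as np

def final_threshold(muV, sV, TvN, muG, P, gL,
                    muV0=-60e-3, dmuV0=10e-3,
                    sV0=4e-3, dsV0=6e-3, TvN0=.5, dTvN0=1.):
    # P = (V0, P_muV, P_sV, P_TvN, P_G)
    V0, P_muV, P_sV, P_TvN, P_G = P
    return (V0
            + P_muV*(muV-muV0)/dmuV0
            + P_sV*(sV-sV0)/dsV0
            + P_TvN*(TvN-TvN0)/dTvN0
            + P_G*np.log(muG/gL))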
We now investigate this dependency in neocortical neurons by artificially increasing the conductance with the dynamic-clamp technique (see the Methods). In Figure 4C, we tested the impact of an increased somatic input conductance on several pyramidal neurons. The accuracy of the linear description of the phenomenological threshold as a function of
It is striking to note that the mean behavior of neocortical cells could be explained by a very smooth activation curve in a single-compartment model (thick black curve in Figure 4F), similar to the sodium activation curve obtained under voltage-clamp measurements (ref?). In addition, unlike the dependency on
\newpage
<sec:muG-Tv>
In the two previous sections, we have independently investigated the dependencies on the input conductance and on the autocorrelation time. A more physiological situation would correspond to a comodulation of
The two previous sections predict opposite effects in response to this type of comodulation: increasing conductance reduces the firing rate for non-infinitely-sharp activation curves, while faster temporal fluctuations increase the firing rate. It is therefore important to understand the final output of the combination of those two effects.
For the IaF neuron, the effect is clear: the spiking mechanism does not create a dependency on
We ran the same protocol on neocortical neurons and found that the response to this comodulation is systematically increasing, still showing a high sensitivity to the speed of the fluctuations despite the potential dampening by the input conductance increase (see Figure 5).
As this comodulation is likely to be the physiologically relevant case (though we could imagine situations where those values vary independently, e.g. an increase
\newpage
\newpage
<sec:fitting-coefficients>
\newpage
The minimal number of points in the dataset of long recordings (presented in fig-full-3D-data) is n=40 (meaning there have been 40 episodes of a given seed and a given (
Here, we investigate how reliable the characterization is over this limited number of points. We do this by taking the cell having the lowest number of points: we split its dataset into two halves and check whether the estimated sensitivity is unchanged.
\newpage
N_MIN = 70  # only keep cells with more than N_MIN points
import sys
import matplotlib.pylab as plt
import numpy as np
from scipy.stats.stats import pearsonr
sys.path.append('../theoretical_tools/')
from encoding_power import get_mean_encoding_power
from template_and_fitting import erfc_func, fitting_Vthre_then_Fout,\
final_threshold_func, print_reduce_parameters
sys.path.append('../3d_scan/')
from fit_Fout_response_of_data import load_full_data_set
sys.path.append('../experimental_data/')
from funcs_for_exp_analysis import load_reformated_data
## importing data
import dataset_structure as data
FourD_list = data.FourD_list
print(FourD_list[-4:])
sys.path.append('/home/yann/work/python_library/')
from my_graph import set_plot
DATA2, INDEX2 = load_full_data_set(reformat=False)
DATA, INDEX = [], []
for data, index in zip(DATA2, INDEX2):
    if len(data[4])>N_MIN and index!='cell44' and index!='cell12':
        DATA.append(data)
        INDEX.append(index)
        print(index, len(data[4]))
E0_1, EmuV_1, EsV_1, ETv_1 = [], [], [], []
E0_2, EmuV_2, EsV_2, ETv_2 = [], [], [], []
i=0
for data in DATA:
    muV, sV, Tv_ratio, muGn, Fout, s_Fout, Vthre_eff, Gl, Cm, El,\
        muV_exp, sV_exp, Tv_exp, s_muV_exp, s_sV_exp, s_Tv_exp = data
    i0 = int(len(data[0])/2.)
    ### first slice
    P1 = fitting_Vthre_then_Fout(Fout[:i0], 1e-3*muV[:i0],
                                 1e-3*sV[:i0], Tv_ratio[:i0],
                                 muGn[:i0], Gl, Cm, El, dep_muG=False, print_things=False)
    E01, EmuV1, EsV1, ETv1 = get_mean_encoding_power(P1, El, Gl, Cm)
    ### second slice
    P2 = fitting_Vthre_then_Fout(Fout[i0:], 1e-3*muV[i0:],
                                 1e-3*sV[i0:], Tv_ratio[i0:],
                                 muGn[i0:], Gl, Cm, El, dep_muG=False, print_things=False)
    E02, EmuV2, EsV2, ETv2 = get_mean_encoding_power(P2, El, Gl, Cm)
    if np.isfinite([E01, EmuV1, EsV1, ETv1, E02, EmuV2, EsV2, ETv2]).all():
        E0_1.append(E01);EmuV_1.append(EmuV1);EsV_1.append(EsV1);ETv_1.append(ETv1)
        E0_2.append(E02);EmuV_2.append(EmuV2);EsV_2.append(EsV2);ETv_2.append(ETv2)
    if EmuV1>1.6:
        print(i, INDEX[i], len(data[4]))
    i+=1
fig, ax = plt.subplots(1,4,figsize=(18,5))
plt.subplots_adjust(wspace=.4, bottom=.25, right=.99)
i=0
LABELS = [r"$\langle V_\mathrm{thre}^\mathrm{eff} \rangle_\mathcal{D}$",\
r"$\langle d \nu / d \mu_V \rangle_\mathcal{D}$",\
r"$\langle d \nu / d \sigma_V \rangle_\mathcal{D}$",\
r"$\langle d \nu / d \tau_V^{N}' \rangle_\mathcal{D}$"]
for x, y, label in zip([E0_1, EmuV_1, EsV_1, ETv_1], [E0_2, EmuV_2, EsV_2, ETv_2], LABELS):
    x = np.array(x)
    y = np.array(y)
    cc, pp = pearsonr(x, y)
    ax[i].plot(x, y, 'kD')
    xth = np.linspace(x.min(), x.max())
    ax[i].plot(xth, np.polyval(np.polyfit(x, y, 1), xth), 'k--')
    ax[i].annotate('c='+str(np.round(cc,1))+', p='+'%.1e' % pp,
                   (0.1,1.03), xycoords='axes fraction', fontsize=17)
    set_plot(ax[i], xlabel=label+'\n first slice', ylabel=label+'\n second slice')
    i+=1
fig.savefig('../figures/fitting_robustness.pdf')
N_MIN = 70  # only keep cells with more than N_MIN points
import sys
import itertools
import matplotlib.pylab as plt
import numpy as np
from scipy.stats.stats import pearsonr
sys.path.append('../theoretical_tools/')
from encoding_power import get_mean_encoding_power
from template_and_fitting import erfc_func, fitting_Vthre_then_Fout,\
final_threshold_func, print_reduce_parameters
sys.path.append('../3d_scan/')
from fit_Fout_response_of_data import load_full_data_set
sys.path.append('../experimental_data/')
from funcs_for_exp_analysis import load_reformated_data
## importing data
import dataset_structure as data
FourD_list = data.FourD_list
print(FourD_list[-4:])
sys.path.append('/home/yann/work/python_library/')
from my_graph import set_plot
DATA2, INDEX2 = load_full_data_set(reformat=False)
DATA, INDEX = [], []
for data, index in zip(DATA2, INDEX2):
    if len(data[4])>N_MIN and index!='cell44' and index!='cell12':
        DATA.append(data)
        INDEX.append(index)
        print(index, len(data[4]))
E0_1, EmuV_1, EsV_1, ETv_1 = [], [], [], []
E0_2, EmuV_2, EsV_2, ETv_2 = [], [], [], []
i=0
for data in DATA:
    muV, sV, Tv_ratio, muGn, Fout, s_Fout, Vthre_eff, Gl, Cm, El,\
        muV_exp, sV_exp, Tv_exp, s_muV_exp, s_sV_exp, s_Tv_exp = data
    muV_unique, sV_unique, Tv_unique = np.unique(muV), np.unique(sV),\
        np.unique(Tv_ratio)
    muV1, sV1, Tv1, Fout1 = [], [], [], []
    muV2, sV2, Tv2, Fout2 = [], [], [], []
    # loop over the full Cartesian product of conditions, so that every
    # (muV, sV, Tv) combination present in the scan is considered
    for m, s, t in itertools.product(muV_unique, sV_unique, Tv_unique):
        ii = np.where((muV==m) & (sV==s) & (Tv_ratio==t))[0]
        if len(ii)>1:
            # first and second occurrence of the same stimulation condition
            muV1.append(muV[ii[0]]);sV1.append(sV[ii[0]])
            Tv1.append(Tv_ratio[ii[0]]);Fout1.append(Fout[ii[0]])
            muV2.append(muV[ii[1]]);sV2.append(sV[ii[1]])
            Tv2.append(Tv_ratio[ii[1]]);Fout2.append(Fout[ii[1]])
    muV1, sV1, Tv1, Fout1 = np.array(muV1), np.array(sV1), np.array(Tv1), np.array(Fout1)
    muV2, sV2, Tv2, Fout2 = np.array(muV2), np.array(sV2), np.array(Tv2), np.array(Fout2)
    ### first slice
    P1 = fitting_Vthre_then_Fout(Fout1, 1e-3*muV1,
                                 1e-3*sV1, Tv1,
                                 0*Tv1, Gl, Cm, El, dep_muG=False, print_things=False)
    E01, EmuV1, EsV1, ETv1 = get_mean_encoding_power(P1, El, Gl, Cm)
    ### second slice
    P2 = fitting_Vthre_then_Fout(Fout2, 1e-3*muV2,
                                 1e-3*sV2, Tv2,
                                 0*Tv2, Gl, Cm, El, dep_muG=False, print_things=False)
    E02, EmuV2, EsV2, ETv2 = get_mean_encoding_power(P2, El, Gl, Cm)
    if np.isfinite([E01, EmuV1, EsV1, ETv1, E02, EmuV2, EsV2, ETv2]).all():
        E0_1.append(E01);EmuV_1.append(EmuV1);EsV_1.append(EsV1);ETv_1.append(ETv1)
        E0_2.append(E02);EmuV_2.append(EmuV2);EsV_2.append(EsV2);ETv_2.append(ETv2)
    if EmuV1>1.6:
        print(i, INDEX[i], len(data[4]))
    i+=1
fig, ax = plt.subplots(1,4,figsize=(18,5))
plt.subplots_adjust(wspace=.4, bottom=.25, right=.99)
i=0
LABELS = [r"$\langle V_\mathrm{thre}^\mathrm{eff} \rangle_\mathcal{D}$",\
r"$\langle d \nu / d \mu_V \rangle_\mathcal{D}$",\
r"$\langle d \nu / d \sigma_V \rangle_\mathcal{D}$",\
r"$\langle d \nu / d \tau_V^{N}' \rangle_\mathcal{D}$"]
for x, y, label in zip([E0_1, EmuV_1, EsV_1, ETv_1], [E0_2, EmuV_2, EsV_2, ETv_2], LABELS):
    x = np.array(x)
    y = np.array(y)
    cc, pp = pearsonr(x, y)
    ax[i].plot(x, y, 'rD')
    xth = np.linspace(x.min(), x.max())
    ax[i].plot(xth, np.polyval(np.polyfit(x, y, 1), xth), 'r--')
    ax[i].annotate('c='+str(np.round(cc,1))+', p='+'%.1e' % pp,
                   (0.1,1.03), xycoords='axes fraction', fontsize=17)
    set_plot(ax[i], xlabel=label+'\n first slice', ylabel=label+'\n second slice')
    i+=1
fig.savefig('../figures/fitting_robustness2.pdf')
<sec:other-strategies>
<<sec:higher-order-in-stim>>
Figure with PSP event shapes where we illustrate that, for the IaF neuron, the firing probability slightly increases for the high-conductance stimuli. The amplitude of the variations around the constant level is probably a way to assess the impact of the varying higher-order terms resulting from the stimulation choice. For example, here we fix (
\bibliographystyle{apalike} \bibliography{tex/biblio}
\onecolumn \newpage
## ## WE MAKE THE FOOTNOTEMARK IN THE AUTHORS EXPLICIT (for easier change):
\beginsupplement
<sec:exp-details>
For each cell, we quantified its membrane properties and the quality of the electrical access. We present those data here and look for relations between them.
Some of the relations that appear are:
- Very naturally, the membrane time constant is proportional to the input resistance (c=0.8, $p<2\cdot10^{-10}$).
- The recording time diminishes with the age of the animal ($p<1\cdot10^{-3}$).
- The membrane resistances and membrane time constants decrease with the age of the animal, even if they keep a strong variability ($p<5\cdot10^{-3}$ and $p<3\cdot10^{-2}$ respectively).
- The access resistance seems independent of all other parameters.
- The quality of the seal does not seem to impede the duration of the recording much (despite the fact that both are correlated with the age of the animal, their mutual correlation is low).
\newpage
<sec:single-comp>
The single-compartment approximation is important in this study as it is used to constrain the fluctuations of the membrane potential. We perform here a cell-by-cell quantification of the accuracy of this approximation.
We quantify the accuracy of the approximation as follows. We take the
protocols that were used to determine the membrane properties: prior
to each protocol, we recorded and averaged the response to 10 current
pulses of
\begin{equation} \mathrm{Res}(t) = \frac{ \| V(t) - V_\mathrm{sc}^\mathrm{fit}(t) \| }{R_m^0 Δ I} \label{eq:residual-single-comp} \end{equation}
We then quantify the accuracy of the single-compartment approximation by taking the (normalized) integral of the residual trace over 7 membrane time constants (see bottom traces in fig:single-comp-approxB and fig:single-comp-approxC).
\begin{equation} C_\mathrm{sc} = \int_{t_0}^{t_0 + 7 \, \tau_m^0} \frac{dt}{7\,\tau_m^0} \, \mathrm{Res}(t) \label{eq:accuracy-coeff-single-comp} \end{equation}
The two normalizations (by membrane resistance and membrane time constant) were performed to yield comparable quantities for different membrane parameters.
We present the histogram of this quantity over the cells of the dataset in Figure fig:single-comp-approxA.
We conclude that the approximation is satisfactory: even the worst case in Figure fig:single-comp-approxC corresponds to a reasonably good match.
\newpage
import sys
sys.path.append('/home/yann/work/python_library/')
from electrophy import IC_membrane_test as IC
sys.path.append('../experimental_data/')
import dataset_structure as DATA
import numpy as np
RESIDUAL_LIST, CELL_LIST = [], []
Tm_factor = 6
for i in DATA.CELL_LIST[np.concatenate([np.arange(28), np.arange(29,len(DATA.CELL_LIST))])]:
    exec('from cell'+str(i)+' import cell'+str(i))
    exec('cell = cell'+str(i)+'.cell_params')
    exp, time, t, data, params = IC.load(cell['ROOT_FOLDER']+cell['IC_datafile'])
    exp, time, t, data, params, Rm, El, Cm, t_fit, v_fit,\
        RmS, CmS, Ra, RmD, CmD, v_fit_2comp, mean_v_response, mean_i = \
        IC.analyze(exp, time, t, data, params)
    Tm = Rm*Cm*1e-3  # membrane time constant
    dt = t[1]-t[0]
    DI = np.abs(np.diff(mean_i[5:])).max()  # pA, pulse amplitude
    # find where the pulse starts
    i1 = np.where(np.abs(np.diff(mean_i[5:]))>.6*DI)[0]
    it = np.arange(i1[0], min([i1[0]+int(Tm_factor*Tm/dt), len(t_fit)-1]))
    residual = np.abs((mean_v_response[it]-v_fit[it])/(v_fit[-1]-v_fit[0]))
    RESIDUAL_LIST.append(residual.sum()*dt/Tm/Tm_factor)
    CELL_LIST.append(i)
# cell28 has an AP in one trial and cannot be evaluated:
# we assign it the mean residual of the other cells
CELL_LIST.append(DATA.CELL_LIST[28])
RESIDUAL_LIST.append(np.array(RESIDUAL_LIST).mean())
CELL_LIST = np.array(CELL_LIST)
RESIDUAL_LIST = np.array(RESIDUAL_LIST)
isort = np.argsort(CELL_LIST)
np.save('../experimental_data/analyzed_data/residuals.npy',\
[CELL_LIST[isort], RESIDUAL_LIST[isort]])
import sys
sys.path.append('/home/yann/work/python_library/')
from electrophy import IC_membrane_test as IC
from my_graph import set_plot
sys.path.append('../experimental_data/')
import dataset_structure as DATA
import matplotlib.pylab as plt
import numpy as np
CELL_LIST, RESIDUAL_LIST = \
np.load('../experimental_data/analyzed_data/residuals.npy')
figA = plt.figure(figsize=(4,3))
plt.subplots_adjust(bottom=.25, left=.25)
ax = plt.subplot(111)
plt.hist(RESIDUAL_LIST, bins=15, color='grey')
set_plot(ax, xlabel='$C_{sc}$', ylabel='cell count')
Tm_factor = 10
imax = np.argmax(RESIDUAL_LIST)
imin = np.argmin(RESIDUAL_LIST)
FIG = []
for i in [DATA.CELL_LIST[imin], DATA.CELL_LIST[imax]]:
    f, (ax1, ax2, ax3) = plt.subplots(3, 1, figsize=(5,5))
    plt.subplots_adjust(bottom=.15, left=.25)
    FIG.append(f)
    exec('from cell'+str(i)+' import cell'+str(i))
    exec('cell = cell'+str(i)+'.cell_params')
    exp, time, t, data, params = IC.load(cell['ROOT_FOLDER']+cell['IC_datafile'])
    exp, time, t, data, params, Rm, El, Cm, t_fit, v_fit,\
        RmS, CmS, Ra, RmD, CmD, v_fit_2comp, mean_v_response, mean_i = \
        IC.analyze(exp, time, t, data, params)
    Tm = Rm*Cm*1e-3  # membrane time constant
    dt = t[1]-t[0]
    DI = np.abs(np.diff(mean_i[5:])).max()  # pA, pulse amplitude
    # find where the pulse starts
    i1 = np.where(np.abs(np.diff(mean_i[5:]))>.6*DI)[0]
    it = np.arange(i1[0], min([i1[0]+int(Tm_factor*Tm/dt), len(t_fit)-1]))
    residual = np.abs((mean_v_response[it]-v_fit[it])/(v_fit[-1]-v_fit[0]))
    ax1.plot(t_fit, mean_i[:len(t_fit)], 'k')
    set_plot(ax1, ylabel='I (pA)', spines=['left'])
    ax2.plot(t_fit, mean_v_response[:len(t_fit)], 'k')
    ax2.plot(t_fit[it], v_fit[it], 'r--', lw=3)
    set_plot(ax2, ylabel='$V_m$ (mV)', spines=['left'])
    ax3.plot(t_fit[it], residual, 'k-')
    ax3.plot(t_fit, 0*t_fit, 'k-')
    ax3.fill_between(t_fit[it], residual, 0*residual, color='grey')
    ax3.plot([10,10],[0,0.08],color='w')
    set_plot(ax3, ylabel='residual', xlabel='time (ms)')
figA.savefig('../figures/single_comp_accuracy_hist.svg', format='svg', transparent=True)
FIG[0].suptitle('best match')
FIG[0].savefig('../figures/single_comp_accuracy_best.svg', format='svg', transparent=True)
FIG[1].suptitle('worst match')
FIG[1].savefig('../figures/single_comp_accuracy_worst.svg', format='svg', transparent=True)
import svgutils.transform as sg
fig = sg.SVGFigure("12.5cm", "4.5cm")
# load matpotlib-generated figures
fig1 = sg.fromfile('../figures/single_comp_accuracy_hist.svg')
fig2 = sg.fromfile('../figures/single_comp_accuracy_best.svg')
fig3 = sg.fromfile('../figures/single_comp_accuracy_worst.svg')
# get the plot objects
plot1 = fig1.getroot();plot1.moveto(2, 20, scale=.5)
plot2 = fig2.getroot();plot2.moveto(160, 2, scale=.4)
plot3 = fig3.getroot();plot3.moveto(310, 2, scale=.4)
# add text labels
txt1 = sg.TextElement(0,20, "A", size=14, weight="bold")
txt2 = sg.TextElement(155,15, "B", size=14, weight="bold")
txt3 = sg.TextElement(305,15, "C", size=14, weight="bold")
# append plots and labels to figure
fig.append([plot1, plot2, plot3])
fig.append([txt1, txt2, txt3])
# save generated SVG files
fig.save("../figures/single_comp.svg")
import os
os.system('inkscape --export-pdf=../figures/single_comp.pdf ../figures/single_comp.svg')
os.system('eog ../figures/single_comp.svg')
# os.system('rm fig2.svg')
\newpage
<sec:linear-threshold-is-good>
todo:
- write the 11-parameter form
\begin{equation} ksdjfh \end{equation}
see Figure fig:robustness-all-data
\newpage
\newpage
<sec:more-robustness>
Additional analyses of the robustness of the characterization.
\newpage
<sec:muG-Tv-deintricate-supp>
\newpage
<sec:impact-on-rate-coding>
We showed that the firing rate of neocortical neurons exhibits a strong dependency on the speed of the membrane potential fluctuations under physiological conditions (Section muG-Tv): close to the IaF model, although impaired by the conductance dependency. This is a priori difficult to implement biologically, as it would require an infinitely sharp activation curve. But we showed that sodium inactivation is able to maintain a strong sensitivity to the speed of the fluctuations despite a finite sharpness of the activation curve. This would give another functional role to the sodium inactivation mechanism (cite:Platkiewicz2011).
This optimization suggests that this feature is important for population coding in neocortical networks. Transient variations of the speed of the fluctuations across the population seem to be a reliable mechanism to change the network firing rate (though detectable only at the
On the other hand, our quantification of the dependency on
To illustrate this, we return to the situation presented above: a 2-3 fold conductance increase with respect to the background activity level (a rather strong stimulation) would correspond to
Finally, our measurements showed that a change of conductance can, by itself, change the firing rate, and can therefore encode information through its suppressive effect on the spiking probability. This effect nevertheless remains weak.
Relevant models to account for the firing rate response of neocortical neurons at a mesoscopic scale
<<response-to-in-vivo-like>>
Reduced neuronal models are widely used in theoretical studies to understand the computational properties of neural networks, especially in the asynchronous regime (the network-level analogue of the fluctuation-driven regime at the cellular level). It is therefore important to understand the potential limitations of those models.
When looking at the response as a function of the first two moments of the membrane potential fluctuations, we found results very similar to those presented in cite:Rauch2003. The same function (with adjusted parameters) was able to reproduce the firing rate response of the IaF model as well as of the neocortical neurons of our experimental model, suggesting that the IaF model is sufficient to qualitatively describe the observations of cite:Rauch2003 if we limit ourselves to those two moments.
But when we introduced the effect of the somatic conductance
More importantly, the IaF model over-estimates the contributions of
<sec:spk-mech>
The properties of the spike initiation mechanism should determine the characteristics of the firing rate response reported in this study.
At the soma, the spike seems to be initiated abruptly (cite:Naundorf2006); whether this is a functional property to extract fast signals (cite:Naundorf2006, cite:Tchumatchenko2011, cite:Ilin2013; alternatively cite:Brette2013) or an epiphenomenon of backpropagation toward the soma cite:McCormick2007 is still debated (reviewed in cite:Brette2015).
The measurements and analysis performed here provide some additional material for theoretical considerations in this controversy.
We showed that neurons
On the one hand, we show that another functional advantage of having a sharp activation is to resist an increasing input conductance (in the sense that the shunting due to background synaptic conductances would not impact the efficiency of the sodium current at eliciting spikes). For example, thanks to its infinitely sharp activation curve, the LIF model has this characteristic (see Figure [[]]). Nevertheless, neocortical neurons showed a dependency that could be explained only by a smooth activation curve (
On the other hand, because we investigated only single-compartment models, we suggested that this was the consequence of the finite sharpness (Section muG); but increasing the somatic input conductance also affects the electrical proximity between the soma and the initiation site (despite the constant electrotonic distance), thus potentially filtering the membrane potential fluctuations. Interpreting this measurement in the light of the existing models (cite:Naundorf2006, cite:McCormick2007 or cite:Brette2013) should be the focus of future investigations, as it could be a key additional constraint for theoretical models.
<sec:discussion-heterogeneity>
Our approach provided a way to quantify the heterogeneity of the firing rate response in pyramidal neurons by looking at the dispersion of the
First, pyramidal cells seem to have a very strong variability of excitability levels (
The question of the functional advantage of cellular heterogeneity has been tackled in a theoretical study cite:Mejias2012; the authors found that there might be an optimal heterogeneity level for neural coding in spiking networks.
Importantly, the cellular heterogeneity considered in their study only covers the mean-excitability heterogeneity in our data; the impact of the heterogeneity of the three other components on the computation of spiking networks remains to be investigated and will be the focus of future analysis.
Neocortical neurons in this experimental model are not mature neurons; a possibility is that the measured heterogeneity is the consequence of differences in the maturation level of individual cells.
<<higher-order>>
Although we believe that the four variables (
First, the stationary distribution of the subthreshold fluctuations could deviate from the Gaussian approximation (REF?), potentially because of the asymmetry of the driving force between excitation and inhibition (unlike the symmetry considered in this study). For the temporal dynamics, we provided a very approximate quantity: the \textit{global autocorrelation time}
Finally, we have put all non-linear effects (subthreshold adaptation, sodium inactivation, spike-frequency adaptation) into the black box of the firing rate response. A more careful analysis could take into account the impact of those properties on the membrane potential fluctuations and then investigate the spiking probability.
<<adaptation>>
No treatment of the
** De-normalization of the response :noexport: <<denormalization>>
For all neurons, despite their different electrophysiological
properties, we investigated a fixed domain of the two normalized
quantities
For example, under the hypothesis that the synaptic weights and time constants are fixed across cells, a small cell (∼ low
<<non-stationary-case>>
The results above apply to the stationary firing rate. They are necessary to compute the stationary properties of the dynamics of a recurrent network, but what is the relevance of those results for the dynamics in response to time-varying stimuli?
A first deviation might be an overestimation of the adaptation effect (and thus an underestimation of the firing rate response), because the population response will involve only a few spikes at the cellular level, while the firing rate response for those higher levels has been evaluated with adaptation already at its stationary level. But if the firing rate response of the population remains low, the error induced by our estimate should remain low.
% For the more general non stationary effects occurring at the population
% level in a network stimulated by transient stimuli, we should mention
% here that we tested the use of this stationary rate estimate in a mean
% field model (cite:ElBoustani2009) estimate Zerlaut \& Destexhe,
% unpublished observations.
<<network-applic>>
A natural extension of this work is to investigate how this firing rate estimate can be used as the cellular transfer function in macroscopic models of network dynamics.
Preliminary results (Goethals 2014) showed that this template was able to account for the stationary as well as the transient dynamics of artificial neural networks displaying asynchronous activity (cite:Kumar2008).
The consequences of those results for the dynamical properties of neural networks will be investigated in future studies.
Very briefly, the decreasing relationship as a function of the total conductance observed in neocortical neurons could help to stabilize the point of self-sustained activity in recurrent networks (cite:Kuhn2004, cite:Kumar2008).
In cite:Destexhe2009a, it was shown that including different electrophysiological types of neurons in recurrent networks could either stabilize asynchronous activity or trigger slow population oscillations depending on the parameters. In the present study, we provided a simple analytical form, quite easy to manipulate analytically, where variations of the template's parameters account for those different electrophysiological classes. It is now achievable to study analytically the impact (at least to first order) of those complex biophysical features on network dynamics. This is currently under investigation.
<sec:muG-Tv-deintricate>
A striking feature of the firing rate response of neocortical neurons is that it can show a remarkable sensitivity to the speed of the membrane potential fluctuations. Our modeling results suggested (see Figures fig:mean-sensitivitiesC and fig:biophysics-explain-heterE) that a way to obtain this strong sensitivity is to have a mechanism that penalizes the conversion of slow fluctuations into spikes, and fast sodium inactivation would be such a mechanism.
Another possibility for a strong dependency on the speed of the fluctuations is that those neocortical neurons could have a very sharp activation curve that would enable them to extract very fast input cite:Fourcaud-Trocme2003, as suggested for more mature pyramidal neurons in rat neocortex cite:Ilin2013. Nevertheless, even at the soma, the neurons of our recordings show a rather smooth activation curve ($k_a \sim 1.5$ mV, from the dynamic I-V curve analysis cite:Badel2008, not shown), rendering this possibility unlikely. Our hypothesis is then that, despite their relatively smooth activation curve, the sensitivity to the speed of the fluctuations is obtained from fast sodium inactivation that penalizes slow fluctuations.
We examine this hypothesis in detail in section sec:muG-Tv-deintricate-supp and summarize it here. We first evidenced the relative smoothness of the sodium activation curve by its effect on the firing probability in the fluctuation-driven regime. Indeed, if the sodium activation curve is smooth, then an increase of somatic conductance has the ability to shunt the sodium current, and the spiking probability should decrease when all fluctuation parameters ($\mu_V$, $\sigma_V$, $\tau_V^N$) are kept identical. We performed those experiments and indeed observed a decrease of the firing probability (see Figure fig:muG-Tv-mergedA), corresponding to an activation curve of $k_a \sim 3$ mV.

This phenomenon then gives us a way to test our hypothesis. In our protocols, to reproduce in vivo conditions, the fast fluctuations were achieved by increasing the somatic conductance (to reduce the membrane time constant, see Methods, sec:stimulation-design), meaning that our data contain the effect of the shunting conductance. Therefore, if we achieve fast fluctuations without the shunting effect (by playing with the correlations of the input current, see Methods, sec:stimulation-design), we should observe an even stronger sensitivity to the speed of the fluctuations (see Figure fig:muG-Tv-mergedB & C). Indeed, we observed an increase in the sensitivity to $\tau_V$ under those conditions; the mean sensitivity over cells was then much higher than that of the LIF model (which is an upper bound for the sensitivity in the absence of other mechanisms), meaning that fast extraction capabilities contribute little to the global sensitivity. Taken together, those experiments demonstrate that the sensitivity comes from a mechanism that penalizes slow fluctuations.

Additionally, the phenomenon described here is the continuous analogue of the phenomenon described in cite:Fernandez2011 for CA1 pyramidal cells, where the authors found that a high conductance state (corresponding to fast fluctuations in our study) could evoke more spikes than a low conductance state (slow fluctuations here). Their study also provides evidence for the role of fast sodium inactivation in the sensitivity to the speed of the fluctuations and is therefore compatible with our modeling results.