Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

current progress #6

Draft
wants to merge 3 commits into
base: master
Choose a base branch
from
Draft
Show file tree
Hide file tree
Changes from 1 commit
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
25 changes: 25 additions & 0 deletions .devcontainer/devcontainer.json
Original file line number Diff line number Diff line change
@@ -0,0 +1,25 @@
// For format details, see https://aka.ms/devcontainer.json. For config options, see the
// README at: https://github.com/devcontainers/templates/tree/main/src/ubuntu
{
"name": "Ubuntu",
// Or use a Dockerfile or Docker Compose file. More info: https://containers.dev/guide/dockerfile
"image": "mcr.microsoft.com/devcontainers/base:jammy",
"features": {
"ghcr.io/devcontainers/features/python:1": {}
}

// Features to add to the dev container. More info: https://containers.dev/features.
// "features": {},

// Use 'forwardPorts' to make a list of ports inside the container available locally.
// "forwardPorts": [],

// Use 'postCreateCommand' to run commands after the container is created.
// "postCreateCommand": "uname -a",

// Configure tool-specific properties.
// "customizations": {},

// Uncomment to connect as root instead. More info: https://aka.ms/dev-containers-non-root.
// "remoteUser": "root"
}
9 changes: 3 additions & 6 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -3,12 +3,6 @@ __pycache__/
*.py[cod]
*$py.class







# Distribution / packaging
.Python
build/
Expand Down Expand Up @@ -105,3 +99,6 @@ venv.bak/

# mypy
.mypy_cache/

# macos
.DS_Store
6 changes: 6 additions & 0 deletions .vscode/settings.json
Original file line number Diff line number Diff line change
@@ -0,0 +1,6 @@
{
"[python]": {
"editor.defaultFormatter": "ms-python.black-formatter"
},
"python.formatting.provider": "none"
}
2 changes: 1 addition & 1 deletion ouropy/genconnection.py
Original file line number Diff line number Diff line change
Expand Up @@ -1026,7 +1026,7 @@ def write_aps(self, directory='', fname=''):
directory = os.getcwd()
if not os.path.isdir(directory):
os.mkdir(directory)
path = directory + '\\' + fname + '.npz'
path = os.path.join(directory, f'{fname}.npz')
try:
ap_list = [x[0].as_numpy() for x in self.ap_counters]
except:
Expand Down
38 changes: 28 additions & 10 deletions ouropy/gennetwork.py
Original file line number Diff line number Diff line change
Expand Up @@ -153,7 +153,7 @@ def save_ap_fig(self, fig, directory=None, file_name=None):
if not os.path.isdir(directory):
os.mkdir(directory)

full_file_path = directory + "\\" + file_name
full_file_path = os.path.join(directory, file_name)
if os.path.isfile(full_file_path):
raise ValueError("The file already exists.\n" +
"shelve_network does not overwrite files.")
Expand Down Expand Up @@ -181,7 +181,7 @@ def shelve_network(self, directory=None, file_name=None):
if not os.path.isdir(directory):
os.mkdir(directory)

full_file_path = directory + "\\" + file_name + '.pydd'
full_file_path = os.path.join(directory, file_name+'.pydd')
if os.path.isfile(full_file_path):
raise ValueError("The file already exists.\n" +
"shelve_network does not overwrite files.")
Expand All @@ -190,6 +190,26 @@ def shelve_network(self, directory=None, file_name=None):
# BUG with paradigm_frequency_inhibition at 1Hz, possibly too long sim?
curr_shelve[str(self)] = self.get_properties()
curr_shelve.close()

def shelve_aps(self, directory=None, file_name=None):
    """Persist each population's spike time stamps to a new shelve file.

    Stores, under the key 'populations', one list per population holding
    ``(cell_index, timestamps)`` pairs for every cell that spiked at least
    once (cells with empty trains are dropped; the index preserves identity).

    Parameters
    ----------
    directory : str, optional
        Target directory; defaults to the current working directory and is
        created if it does not exist.
    file_name : str, optional
        Base name without extension; defaults to
        ``'<network>_<local time>'`` (same convention as shelve_network).

    Raises
    ------
    ValueError
        If the target '.pydd' file already exists — never overwrites.
    """
    if not directory:
        directory = os.getcwd()
    if not file_name:
        loc_time_str = '_'.join(time.asctime(time.localtime()).split(' '))
        file_name = str(self) + '_' + loc_time_str
    if not os.path.isdir(directory):
        os.mkdir(directory)

    full_file_path = os.path.join(directory, file_name + '.pydd')
    if os.path.isfile(full_file_path):
        raise ValueError("The file already exists.\n" +
                         "shelve_aps does not overwrite files.")

    curr_shelve = shelve.open(full_file_path, flag='n')
    try:
        # Drop silent cells; enumerate first so the original cell index
        # survives the filtering.
        curr_shelve['populations'] = [
            [(i, timestamps)
             for i, timestamps in enumerate(p.get_timestamps())
             if len(timestamps) > 0]
            for p in self.populations
        ]
    finally:
        # Close even if serialization fails, so the db file is not left open.
        curr_shelve.close()


def __str__(self):
return str(self.__class__).split("'")[1]
Expand Down Expand Up @@ -939,7 +959,7 @@ def write_aps(self, directory='', fname=''):
directory = os.getcwd()
if not os.path.isdir(directory):
os.mkdir(directory)
path = directory + '\\' + fname + '.npz'
path = os.path.join(directory, f'{fname}.npz')
try:
ap_list = [x[0].as_numpy() for x in self.ap_counters]
except:
Expand Down Expand Up @@ -972,8 +992,10 @@ def mk_current_clamp(self, cells, amp=0.3, dur=5, delays=3):
self.cells[cell]._current_clamp_soma(amp=amp, dur=dur,
delay=delay)
def get_timestamps(self):
    """Return one array of spike time stamps per recorded cell.

    Prefers the zero-copy NEURON ``Vector.as_numpy()`` view; falls back to
    copying via ``np.array`` when the recorded object does not provide it.
    """
    try:
        return [x[0].as_numpy() for x in self.ap_counters]
    except AttributeError:
        # Recorded objects are plain sequences without .as_numpy();
        # copy them into numpy arrays instead.
        return [np.array(x[0]) for x in self.ap_counters]

def current_clamp_rnd(self, n_cells, amp=0.3, dur=5, delay=3):
"""DEPRECATE"""
Expand Down Expand Up @@ -1003,10 +1025,7 @@ def add_connection(self, conn):

def get_properties(self):
"""Get the properties of the network"""
try:
ap_time_stamps = [x[0].as_numpy() for x in self.ap_counters]
except:
ap_time_stamps = [np.array(x[0]) for x in self.ap_counters]
ap_time_stamps = self.get_timestamps()
ap_numbers = [x[1].n for x in self.ap_counters]
try:
v_rec = [x.as_numpy() for x in self.VRecords]
Expand All @@ -1024,7 +1043,6 @@ def get_properties(self):
'v_records': v_rec,
'VClamps_i': vclamp_i}

properties
return properties

def __str__(self):
Expand Down
2 changes: 1 addition & 1 deletion ouropy/genpopulation.py
Original file line number Diff line number Diff line change
Expand Up @@ -158,7 +158,7 @@ def write_aps(self, directory='', fname=''):
directory = os.getcwd()
if not os.path.isdir(directory):
os.mkdir(directory)
path = directory + '\\' + fname + '.npz'
path = os.path.join(directory, f'{fname}.npz')
try:
ap_list = [x[0].as_numpy() for x in self.ap_counters]
except:
Expand Down
2 changes: 1 addition & 1 deletion paradigm_pattern_separation_baseline.py
Original file line number Diff line number Diff line change
Expand Up @@ -129,7 +129,7 @@
str(input_scale).zfill(3) + '_' +
str(run).zfill(3) + '_')

nw.shelve_network(savedir, tuned_save_file_name)
nw.shelve_aps(savedir, tuned_save_file_name)

fig = nw.plot_aps(time=600)
tuned_fig_file_name = (str(nw) + "_spike-plot_paradigm_local-pattern" +
Expand Down
137 changes: 69 additions & 68 deletions pydentate/inputs.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,14 +5,15 @@
@author: Daniel & barisckuru
"""

import pdb

import numpy as np
import quantities as pq
from elephant import spike_train_generation as stg
from neo.core import AnalogSignal
import quantities as pq
from scipy import stats
from scipy.stats import skewnorm
from skimage.measure import profile_line
from scipy import stats
import pdb


def inhom_poiss(modulation_rate=10, max_rate=100, n_cells=400):
Expand All @@ -26,13 +27,9 @@ def inhom_poiss(modulation_rate=10, max_rate=100, n_cells=400):

t = np.arange(0, 0.5, sampling_interval.magnitude)

rate_profile = (np.sin(t*modulation_rate*np.pi*2-np.pi/2) + 1) * max_rate / 2
rate_profile = (np.sin(t * modulation_rate * np.pi * 2 - np.pi / 2) + 1) * max_rate / 2

rate_profile_as_asig = AnalogSignal(rate_profile,
units=1*pq.Hz,
t_start=0*pq.s,
t_stop=0.5*pq.s,
sampling_period=sampling_interval)
rate_profile_as_asig = AnalogSignal(rate_profile, units=1 * pq.Hz, t_start=0 * pq.s, t_stop=0.5 * pq.s, sampling_period=sampling_interval)

spike_trains = []
for x in range(n_cells):
Expand All @@ -41,11 +38,10 @@ def inhom_poiss(modulation_rate=10, max_rate=100, n_cells=400):
# If there is not, we move the next spike by 0.1ms
spike_trains.append(curr_train)

array_like = np.array([np.around(np.array(x.times)*1000, decimals=1)
for x in spike_trains], dtype=np.object)
array_like = np.array([np.around(np.array(x.times) * 1000, decimals=1) for x in spike_trains], dtype=object)
for arr_idx in range(array_like.shape[0]):
bad_idc = np.argwhere(np.diff(array_like[arr_idx]) == 0).flatten()
bad_idc = bad_idc+1
bad_idc = bad_idc + 1
while bad_idc.any():
for bad_idx in bad_idc:
array_like[arr_idx][bad_idx] = array_like[arr_idx][bad_idx] + 0.1
Expand All @@ -54,11 +50,20 @@ def inhom_poiss(modulation_rate=10, max_rate=100, n_cells=400):

return array_like


def gaussian_connectivity_gc_bc(n_pre, n_gc, n_bc, n_syn_gc, n_syn_bc, scale_gc, scale_bc):
    """Unimplemented stub.

    NOTE(review): parameter names suggest Gaussian connectivity from
    ``n_pre`` presynaptic cells onto two postsynaptic populations
    ('gc' and 'bc') with per-population synapse counts and Gaussian
    scales — confirm intended semantics before implementing; see the
    generalized ``gaussian_connectivity`` in this module.
    """
    pass

def gaussian_connectivity(n_pre, n_post, n_syn=[100,], scale=[1000, 12]):

def gaussian_connectivity(
n_pre,
n_post,
n_syn=[
100,
],
scale=[1000, 12],
):
"""TODO THIS IS A STUB FOR A GENERALIZED VERSION OF gaussian_connectivity_gc_bc
Choose n_syn postsynaptic cells for each presynaptic cells.
Clean up this function. It is not Pythonic.
Expand All @@ -84,9 +89,9 @@ def gaussian_connectivity(n_pre, n_post, n_syn=[100,], scale=[1000, 12]):
center = np.array(n_post) // 2
gauss = stats.norm(loc=center[0], scale=scale[0])
pdf = gauss.pdf(np.arange(n_post[0]))
pdf = pdf/pdf.sum()
pdf = pdf / pdf.sum()
post_idc = np.arange(n_post[0])
start_idc = np.random.randint(0, n_post[0]-1, size=n_pre)
start_idc = np.random.randint(0, n_post[0] - 1, size=n_pre)

out_list = []
for idx, n_post_pop in enumerate(n_post):
Expand All @@ -95,66 +100,68 @@ def gaussian_connectivity(n_pre, n_post, n_syn=[100,], scale=[1000, 12]):
for x in start_idc:
curr_idc = np.concatenate((post_idc[x:n_post_pop], post_idc[0:x]))
# pdb.set_trace()
pre_to_post.append(np.random.choice(curr_idc, size=n_syn[idx], replace=False,
p=pdf))
pre_to_post.append(np.random.choice(curr_idc, size=n_syn[idx], replace=False, p=pdf))
out_list.append(pre_to_post)
if idx+1 < len(n_post):
gauss = stats.norm(loc=center[idx+1], scale=scale[idx+1])
pdf = gauss.pdf(n_post[idx+1])
pdf = pdf/pdf.sum()
post_idc = np.arange(n_post[idx+1])
start_idc = np.array(((start_idc/n_post_pop)*n_post[idx+1]), dtype=int)

if idx + 1 < len(n_post):
gauss = stats.norm(loc=center[idx + 1], scale=scale[idx + 1])
pdf = gauss.pdf(n_post[idx + 1])
pdf = pdf / pdf.sum()
post_idc = np.arange(n_post[idx + 1])
start_idc = np.array(((start_idc / n_post_pop) * n_post[idx + 1]), dtype=int)

return np.array(pre_to_post)

#Solstad 2006 Grid Model

# Solstad 2006 Grid Model
def _grid_maker(spacing, orientation, pos_peak, arr_size, sizexy, max_rate):
#define the params from input here, scale the resulting array for maxrate and separate the xy for size and shift
arr_size = arr_size #200*200 dp was good enough in terms of resolution
# define the params from input here, scale the resulting array for maxrate and separate the xy for size and shift
arr_size = arr_size # 200*200 dp was good enough in terms of resolution
x, y = pos_peak
pos_peak = np.array([x,y])
pos_peak = np.array([x, y])
max_rate = max_rate
lambda_spacing = spacing*(arr_size/100) #100 required for conversion
k = (4*np.pi)/(lambda_spacing*np.sqrt(3))
lambda_spacing = spacing * (arr_size / 100) # 100 required for conversion
k = (4 * np.pi) / (lambda_spacing * np.sqrt(3))
degrees = orientation
theta = np.pi*(degrees/180)
theta = np.pi * (degrees / 180)
meterx, metery = sizexy
arrx = meterx*arr_size # *arr_size for defining the 2d array size
arry = metery*arr_size
dims = np.array([arrx,arry])
arrx = meterx * arr_size # *arr_size for defining the 2d array size
arry = metery * arr_size
dims = np.array([arrx, arry])
rate = np.ones(dims)
#implementation of grid function
# implementation of grid function
# 3 k values for 3 cos gratings with different angles to generate grid fields
k1 = ((k/np.sqrt(2))*np.array((np.cos(theta+(np.pi)/12) + np.sin(theta+(np.pi)/12),
np.cos(theta+(np.pi)/12) - np.sin(theta+(np.pi)/12)))).reshape(2,)
k2 = ((k/np.sqrt(2))*np.array((np.cos(theta+(5*np.pi)/12) + np.sin(theta+(5*np.pi)/12),
np.cos(theta+(5*np.pi)/12) - np.sin(theta+(5*np.pi)/12)))).reshape(2,)
k3 = ((k/np.sqrt(2))*np.array((np.cos(theta+(9*np.pi)/12) + np.sin(theta+(9*np.pi)/12),
np.cos(theta+(9*np.pi)/12) - np.sin(theta+(9*np.pi)/12)))).reshape(2,)

rate[i,j] = (np.cos(np.dot(k1, curr_dist))+
np.cos(np.dot(k2, curr_dist))+ np.cos(np.dot(k3, curr_dist)))/3
rate = max_rate*2/3*(rate+1/2) # arr is the resulting 2d grid out of 3 gratings
k1 = ((k / np.sqrt(2)) * np.array((np.cos(theta + (np.pi) / 12) + np.sin(theta + (np.pi) / 12), np.cos(theta + (np.pi) / 12) - np.sin(theta + (np.pi) / 12)))).reshape(
2,
)
k2 = ((k / np.sqrt(2)) * np.array((np.cos(theta + (5 * np.pi) / 12) + np.sin(theta + (5 * np.pi) / 12), np.cos(theta + (5 * np.pi) / 12) - np.sin(theta + (5 * np.pi) / 12)))).reshape(
2,
)
k3 = ((k / np.sqrt(2)) * np.array((np.cos(theta + (9 * np.pi) / 12) + np.sin(theta + (9 * np.pi) / 12), np.cos(theta + (9 * np.pi) / 12) - np.sin(theta + (9 * np.pi) / 12)))).reshape(
2,
)

rate[i, j] = (np.cos(np.dot(k1, curr_dist)) + np.cos(np.dot(k2, curr_dist)) + np.cos(np.dot(k3, curr_dist))) / 3
rate = max_rate * 2 / 3 * (rate + 1 / 2) # arr is the resulting 2d grid out of 3 gratings
return rate

def _grid_population(n_grid, max_rate, seed, arena_size=[1,1], arr_size=200):


def _grid_population(n_grid, max_rate, seed, arena_size=[1, 1], arr_size=200):
# skewed normal distribution for grid spacings
np.random.seed(seed)
median_spc = 43
spc_max = 100
skewness = 6 #Negative values are left skewed, positive values are right skewed.
grid_spc = skewnorm.rvs(a = skewness,loc=spc_max, size=n_grid) #Skewnorm function
grid_spc = grid_spc - min(grid_spc) #Shift the set so the minimum value is equal to zero.
grid_spc = grid_spc / max(grid_spc) #Standadize all the vlues between 0 and 1.
grid_spc = grid_spc * spc_max #Multiply the standardized values by the maximum value.
skewness = 6 # Negative values are left skewed, positive values are right skewed.
grid_spc = skewnorm.rvs(a=skewness, loc=spc_max, size=n_grid) # Skewnorm function
grid_spc = grid_spc - min(grid_spc) # Shift the set so the minimum value is equal to zero.
grid_spc = grid_spc / max(grid_spc) # Standadize all the vlues between 0 and 1.
grid_spc = grid_spc * spc_max # Multiply the standardized values by the maximum value.
grid_spc = grid_spc + (median_spc - np.median(grid_spc))
grid_ori = np.random.randint(0, high=60, size=[n_grid,1]) #uniform dist for orientation btw 0-60 degrees
grid_phase = np.random.randint(0, high=(arr_size-1), size=[n_grid,2]) #uniform dist grid phase

grid_ori = np.random.randint(0, high=60, size=[n_grid, 1]) # uniform dist for orientation btw 0-60 degrees
grid_phase = np.random.randint(0, high=(arr_size - 1), size=[n_grid, 2]) # uniform dist grid phase

# create a 3d array with grids for n_grid
rate_grids = np.zeros((arr_size, arr_size, n_grid))#empty array
rate_grids = np.zeros((arr_size, arr_size, n_grid)) # empty array
for i in range(n_grid):
x = grid_phase[i][0]
y = grid_phase[i][1]
Expand All @@ -166,17 +173,11 @@ def _grid_population(n_grid, max_rate, seed, arena_size=[1,1], arr_size=200):
def _inhom_poiss(arr, dur_s, poiss_seed=0, dt_s=0.025):
    """Draw one inhomogeneous Poisson spike train per cell.

    Parameters
    ----------
    arr : ndarray
        2-D array of rate profiles, one row per cell (Hz, sampled at dt_s).
    dur_s : float
        Duration of each train in seconds.
    poiss_seed : int, optional
        Base RNG seed; each cell is re-seeded with ``poiss_seed + cell_index``
        so individual trains are reproducible independently.
    dt_s : float, optional
        Sampling period of the rate profiles in seconds.

    Returns
    -------
    ndarray of object
        One array of spike times in milliseconds per cell.
    """
    np.random.seed(poiss_seed)
    n_cells = arr.shape[0]
    spi_arr = np.zeros((n_cells), dtype=np.ndarray)
    for grid_idc in range(n_cells):
        # Per-cell seed: deterministic trains regardless of iteration order.
        np.random.seed(poiss_seed + grid_idc)
        rate_profile = arr[grid_idc, :]
        asig = AnalogSignal(rate_profile,
                            units=1 * pq.Hz,
                            t_start=0 * pq.s,
                            t_stop=dur_s * pq.s,
                            sampling_period=dt_s * pq.s,
                            sampling_interval=dt_s * pq.s)
        curr_train = stg.inhomogeneous_poisson_process(asig)
        spi_arr[grid_idc] = np.array(curr_train.times * 1000)  # s -> ms
    return spi_arr