From e67aaf362501290fdbe0d17d937ef37508aae927 Mon Sep 17 00:00:00 2001
From: vemundss
Date: Tue, 9 Nov 2021 18:20:09 +0100
Subject: [PATCH 1/8] Fixes division by zero

---
 spatial_maps/maps.py | 16 ++++++----------
 1 file changed, 6 insertions(+), 10 deletions(-)

diff --git a/spatial_maps/maps.py b/spatial_maps/maps.py
index 5cc534e..b57d38a 100644
--- a/spatial_maps/maps.py
+++ b/spatial_maps/maps.py
@@ -140,17 +140,13 @@ def rate_map(self, smoothing, mask_zero_occupancy=False, interpolate_invalid=Fal
         rate_map : array
         '''
         spike_map = self.spike_map(smoothing=smoothing, **kwargs)
+        # to avoid infinity (x/0) we set zero occupancy to nan
         occupancy_map = self.occupancy_map(
-            smoothing=smoothing, threshold=threshold, **kwargs)
-        if mask_zero_occupancy:
-            # to avoid infinity (x/0) we set zero occupancy to nan
-            # this can be handy when analyzing low occupancy maps
-            rate_map = spike_map / occupancy_map
-            rate_map[self.time_pos == 0] = np.nan
+            smoothing=smoothing, mask_zero_occupancy=True, threshold=threshold, **kwargs)
+        rate_map = spike_map / occupancy_map
+        if not mask_zero_occupancy:
+            rate_map[np.isnan(rate_map)] = 0
         elif interpolate_invalid:
-            rate_map = spike_map / occupancy_map
             rate_map = interpolate_nan_2D(rate_map)
-        else:
-            rate_map = spike_map / occupancy_map
-            rate_map[np.isinf(rate_map)] = 0
         return rate_map
+

From 2681d64c66842ba7b914b4c47747b05055af9f28 Mon Sep 17 00:00:00 2001
From: vemundss
Date: Tue, 9 Nov 2021 18:33:20 +0100
Subject: [PATCH 2/8] Adds nancorrelate2d

---
 spatial_maps/__init__.py |  2 +-
 spatial_maps/tools.py    | 43 ++++++++++++++++++++++++++++++++++++++++
 2 files changed, 44 insertions(+), 1 deletion(-)

diff --git a/spatial_maps/__init__.py b/spatial_maps/__init__.py
index 1039e12..c26fa44 100644
--- a/spatial_maps/__init__.py
+++ b/spatial_maps/__init__.py
@@ -8,4 +8,4 @@
 from .stats import (
     sparsity, selectivity, information_rate, information_specificity,
     prob_dist)
-from .tools import autocorrelation, fftcorrelate2d
+from .tools import autocorrelation, fftcorrelate2d, nancorrelate2d
diff --git a/spatial_maps/tools.py b/spatial_maps/tools.py
index a9868ad..67fdb2c 100644
--- a/spatial_maps/tools.py
+++ b/spatial_maps/tools.py
@@ -42,6 +42,49 @@ def fftcorrelate2d(arr1, arr2, mode='full', normalize=False):
     corr = fftconvolve(arr1, np.fliplr(np.flipud(arr2)), mode=mode)
     return corr
 
+def nancorrelate2d(X, Y, mode='pearson') -> np.ndarray:
+    """
+    Calculate 2d pearson correlation from matrices with nans interpreted as
+    missing values, i.e. they are not included in any calculations. Also ignore
+    values outside correlation windows of X and Y.
+
+    Parameters
+    ----------
+    X : :
+        2D array
+    Y : np.array
+        2D array
+    mode : string
+        either 'pearson' or 'frobenius' for window aggregations
+
+    Returns
+    -------
+    result : np.array
+        nan and border ignored spatial cross correlation
+
+    Example
+    -------
+    >>> import numpy as np
+    >>> X, Y = np.ones((2,2)), np.ones((2,2))
+    >>> Z = nancorrelate2d(X, Y, mode='frobenius')
+    """
+    import numpy.ma as ma
+    X = ma.masked_array(X, mask=np.isnan(X))
+    Y = Y[::-1][:,::-1] #
+    Y = ma.masked_array(Y, mask=np.isnan(Y))
+
+    result = np.zeros(X.shape)
+    for i in range(X.shape[0]):
+        for j in range(X.shape[1]):
+            scope_i = slice(max(0, i - X.shape[0]//2), min(i + X.shape[0]//2, X.shape[0]))
+            scope_j = slice(max(0, j - X.shape[1]//2), min(j + X.shape[1]//2, X.shape[1]))
+            if mode == 'pearson':
+                result[i,j] = ma.corrcoef(X[scope_i,scope_j].flatten(), Y[scope_i,scope_j][::-1][:,::-1].flatten())[0,1]
+            else: # scaled (average) frobenius inner product
+                result[i,j] = (X[scope_i,scope_j] * Y[scope_i,scope_j][::-1][:,::-1]).mean()
+
+    return result
+
 
 def masked_corrcoef2d(arr1, arr2):
     """

From a018521c7f30b764c477c882a0afa2a25fed4580 Mon Sep 17 00:00:00 2001
From: vemundss
Date: Wed, 10 Nov 2021 09:32:25 +0100
Subject: [PATCH 3/8] Formatting changes to 'numpy' docstring

---
 spatial_maps/tools.py | 9 ++++++---
 1 file changed, 6 insertions(+), 3 deletions(-)

diff --git a/spatial_maps/tools.py b/spatial_maps/tools.py
index 67fdb2c..f72dde4 100644
--- a/spatial_maps/tools.py
+++ b/spatial_maps/tools.py
@@ -50,16 +50,16 @@ def nancorrelate2d(X, Y, mode='pearson') -> np.ndarray:
 
     Parameters
     ----------
-    X : :
+    X : np.ndarray
         2D array
-    Y : np.array
+    Y : np.ndarray
         2D array
     mode : string
         either 'pearson' or 'frobenius' for window aggregations
 
     Returns
    -------
-    result : np.array
+    np.ndarray
         nan and border ignored spatial cross correlation
 
     Example
@@ -67,6 +67,9 @@ def nancorrelate2d(X, Y, mode='pearson') -> np.ndarray:
     >>> import numpy as np
     >>> X, Y = np.ones((2,2)), np.ones((2,2))
     >>> Z = nancorrelate2d(X, Y, mode='frobenius')
+    >>> Z
+    array([[1., 1.],
+           [1., 1.]])
     """
     import numpy.ma as ma
     X = ma.masked_array(X, mask=np.isnan(X))

From acb3dda7b6350a4d2e1ac6305f0dae6356ca333d Mon Sep 17 00:00:00 2001
From: vemundss
Date: Wed, 10 Nov 2021 10:02:10 +0100
Subject: [PATCH 4/8] Small reformatting to 'numpy' docstring

---
 spatial_maps/tools.py | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/spatial_maps/tools.py b/spatial_maps/tools.py
index f72dde4..0c6c9fb 100644
--- a/spatial_maps/tools.py
+++ b/spatial_maps/tools.py
@@ -51,16 +51,16 @@ def nancorrelate2d(X, Y, mode='pearson') -> np.ndarray:
     Parameters
     ----------
     X : np.ndarray
-        2D array
+        2D array, input.
     Y : np.ndarray
-        2D array
+        2D array, kernel.
     mode : string
         either 'pearson' or 'frobenius' for window aggregations
 
     Returns
     -------
-    np.ndarray
-        nan and border ignored spatial cross correlation
+    Z : np.ndarray
+        2D array (same shape as X and Y) of nan and border ignored spatial cross correlation
 
     Example
     -------

From b19fb0db3800bcbddf63a160eeecbd958ed1b17c Mon Sep 17 00:00:00 2001
From: vemundss
Date: Fri, 12 Nov 2021 16:35:21 +0100
Subject: [PATCH 5/8] Return empty list before crash on indexing

---
 spatial_maps/fields.py | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/spatial_maps/fields.py b/spatial_maps/fields.py
index f3e9472..61157de 100644
--- a/spatial_maps/fields.py
+++ b/spatial_maps/fields.py
@@ -211,6 +211,9 @@ def calculate_field_centers(rate_map, labels, center_method='maxima'):
     else:
         raise ValueError(
             "invalid center_method flag '{}'".format(center_method))
+    if not bc:
+        # empty list
+        return bc
     bc = np.array(bc)
     bc[:,[0, 1]] = bc[:,[1, 0]] # y, x -> x, y
     return bc

From 447796016f8e1a512e0927cad83a474f8a44a5c4 Mon Sep 17 00:00:00 2001
From: vemundss
Date: Tue, 23 Nov 2021 13:51:57 +0100
Subject: [PATCH 6/8] Applies black formatting and updates mode param in nancorrelate2d

---
 spatial_maps/tools.py | 116 ++++++++++++++++++++++++++----------------
 1 file changed, 72 insertions(+), 44 deletions(-)

diff --git a/spatial_maps/tools.py b/spatial_maps/tools.py
index 0c6c9fb..389c967 100644
--- a/spatial_maps/tools.py
+++ b/spatial_maps/tools.py
@@ -1,11 +1,11 @@
 import numpy as np
 
 
-def autocorrelation(rate_map, mode='full', normalize=True):
+def autocorrelation(rate_map, mode="full", normalize=True):
     return fftcorrelate2d(rate_map, rate_map, mode=mode, normalize=normalize)
 
 
-def fftcorrelate2d(arr1, arr2, mode='full', normalize=False):
+def fftcorrelate2d(arr1, arr2, mode="full", normalize=False):
     """
     Cross correlation of two 2 dimensional arrays using fftconvolve from scipy.
     Here we exploit the fact that correlation is convolution with one input
@@ -34,6 +34,7 @@ def fftcorrelate2d(arr1, arr2, mode='full', normalize=False):
     """
     # TODO replace with astropy - just verify results are the same
     from scipy.signal import fftconvolve
+
     if normalize:
         a_ = np.reshape(arr1, (1, arr1.size))
         v_ = np.reshape(arr2, (1, arr2.size))
@@ -42,7 +43,8 @@ def fftcorrelate2d(arr1, arr2, mode='full', normalize=False):
     corr = fftconvolve(arr1, np.fliplr(np.flipud(arr2)), mode=mode)
     return corr
 
-def nancorrelate2d(X, Y, mode='pearson') -> np.ndarray:
+
+def nancorrelate2d(X, Y, mode="frobenius") -> np.ndarray:
     """
     Calculate 2d pearson correlation from matrices with nans interpreted as
     missing values, i.e. they are not included in any calculations.
Also ignore @@ -72,19 +74,31 @@ def nancorrelate2d(X, Y, mode='pearson') -> np.ndarray: [1., 1.]]) """ import numpy.ma as ma + X = ma.masked_array(X, mask=np.isnan(X)) - Y = Y[::-1][:,::-1] # + Y = Y[::-1][:, ::-1] # Y = ma.masked_array(Y, mask=np.isnan(Y)) result = np.zeros(X.shape) for i in range(X.shape[0]): for j in range(X.shape[1]): - scope_i = slice(max(0, i - X.shape[0]//2), min(i + X.shape[0]//2, X.shape[0])) - scope_j = slice(max(0, j - X.shape[1]//2), min(j + X.shape[1]//2, X.shape[1])) - if mode == 'pearson': - result[i,j] = ma.corrcoef(X[scope_i,scope_j].flatten(), Y[scope_i,scope_j][::-1][:,::-1].flatten())[0,1] - else: # scaled (average) frobenius inner product - result[i,j] = (X[scope_i,scope_j] * Y[scope_i,scope_j][::-1][:,::-1]).mean() + scope_i = slice( + max(0, i - X.shape[0] // 2), min(i + X.shape[0] // 2, X.shape[0]) + ) + scope_j = slice( + max(0, j - X.shape[1] // 2), min(j + X.shape[1] // 2, X.shape[1]) + ) + if mode == "pearson": + result[i, j] = ma.corrcoef( + X[scope_i, scope_j].flatten(), + Y[scope_i, scope_j][::-1][:, ::-1].flatten(), + )[0, 1] + elif mode == "frobenius": # scaled (average) frobenius inner product + result[i, j] = ( + X[scope_i, scope_j] * Y[scope_i, scope_j][::-1][:, ::-1] + ).mean() + else: + raise NotImplementedError("Method does not have mode={}".format(mode)) return result @@ -126,6 +140,7 @@ def masked_corrcoef2d(arr1, arr2): fill_value=1e+20) """ import numpy.ma as ma + a_ = np.reshape(arr1, (1, arr1.size)) v_ = np.reshape(arr2, (1, arr2.size)) corr = ma.corrcoef(a_, v_) @@ -133,21 +148,28 @@ def masked_corrcoef2d(arr1, arr2): def gaussian2D(amp, x, y, xc, yc, s): - return amp * np.exp(- 0.5 * (((x - xc) / s)**2 + ((y - yc) / s)**2)) + return amp * np.exp(-0.5 * (((x - xc) / s) ** 2 + ((y - yc) / s) ** 2)) def gaussian2D_asym(pos, amplitude, xc, yc, sigma_x, sigma_y, theta): - x,y = pos - - a = (np.cos(theta)**2)/(2*sigma_x**2) + (np.sin(theta)**2)/(2*sigma_y**2) - b = -(np.sin(2*theta))/(4*sigma_x**2) + (np.sin(2*theta))/(4*sigma_y**2) - c = (np.sin(theta)**2)/(2*sigma_x**2) + (np.cos(theta)**2)/(2*sigma_y**2) - g = amplitude*np.exp( - (a*((x-xc)**2) + 2*b*(x-xc)*(y-yc) - + c*((y-yc)**2))) + x, y = pos + + a = (np.cos(theta) ** 2) / (2 * sigma_x ** 2) + (np.sin(theta) ** 2) / ( + 2 * sigma_y ** 2 + ) + b = -(np.sin(2 * theta)) / (4 * sigma_x ** 2) + (np.sin(2 * theta)) / ( + 4 * sigma_y ** 2 + ) + c = (np.sin(theta) ** 2) / (2 * sigma_x ** 2) + (np.cos(theta) ** 2) / ( + 2 * sigma_y ** 2 + ) + g = amplitude * np.exp( + -(a * ((x - xc) ** 2) + 2 * b * (x - xc) * (y - yc) + c * ((y - yc) ** 2)) + ) return g.ravel() -def fit_gauss_asym(data, p0 = None, return_data=True): +def fit_gauss_asym(data, p0=None, return_data=True): """Fits an asymmetric 2D gauss function to the given data set, with optional guess parameters. Optimizes amplitude, center coordinates, sigmax, sigmay and angle. 
If no guess parameters, initializes with a thin gauss bell @@ -170,6 +192,7 @@ def fit_gauss_asym(data, p0 = None, return_data=True): """ from scipy.optimize import curve_fit + # Create x and y indices sx, sy = data.shape xmin, xmax = 0, 1 @@ -180,10 +203,10 @@ def fit_gauss_asym(data, p0 = None, return_data=True): if p0 is None: # initial guesses, use small gaussian at maxima as initial guess - ia = np.max(data) # amplitude + ia = np.max(data) # amplitude index = np.unravel_index(np.argmax(data), (sx, sy)) # center ix, iy = x[index], y[index] - isig = 0.01 + isig = 0.01 iang = 0 p0 = (ia, ix, iy, isig, isig, iang) @@ -192,7 +215,7 @@ def fit_gauss_asym(data, p0 = None, return_data=True): # TODO : Add test for pcov if return_data: data_fitted = gaussian2D_asym((x, y), *popt) - return popt, data_fitted.reshape(sx,sy) + return popt, data_fitted.reshape(sx, sy) else: return popt @@ -214,22 +237,27 @@ def stationary_poisson(t_start, t_stop, rate): time points from a Poisson process with rate rate. """ n_exp = rate * (t_stop - t_start) - return np.sort( - np.random.uniform( - t_start, t_stop, np.random.poisson(n_exp))) + return np.sort(np.random.uniform(t_start, t_stop, np.random.poisson(n_exp))) def make_test_grid_rate_map( - box_size, bin_size, sigma=0.05, spacing=0.3, amplitude=1., repeat=2, - orientation=0, offset=0): + box_size, + bin_size, + sigma=0.05, + spacing=0.3, + amplitude=1.0, + repeat=2, + orientation=0, + offset=0, +): box_size = np.array(box_size) xbins = np.arange(0, box_size[0], bin_size[0]) ybins = np.arange(0, box_size[1], bin_size[1]) - x,y = np.meshgrid(xbins, ybins) + x, y = np.meshgrid(xbins, ybins) p0 = np.array([box_size[0] / 2, box_size[1] / 2]) + offset pos = [p0] - angles = np.linspace(0, 2 * np.pi, 7)[:-1] + orientation + angles = np.linspace(0, 2 * np.pi, 7)[:-1] + orientation rate_map = np.zeros_like(x) rate_map += gaussian2D(amplitude, x, y, *p0, sigma) @@ -251,12 +279,11 @@ def add_hex(p0, pos, rate_map): return rate_map, np.array(pos), xbins, ybins -def make_test_border_map( - box_size, bin_size, sigma=0.05, amplitude=1., offset=0): +def make_test_border_map(box_size, bin_size, sigma=0.05, amplitude=1.0, offset=0): xbins = np.arange(0, box_size[0], bin_size[0]) ybins = np.arange(0, box_size[1], bin_size[1]) - x,y = np.meshgrid(xbins, ybins) + x, y = np.meshgrid(xbins, ybins) p0 = np.array((box_size[0], box_size[1] / 2)) + offset pos = [p0] @@ -271,17 +298,20 @@ def make_test_border_map( def random_walk(box_size, step_size, n_step, sampling_rate, low_pass=5): import scipy.signal as ss + # edited from https://stackoverflow.com/questions/48777345/vectorized-random-walk-in-python-with-boundaries start = np.array([0, 0]) - directions = np.array([(i,j) for i in [-1,0,1] for j in [-1,0,1]]) + directions = np.array([(i, j) for i in [-1, 0, 1] for j in [-1, 0, 1]]) boundaries = np.array([(0, box_size[0]), (0, box_size[1])]) size = np.diff(boundaries, axis=1).ravel() # "simulation" trajectory = np.cumsum( - directions[np.random.randint(0, 9, (n_step,))] * step_size, axis=0) + directions[np.random.randint(0, 9, (n_step,))] * step_size, axis=0 + ) x, y = ( np.abs((trajectory + start - boundaries[:, 0] + size) % (2 * size) - size) - + boundaries[:, 0]).T + + boundaries[:, 0] + ).T b, a = ss.butter(N=1, Wn=low_pass * 2 / sampling_rate) # zero phase shift filter @@ -295,26 +325,24 @@ def random_walk(box_size, step_size, n_step, sampling_rate, low_pass=5): def make_test_spike_map( - rate, sigma, pos_fields, box_size, n_step=10**4, step_size=.05): + rate, sigma, pos_fields, 
box_size, n_step=10 ** 4, step_size=0.05 +): from scipy.interpolate import interp1d def infield(pos, pos_fields): - dist = np.sqrt(np.sum((pos - pos_fields)**2, axis=1)) + dist = np.sqrt(np.sum((pos - pos_fields) ** 2, axis=1)) if any(dist <= sigma): return True else: return False - t = np.linspace(0, n_step * step_size / 1.5, n_step) # s / max_speed - x, y = random_walk( - box_size, step_size, n_step, sampling_rate=1 / (t[1] - t[0])) + t = np.linspace(0, n_step * step_size / 1.5, n_step) # s / max_speed + x, y = random_walk(box_size, step_size, n_step, sampling_rate=1 / (t[1] - t[0])) - st = stationary_poisson( - rate=rate, t_start=0, t_stop=t[-1]) + st = stationary_poisson(rate=rate, t_start=0, t_stop=t[-1]) spike_pos = np.array([interp1d(t, x)(st), interp1d(t, y)(st)]) - spikes = [times for times, pos in zip(st, spike_pos.T) - if infield(pos, pos_fields)] + spikes = [times for times, pos in zip(st, spike_pos.T) if infield(pos, pos_fields)] return np.array(x), np.array(y), np.array(t), np.array(spikes) From 9b7c71af2239a8127dd2b02cdbc09af0dddec509 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Vemund=20Sigmundson=20Sch=C3=B8yen?= Date: Mon, 28 Feb 2022 11:05:51 +0100 Subject: [PATCH 7/8] Off by one --- spatial_maps/fields.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/spatial_maps/fields.py b/spatial_maps/fields.py index 61157de..a00c140 100644 --- a/spatial_maps/fields.py +++ b/spatial_maps/fields.py @@ -25,7 +25,7 @@ def find_peaks(image): indices = np.arange(1, num_objects+1) peaks = ndimage.maximum_position(image, labels=labels, index=indices) peaks = np.array(peaks) - center = np.array(image.shape) / 2 + center = (np.array(image.shape) - 1) / 2 distances = np.linalg.norm(peaks - center, axis=1) peaks = peaks[distances.argsort()] return peaks From 864910d705f4587b1559fac36e89ca27d9dc5908 Mon Sep 17 00:00:00 2001 From: vemundss Date: Tue, 21 Jun 2022 13:25:57 +0200 Subject: [PATCH 8/8] NEW VERSION - Simplifies SpatialMap() usage --- spatial_maps/maps.py | 119 ++++++++++++++++++++++--------------------- 1 file changed, 62 insertions(+), 57 deletions(-) diff --git a/spatial_maps/maps.py b/spatial_maps/maps.py index b57d38a..8dd7a1c 100644 --- a/spatial_maps/maps.py +++ b/spatial_maps/maps.py @@ -17,12 +17,15 @@ def _adjust_bin_size(box_size, bin_size=None, bin_count=None): box_size = np.array([box_size, box_size]) if bin_size is None: - bin_size = np.array([box_size[0] / bin_count[0], - box_size[1] / bin_count[1]]) + bin_size = np.array([box_size[0] / bin_count[0], box_size[1] / bin_count[1]]) # round bin size of to closest requested bin size - bin_size = np.array([box_size[0] / int(box_size[0] / bin_size[0]), - box_size[1] / int(box_size[1] / bin_size[1])]) + bin_size = np.array( + [ + box_size[0] / int(box_size[0] / bin_size[0]), + box_size[1] / int(box_size[1] / bin_size[1]), + ] + ) return box_size, bin_size @@ -42,111 +45,113 @@ def smooth_map(rate_map, bin_size, smoothing, **kwargs): def _occupancy_map(x, y, t, xbins, ybins): t_ = np.append(t, t[-1] + np.median(np.diff(t))) time_in_bin = np.diff(t_) - values, _, _ = np.histogram2d( - x, y, bins=[xbins, ybins], weights=time_in_bin) + values, _, _ = np.histogram2d(x, y, bins=[xbins, ybins], weights=time_in_bin) return values def _spike_map(x, y, t, spike_times, xbins, ybins): t_ = np.append(t, t[-1] + np.median(np.diff(t))) spikes_in_bin, _ = np.histogram(spike_times, t_) - values, _, _ = np.histogram2d( - x, y, bins=[xbins, ybins], weights=spikes_in_bin) + values, _, _ = np.histogram2d(x, y, bins=[xbins, ybins], 
weights=spikes_in_bin) return values -def interpolate_nan_2D(array, method='nearest'): +def interpolate_nan_2D(array, method="nearest"): from scipy import interpolate + x = np.arange(0, array.shape[1]) y = np.arange(0, array.shape[0]) - #mask invalid values + # mask invalid values array = np.ma.masked_invalid(array) xx, yy = np.meshgrid(x, y) - #get only the valid values + # get only the valid values x1 = xx[~array.mask] y1 = yy[~array.mask] newarr = array[~array.mask] return interpolate.griddata( - (x1, y1), newarr.ravel(), (xx, yy), - method=method, fill_value=0) + (x1, y1), newarr.ravel(), (xx, yy), method=method, fill_value=0 + ) class SpatialMap: - def __init__(self, x, y, t, spike_times, box_size, bin_size, bin_count=None): + def __init__( + self, smoothing=0.05, box_size=[1.0, 1.0], bin_size=0.02, bin_count=None + ): + """ + Parameters + ---------- + smoothing : float + Smoothing of spike_map and occupancy_map before division + box_size : Sequence-like + Size of box in x and y direction + bin_size : float + Resolution of spatial maps + """ box_size, bin_size = _adjust_bin_size(box_size, bin_size, bin_count) xbins, ybins = _make_bins(box_size, bin_size) - self.spike_pos = _spike_map(x, y, t, spike_times, xbins, ybins) - self.time_pos = _occupancy_map(x, y, t, xbins, ybins) - assert all(self.spike_pos[self.time_pos == 0] == 0) + self.smoothing = smoothing self.bin_size = bin_size self.box_size = box_size self.xbins = xbins self.ybins = ybins - def spike_map(self, smoothing, mask_zero_occupancy=False, **kwargs): - if smoothing == 0.0: - spmap = copy(self.spike_pos) - else: - spmap = smooth_map(self.spike_pos, self.bin_size, smoothing, **kwargs) - + def spike_map(self, x, y, t, spike_times, mask_zero_occupancy=True, **kwargs): + spmap = _spike_map(x, y, t, spike_times, self.xbins, self.ybins) + spmap = ( + smooth_map(spmap, self.bin_size, self.smoothing, **kwargs) + if self.smoothing + else spmap + ) if mask_zero_occupancy: - spmap[self.time_pos == 0] = np.nan - + spmap[_occupancy_map(x, y, t, self.xbins, self.ybins) == 0] = np.nan return spmap - def occupancy_map(self, smoothing, mask_zero_occupancy=False, threshold=None, **kwargs): - '''Compute occupancy map as a histogram of postition - weighted with time spent. - Parameters: - ----------- - mask_zero_occupancy : bool - Set zero occupancy to nan - threshold : float - Set low occupancy times to zero as not to get spurious - large values in rate. 
- - ''' - ocmap = copy(self.time_pos) - if threshold is not None: - scmap[self.time_pos <= threshold] = 0 - if smoothing > 0: - ocmap = smooth_map(ocmap, self.bin_size, smoothing, **kwargs) - + def occupancy_map(self, x, y, t, mask_zero_occupancy=True, **kwargs): + ocmap = _occupancy_map(x, y, t, self.xbins, self.ybins) + ocmap_copy = copy(ocmap) # to mask zero occupancy after smoothing + ocmap = ( + smooth_map(ocmap, self.bin_size, self.smoothing, **kwargs) + if self.smoothing + else ocmap + ) if mask_zero_occupancy: - ocmap[self.time_pos == 0] = np.nan - + ocmap[ocmap_copy == 0] = np.nan return ocmap - def rate_map(self, smoothing, mask_zero_occupancy=False, interpolate_invalid=False, threshold=None, **kwargs): - '''Calculate rate map as spike_map / occupancy_map + def rate_map( + self, + x, + y, + t, + spike_times, + mask_zero_occupancy=True, + interpolate_invalid=False, + **kwargs + ): + """Calculate rate map as spike_map / occupancy_map Parameters ---------- - smoothing : float - Smoothing of spike_map and occupancy_map before division mask_zero_occupancy : bool Set pixels of zero occupancy to nan interpolate_invalid : bool Interpolate rate_map after division to remove invalid values, if False, and mask_zero_occupancy is False, invalid values are set to zero. - threshold : float - Low occupancy can produce spurious high rate, a threshold sets - occupancy below this to zero. kwargs : key word arguments to scipy.interpolate, when smoothing > 0 Returns ------- rate_map : array - ''' - spike_map = self.spike_map(smoothing=smoothing, **kwargs) + """ + spike_map = self.spike_map( + x, y, t, spike_times, mask_zero_occupancy=mask_zero_occupancy, **kwargs + ) # to avoid infinity (x/0) we set zero occupancy to nan - occupancy_map = self.occupancy_map( - smoothing=smoothing, mask_zero_occupancy=True, threshold=threshold, **kwargs) + occupancy_map = self.occupancy_map(x, y, t, mask_zero_occupancy=True, **kwargs) rate_map = spike_map / occupancy_map if not mask_zero_occupancy: rate_map[np.isnan(rate_map)] = 0 elif interpolate_invalid: rate_map = interpolate_nan_2D(rate_map) return rate_map -
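
A minimal usage sketch of the reworked SpatialMap API from PATCH 8/8, where the positional data is passed to the map methods rather than to the constructor. This is illustrative only and not part of any patch; it assumes the package is importable as spatial_maps and that the synthetic-data helpers behave as defined in the patched tools.py, and the rate, sigma, box and bin sizes are arbitrary choices:

    import numpy as np
    from spatial_maps.maps import SpatialMap
    from spatial_maps.tools import make_test_grid_rate_map, make_test_spike_map

    box_size, bin_size = [1.0, 1.0], 0.02

    # synthetic grid fields and a spike train generated along a random walk
    _, pos_fields, _, _ = make_test_grid_rate_map(box_size, [bin_size, bin_size], sigma=0.05)
    x, y, t, spike_times = make_test_spike_map(
        rate=10.0, sigma=0.05, pos_fields=pos_fields, box_size=box_size
    )

    # positional data now goes to the map methods, not the constructor
    sm = SpatialMap(smoothing=0.05, box_size=box_size, bin_size=bin_size)
    occupancy = sm.occupancy_map(x, y, t)  # seconds per bin, nan where unvisited
    rate_map = sm.rate_map(x, y, t, spike_times)  # spikes/s, nan where occupancy is zero
    rate_map_filled = sm.rate_map(x, y, t, spike_times, mask_zero_occupancy=False)  # nan -> 0
    print(rate_map.shape, np.nanmax(rate_map))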
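
Similarly, a short sketch of nancorrelate2d from PATCH 2/8 (mode names as finalized in PATCH 6/8), applied to two maps that contain nan in unvisited bins, which a plain fftcorrelate2d cannot handle directly. The array sizes and the masked entries below are made up for illustration:

    import numpy as np
    from spatial_maps.tools import nancorrelate2d

    rng = np.random.default_rng(0)
    A = rng.random((20, 20))
    B = rng.random((20, 20))
    A[:3, :3] = np.nan  # e.g. bins that were never visited
    B[5, 5] = np.nan

    # windowed mean of elementwise products, ignoring nan and window borders
    C_frob = nancorrelate2d(A, B, mode="frobenius")
    # windowed Pearson correlation between the two maps
    C_pear = nancorrelate2d(A, B, mode="pearson")
    print(C_frob.shape, np.nanmax(C_pear))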