
Merge pull request #2299 from AllenInstitute/rc/2.13.4
rc/2.13.4
morriscb committed Feb 23, 2022
2 parents 328d30f + 0ef1e95 commit c8a4ee5
Showing 9 changed files with 143 additions and 62 deletions.
5 changes: 5 additions & 0 deletions CHANGELOG.md
@@ -1,6 +1,11 @@
# Change Log
All notable changes to this project will be documented in this file.

## [2.13.4] = 2022-02-23
- Bug fix in ecephys
- Added support for VBN source density jobs.
- Bug fix for pg8000

## [2.13.3] = 2022-02-02
- Add ability to extract running speed from multi-stimulus experiments
- Compatible with pandas 1.4
2 changes: 1 addition & 1 deletion allensdk/__init__.py
@@ -35,7 +35,7 @@
#
import logging

__version__ = '2.13.3'
__version__ = '2.13.4'


try:
@@ -55,11 +55,17 @@ def get_inputs_from_lims(args) -> dict:

data['pre_stimulus_time'] = float(data['pre_stimulus_time'])
data['post_stimulus_time'] = float(data['post_stimulus_time'])
data['surface_channel_adjustment'] = int(data['surface_channel_adjustment'])
data['surface_channel_adjustment'] = int(
data['surface_channel_adjustment']
)

for probe in data['probes']:
probe['surface_channel_adjustment'] = int(probe['surface_channel_adjustment'])
probe['csd_output_path'] = os.path.join(output_root, os.path.split(probe['csd_output_path'])[-1])
probe['surface_channel_adjustment'] = int(
probe['surface_channel_adjustment']
)
probe['csd_output_path'] = os.path.join(
output_root, os.path.split(probe['csd_output_path'])[-1]
)
probe['phase'] = str(probe['phase'])

return data
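LIMS hands this module its parameters as JSON, so numeric fields can arrive as strings; the hunk above coerces them and re-roots each probe's CSD output under the job's output directory. A standalone sketch of the same pattern, with an invented payload (field names mirror the diff; values and paths are hypothetical):

```python
import os

# Invented LIMS-style payload; numeric fields arrive as strings.
data = {
    'pre_stimulus_time': '0.5',
    'post_stimulus_time': '1.0',
    'surface_channel_adjustment': '40',
    'probes': [{'surface_channel_adjustment': '40',
                'csd_output_path': '/lims/incoming/probe_a_csd.h5',
                'phase': '3a'}],
}
output_root = '/local/output'  # hypothetical job output directory

data['pre_stimulus_time'] = float(data['pre_stimulus_time'])
data['post_stimulus_time'] = float(data['post_stimulus_time'])
data['surface_channel_adjustment'] = int(data['surface_channel_adjustment'])
for probe in data['probes']:
    probe['surface_channel_adjustment'] = int(
        probe['surface_channel_adjustment'])
    # Keep only the basename, re-rooted under output_root.
    probe['csd_output_path'] = os.path.join(
        output_root, os.path.split(probe['csd_output_path'])[-1])
    probe['phase'] = str(probe['phase'])

print(data['probes'][0]['csd_output_path'])  # /local/output/probe_a_csd.h5
```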
@@ -85,32 +91,39 @@ def run_csd(args: dict) -> dict:
pre_stimulus_time=args['pre_stimulus_time'],
post_stimulus_time=args['post_stimulus_time'],
num_trials=args['num_trials'],
stimulus_index=args['stimulus']['index']
stimulus_index=args['stimulus']['index'],
start_field=args['start_field']
)

logging.info('Loading LFP data')
lfp_data_file = ContinuousFile(probe['lfp_data_path'],
probe['lfp_timestamps_path'],
probe['total_channels'])
lfp_raw, timestamps = lfp_data_file.load(memmap=args['memmap'],
memmap_thresh=args['memmap_thresh'])
lfp_raw, timestamps = lfp_data_file.load(
memmap=args['memmap'],
memmap_thresh=args['memmap_thresh']
)

if probe['phase'].lower() == '3a':
lfp_channels = lfp_data_file.get_lfp_channel_order()
else:
lfp_channels = np.arange(0, probe['total_channels'])

logging.info('Accumulating LFP data')
accumulated_lfp_data = accumulate_lfp_data(timestamps=timestamps,
lfp_raw=lfp_raw,
lfp_channels=lfp_channels,
trial_windows=trial_windows,
volts_per_bit=args['volts_per_bit'])
accumulated_lfp_data = accumulate_lfp_data(
timestamps=timestamps,
lfp_raw=lfp_raw,
lfp_channels=lfp_channels,
trial_windows=trial_windows,
volts_per_bit=args['volts_per_bit']
)

logging.info('Removing noisy and reference channels')
clean_lfp, clean_channels = select_good_channels(lfp=accumulated_lfp_data,
reference_channels=probe['reference_channels'],
noisy_channel_threshold=args['noisy_channel_threshold'])
clean_lfp, clean_channels = select_good_channels(
lfp=accumulated_lfp_data,
reference_channels=probe['reference_channels'],
noisy_channel_threshold=args['noisy_channel_threshold']
)

logging.info('Bandpass filtering LFP channel data')
filt_lfp = filter_lfp_channels(lfp=clean_lfp,
@@ -119,19 +132,29 @@ def run_csd(args: dict) -> dict:
filter_order=args['filter_order'])

logging.info('Interpolating LFP channel locations')
actual_locs = make_actual_channel_locations(0, accumulated_lfp_data.shape[1])
actual_locs = make_actual_channel_locations(
0,
accumulated_lfp_data.shape[1]
)
clean_actual_locs = actual_locs[clean_channels, :]
interp_locs = make_interp_channel_locations(0, accumulated_lfp_data.shape[1])
interp_lfp, spacing = interp_channel_locs(lfp=filt_lfp,
actual_locs=clean_actual_locs,
interp_locs=interp_locs)
interp_locs = make_interp_channel_locations(
0,
accumulated_lfp_data.shape[1]
)
interp_lfp, spacing = interp_channel_locs(
lfp=filt_lfp,
actual_locs=clean_actual_locs,
interp_locs=interp_locs
)

logging.info('Averaging LFPs over trials')
trial_mean_lfp = np.nanmean(interp_lfp, axis=0)

logging.info('Computing CSD')
current_source_density, csd_channels = compute_csd(trial_mean_lfp=trial_mean_lfp,
spacing=spacing)
current_source_density, csd_channels = compute_csd(
trial_mean_lfp=trial_mean_lfp,
spacing=spacing
)

logging.info('Saving data')
write_csd_to_h5(
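The body of compute_csd is collapsed out of this view. A standard current-source-density estimate, and a plausible sketch of the step above, is the sign-flipped discrete second spatial derivative of the trial-averaged LFP along the channel axis, scaled by the channel spacing; shapes and sign convention here are assumptions, not taken from this diff:

```python
import numpy as np

def csd_sketch(trial_mean_lfp: np.ndarray, spacing: float) -> np.ndarray:
    """trial_mean_lfp: (channels, samples); spacing: inter-channel distance."""
    # Second difference along the channel axis; edge channels get no estimate.
    return -(trial_mean_lfp[:-2]
             - 2.0 * trial_mean_lfp[1:-1]
             + trial_mean_lfp[2:]) / spacing ** 2

trial_mean_lfp = np.random.default_rng(0).normal(size=(384, 1000))
csd = csd_sketch(trial_mean_lfp, spacing=0.04)
print(csd.shape)  # (382, 1000) -- the two border channels are dropped
```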
@@ -9,12 +9,17 @@


def extract_trial_windows(
stimulus_table: pd.DataFrame, stimulus_name: str,
time_step: float, pre_stimulus_time: float, post_stimulus_time: float,
stimulus_table: pd.DataFrame,
stimulus_name: str,
time_step: float,
pre_stimulus_time: float,
post_stimulus_time: float,
num_trials: Optional[int] = None,
stimulus_index: Optional[int] = None,
name_field: str = 'stimulus_name', index_field: str = 'stimulus_index',
start_field: str = 'Start', end_field: str = 'End'
name_field: str = 'stimulus_name',
index_field: str = 'stimulus_index',
start_field: str = 'Start',
end_field: str = 'End'
) -> Tuple[List[np.ndarray], np.ndarray]:
'''Obtains time interval surrounding stimulus sweep onsets
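Only the signature of extract_trial_windows is reformatted here (one parameter per line, with start_field threaded through from the new input). As a rough sketch of what a function with this return annotation plausibly computes — per-trial sample windows around each onset in the configured start column — with an invented stimulus table:

```python
import numpy as np
import pandas as pd

def sketch_trial_windows(stimulus_table, time_step, pre_stimulus_time,
                         post_stimulus_time, start_field='Start'):
    # Relative time axis shared by all trials, then one absolute-time
    # window per stimulus onset. Return semantics are assumed, not
    # taken from this diff.
    rel = np.arange(-pre_stimulus_time, post_stimulus_time, time_step)
    windows = [onset + rel for onset in stimulus_table[start_field]]
    return windows, rel

table = pd.DataFrame({'start_time': [10.0, 20.0, 30.0]})
windows, rel = sketch_trial_windows(table, time_step=0.001,
                                    pre_stimulus_time=0.02,
                                    post_stimulus_time=0.1,
                                    start_field='start_time')  # assumed VBN-style column
print(len(windows), round(windows[0][0], 3))  # 3 9.98
```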
@@ -31,7 +31,12 @@ def select_good_channels(lfp: np.ndarray,
channel_variance = np.mean(np.std(lfp, 2), 0)
noisy_channels = np.where(channel_variance > noisy_channel_threshold)[0]

to_remove = np.concatenate((np.array(reference_channels), noisy_channels))
to_remove = np.concatenate(
(
np.array(reference_channels),
noisy_channels
)
).astype(int)
good_indices = np.delete(np.arange(0, lfp.shape[1]), to_remove)

# Remove noisy or reference channels (axis=1)
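The added .astype(int) looks like the substantive fix in this hunk: when reference_channels is empty, np.array([]) defaults to float64, concatenation promotes noisy_channels to float as well, and NumPy refuses float arrays as delete indices. A minimal reproduction:

```python
import numpy as np

reference_channels = []            # plausible for some probes
noisy_channels = np.array([2, 7])

to_remove = np.concatenate((np.array(reference_channels), noisy_channels))
print(to_remove.dtype)             # float64 -- unusable as an index dtype

# Without the cast, modern NumPy raises:
#   IndexError: arrays used as indices must be of integer (or boolean) type
good_indices = np.delete(np.arange(10), to_remove.astype(int))
print(good_indices)                # [0 1 3 4 5 6 8 9]
```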
@@ -83,6 +83,10 @@ class InputParameters(ArgSchema):
noisy_channel_threshold = Float(default=1500.0,
help='Threshold for removing noisy '
'channels from analysis')
start_field = String(
default='Start',
help='Column from which to extract start times.'
)


class ProbeOutputParameters(DefaultSchema):
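The new start_field input presumably exists so VBN jobs can point the module at stimulus tables whose onset column is not named 'Start'. A minimal argschema sketch of the field in isolation (the real InputParameters carries many more fields; 'start_time' is an assumed alternative column name):

```python
from argschema import ArgSchema, ArgSchemaParser
from argschema.fields import String

class TinyParams(ArgSchema):
    start_field = String(default='Start',
                         help='Column from which to extract start times.')

# args=[] keeps this sketch from reading sys.argv.
parser = ArgSchemaParser(input_data={'start_field': 'start_time'},
                         schema_type=TinyParams, args=[])
print(parser.args['start_field'])  # start_time
```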
102 changes: 67 additions & 35 deletions allensdk/internal/core/lims_utilities.py
@@ -1,4 +1,7 @@
import os, platform, re, logging
import os
import platform
import re
import logging
from allensdk.core.json_utilities import read_url_get

HDF5_FILE_TYPE_ID = 306905526
@@ -11,13 +14,14 @@


def get_well_known_files_by_type(wkfs, wkf_type_id):
out = [ os.path.join( wkf['storage_directory'], wkf['filename'] )
for wkf in wkfs
if wkf.get('well_known_file_type_id',None) == wkf_type_id ]
out = [os.path.join(wkf['storage_directory'], wkf['filename'])
for wkf in wkfs
if wkf.get('well_known_file_type_id', None) == wkf_type_id]

if len(out) == 0:
raise IOError("Could not find well known files with type %d." % wkf_type_id)

raise IOError(
"Could not find well known files with type %d." % wkf_type_id)

return out


@@ -26,35 +30,42 @@ def get_well_known_file_by_type(wkfs, wkf_type_id):

nout = len(out)
if nout != 1:
raise IOError("Expected single well known file with type %d. Got %d." % (wkf_type_id, nout))

raise IOError(
"Expected single well known file with type %d. Got %d." % (
wkf_type_id, nout))

return out[0]
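Usage sketch for the two by-type helpers above, against an invented well-known-files list of the shape they expect (module path and HDF5_FILE_TYPE_ID come from this file; the directory and filename are made up):

```python
from allensdk.internal.core.lims_utilities import (
    HDF5_FILE_TYPE_ID, get_well_known_file_by_type)

wkfs = [{'storage_directory': '/allen/programs/x/production/y',
         'filename': 'session.nwb',
         'well_known_file_type_id': HDF5_FILE_TYPE_ID}]

print(get_well_known_file_by_type(wkfs, HDF5_FILE_TYPE_ID))
# /allen/programs/x/production/y/session.nwb
```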


def get_well_known_files_by_name(wkfs, filename):
out = [ os.path.join( wkf['storage_directory'], wkf['filename'] )
for wkf in wkfs
if wkf['filename'] == filename ]
out = [os.path.join(wkf['storage_directory'], wkf['filename'])
for wkf in wkfs
if wkf['filename'] == filename]

if len(out) == 0:
raise IOError("Could not find well known files with name %s." % filename)
raise IOError(
"Could not find well known files with name %s." % filename)

return out


def get_well_known_file_by_name(wkfs, filename):
out = get_well_known_files_by_name(wkfs, filename)

nout = len(out)
if nout != 1:
raise IOError("Expected single well known file with name %s. Got %d." % (filename, nout))

raise IOError(
"Expected single well known file with name %s. Got %d." % (
filename, nout))

return out[0]


def append_well_known_file(wkfs, path, wkf_type_id=None, content_type=None):
record = {
'filename': os.path.basename(path),
'storage_directory': os.path.dirname(path)
}
}

if wkf_type_id is not None:
record['well_known_file_type_id'] = wkf_type_id
@@ -64,53 +75,72 @@ def append_well_known_file(wkfs, path, wkf_type_id=None, content_type=None):

for wkf in wkfs:
if wkf['filename'] == record['filename']:
logging.debug("found existing well known file record for %s, updating", path)
logging.debug(
"found existing well known file record for %s, updating", path)
wkf.update(record)
return

logging.debug("could not find existing well known file record for %s, appending", path)
logging.debug(
"could not find existing well known file record for %s, appending",
path)
wkfs.append(record)

def _connect(user="limsreader", host="limsdb2", database="lims2", password="limsro", port=5432):

def _connect(user="limsreader", host="limsdb2", database="lims2",
password="limsro", port=5432):
import pg8000

conn = pg8000.connect(user=user, host=host, database=database, password=password, port=port)
conn = pg8000.connect(user=user, host=host, database=database,
password=password, port=port)
return conn, conn.cursor()


def _select(cursor, query):
cursor.execute(query)
columns = [ d[0].decode("utf-8") for d in cursor.description ]
return [ dict(zip(columns, c)) for c in cursor.fetchall() ]
columns = [d[0].decode("utf-8") if isinstance(d[0], bytes) else d[0] for d
in cursor.description]
return [dict(zip(columns, c)) for c in cursor.fetchall()]
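This isinstance guard is presumably the changelog's "Bug fix for pg8000": older pg8000 releases reported cursor.description column names as bytes, newer ones as str, and the old unconditional .decode() broke on str. A standalone illustration of the guard:

```python
def column_names(description):
    # Decode only when the driver hands back bytes; pass str through.
    return [d[0].decode('utf-8') if isinstance(d[0], bytes) else d[0]
            for d in description]

old_style = [(b'id',), (b'filename',)]   # bytes names (older pg8000)
new_style = [('id',), ('filename',)]     # str names (newer pg8000)

assert column_names(old_style) == column_names(new_style) == ['id', 'filename']
print(column_names(new_style))
```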


def select(cursor, query):
raise DeprecationWarning("lims_utilities.select is deprecated. Please use lims_utilities.query instead.")
raise DeprecationWarning(
"lims_utilities.select is deprecated. Please use "
"lims_utilities.query instead.")


def connect(user="limsreader", host="limsdb2", database="lims2", password="limsro", port=5432):
raise DeprecationWarning("lims_utilities.connect is deprecated. Please use lims_utilities.query instead.")
def connect(user="limsreader", host="limsdb2", database="lims2",
password="limsro", port=5432):
raise DeprecationWarning(
"lims_utilities.connect is deprecated. Please use "
"lims_utilities.query instead.")

def query(query, user="limsreader", host="limsdb2", database="lims2", password="limsro", port=5432):

def query(query, user="limsreader", host="limsdb2", database="lims2",
password="limsro", port=5432):
conn, cursor = _connect(user, host, database, password, port)

# Guard against non-ascii characters in query
query = ''.join([i if ord(i) < 128 else ' ' for i in query])
query = ''.join([i if ord(i) < 128 else ' ' for i in query])

try:
results = _select(cursor, query)
finally:
cursor.close()
conn.close()
return results
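Hedged usage sketch for query(): it connects with the baked-in read-only defaults, strips non-ASCII characters, and per this diff now closes the cursor and connection in a finally block. Running it requires the internal LIMS host and the pg8000 package; the table name below is an assumption:

```python
from allensdk.internal.core.lims_utilities import query

rows = query("SELECT id, name FROM specimens LIMIT 5")  # assumed table name
for row in rows:          # each row is a dict keyed by column name
    print(row['id'], row['name'])
```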


def safe_system_path(file_name):
if platform.system() == "Windows":
return linux_to_windows(file_name)
else:
return convert_from_titan_linux(os.path.normpath(file_name))


def convert_from_titan_linux(file_name):
# Lookup table mapping project to program
project_to_program= {
"neuralcoding": "braintv",
project_to_program = {
"neuralcoding": "braintv",
'0378': "celltypes",
'conn': "celltypes",
'ctyconn': "celltypes",
@@ -128,18 +158,19 @@ def convert_from_titan_linux(file_name):
newpath = os.path.normpath(os.path.join(
'/allen',
'programs',
project_to_program.get(m.group(1),'undefined'),
project_to_program.get(m.group(1), 'undefined'),
'production',
m.group(1),
m.group(2)
))
return newpath
return file_name
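The regex that produces m in convert_from_titan_linux is collapsed out of this view; the hunk only shows the project-to-program lookup with its 'undefined' fallback feeding the rebuilt path. A sketch of the whole mapping, assuming a titan-style source layout of /projects/&lt;project&gt;/vol1/&lt;rest&gt; (an assumption, not confirmed by this diff):

```python
import os
import re

project_to_program = {"neuralcoding": "braintv", "0378": "celltypes"}

def sketch_convert_from_titan_linux(file_name):
    # Assumed pattern; the real one is in the collapsed region above.
    m = re.match('/projects/([^/]+)/vol1/(.*)', file_name)
    if m:
        return os.path.normpath(os.path.join(
            '/allen', 'programs',
            project_to_program.get(m.group(1), 'undefined'),
            'production', m.group(1), m.group(2)))
    return file_name

print(sketch_convert_from_titan_linux('/projects/neuralcoding/vol1/prod0/x.nwb'))
# /allen/programs/braintv/production/neuralcoding/prod0/x.nwb
```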


def linux_to_windows(file_name):
# Lookup table mapping project to program
project_to_program= {
"neuralcoding": "braintv",
project_to_program = {
"neuralcoding": "braintv",
'0378': "celltypes",
'conn': "celltypes",
'ctyconn': "celltypes",
@@ -154,14 +185,15 @@ def linux_to_windows(file_name):
# Simple case for new world order
m = re.match('/allen', file_name)
if m:
return "\\" + file_name.replace('/','\\')
return "\\" + file_name.replace('/', '\\')

# /data/ paths are being retained (for now)
# this will need to be extended to map directories to
# /allen/{programs,aibs}/workgroups/foo
m = re.match('/data/([^/]+)/(.*)', file_name)
if m:
return os.path.normpath(os.path.join('\\\\aibsdata', m.group(1), m.group(2)))
return os.path.normpath(
os.path.join('\\\\aibsdata', m.group(1), m.group(2)))

# Tough intermediary state where we have old paths
# being translated to new paths
@@ -170,7 +202,7 @@ def linux_to_windows(file_name):
newpath = os.path.normpath(os.path.join(
'\\\\allen',
'programs',
project_to_program.get(m.group(1),'undefined'),
project_to_program.get(m.group(1), 'undefined'),
'production',
m.group(1),
m.group(2)
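The two fully visible branches of linux_to_windows, exercised standalone; the printed comments assume a Windows interpreter (os.path.normpath keeps forward slashes on POSIX):

```python
import os
import re

def sketch_linux_to_windows(file_name):
    # New-world /allen paths become UNC paths directly.
    if re.match('/allen', file_name):
        return "\\" + file_name.replace('/', '\\')
    # /data/<share>/<rest> maps onto the aibsdata share.
    m = re.match('/data/([^/]+)/(.*)', file_name)
    if m:
        return os.path.normpath(
            os.path.join('\\\\aibsdata', m.group(1), m.group(2)))
    return file_name

print(sketch_linux_to_windows('/allen/programs/braintv/production/x.nwb'))
# \\allen\programs\braintv\production\x.nwb
print(sketch_linux_to_windows('/data/informatics/run1/out.h5'))
# on Windows: \\aibsdata\informatics\run1\out.h5
```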
1 change: 1 addition & 0 deletions doc_requirements.txt
@@ -34,3 +34,4 @@ seaborn<1.0.0
aiohttp==3.7.4
nest_asyncio==1.2.0
docutils<0.18
markupsafe==2.0.1
