From 98ef0678a479ca9b37df3abda469431125af2364 Mon Sep 17 00:00:00 2001 From: Ethan Blackwood Date: Mon, 1 Apr 2024 03:11:11 -0400 Subject: [PATCH 01/86] Fix SBX functions for older tifffile versions and Windows (close mmaps after using) --- caiman/tests/test_sbx.py | 62 ++++++++++++++++++++------------------- caiman/utils/sbx_utils.py | 13 ++++---- 2 files changed, 40 insertions(+), 35 deletions(-) diff --git a/caiman/tests/test_sbx.py b/caiman/tests/test_sbx.py index 9aa1538ae..342c9d799 100644 --- a/caiman/tests/test_sbx.py +++ b/caiman/tests/test_sbx.py @@ -91,50 +91,52 @@ def test_load_subind(): def test_sbx_to_tif(): - tif_file = os.path.join(caiman_datadir(), 'temp', 'from_sbx.tif') + tif_filename = os.path.join(caiman_datadir(), 'temp', 'from_sbx.tif') + tif_file = None + try: file_2d = os.path.join(TESTDATA_PATH, '2d_sbx.sbx') data_2d_from_sbx = cm.load(file_2d) - sbx_utils.sbx_to_tif(file_2d, fileout=tif_file) - data_2d_from_tif = cm.load(tif_file) + sbx_utils.sbx_to_tif(file_2d, fileout=tif_filename) + data_2d_from_tif = cm.load(tif_filename) npt.assert_array_almost_equal(data_2d_from_sbx, data_2d_from_tif, err_msg='Data do not match when loaded from .sbx vs. .tif') - + file_3d = os.path.join(TESTDATA_PATH, '3d_sbx_1.sbx') data_3d_from_sbx = cm.load(file_3d, is3D=True) - sbx_utils.sbx_to_tif(file_3d, fileout=tif_file) - data_3d_from_tif = cm.load(tif_file, is3D=True) + sbx_utils.sbx_to_tif(file_3d, fileout=tif_filename) + data_3d_from_tif = cm.load(tif_filename, is3D=True) npt.assert_array_almost_equal(data_3d_from_sbx, data_3d_from_tif, err_msg='3D data do not match when loaded from .sbx vs. .tif') # make sure individual planes are not saved as 3D (i.e. RGB) - Y = tifffile.TiffFile(tif_file).series[0] - assert Y.shape == SHAPE_3D, 'Shape of data in tif file is wrong' - if Y[0].shape != SHAPE_3D[2:]: - if Y[0].shape == SHAPE_3D[1:]: - assert False, 'Tif "plane" is 3-dimensional (i.e., has channel dimension)' - else: - assert False, 'Shape of tif plane is wrong' - + with tifffile.TiffFile(tif_filename) as tif_file: + Y = tif_file.series[0] + assert Y.shape == SHAPE_3D, 'Shape of data in tif file is wrong' + if Y[0].shape != SHAPE_3D[2:]: + if Y[0].shape == SHAPE_3D[1:]: + assert False, 'Tif "plane" is 3-dimensional (i.e., has channel dimension)' + else: + assert False, 'Shape of tif plane is wrong' + # with subindices subinds = (slice(0, None, 2), [0, 1, 3], slice(None)) - sbx_utils.sbx_to_tif(file_2d, fileout=tif_file, subindices=subinds) - sub_data_from_tif = cm.load(tif_file) + sbx_utils.sbx_to_tif(file_2d, fileout=tif_filename, subindices=subinds) + sub_data_from_tif = cm.load(tif_filename) npt.assert_array_almost_equal(data_2d_from_sbx[subinds_to_ix(subinds, data_2d_from_sbx.shape)], sub_data_from_tif) # with plane - sbx_utils.sbx_to_tif(file_3d, fileout=tif_file, plane=0) - plane_data_from_tif = cm.load(tif_file) + sbx_utils.sbx_to_tif(file_3d, fileout=tif_filename, plane=0) + plane_data_from_tif = cm.load(tif_filename) npt.assert_array_almost_equal(data_3d_from_sbx[:, :, :, 0], plane_data_from_tif) - finally: # cleanup - if os.path.isfile(tif_file): - os.remove(tif_file) + if os.path.isfile(tif_filename): + os.remove(tif_filename) def test_sbx_chain_to_tif(): - tif_file = os.path.join(caiman_datadir(), 'temp', 'from_sbx.tif') + tif_filename = os.path.join(caiman_datadir(), 'temp', 'from_sbx.tif') try: file_3d_1 = os.path.join(TESTDATA_PATH, '3d_sbx_1.sbx') data_3d_1 = sbx_utils.sbxread(file_3d_1) @@ -142,16 +144,16 @@ def test_sbx_chain_to_tif(): data_3d_2 = 
sbx_utils.sbxread(file_3d_2) # normal chain - sbx_utils.sbx_chain_to_tif([file_3d_1, file_3d_2], fileout=tif_file) - data_chain_tif = cm.load(tif_file, is3D=True) + sbx_utils.sbx_chain_to_tif([file_3d_1, file_3d_2], fileout=tif_filename) + data_chain_tif = cm.load(tif_filename, is3D=True) data_chain_gt = np.concatenate([data_3d_1, data_3d_2], axis=0) npt.assert_array_almost_equal(data_chain_tif, data_chain_gt, err_msg='Tif from chain does not match expected') # matching subindices - sbx_utils.sbx_chain_to_tif([file_3d_1, file_3d_2], fileout=tif_file, + sbx_utils.sbx_chain_to_tif([file_3d_1, file_3d_2], fileout=tif_filename, subindices=(slice(None), slice(0, None, 2))) - data_chain_tif = cm.load(tif_file, is3D=True) + data_chain_tif = cm.load(tif_filename, is3D=True) data_chain_gt = data_chain_gt[:, ::2] npt.assert_array_almost_equal(data_chain_tif, data_chain_gt, err_msg='Tif from chain with subindices does not match expected') @@ -159,9 +161,9 @@ def test_sbx_chain_to_tif(): # non-matching subindices with compatible shapes subinds_1 = (slice(None), [0, 1, 3], slice(0, None, 2), [0, 2]) subinds_2 = (slice(1, None), [-4, -2, -1], slice(1, None, 2), [1, 3]) - sbx_utils.sbx_chain_to_tif([file_3d_1, file_3d_2], fileout=tif_file, + sbx_utils.sbx_chain_to_tif([file_3d_1, file_3d_2], fileout=tif_filename, subindices=[subinds_1, subinds_2]) - data_chain_tif = cm.load(tif_file, is3D=True) + data_chain_tif = cm.load(tif_filename, is3D=True) data_chain_gt = np.concatenate([data_3d_1[subinds_to_ix(subinds_1, data_3d_1.shape)], data_3d_2[subinds_to_ix(subinds_2, data_3d_2.shape)]], axis=0) npt.assert_array_almost_equal(data_chain_tif, data_chain_gt, @@ -169,6 +171,6 @@ def test_sbx_chain_to_tif(): finally: # cleanup - if os.path.isfile(tif_file): - os.remove(tif_file) + if os.path.isfile(tif_filename): + os.remove(tif_filename) \ No newline at end of file diff --git a/caiman/utils/sbx_utils.py b/caiman/utils/sbx_utils.py index f3001cdf5..f12c4a486 100644 --- a/caiman/utils/sbx_utils.py +++ b/caiman/utils/sbx_utils.py @@ -131,6 +131,7 @@ def sbx_to_tif(filename: str, fileout: Optional[str] = None, subindices: Optiona # Open tif as a memmap and copy to it memmap_tif = tifffile.memmap(fileout, shape=save_shape, dtype='uint16', photometric='MINISBLACK') _sbxread_helper(filename, subindices=subindices, channel=channel, out=memmap_tif, plane=plane, chunk_size=chunk_size) + memmap_tif._mmap.close() def sbx_chain_to_tif(filenames: list[str], fileout: str, subindices: Optional[ChainSubindices] = slice(None), @@ -187,7 +188,7 @@ def sbx_chain_to_tif(filenames: list[str], fileout: str, subindices: Optional[Ch channel = 0 elif any(shape[0] <= channel for shape in all_shapes): raise Exception('Not all files have the requested channel') - + # Allocate empty tif file with the final shape (do this first to ensure any existing file is overwritten) common_shape = tuple(map(int, all_shapes_out[0, 1:])) Ns = list(map(int, all_shapes_out[:, 0])) @@ -204,16 +205,18 @@ def sbx_chain_to_tif(filenames: list[str], fileout: str, subindices: Optional[Ch fileout = fileout + '.tif' dtype = np.float32 if to32 else np.uint16 - tifffile.imwrite(fileout, data=None, mode='w', shape=save_shape, bigtiff=bigtiff, imagej=imagej, - dtype=dtype, photometric='MINISBLACK') + with tifffile.TiffWriter(fileout, bigtiff=bigtiff, imagej=imagej) as tif: + tif.write(None, shape=save_shape, dtype=dtype, photometric='MINISBLACK') # Now convert each file - tif_memmap = tifffile.memmap(fileout, series=0) + memmap_tif = tifffile.memmap(fileout, series=0) 
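    # As a standalone sketch of the close-after-use pattern this patch applies
    # (the filename and shape below are hypothetical, purely illustrative):
    #
    #     import numpy as np
    #     import tifffile
    #
    #     mm = tifffile.memmap('out.tif', shape=(10, 64, 64), dtype='uint16')
    #     mm[:] = np.zeros((10, 64, 64), dtype='uint16')
    #     mm.flush()
    #     # On Windows the file cannot be reopened or deleted while the mmap
    #     # is still open, so release it explicitly once writing is finished:
    #     mm._mmap.close()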
offset = 0 for filename, subind, file_N in zip(filenames, subindices, Ns): - _sbxread_helper(filename, subindices=subind, channel=channel, out=tif_memmap[offset:offset+file_N], plane=plane, chunk_size=chunk_size) + _sbxread_helper(filename, subindices=subind, channel=channel, out=memmap_tif[offset:offset+file_N], plane=plane, chunk_size=chunk_size) offset += file_N + memmap_tif._mmap.close() + def sbx_shape(filename: str, info: Optional[dict] = None) -> tuple[int, int, int, int, int]: """ From a5de8bf2b621a2676e162a458007c598b2d28cf8 Mon Sep 17 00:00:00 2001 From: Pat Gunn Date: Wed, 3 Apr 2024 16:39:47 -0400 Subject: [PATCH 02/86] VERSION: 1.10.10 post-release --- VERSION | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/VERSION b/VERSION index b0dd75320..2ee4474a1 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -1.10.9 +1.10.10 From 0fec0b018ad55e751caadf6102b7a7c0a5e3420d Mon Sep 17 00:00:00 2001 From: "Martin B. Eriksen" Date: Mon, 8 Apr 2024 17:01:21 +0200 Subject: [PATCH 03/86] Extensions starts with a dot --- caiman/base/movies.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/caiman/base/movies.py b/caiman/base/movies.py index 60162a52e..4994020b4 100644 --- a/caiman/base/movies.py +++ b/caiman/base/movies.py @@ -1951,8 +1951,8 @@ def load_iter(file_name: Union[str, list[str]], subindices=None, var_name_hdf5: yield frame # was frame[..., 0].astype(outtype) return - elif extension in ('.hdf5', '.h5', '.nwb', '.mat', 'n5', 'zarr'): - if extension in ('n5', 'zarr'): # Thankfully, the zarr library lines up closely with h5py past the initial open + elif extension in ('.hdf5', '.h5', '.nwb', '.mat', '.n5', '.zarr'): + if extension in ('.n5', '.zarr'): # Thankfully, the zarr library lines up closely with h5py past the initial open f = zarr.open(file_name, "r") else: f = h5py.File(file_name, "r") From 4a08717a8d1903d6753c100337132fede7198143 Mon Sep 17 00:00:00 2001 From: Ethan Blackwood Date: Wed, 10 Apr 2024 00:34:27 -0400 Subject: [PATCH 04/86] Use memmap to load only indexed parts of files --- caiman/utils/sbx_utils.py | 33 ++++++++++++++++++--------------- 1 file changed, 18 insertions(+), 15 deletions(-) diff --git a/caiman/utils/sbx_utils.py b/caiman/utils/sbx_utils.py index f12c4a486..aeaabce84 100644 --- a/caiman/utils/sbx_utils.py +++ b/caiman/utils/sbx_utils.py @@ -449,35 +449,35 @@ def _sbxread_helper(filename: str, subindices: FileSubindices = slice(None), cha raise Exception('Plane cannot be specified for 2D data') save_shape = save_shape[:3] - frame_stride = _interpret_subindices(subindices[0], n_frames)[1] - if out is not None and out.shape != save_shape: raise Exception('Existing memmap is the wrong shape to hold loaded data') # Open File with open(filename + '.sbx') as sbx_file: - def get_chunk(n: int): + def get_chunk(n: int, step: int): + mmap = np.memmap(sbx_file, mode='c', dtype='uint16', shape=data_shape[:4] + (n*step,), order='F') + mmap = np.transpose(mmap, (0, 4, 2, 1, 3)) # to (chans, frames, Y, X, Z) + inds = (channel, slice(0, None, step)) + np.ix_(*subindices[1:]) + if not is3D: + inds = inds + (0,) # squeeze out singleton plane dim # Note: SBX files store the values strangely, it's necessary to invert each uint16 value to get the correct ones - chunk = np.invert(np.fromfile(sbx_file, dtype='uint16', count=frame_size//2 * n)) - chunk = np.reshape(chunk, data_shape[:4] + (n,), order='F') # (chans, X, Y, Z, frames) - chunk = np.transpose(chunk, (0, 4, 2, 1, 3))[channel] # to (frames, Y, X, Z) - if not is3D: # squeeze out 
singleton plane dim - chunk = np.squeeze(chunk, axis=3) - chunk = chunk[(slice(None),) + np.ix_(*subindices[1:])] + chunk = mmap[inds] + np.invert(chunk, out=chunk) # avoid copying, may be large if is3D and len(save_shape) == 3: # squeeze out plane dim after selecting chunk = chunk[:, :, :, plane] return chunk - if frame_stride == 1: + if isinstance(subindices[0], range): + frame_stride = subindices[0].step sbx_file.seek(subindices[0][0] * frame_size, 0) if chunk_size is None: # load a contiguous block all at once if out is None: # avoid allocating an extra array just to copy into it - out = get_chunk(N) + out = get_chunk(N, frame_stride) else: - out[:] = get_chunk(N) + out[:] = get_chunk(N, frame_stride) else: # load a contiguous block of data, but chunk it to not use too much memory at once if out is None: @@ -487,7 +487,7 @@ def get_chunk(n: int): offset = 0 while n_remaining > 0: this_chunk_size = min(n_remaining, chunk_size) - chunk = get_chunk(this_chunk_size) + chunk = get_chunk(this_chunk_size, frame_stride) out[offset:offset+this_chunk_size] = chunk n_remaining -= this_chunk_size offset += this_chunk_size @@ -501,7 +501,7 @@ def get_chunk(n: int): if counter % 100 == 0: logging.debug(f'Reading iteration: {k}') sbx_file.seek(k * frame_size, 0) - out[ind] = get_chunk(1) + out[ind] = get_chunk(1, 1) if isinstance(out, np.memmap): out.flush() @@ -523,7 +523,10 @@ def _interpret_subindices(subindices: DimSubindices, dim_extent: int) -> tuple[I f'(requested up to {subindices.stop})') else: iterable_elements = subindices - skip = 0 + if isinstance(subindices, range): + skip = subindices.step + else: + skip = 0 return iterable_elements, skip From 163022fcb4bdc738c3ae21268b9a172e92cd6db9 Mon Sep 17 00:00:00 2001 From: Ethan Blackwood Date: Wed, 10 Apr 2024 02:03:16 -0400 Subject: [PATCH 05/86] Simplify by using the same loading code for all time index types --- caiman/tests/test_sbx.py | 13 +++++++ caiman/utils/sbx_utils.py | 76 +++++++++++++++------------------------ 2 files changed, 42 insertions(+), 47 deletions(-) diff --git a/caiman/tests/test_sbx.py b/caiman/tests/test_sbx.py index 342c9d799..09092c2a2 100644 --- a/caiman/tests/test_sbx.py +++ b/caiman/tests/test_sbx.py @@ -4,6 +4,7 @@ import numpy.testing as npt import os import tifffile +import tracemalloc import caiman as cm from caiman.paths import caiman_datadir @@ -90,6 +91,18 @@ def test_load_subind(): npt.assert_array_equal(data_3d_plane0_3d[:, :, :, 0], data_3d_plane0_2d) +def test_load_efficiency(): + # Make sure that when loading, excess copies are not being made and + # data outside subindices are not being loaded into memory + file_2d = os.path.join(TESTDATA_PATH, '2d_sbx.sbx') + tracemalloc.start() + data_2d_sliced = sbx_utils.sbxread(file_2d, subindices=(slice(None), slice(None, None, 2))) + curr_mem, peak_mem = tracemalloc.get_traced_memory() + assert peak_mem / curr_mem < 1.1, 'Too much memory allocated when loading' + del data_2d_sliced + tracemalloc.stop() + + def test_sbx_to_tif(): tif_filename = os.path.join(caiman_datadir(), 'temp', 'from_sbx.tif') tif_file = None diff --git a/caiman/utils/sbx_utils.py b/caiman/utils/sbx_utils.py index aeaabce84..61dd8bdf3 100644 --- a/caiman/utils/sbx_utils.py +++ b/caiman/utils/sbx_utils.py @@ -454,58 +454,40 @@ def _sbxread_helper(filename: str, subindices: FileSubindices = slice(None), cha # Open File with open(filename + '.sbx') as sbx_file: - def get_chunk(n: int, step: int): - mmap = np.memmap(sbx_file, mode='c', dtype='uint16', shape=data_shape[:4] + (n*step,), 
order='F') - mmap = np.transpose(mmap, (0, 4, 2, 1, 3)) # to (chans, frames, Y, X, Z) - inds = (channel, slice(0, None, step)) + np.ix_(*subindices[1:]) - if not is3D: - inds = inds + (0,) # squeeze out singleton plane dim + sbx_mmap = np.memmap(sbx_file, mode='r', dtype='uint16', shape=data_shape, order='F') + sbx_mmap = np.transpose(sbx_mmap, (0, 4, 2, 1, 3)) # to (chans, frames, Y, X, Z) + sbx_mmap = sbx_mmap[channel] + if not is3D: # squeeze out singleton plane dim + sbx_mmap = sbx_mmap[..., 0] + elif plane is not None: # select plane relative to subindices + sbx_mmap = sbx_mmap[..., subindices[-1][plane]] + subindices = subindices[:-1] + inds = np.ix_(*subindices) + + if chunk_size is None: + # load a contiguous block all at once + chunk_size = N + elif out is None: + # Pre-allocate destination when loading in chunks + out = np.empty(save_shape, dtype=np.uint16) + + n_remaining = N + offset = 0 + while n_remaining > 0: + this_chunk_size = min(n_remaining, chunk_size) # Note: SBX files store the values strangely, it's necessary to invert each uint16 value to get the correct ones - chunk = mmap[inds] + chunk = sbx_mmap[(inds[0][offset:offset+this_chunk_size],) + inds[1:]] np.invert(chunk, out=chunk) # avoid copying, may be large - if is3D and len(save_shape) == 3: # squeeze out plane dim after selecting - chunk = chunk[:, :, :, plane] - return chunk - - if isinstance(subindices[0], range): - frame_stride = subindices[0].step - sbx_file.seek(subindices[0][0] * frame_size, 0) - - if chunk_size is None: - # load a contiguous block all at once - if out is None: - # avoid allocating an extra array just to copy into it - out = get_chunk(N, frame_stride) - else: - out[:] = get_chunk(N, frame_stride) - else: - # load a contiguous block of data, but chunk it to not use too much memory at once - if out is None: - out = np.empty(save_shape, dtype=np.uint16) - - n_remaining = N - offset = 0 - while n_remaining > 0: - this_chunk_size = min(n_remaining, chunk_size) - chunk = get_chunk(this_chunk_size, frame_stride) - out[offset:offset+this_chunk_size] = chunk - n_remaining -= this_chunk_size - offset += this_chunk_size - else: - # load one frame at a time, sorting indices for fastest access if out is None: - out = np.empty(save_shape, dtype=np.uint16) - - for counter, (k, ind) in enumerate(sorted(zip(subindices[0], range(N)))): - if counter % 100 == 0: - logging.debug(f'Reading iteration: {k}') - sbx_file.seek(k * frame_size, 0) - out[ind] = get_chunk(1, 1) + out = chunk # avoid copying when loading all data + else: + out[offset:offset+this_chunk_size] = chunk + n_remaining -= this_chunk_size + offset += this_chunk_size - if isinstance(out, np.memmap): - out.flush() - + if isinstance(out, np.memmap): + out.flush() return out From 2647b590bf027256888a7568213b1561bc9d1042 Mon Sep 17 00:00:00 2001 From: Ethan Blackwood Date: Thu, 11 Apr 2024 03:03:36 -0400 Subject: [PATCH 06/86] Revert tifffile write to what is in dev since it works --- caiman/utils/sbx_utils.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/caiman/utils/sbx_utils.py b/caiman/utils/sbx_utils.py index 61dd8bdf3..7fca35217 100644 --- a/caiman/utils/sbx_utils.py +++ b/caiman/utils/sbx_utils.py @@ -205,17 +205,17 @@ def sbx_chain_to_tif(filenames: list[str], fileout: str, subindices: Optional[Ch fileout = fileout + '.tif' dtype = np.float32 if to32 else np.uint16 - with tifffile.TiffWriter(fileout, bigtiff=bigtiff, imagej=imagej) as tif: - tif.write(None, shape=save_shape, dtype=dtype, photometric='MINISBLACK') 
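    # The dev-branch pattern being restored below, as a standalone sketch
    # (hypothetical filename and shape): write an empty TIFF first so the
    # bigtiff/imagej options are applied, then reopen it as a memmap and
    # fill it incrementally.
    #
    #     import tifffile
    #
    #     tifffile.imwrite('chain.tif', data=None, shape=(100, 64, 64),
    #                      dtype='uint16', photometric='MINISBLACK')
    #     mm = tifffile.memmap('chain.tif', series=0)
    #     mm[:50] = 0   # e.g. frames converted from a first input file
    #     mm[50:] = 1   # e.g. frames converted from a second input file
    #     mm.flush()
    #     del mm        # release the file handle when done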
+ tifffile.imwrite(fileout, data=None, shape=save_shape, bigtiff=bigtiff, imagej=imagej, + dtype=dtype, photometric='MINISBLACK') # Now convert each file - memmap_tif = tifffile.memmap(fileout, series=0) + tif_memmap = tifffile.memmap(fileout, series=0) offset = 0 for filename, subind, file_N in zip(filenames, subindices, Ns): - _sbxread_helper(filename, subindices=subind, channel=channel, out=memmap_tif[offset:offset+file_N], plane=plane, chunk_size=chunk_size) + _sbxread_helper(filename, subindices=subind, channel=channel, out=tif_memmap[offset:offset+file_N], plane=plane, chunk_size=chunk_size) offset += file_N - memmap_tif._mmap.close() + tif_memmap._mmap.close() def sbx_shape(filename: str, info: Optional[dict] = None) -> tuple[int, int, int, int, int]: From c75a4f81fe382ffbd3e35336061e9c711344167a Mon Sep 17 00:00:00 2001 From: Ethan Blackwood Date: Fri, 12 Apr 2024 00:51:03 -0400 Subject: [PATCH 07/86] Use del to close memmaps rather than _mmap; reduce code duplication --- caiman/utils/sbx_utils.py | 107 +++++++++++++++++--------------------- 1 file changed, 48 insertions(+), 59 deletions(-) diff --git a/caiman/utils/sbx_utils.py b/caiman/utils/sbx_utils.py index 7fca35217..5b9881323 100644 --- a/caiman/utils/sbx_utils.py +++ b/caiman/utils/sbx_utils.py @@ -80,6 +80,7 @@ def sbxread(filename: str, subindices: Optional[FileSubindices] = slice(None), c def sbx_to_tif(filename: str, fileout: Optional[str] = None, subindices: Optional[FileSubindices] = slice(None), + bigtiff: Optional[bool] = True, imagej: bool = False, to32: bool = False, channel: Optional[int] = None, plane: Optional[int] = None, chunk_size: int = 1000): """ Convert a single .sbx file to .tif format @@ -95,6 +96,9 @@ def sbx_to_tif(filename: str, fileout: Optional[str] = None, subindices: Optiona which frames to read (defaults to all) if a tuple of non-scalars, specifies slices of up to 4 dimensions in the order (frame, Y, X, Z). 
+ to32: bool + whether to save in float32 format (default is to keep as uint16) + channel: int | None which channel to save (required if data has >1 channel) @@ -106,32 +110,17 @@ def sbx_to_tif(filename: str, fileout: Optional[str] = None, subindices: Optiona how many frames to load into memory at once (None = load the whole thing) """ # Check filenames - basename, ext = os.path.splitext(filename) - if ext == '.sbx': - filename = basename - if fileout is None: + basename, ext = os.path.splitext(filename) + if ext == '.sbx': + filename = basename fileout = filename + '.tif' - else: - # Add a '.tif' extension if not already present - extension = os.path.splitext(fileout)[1].lower() - if extension not in ['.tif', '.tiff', '.btf']: - fileout = fileout + '.tif' if subindices is None: subindices = slice(None) - # Find the shape of the file we need - save_shape = _get_output_shape(filename, subindices)[0] - if plane is not None: - if len(save_shape) < 4: - raise Exception('Plane cannot be specified for 2D data') - save_shape = save_shape[:3] - - # Open tif as a memmap and copy to it - memmap_tif = tifffile.memmap(fileout, shape=save_shape, dtype='uint16', photometric='MINISBLACK') - _sbxread_helper(filename, subindices=subindices, channel=channel, out=memmap_tif, plane=plane, chunk_size=chunk_size) - memmap_tif._mmap.close() + sbx_chain_to_tif([filename], fileout, [subindices], bigtiff=bigtiff, imagej=imagej, to32=to32, + channel=channel, plane=plane, chunk_size=chunk_size) def sbx_chain_to_tif(filenames: list[str], fileout: str, subindices: Optional[ChainSubindices] = slice(None), @@ -150,11 +139,8 @@ def sbx_chain_to_tif(filenames: list[str], fileout: str, subindices: Optional[Ch see subindices for sbx_to_tif can specify separate subindices for each file if nested 2 levels deep; X, Y, and Z sizes must match for all files after indexing. 
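        As an illustration (the filenames are hypothetical; the index values
        mirror the accompanying tests, in (frame, Y, X, Z) order):

            # every other frame, three Y rows, all X columns of one file
            sbx_utils.sbx_to_tif('scan.sbx', fileout='scan.tif',
                                 subindices=(slice(0, None, 2), [0, 1, 3], slice(None)))

            # per-file subindices for a chain; both selections keep three Y rows,
            # half the X range and two Z planes, so the shapes stay compatible
            sbx_utils.sbx_chain_to_tif(['scan1.sbx', 'scan2.sbx'], 'both.tif',
                                       subindices=[(slice(None), [0, 1, 3], slice(0, None, 2), [0, 2]),
                                                   (slice(1, None), [-4, -2, -1], slice(1, None, 2), [1, 3])])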
- - to32: bool - whether to save in float32 format (default is to keep as uint16) - channel, plane, chunk_size: see sbx_to_tif + to32, channel, plane, chunk_size: see sbx_to_tif """ if subindices is None: subindices = slice(None) @@ -200,13 +186,15 @@ def sbx_chain_to_tif(filenames: list[str], fileout: str, subindices: Optional[Ch raise Exception('Plane cannot be specified for 2D data') save_shape = save_shape[:3] + # Add a '.tif' extension if not already present extension = os.path.splitext(fileout)[1].lower() if extension not in ['.tif', '.tiff', '.btf']: fileout = fileout + '.tif' dtype = np.float32 if to32 else np.uint16 + # Make the file first so we can pass in bigtiff and imagej options; otherwise could create using tifffile.memmap directly tifffile.imwrite(fileout, data=None, shape=save_shape, bigtiff=bigtiff, imagej=imagej, - dtype=dtype, photometric='MINISBLACK') + dtype=dtype, photometric='MINISBLACK', align=tifffile.TIFF.ALLOCATIONGRANULARITY) # Now convert each file tif_memmap = tifffile.memmap(fileout, series=0) @@ -215,7 +203,7 @@ def sbx_chain_to_tif(filenames: list[str], fileout: str, subindices: Optional[Ch _sbxread_helper(filename, subindices=subind, channel=channel, out=tif_memmap[offset:offset+file_N], plane=plane, chunk_size=chunk_size) offset += file_N - tif_memmap._mmap.close() + del tif_memmap # important to make sure file is closed (on Windows) def sbx_shape(filename: str, info: Optional[dict] = None) -> tuple[int, int, int, int, int]: @@ -452,39 +440,40 @@ def _sbxread_helper(filename: str, subindices: FileSubindices = slice(None), cha if out is not None and out.shape != save_shape: raise Exception('Existing memmap is the wrong shape to hold loaded data') - # Open File - with open(filename + '.sbx') as sbx_file: - sbx_mmap = np.memmap(sbx_file, mode='r', dtype='uint16', shape=data_shape, order='F') - sbx_mmap = np.transpose(sbx_mmap, (0, 4, 2, 1, 3)) # to (chans, frames, Y, X, Z) - sbx_mmap = sbx_mmap[channel] - if not is3D: # squeeze out singleton plane dim - sbx_mmap = sbx_mmap[..., 0] - elif plane is not None: # select plane relative to subindices - sbx_mmap = sbx_mmap[..., subindices[-1][plane]] - subindices = subindices[:-1] - inds = np.ix_(*subindices) - - if chunk_size is None: - # load a contiguous block all at once - chunk_size = N - elif out is None: - # Pre-allocate destination when loading in chunks - out = np.empty(save_shape, dtype=np.uint16) - - n_remaining = N - offset = 0 - while n_remaining > 0: - this_chunk_size = min(n_remaining, chunk_size) - # Note: SBX files store the values strangely, it's necessary to invert each uint16 value to get the correct ones - chunk = sbx_mmap[(inds[0][offset:offset+this_chunk_size],) + inds[1:]] - np.invert(chunk, out=chunk) # avoid copying, may be large - - if out is None: - out = chunk # avoid copying when loading all data - else: - out[offset:offset+this_chunk_size] = chunk - n_remaining -= this_chunk_size - offset += this_chunk_size + # Read from .sbx file, using memmap to avoid loading until necessary + sbx_mmap = np.memmap(filename + '.sbx', mode='r', dtype='uint16', shape=data_shape, order='F') + sbx_mmap = np.transpose(sbx_mmap, (0, 4, 2, 1, 3)) # to (chans, frames, Y, X, Z) + sbx_mmap = sbx_mmap[channel] + if not is3D: # squeeze out singleton plane dim + sbx_mmap = sbx_mmap[..., 0] + elif plane is not None: # select plane relative to subindices + sbx_mmap = sbx_mmap[..., subindices[-1][plane]] + subindices = subindices[:-1] + inds = np.ix_(*subindices) + + if chunk_size is None: + # load a contiguous 
block all at once + chunk_size = N + elif out is None: + # Pre-allocate destination when loading in chunks + out = np.empty(save_shape, dtype=np.uint16) + + n_remaining = N + offset = 0 + while n_remaining > 0: + this_chunk_size = min(n_remaining, chunk_size) + # Note: SBX files store the values strangely, it's necessary to invert each uint16 value to get the correct ones + chunk = sbx_mmap[(inds[0][offset:offset+this_chunk_size],) + inds[1:]] + np.invert(chunk, out=chunk) # avoid copying, may be large + + if out is None: + out = chunk # avoid copying when loading all data + else: + out[offset:offset+this_chunk_size] = chunk + n_remaining -= this_chunk_size + offset += this_chunk_size + + del sbx_mmap # Important to close file (on Windows) if isinstance(out, np.memmap): out.flush() From 6c836c028e1d72d4f52b6251714d5eb85386ebcc Mon Sep 17 00:00:00 2001 From: Ethan Blackwood Date: Fri, 12 Apr 2024 12:00:49 -0400 Subject: [PATCH 08/86] Add comment about not viewing memmap and rename Ns --- caiman/utils/sbx_utils.py | 24 +++++++++++++----------- 1 file changed, 13 insertions(+), 11 deletions(-) diff --git a/caiman/utils/sbx_utils.py b/caiman/utils/sbx_utils.py index 5b9881323..f349b9d0b 100644 --- a/caiman/utils/sbx_utils.py +++ b/caiman/utils/sbx_utils.py @@ -177,9 +177,9 @@ def sbx_chain_to_tif(filenames: list[str], fileout: str, subindices: Optional[Ch # Allocate empty tif file with the final shape (do this first to ensure any existing file is overwritten) common_shape = tuple(map(int, all_shapes_out[0, 1:])) - Ns = list(map(int, all_shapes_out[:, 0])) - N = sum(Ns) - save_shape = (N,) + common_shape + all_n_frames_out = list(map(int, all_shapes_out[:, 0])) + n_frames_out = sum(all_n_frames_out) + save_shape = (n_frames_out,) + common_shape if plane is not None: if len(save_shape) < 4: @@ -199,7 +199,7 @@ def sbx_chain_to_tif(filenames: list[str], fileout: str, subindices: Optional[Ch # Now convert each file tif_memmap = tifffile.memmap(fileout, series=0) offset = 0 - for filename, subind, file_N in zip(filenames, subindices, Ns): + for filename, subind, file_N in zip(filenames, subindices, all_n_frames_out): _sbxread_helper(filename, subindices=subind, channel=channel, out=tif_memmap[offset:offset+file_N], plane=plane, chunk_size=chunk_size) offset += file_N @@ -273,7 +273,7 @@ def sbx_shape(filename: str, info: Optional[dict] = None) -> tuple[int, int, int else: info['max_idx'] = filesize / info['bytesPerBuffer'] * factor - 1 - N = info['max_idx'] + 1 # Last frame + n_frames = info['max_idx'] + 1 # Last frame # Determine whether we are looking at a z-stack # Only consider optotune z-stacks - knobby schedules have too many possibilities and @@ -282,9 +282,9 @@ def sbx_shape(filename: str, info: Optional[dict] = None) -> tuple[int, int, int n_planes = info['otparam'][2] else: n_planes = 1 - N //= n_planes + n_frames //= n_planes - x = (int(info['nChan']), int(info['sz'][1]), int(info['recordsPerBuffer']), int(n_planes), int(N)) + x = (int(info['nChan']), int(info['sz'][1]), int(info['recordsPerBuffer']), int(n_planes), int(n_frames)) return x @@ -431,7 +431,7 @@ def _sbxread_helper(filename: str, subindices: FileSubindices = slice(None), cha raise Exception('Invalid scanbox metadata') save_shape, subindices = _get_output_shape(data_shape, subindices) - N = save_shape[0] + n_frames_out = save_shape[0] if plane is not None: if len(save_shape) < 4: raise Exception('Plane cannot be specified for 2D data') @@ -453,17 +453,19 @@ def _sbxread_helper(filename: str, subindices: FileSubindices = 
slice(None), cha if chunk_size is None: # load a contiguous block all at once - chunk_size = N + chunk_size = n_frames_out elif out is None: # Pre-allocate destination when loading in chunks out = np.empty(save_shape, dtype=np.uint16) - n_remaining = N + n_remaining = n_frames_out offset = 0 while n_remaining > 0: this_chunk_size = min(n_remaining, chunk_size) - # Note: SBX files store the values strangely, it's necessary to invert each uint16 value to get the correct ones + # Note: important to copy the data here instead of making a view, + # so the memmap can be closed (achieved by advanced indexing) chunk = sbx_mmap[(inds[0][offset:offset+this_chunk_size],) + inds[1:]] + # Note: SBX files store the values strangely, it's necessary to invert each uint16 value to get the correct ones np.invert(chunk, out=chunk) # avoid copying, may be large if out is None: From 149d6247262c92e434b045c08203ac8e86309237 Mon Sep 17 00:00:00 2001 From: Pat Gunn Date: Wed, 25 Oct 2023 13:59:53 -0400 Subject: [PATCH 09/86] Trying out a refactor of the basic demo to be less notebook-y, more app-y --- demos/general/demo_caiman_basic.py | 102 +++++++++++++++-------------- 1 file changed, 52 insertions(+), 50 deletions(-) diff --git a/demos/general/demo_caiman_basic.py b/demos/general/demo_caiman_basic.py index b5ce1e7e0..b8329233f 100755 --- a/demos/general/demo_caiman_basic.py +++ b/demos/general/demo_caiman_basic.py @@ -1,5 +1,4 @@ #!/usr/bin/env python -# -*- coding: utf-8 -*- """ Basic stripped-down demo for running the CNMF source extraction algorithm with @@ -12,13 +11,11 @@ This demo is designed to be run in an IDE or from a CLI; its plotting functions are tailored for that environment. -@authors: @agiovann and @epnev - """ +import argparse import cv2 import glob -from IPython import get_ipython import logging import numpy as np import os @@ -28,23 +25,12 @@ except: pass -try: - if __IPYTHON__: - print("Detected iPython") - ipython = get_ipython() - ipython.run_line_magic('load_ext', 'autoreload') - ipython.run_line_magic('autoreload', '2') -except NameError: - pass - - import caiman as cm from caiman.paths import caiman_datadir from caiman.source_extraction.cnmf import cnmf as cnmf from caiman.source_extraction.cnmf import params as params from caiman.summary_images import local_correlations_movie_offline -# %% # Set up the logger; change this if you like. 
# You can log to a file using the filename parameter, or make the output more or less # verbose by setting level to logging.DEBUG, logging.INFO, logging.WARNING, or logging.ERROR @@ -55,20 +41,27 @@ level=logging.WARNING) # filename="/tmp/caiman.log", -#%% def main(): - pass # For compatibility between running under an IDE or a CLI - - # %% start a cluster + cfg = handle_args() + # start a cluster c, dview, n_processes =\ - cm.cluster.setup_cluster(backend='multiprocessing', n_processes=None, + cm.cluster.setup_cluster(backend=cfg.cluster_backend, n_processes=None, single_thread=False) - - # %% set up some parameters - fnames = [os.path.join(caiman_datadir(), 'example_movies', 'demoMovie.tif')] - # file(s) to be analyzed - is_patches = True # flag for processing in patches or not + # Select input + if cfg.input is None: + fnames = [os.path.join(caiman_datadir(), 'example_movies', 'demoMovie.tif')] # file(s) to be analyzed + else: + fnames = cfg.input + # If you prefer to hardcode filenames, you could do something like this: + # fnames = ["/path/to/myfile1.avi", "/path/to/myfile2.avi"] + + if cfg.no_patches: + is_patches = False # flag for processing in patches or not + else: + is_patches = True + + # set up some parameters fr = 10 # approximate frame rate of data decay_time = 5.0 # length of transient @@ -99,27 +92,28 @@ def main(): opts = params.CNMFParams(params_dict=params_dict) - # %% Now RUN CaImAn Batch (CNMF) + # Run CaImAn Batch (CNMF) cnm = cnmf.CNMF(n_processes, params=opts, dview=dview) cnm = cnm.fit_file() - # %% plot contour plots of components + # plot contour plots of components Cns = local_correlations_movie_offline(fnames[0], remove_baseline=True, swap_dim=False, window=1000, stride=1000, winSize_baseline=100, quantil_min_baseline=10, dview=dview) Cn = Cns.max(axis=0) - cnm.estimates.plot_contours(img=Cn) + if not cfg.no_play: + cnm.estimates.plot_contours(img=Cn) - # %% load memory mapped file + # load memory mapped file Yr, dims, T = cm.load_memmap(cnm.mmap_file) images = np.reshape(Yr.T, [T] + list(dims), order='F') - # %% refit + # refit cnm2 = cnm.refit(images, dview=dview) - # %% COMPONENT EVALUATION + # COMPONENT EVALUATION # the components are evaluated in three ways: # a) the shape of each component must be correlated with the data # b) a minimum peak SNR is required over the length of a transient @@ -140,32 +134,40 @@ def main(): cnm2.estimates.evaluate_components(images, cnm2.params, dview=dview) - # %% visualize selected and rejected components - cnm2.estimates.plot_contours(img=Cn, idx=cnm2.estimates.idx_components) - - # %% visualize selected components - cnm2.estimates.view_components(images, idx=cnm2.estimates.idx_components, img=Cn) + if not cfg.no_play: + # visualize selected and rejected components + cnm2.estimates.plot_contours(img=Cn, idx=cnm2.estimates.idx_components) + # visualize selected components + cnm2.estimates.view_components(images, idx=cnm2.estimates.idx_components, img=Cn) - #%% only select high quality components (destructive) + # only select high quality components (destructive) # cnm2.estimates.select_components(use_object=True) # cnm2.estimates.plot_contours(img=Cn) - #%% save results + # save results cnm2.estimates.Cn = Cn cnm2.save(cnm2.mmap_file[:-4]+'hdf5') - # %% play movie with results (original, reconstructed, amplified residual) - cnm2.estimates.play_movie(images, magnification=4); + # play movie with results (original, reconstructed, amplified residual) + if not cfg.no_play: + cnm2.estimates.play_movie(images, magnification=4); 
- # %% STOP CLUSTER and clean up log files + # STOP CLUSTER and clean up log files cm.stop_server(dview=dview) - log_files = glob.glob('Yr*_LOG_*') - for log_file in log_files: - os.remove(log_file) - -# %% -# This is to mask the differences between running this demo in IDE -# versus from the CLI -if __name__ == "__main__": - main() \ No newline at end of file + if not cfg.keep_logs: + log_files = glob.glob('Yr*_LOG_*') + for log_file in log_files: + os.remove(log_file) + +def handle_args(): + parser = argparse.ArgumentParser(description="Demonstrate basic Caiman functionality") + parser.add_argument("--keep_logs", action="store_true", help="Keep temporary logfiles") + parser.add_argument("--no_patches", action="store_true", help="Do not use patches") + parser.add_argument("--no_play", action="store_true", help="Do not display results") + parser.add_argument("--cluster_backend", default="multiprocessing", help="Specify multiprocessing, ipyparallel, or local to pick an engine") + parser.add_argument("--input", action="append", help="File(s) to work on, provide multiple times for more files") + return parser.parse_args() + +######## +main() \ No newline at end of file From cd6480699652bc25c02d5b0c328b2d8cf0d5604f Mon Sep 17 00:00:00 2001 From: Pat Gunn Date: Wed, 25 Oct 2023 16:08:54 -0400 Subject: [PATCH 10/86] note on single_thread --- demos/general/demo_caiman_basic.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/demos/general/demo_caiman_basic.py b/demos/general/demo_caiman_basic.py index b8329233f..22b4a43de 100755 --- a/demos/general/demo_caiman_basic.py +++ b/demos/general/demo_caiman_basic.py @@ -165,7 +165,10 @@ def handle_args(): parser.add_argument("--keep_logs", action="store_true", help="Keep temporary logfiles") parser.add_argument("--no_patches", action="store_true", help="Do not use patches") parser.add_argument("--no_play", action="store_true", help="Do not display results") - parser.add_argument("--cluster_backend", default="multiprocessing", help="Specify multiprocessing, ipyparallel, or local to pick an engine") + parser.add_argument("--cluster_backend", default="multiprocessing", help="Specify multiprocessing, ipyparallel, or mono to pick an engine") + # TODO: Implement mono as providing single_thread to setup_cluster(), + # Or alternatively add a separate named background to setup_cluster() + # for that. parser.add_argument("--input", action="append", help="File(s) to work on, provide multiple times for more files") return parser.parse_args() From 4592b4394481c4755e0128960142f90a2b87244c Mon Sep 17 00:00:00 2001 From: Pat Gunn Date: Fri, 27 Oct 2023 14:41:08 -0400 Subject: [PATCH 11/86] demos get a readme (that caimanmanager will deliver, so it needs to be .txt) --- demos/README.txt | 12 ++++++++++++ 1 file changed, 12 insertions(+) create mode 100644 demos/README.txt diff --git a/demos/README.txt b/demos/README.txt new file mode 100644 index 000000000..e7d007b81 --- /dev/null +++ b/demos/README.txt @@ -0,0 +1,12 @@ +The demos serve a few purposes: +1) They demonstrate use of the Caiman software with some sample data either bundled or automatically fetchable +2) They act as a template for you to write your own analysis +3) They can be directly used if they already do almost exactly what you need to do with Caiman. + +There are two types of demos, each in their own folder: +* Jupyter demos are meant to be run interactively, in Jupyter or potentially one of its variants or something else compatible with Jupyter notebooks. 
They run in a cell-oriented way, allowing you to step through each part of the Caiman pipeline, seeing results and possibly changing things (values of parameters, files to work on, similar) between them. +* CLI demos are meant to be run from the commandline of your operating system (Powershell or CMD on Windows, Bash/Zsh inside a Terminal on Linux or OSX). When running Caiman this way, you might or might not want to see visualisations, and the arguments you provide to these demos control their behaviour. + +The CLI demos used to have a different intended use case, also being meant for cell-based interactive use, but in an IPython-based IDE like Spyder; this usage is no longer supported (use something compatible with Jupyter notebook formats for that). The earlier form of the demos is available in our use_cases repo in this directory: +https://github.com/flatironinstitute/caiman_use_cases/tree/main/use_cases/old_cli_demos + From e17931b33d134fb88b91e65fe0fc7e5960773e2e Mon Sep 17 00:00:00 2001 From: Pat Gunn Date: Fri, 27 Oct 2023 14:43:48 -0400 Subject: [PATCH 12/86] Demos readme: format it a little more nicely --- demos/README.txt | 25 ++++++++++++++++++++----- 1 file changed, 20 insertions(+), 5 deletions(-) diff --git a/demos/README.txt b/demos/README.txt index e7d007b81..4229e2837 100644 --- a/demos/README.txt +++ b/demos/README.txt @@ -1,12 +1,27 @@ The demos serve a few purposes: -1) They demonstrate use of the Caiman software with some sample data either bundled or automatically fetchable +1) They demonstrate use of the Caiman software with some sample data either + bundled or automatically fetchable 2) They act as a template for you to write your own analysis -3) They can be directly used if they already do almost exactly what you need to do with Caiman. +3) They can be directly used if they already do almost exactly what you need + to do with Caiman. There are two types of demos, each in their own folder: -* Jupyter demos are meant to be run interactively, in Jupyter or potentially one of its variants or something else compatible with Jupyter notebooks. They run in a cell-oriented way, allowing you to step through each part of the Caiman pipeline, seeing results and possibly changing things (values of parameters, files to work on, similar) between them. -* CLI demos are meant to be run from the commandline of your operating system (Powershell or CMD on Windows, Bash/Zsh inside a Terminal on Linux or OSX). When running Caiman this way, you might or might not want to see visualisations, and the arguments you provide to these demos control their behaviour. +* Jupyter demos are meant to be run interactively, in Jupyter or potentially + one of its variants or something else compatible with Jupyter notebooks. + They run in a cell-oriented way, allowing you to step through each part of + the Caiman pipeline, seeing results and possibly changing things (values of + parameters, files to work on, similar) between them. +* CLI demos are meant to be run from the commandline of your operating system + (Powershell or CMD on Windows, Bash/Zsh inside a Terminal on Linux or OSX). + When running Caiman this way, you might or might not want to see + visualisations, and the arguments you provide to these demos control their + behaviour. + +The CLI demos used to have a different intended use case, also being meant for +cell-based interactive use, but in an IPython-based IDE like Spyder; this usage +is no longer supported (use something compatible with Jupyter notebook formats +for that). 
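Instead, the demos are now driven by commandline arguments, for example
(the movie path here is purely illustrative):

  python demos/general/demo_caiman_basic.py --no_play --input /path/to/movie.tif

--input may be given multiple times to process several files, and --no_play
suppresses the interactive visualisations.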
The earlier form of the demos is available in our use_cases repo in +this directory: -The CLI demos used to have a different intended use case, also being meant for cell-based interactive use, but in an IPython-based IDE like Spyder; this usage is no longer supported (use something compatible with Jupyter notebook formats for that). The earlier form of the demos is available in our use_cases repo in this directory: https://github.com/flatironinstitute/caiman_use_cases/tree/main/use_cases/old_cli_demos From f30b975d0cd64327f7a9ac87eee465e9b4cf274a Mon Sep 17 00:00:00 2001 From: Pat Gunn Date: Fri, 3 Nov 2023 15:02:49 -0400 Subject: [PATCH 13/86] Smaller cleanups, move logger setup into main(), preparing this to be a model for other demo refactors --- demos/general/demo_caiman_basic.py | 34 +++++++++++++++--------------- 1 file changed, 17 insertions(+), 17 deletions(-) diff --git a/demos/general/demo_caiman_basic.py b/demos/general/demo_caiman_basic.py index 22b4a43de..f89d2425c 100755 --- a/demos/general/demo_caiman_basic.py +++ b/demos/general/demo_caiman_basic.py @@ -1,7 +1,7 @@ #!/usr/bin/env python """ -Basic stripped-down demo for running the CNMF source extraction algorithm with +Basic demo for running the CNMF source extraction algorithm with CaImAn and evaluation the components. The analysis can be run either in the whole FOV or in patches. For a complete pipeline (including motion correction) check demo_pipeline.py @@ -31,19 +31,21 @@ from caiman.source_extraction.cnmf import params as params from caiman.summary_images import local_correlations_movie_offline -# Set up the logger; change this if you like. -# You can log to a file using the filename parameter, or make the output more or less -# verbose by setting level to logging.DEBUG, logging.INFO, logging.WARNING, or logging.ERROR - -logging.basicConfig(format= - "%(relativeCreated)12d [%(filename)s:%(funcName)20s():%(lineno)s]"\ - "[%(process)d] %(message)s", - level=logging.WARNING) - # filename="/tmp/caiman.log", def main(): cfg = handle_args() + if cfg.logfile: + logging.basicConfig(format= + "%(relativeCreated)12d [%(filename)s:%(funcName)20s():%(lineno)s][%(process)d] %(message)s", + level=logging.WARNING, + filename=cfg.logfile) + # You can make the output more or less verbose by setting level to logging.DEBUG, logging.INFO, logging.WARNING, or logging.ERROR + else: + logging.basicConfig(format= + "%(relativeCreated)12d [%(filename)s:%(funcName)20s():%(lineno)s][%(process)d] %(message)s", + level=logging.WARNING) + # start a cluster c, dview, n_processes =\ cm.cluster.setup_cluster(backend=cfg.cluster_backend, n_processes=None, @@ -113,8 +115,8 @@ def main(): # refit cnm2 = cnm.refit(images, dview=dview) - # COMPONENT EVALUATION - # the components are evaluated in three ways: + # Component Evaluation + # Components are evaluated in three ways: # a) the shape of each component must be correlated with the data # b) a minimum peak SNR is required over the length of a transient # c) each shape passes a CNN based classifier (this will pick up only neurons @@ -152,7 +154,7 @@ def main(): if not cfg.no_play: cnm2.estimates.play_movie(images, magnification=4); - # STOP CLUSTER and clean up log files + # Stop the cluster and clean up log files cm.stop_server(dview=dview) if not cfg.keep_logs: @@ -165,11 +167,9 @@ def handle_args(): parser.add_argument("--keep_logs", action="store_true", help="Keep temporary logfiles") parser.add_argument("--no_patches", action="store_true", help="Do not use patches") 
parser.add_argument("--no_play", action="store_true", help="Do not display results") - parser.add_argument("--cluster_backend", default="multiprocessing", help="Specify multiprocessing, ipyparallel, or mono to pick an engine") - # TODO: Implement mono as providing single_thread to setup_cluster(), - # Or alternatively add a separate named background to setup_cluster() - # for that. + parser.add_argument("--cluster_backend", default="multiprocessing", help="Specify multiprocessing, ipyparallel, or single to pick an engine") parser.add_argument("--input", action="append", help="File(s) to work on, provide multiple times for more files") + parser.add_argument("--logfile", help="If specified, log to the named file") return parser.parse_args() ######## From fab66bcf4901aebea1138278a125c4c9feafa285 Mon Sep 17 00:00:00 2001 From: Pat Gunn Date: Fri, 3 Nov 2023 15:42:41 -0400 Subject: [PATCH 14/86] bringing demo_pipeline and demo_caiman_basic closer together as I refactor demo_pipeline --- demos/general/demo_caiman_basic.py | 12 ++- demos/general/demo_pipeline.py | 161 ++++++++++++++--------------- 2 files changed, 87 insertions(+), 86 deletions(-) diff --git a/demos/general/demo_caiman_basic.py b/demos/general/demo_caiman_basic.py index f89d2425c..afe410041 100755 --- a/demos/general/demo_caiman_basic.py +++ b/demos/general/demo_caiman_basic.py @@ -30,6 +30,7 @@ from caiman.source_extraction.cnmf import cnmf as cnmf from caiman.source_extraction.cnmf import params as params from caiman.summary_images import local_correlations_movie_offline +from caiman.utils.utils import download_demo def main(): @@ -46,13 +47,11 @@ def main(): "%(relativeCreated)12d [%(filename)s:%(funcName)20s():%(lineno)s][%(process)d] %(message)s", level=logging.WARNING) - # start a cluster - c, dview, n_processes =\ - cm.cluster.setup_cluster(backend=cfg.cluster_backend, n_processes=None, - single_thread=False) # Select input if cfg.input is None: fnames = [os.path.join(caiman_datadir(), 'example_movies', 'demoMovie.tif')] # file(s) to be analyzed + if fnames[0] in ['Sue_2x_3000_40_-46.tif', 'demoMovie.tif']: + fnames = [download_demo(fnames[0])] else: fnames = cfg.input # If you prefer to hardcode filenames, you could do something like this: @@ -94,6 +93,11 @@ def main(): opts = params.CNMFParams(params_dict=params_dict) + # start a cluster for parallel processing + c, dview, n_processes = cm.cluster.setup_cluster(backend=cfg.cluster_backend, + n_processes=None) + + # Run CaImAn Batch (CNMF) cnm = cnmf.CNMF(n_processes, params=opts, dview=dview) cnm = cnm.fit_file() diff --git a/demos/general/demo_pipeline.py b/demos/general/demo_pipeline.py index 4d50ecc94..f58807f40 100755 --- a/demos/general/demo_pipeline.py +++ b/demos/general/demo_pipeline.py @@ -13,14 +13,10 @@ This demo pertains to two photon data. 
For a complete analysis pipeline for one photon microendoscopic data see demo_pipeline_cnmfE.py - -copyright GNU General Public License v2.0 -authors: @agiovann and @epnev """ -#%% + import cv2 import glob -from IPython import get_ipython import logging import matplotlib.pyplot as plt import numpy as np @@ -31,43 +27,40 @@ except: pass -try: - if __IPYTHON__: - ipython = get_ipython() - ipython.run_line_magic('load_ext', 'autoreload') - ipython.run_line_magic('autoreload', '2') - ipython.run_line_magic('matplotlib', 'qt') -except NameError: - pass - - import caiman as cm from caiman.motion_correction import MotionCorrect from caiman.source_extraction.cnmf import cnmf as cnmf from caiman.source_extraction.cnmf import params as params -from caiman.utils.utils import download_demo from caiman.summary_images import local_correlations_movie_offline +from caiman.utils.utils import download_demo -# %% -# Set up the logger (optional); change this if you like. -# You can log to a file using the filename parameter, or make the output more -# or less verbose by setting level to logging.DEBUG, logging.INFO, -# logging.WARNING, or logging.ERROR -logging.basicConfig(format= - "%(relativeCreated)12d [%(filename)s:%(funcName)20s():%(lineno)s]"\ - "[%(process)d] %(message)s", - level=logging.WARNING) - -#%% -def main(): - pass # For compatibility between running in an IDE and the CLI - - #%% Select file(s) to be processed (download if not present) - fnames = ['Sue_2x_3000_40_-46.tif'] # filename to be processed - if fnames[0] in ['Sue_2x_3000_40_-46.tif', 'demoMovie.tif']: - fnames = [download_demo(fnames[0])] - #%% First setup some parameters for data and motion correction +def main(): + cfg = handle_args() + + if cfg.logfile: + logging.basicConfig(format= + "%(relativeCreated)12d [%(filename)s:%(funcName)20s():%(lineno)s][%(process)d] %(message)s", + level=logging.WARNING, + filename=cfg.logfile) + # You can make the output more or less verbose by setting level to logging.DEBUG, logging.INFO, logging.WARNING, or logging.ERROR + else: + logging.basicConfig(format= + "%(relativeCreated)12d [%(filename)s:%(funcName)20s():%(lineno)s][%(process)d] %(message)s", + level=logging.WARNING) + + # Select file(s) to be processed (download if not present) + if cfg.input is None: + fnames = ['Sue_2x_3000_40_-46.tif'] # filename to be processed + if fnames[0] in ['Sue_2x_3000_40_-46.tif', 'demoMovie.tif']: + fnames = [download_demo(fnames[0])] + else: + fnames = cfg.input + # If you prefer to hardcode filenames, you could do something like this: + # fnames = ["/path/to/myfile1.avi", "/path/to/myfile2.avi"] + + + # First setup some parameters for data and motion correction # dataset dependent parameters fr = 30 # imaging rate in frames per second @@ -88,22 +81,23 @@ def main(): # maximum deviation allowed for patch with respect to rigid shifts max_deviation_rigid = 3 - mc_dict = { - 'fnames': fnames, - 'fr': fr, - 'decay_time': decay_time, - 'dxy': dxy, - 'pw_rigid': pw_rigid, - 'max_shifts': max_shifts, - 'strides': strides, - 'overlaps': overlaps, - 'max_deviation_rigid': max_deviation_rigid, - 'border_nan': 'copy' - } - - opts = params.CNMFParams(params_dict=mc_dict) - - # %% play the movie (optional) + params_dict = {'fnames': fnames, + 'fr': fr, + 'decay_time': decay_time, + 'dxy': dxy, + 'pw_rigid': pw_rigid, + 'max_shifts': max_shifts, + 'strides': strides, + 'overlaps': overlaps, + 'max_deviation_rigid': max_deviation_rigid, + 'border_nan': 'copy'} + + opts = params.CNMFParams(params_dict=params_dict) + + # 
start a cluster for parallel processing + c, dview, n_processes = cm.cluster.setup_cluster(backend=cfg.cluster_backend) + + # play the movie (optional) # playing the movie using opencv. It requires loading the movie in memory. # To close the video press q display_images = False @@ -114,19 +108,16 @@ def main(): moviehandle = m_orig.resize(1, 1, ds_ratio) moviehandle.play(q_max=99.5, fr=60, magnification=2) - # %% start a cluster for parallel processing - c, dview, n_processes = cm.cluster.setup_cluster( - backend='multiprocessing', n_processes=None, single_thread=False) - # %%% MOTION CORRECTION + # % MOTION CORRECTION # first we create a motion correction object with the specified parameters mc = MotionCorrect(fnames, dview=dview, **opts.get_group('motion')) # note that the file is not loaded in memory - # %% Run (piecewise-rigid motion) correction using NoRMCorre + # Run (piecewise-rigid motion) correction using NoRMCorre mc.motion_correct(save_movie=True) - # %% compare with original movie + # compare with original movie if display_images: m_orig = cm.load_movie_chain(fnames) m_els = cm.load(mc.mmap_file) @@ -135,7 +126,7 @@ def main(): m_els.resize(1, 1, ds_ratio)], axis=2) moviehandle.play(fr=60, q_max=99.5, magnification=2) # press q to exit - # %% MEMORY MAPPING + # MEMORY MAPPING border_to_0 = 0 if mc.border_nan == 'copy' else mc.border_to_0 # you can include the boundaries of the FOV if you used the 'copy' option # during motion correction, although be careful about the components near @@ -150,12 +141,12 @@ def main(): images = np.reshape(Yr.T, [T] + list(dims), order='F') # load frames in python format (T x X x Y) - # %% restart cluster to clean up memory + # restart cluster to clean up memory cm.stop_server(dview=dview) c, dview, n_processes = cm.cluster.setup_cluster( - backend='multiprocessing', n_processes=None, single_thread=False) + backend=cfg.cluster_backend, n_processes=None, single_thread=False) - # %% parameters for source extraction and deconvolution + # parameters for source extraction and deconvolution p = 1 # order of the autoregressive system gnb = 2 # number of global background components merge_thr = 0.85 # merging threshold, max correlation allowed @@ -188,7 +179,7 @@ def main(): opts.change_params(params_dict=opts_dict); - # %% RUN CNMF ON PATCHES + # RUN CNMF ON PATCHES # First extract spatial and temporal components on patches and combine them # for this step deconvolution is turned off (p=0). 
If you want to have # deconvolution within each patch change params.patch['p_patch'] to a @@ -198,13 +189,13 @@ def main(): cnm = cnmf.CNMF(n_processes, params=opts, dview=dview) cnm = cnm.fit(images) - # %% Alternate way to run above pipeline using a single method (optional) + # Alternate way to run above pipeline using a single method (optional) # you can also perform the motion correction plus cnmf fitting steps # simultaneously after defining your parameters object using # cnm1 = cnmf.CNMF(n_processes, params=opts, dview=dview) # cnm1.fit_file(motion_correct=True) - # %% plot contours of found components + # plot contours of found components Cns = local_correlations_movie_offline(mc.mmap_file[0], remove_baseline=True, window=1000, stride=1000, winSize_baseline=100, quantil_min_baseline=10, @@ -214,70 +205,76 @@ def main(): cnm.estimates.plot_contours(img=Cn) plt.title('Contour plots of found components'); - #%% save results + # save results cnm.estimates.Cn = Cn cnm.save(fname_new[:-5]+'_init.hdf5') - # %% RE-RUN seeded CNMF on accepted patches to refine and perform deconvolution + # RE-RUN seeded CNMF on accepted patches to refine and perform deconvolution cnm2 = cnm.refit(images, dview=dview) - # %% COMPONENT EVALUATION - # the components are evaluated in three ways: + # Component Evaluation + # Components are evaluated in three ways: # a) the shape of each component must be correlated with the data # b) a minimum peak SNR is required over the length of a transient # c) each shape passes a CNN based classifier + min_SNR = 2 # signal to noise ratio for accepting a component rval_thr = 0.85 # space correlation threshold for accepting a component + use_cnn = True cnn_thr = 0.99 # threshold for CNN based classifier cnn_lowest = 0.1 # neurons with cnn probability lower than this value are rejected cnm2.params.set('quality', {'decay_time': decay_time, 'min_SNR': min_SNR, 'rval_thr': rval_thr, - 'use_cnn': True, + 'use_cnn': use_cnn, 'min_cnn_thr': cnn_thr, 'cnn_lowest': cnn_lowest}) cnm2.estimates.evaluate_components(images, cnm2.params, dview=dview) - # %% PLOT COMPONENTS + # Plot Components cnm2.estimates.plot_contours(img=Cn, idx=cnm2.estimates.idx_components) - # %% VIEW TRACES (accepted and rejected) + # View Traces (accepted and rejected) if display_images: cnm2.estimates.view_components(images, img=Cn, idx=cnm2.estimates.idx_components) cnm2.estimates.view_components(images, img=Cn, idx=cnm2.estimates.idx_components_bad) - #%% update object with selected components (optional) + # update object with selected components (optional) cnm2.estimates.select_components(use_object=True) - #%% Extract DF/F values + # Extract DF/F values cnm2.estimates.detrend_df_f(quantileMin=8, frames_window=250) - #%% Show final traces + # Show final traces cnm2.estimates.view_components(img=Cn) - #%% cnm2.estimates.Cn = Cn cnm2.save(cnm2.mmap_file[:-4] + 'hdf5') - #%% reconstruct denoised movie (press q to exit) + # reconstruct denoised movie (press q to exit) if display_images: cnm2.estimates.play_movie(images, q_max=99.9, gain_res=2, magnification=2, bpx=border_to_0, include_bck=False) # background not shown - #%% STOP CLUSTER and clean up log files + # Stop cluster and clean up log files cm.stop_server(dview=dview) log_files = glob.glob('*_LOG_*') for log_file in log_files: os.remove(log_file) -# %% -# This is to mask the differences between running this demo in an IDE -# versus from the CLI -if __name__ == "__main__": - main() +def handle_args(): + parser = 
argparse.ArgumentParser(description="Demonstrate 2P Pipeline using batch algorithm") + parser.add_argument("--cluster_backend", default="multiprocessing", help="Specify multiprocessing, ipyparallel, or single to pick an engine") + parser.add_argument("--input", action="append", help="File(s) to work on, provide multiple times for more files") + parser.add_argument("--logfile", help="If specified, log to the named file") + return parser.parse_args() + + +######## +main() From 4ff7854071fe5003b42071f984cd03e7be8154a0 Mon Sep 17 00:00:00 2001 From: Pat Gunn Date: Fri, 3 Nov 2023 17:11:00 -0400 Subject: [PATCH 15/86] CLI Demos: Finish re-syncing demo_caiman_basic and demo_pipeline --- demos/general/demo_caiman_basic.py | 10 +++--- demos/general/demo_pipeline.py | 50 ++++++++++++++---------------- 2 files changed, 29 insertions(+), 31 deletions(-) diff --git a/demos/general/demo_caiman_basic.py b/demos/general/demo_caiman_basic.py index afe410041..472a5461f 100755 --- a/demos/general/demo_caiman_basic.py +++ b/demos/general/demo_caiman_basic.py @@ -2,7 +2,7 @@ """ Basic demo for running the CNMF source extraction algorithm with -CaImAn and evaluation the components. The analysis can be run either in the +Caiman and evaluation the components. The analysis can be run either in the whole FOV or in patches. For a complete pipeline (including motion correction) check demo_pipeline.py @@ -47,8 +47,8 @@ def main(): "%(relativeCreated)12d [%(filename)s:%(funcName)20s():%(lineno)s][%(process)d] %(message)s", level=logging.WARNING) - # Select input if cfg.input is None: + # If no input is specified, use sample data, downloading if necessary fnames = [os.path.join(caiman_datadir(), 'example_movies', 'demoMovie.tif')] # file(s) to be analyzed if fnames[0] in ['Sue_2x_3000_40_-46.tif', 'demoMovie.tif']: fnames = [download_demo(fnames[0])] @@ -94,8 +94,7 @@ def main(): opts = params.CNMFParams(params_dict=params_dict) # start a cluster for parallel processing - c, dview, n_processes = cm.cluster.setup_cluster(backend=cfg.cluster_backend, - n_processes=None) + c, dview, n_processes = cm.cluster.setup_cluster(backend=cfg.cluster_backend) # Run CaImAn Batch (CNMF) @@ -109,6 +108,7 @@ def main(): winSize_baseline=100, quantil_min_baseline=10, dview=dview) Cn = Cns.max(axis=0) + Cn[np.isnan(Cn)] = 0 if not cfg.no_play: cnm.estimates.plot_contours(img=Cn) @@ -177,4 +177,4 @@ def handle_args(): return parser.parse_args() ######## -main() \ No newline at end of file +main() diff --git a/demos/general/demo_pipeline.py b/demos/general/demo_pipeline.py index f58807f40..5d7f47e27 100755 --- a/demos/general/demo_pipeline.py +++ b/demos/general/demo_pipeline.py @@ -2,7 +2,7 @@ """ Complete demo pipeline for processing two photon calcium imaging data using the -CaImAn batch algorithm. The processing pipeline included motion correction, +Caiman batch algorithm. The processing pipeline included motion correction, source extraction and deconvolution. The demo shows how to construct the params, MotionCorrect and cnmf objects and call the relevant functions. 
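+
+In outline (a sketch rather than a runnable script; fnames, params_dict, dview,
+n_processes and images are all defined further down in main()):
+
+    opts = params.CNMFParams(params_dict=params_dict)
+    mc = MotionCorrect(fnames, dview=dview, **opts.get_group('motion'))
+    mc.motion_correct(save_movie=True)
+    cnm = cnmf.CNMF(n_processes, params=opts, dview=dview)
+    cnm = cnm.fit(images)
+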
You can also run a large part of the pipeline with a single method (cnmf.fit_file) @@ -15,10 +15,10 @@ one photon microendoscopic data see demo_pipeline_cnmfE.py """ +import argparse import cv2 import glob import logging -import matplotlib.pyplot as plt import numpy as np import os @@ -49,8 +49,8 @@ def main(): "%(relativeCreated)12d [%(filename)s:%(funcName)20s():%(lineno)s][%(process)d] %(message)s", level=logging.WARNING) - # Select file(s) to be processed (download if not present) if cfg.input is None: + # If no input is specified, use sample data, downloading if necessary fnames = ['Sue_2x_3000_40_-46.tif'] # filename to be processed if fnames[0] in ['Sue_2x_3000_40_-46.tif', 'demoMovie.tif']: fnames = [download_demo(fnames[0])] @@ -100,9 +100,8 @@ def main(): # play the movie (optional) # playing the movie using opencv. It requires loading the movie in memory. # To close the video press q - display_images = False - if display_images: + if not cfg.no_play: m_orig = cm.load_movie_chain(fnames) ds_ratio = 0.2 moviehandle = m_orig.resize(1, 1, ds_ratio) @@ -118,7 +117,7 @@ def main(): mc.motion_correct(save_movie=True) # compare with original movie - if display_images: + if not cfg.no_play: m_orig = cm.load_movie_chain(fnames) m_els = cm.load(mc.mmap_file) ds_ratio = 0.2 @@ -189,21 +188,16 @@ def main(): cnm = cnmf.CNMF(n_processes, params=opts, dview=dview) cnm = cnm.fit(images) - # Alternate way to run above pipeline using a single method (optional) - # you can also perform the motion correction plus cnmf fitting steps - # simultaneously after defining your parameters object using - # cnm1 = cnmf.CNMF(n_processes, params=opts, dview=dview) - # cnm1.fit_file(motion_correct=True) - # plot contours of found components Cns = local_correlations_movie_offline(mc.mmap_file[0], - remove_baseline=True, window=1000, stride=1000, + remove_baseline=True, + window=1000, stride=1000, winSize_baseline=100, quantil_min_baseline=10, dview=dview) Cn = Cns.max(axis=0) Cn[np.isnan(Cn)] = 0 - cnm.estimates.plot_contours(img=Cn) - plt.title('Contour plots of found components'); + if not cfg.no_play: + cnm.estimates.plot_contours(img=Cn) # save results cnm.estimates.Cn = Cn @@ -232,11 +226,11 @@ def main(): 'cnn_lowest': cnn_lowest}) cnm2.estimates.evaluate_components(images, cnm2.params, dview=dview) - # Plot Components - cnm2.estimates.plot_contours(img=Cn, idx=cnm2.estimates.idx_components) + if not cfg.no_play: + # Plot Components + cnm2.estimates.plot_contours(img=Cn, idx=cnm2.estimates.idx_components) - # View Traces (accepted and rejected) - if display_images: + # View Traces (accepted and rejected) cnm2.estimates.view_components(images, img=Cn, idx=cnm2.estimates.idx_components) cnm2.estimates.view_components(images, img=Cn, @@ -249,32 +243,36 @@ def main(): cnm2.estimates.detrend_df_f(quantileMin=8, frames_window=250) # Show final traces - cnm2.estimates.view_components(img=Cn) + if not cfg.no_play: + cnm2.estimates.view_components(img=Cn) cnm2.estimates.Cn = Cn cnm2.save(cnm2.mmap_file[:-4] + 'hdf5') # reconstruct denoised movie (press q to exit) - if display_images: + if not cfg.no_play: cnm2.estimates.play_movie(images, q_max=99.9, gain_res=2, magnification=2, bpx=border_to_0, include_bck=False) # background not shown - # Stop cluster and clean up log files + # Stop the cluster and clean up log files cm.stop_server(dview=dview) - log_files = glob.glob('*_LOG_*') - for log_file in log_files: - os.remove(log_file) + + if not cfg.keep_logs: + log_files = glob.glob('*_LOG_*') + for log_file in 
log_files: + os.remove(log_file) def handle_args(): parser = argparse.ArgumentParser(description="Demonstrate 2P Pipeline using batch algorithm") + parser.add_argument("--keep_logs", action="store_true", help="Keep temporary logfiles") + parser.add_argument("--no_play", action="store_true", help="Do not display results") parser.add_argument("--cluster_backend", default="multiprocessing", help="Specify multiprocessing, ipyparallel, or single to pick an engine") parser.add_argument("--input", action="append", help="File(s) to work on, provide multiple times for more files") parser.add_argument("--logfile", help="If specified, log to the named file") return parser.parse_args() - ######## main() From 46a15ddab6792a271132c588c2b565ab620206f9 Mon Sep 17 00:00:00 2001 From: Pat Gunn Date: Fri, 3 Nov 2023 17:30:00 -0400 Subject: [PATCH 16/86] Rebasing --- demos/general/demo_behavior.py | 117 --------------------------------- 1 file changed, 117 deletions(-) delete mode 100755 demos/general/demo_behavior.py diff --git a/demos/general/demo_behavior.py b/demos/general/demo_behavior.py deleted file mode 100755 index 9547d4e39..000000000 --- a/demos/general/demo_behavior.py +++ /dev/null @@ -1,117 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- - -""" -Created on Sun Aug 6 11:43:39 2017 - -@author: agiovann -""" - -import cv2 -from IPython import get_ipython -import logging -import numpy as np -import matplotlib.pyplot as plt - -try: - cv2.setNumThreads(0) -except: - pass - -try: - if __IPYTHON__: - print("Detected iPython") - ipython = get_ipython() - ipython.run_line_magic('load_ext', 'autoreload') - ipython.run_line_magic('autoreload', '2') - ipython.run_line_magic('matplotlib', 'qt') -except NameError: - pass - -import caiman as cm -from caiman.behavior import behavior -from caiman.utils.utils import download_demo - -#%% -# Set up the logger; change this if you like. 
-# You can log to a file using the filename parameter, or make the output more or less -# verbose by setting level to logging.DEBUG, logging.INFO, logging.WARNING, or logging.ERROR - -logging.basicConfig(format= - "%(relativeCreated)12d [%(filename)s:%(funcName)20s():%(lineno)s] [%(process)d] %(message)s", - # filename="/tmp/caiman.log", - level=logging.WARNING) - -#%% -def main(): - pass # For compatibility between running under IDE and the CLI - - #%% - plt.ion() - - fname = [u'demo_behavior.h5'] - if fname[0] in ['demo_behavior.h5']: - # TODO: todocument - fname = [download_demo(fname[0])] - # TODO: todocument - m = cm._load_behavior(fname[0]) - - #%% load, rotate and eliminate useless pixels - m = m.transpose([0, 2, 1]) - m = m[:, 150:, :] - - #%% visualize movie - m.play() - - #%% select interesting portion of the FOV (draw a polygon on the figure that pops up, when done press enter) - # TODO: Put the message below into the image - print("Please draw a polygon delimiting the ROI on the image that will be displayed after the image; press enter when done") - mask = np.array(behavior.select_roi(np.median(m[::100], 0), 1)[0], np.float32) - - #%% - n_components = 4 # number of movement looked for - resize_fact = 0.5 # for computational efficiency movies are downsampled - # number of standard deviations above mean for the magnitude that are considered enough to measure the angle in polar coordinates - num_std_mag_for_angle = .6 - only_magnitude = False # if onlu interested in factorizing over the magnitude - method_factorization = 'dict_learn' # could also use nmf - # number of iterations for the dictionary learning algorithm (Marial et al, 2010) - max_iter_DL = -30 - - spatial_filter_, time_trace_, of_or = cm.behavior.behavior.extract_motor_components_OF(m, n_components, mask=mask, - resize_fact=resize_fact, only_magnitude=only_magnitude, verbose=True, method_factorization='dict_learn', max_iter_DL=max_iter_DL) - - #%% - mags, dircts, dircts_thresh, spatial_masks_thrs = cm.behavior.behavior.extract_magnitude_and_angle_from_OF( - spatial_filter_, time_trace_, of_or, num_std_mag_for_angle=num_std_mag_for_angle, sav_filter_size=3, only_magnitude=only_magnitude) - - #%% - idd = 0 - axlin = plt.subplot(n_components, 2, 2) - for mag, dirct, spatial_filter in zip(mags, dircts_thresh, spatial_filter_): - plt.subplot(n_components, 2, 1 + idd * 2) - min_x, min_y = np.min(np.where(mask), 1) - - spfl = spatial_filter - spfl = cm.movie(spfl[None, :, :]).resize( - 1 / resize_fact, 1 / resize_fact, 1).squeeze() - max_x, max_y = np.add((min_x, min_y), np.shape(spfl)) - - mask[min_x:max_x, min_y:max_y] = spfl - mask[mask < np.nanpercentile(spfl, 70)] = np.nan - plt.imshow(m[0], cmap='gray') - plt.imshow(mask, alpha=.5) - plt.axis('off') - - axelin = plt.subplot(n_components, 2, 2 + idd * 2, sharex=axlin) - plt.plot(mag / 10, 'k') - dirct[mag < 0.5 * np.std(mag)] = np.nan - plt.plot(dirct, 'r-', linewidth=2) - - idd += 1 - -#%% -# This is to mask the differences between running this demo in an IDE -# versus from the CLI -if __name__ == "__main__": - main() \ No newline at end of file From 9b56d85257a34e8b4b75f01a87fee8171b797a7b Mon Sep 17 00:00:00 2001 From: Pat Gunn Date: Fri, 3 Nov 2023 17:51:35 -0400 Subject: [PATCH 17/86] Work on converting more CLI demos --- demos/general/demo_OnACID.py | 77 ++++++++++++---------- demos/general/demo_OnACID_mesoscope.py | 91 +++++++++++++------------- 2 files changed, 88 insertions(+), 80 deletions(-) diff --git a/demos/general/demo_OnACID.py 
b/demos/general/demo_OnACID.py
index 32bd44b11..ab65bf896 100755
--- a/demos/general/demo_OnACID.py
+++ b/demos/general/demo_OnACID.py
@@ -1,35 +1,28 @@
 #!/usr/bin/env python
-# -*- coding: utf-8 -*-

 """
 Basic demo for the CaImAn Online algorithm (OnACID) using CNMF initialization.
 It demonstrates the construction of the params and online_cnmf objects and
 the fit function that is used to run the algorithm.
 For a more complete demo check the script demo_OnACID_mesoscope.py
-
-@author: jfriedrich & epnev
 """
-#%%
-from IPython import get_ipython
+
+import argparse
+import cv2
 import logging
 import numpy as np
 import os

+try:
+    cv2.setNumThreads(0)
+except:
+    pass
+
 import caiman as cm
 from caiman.source_extraction import cnmf as cnmf
 from caiman.paths import caiman_datadir
+from caiman.utils.utils import download_demo

-try:
-    if __IPYTHON__:
-        print("Detected iPython")
-        ipython = get_ipython()
-        ipython.run_line_magic('load_ext', 'autoreload')
-        ipython.run_line_magic('autoreload', '2')
-        ipython.run_line_magic('matplotlib', 'qt')
-except NameError:
-    pass
-
-#%%
 # Set up the logger; change this if you like.
 # You can log to a file using the filename parameter, or make the output more or less
 # verbose by setting level to logging.DEBUG, logging.INFO, logging.WARNING, or logging.ERROR
@@ -39,14 +32,29 @@
                     "[%(process)d] %(message)s",
                     level=logging.WARNING)
     # filename="/tmp/caiman.log"
-# %%
+
 def main():
-    pass # For compatibility between running under an IDE and the CLI
+    cfg = handle_args()
+
+    if cfg.logfile:
+        logging.basicConfig(format=
+            "%(relativeCreated)12d [%(filename)s:%(funcName)20s():%(lineno)s][%(process)d] %(message)s",
+            level=logging.WARNING,
+            filename=cfg.logfile)
+        # You can make the output more or less verbose by setting level to logging.DEBUG, logging.INFO, logging.WARNING, or logging.ERROR
+    else:
+        logging.basicConfig(format=
+            "%(relativeCreated)12d [%(filename)s:%(funcName)20s():%(lineno)s][%(process)d] %(message)s",
+            level=logging.WARNING)

-    # %% load data
-    fname = [os.path.join(caiman_datadir(), 'example_movies', 'demoMovie.tif')]
+    if cfg.input is None:
+        fnames = [os.path.join(caiman_datadir(), 'example_movies', 'demoMovie.tif')]
+        if fnames[0] in ['Sue_2x_3000_40_-46.tif', 'demoMovie.tif']:
+            fnames = [download_demo(fnames[0])]
+    else:
+        fnames = cfg.input

-    # %% set up some parameters
+    # set up some parameters
     fr = 10  # frame rate (Hz)
     decay_time = .75  # approximate length of transient event in seconds
     gSig = [6, 6]  # expected half size of neurons
@@ -61,8 +67,9 @@ def main():
     patch_size = 32  # size of patch
     stride = 3  # amount of overlap between patches
     K = 4  # max number of components in each patch
+
     params_dict = {'fr': fr,
-                   'fnames': fname,
+                   'fnames': fnames,
                    'decay_time': decay_time,
                    'gSig': gSig,
                    'p': p,
@@ -75,18 +82,19 @@ def main():
                    'sniper_mode': True,
                    'thresh_CNN_noisy': thresh_CNN_noisy,
                    'K': K}
+
     opts = cnmf.params.CNMFParams(params_dict=params_dict)

-    # %% fit with online object
+    # fit with online object
     cnm = cnmf.online_cnmf.OnACID(params=opts)
     cnm.fit_online()

-    # %% plot contours
-    logging.info('Number of components:' + str(cnm.estimates.A.shape[-1]))
-    Cn = cm.load(fname[0], subindices=slice(0,500)).local_correlations(swap_dim=False)
+    # plot contours
+    logging.info(f"Number of components: {cnm.estimates.A.shape[-1]}")
+    Cn = cm.load(fnames[0], subindices=slice(0,500)).local_correlations(swap_dim=False)
     cnm.estimates.plot_contours(img=Cn)

-    # %% pass through the CNN classifier with a
low threshold (keeps clearer neuron shapes and excludes processes) use_CNN = True if use_CNN: # threshold for CNN classifier @@ -94,11 +102,14 @@ def main(): cnm.estimates.evaluate_components_CNN(opts) cnm.estimates.plot_contours(img=Cn, idx=cnm.estimates.idx_components) - # %% plot results + # plot results cnm.estimates.view_components(img=Cn, idx=cnm.estimates.idx_components) -# %% -# This is to mask the differences between running this demo in IDE -# versus from the CLI -if __name__ == "__main__": - main() +def handle_args(): + parser = argparse.ArgumentParser(description="Demonstrate basic Caiman Online functionality with CNMF initialization") + parser.add_argument("--input", action="append", help="File(s) to work on, provide multiple times for more files") + parser.add_argument("--logfile", help="If specified, log to the named file") + return parser.parse_args() + +######## +main() diff --git a/demos/general/demo_OnACID_mesoscope.py b/demos/general/demo_OnACID_mesoscope.py index cf9bde48f..a82433e37 100755 --- a/demos/general/demo_OnACID_mesoscope.py +++ b/demos/general/demo_OnACID_mesoscope.py @@ -1,5 +1,4 @@ #!/usr/bin/env python -# -*- coding: utf-8 -*- """ Complete pipeline for online processing using CaImAn Online (OnACID). @@ -11,27 +10,20 @@ can be passed as options. A plot of the processing time for the various steps of the algorithm is also included. -@author: Eftychios Pnevmatikakis @epnev - -Special thanks to Andreas Tolias and his lab at Baylor College of Medicine +Thanks to Andreas Tolias and his lab at Baylor College of Medicine for sharing the data used in this demo. """ +import argparse import glob -from IPython import get_ipython import numpy as np import os import logging import matplotlib.pyplot as plt try: - if __IPYTHON__: - print("Detected iPython") - ipython = get_ipython() - ipython.run_line_magic('load_ext', 'autoreload') - ipython.run_line_magic('autoreload', '2') - ipython.run_line_magic('matplotlib', 'qt') -except NameError: + cv2.setNumThreads(0) +except: pass import caiman as cm @@ -40,28 +32,28 @@ from caiman.utils.utils import download_demo -logging.basicConfig(format= - "%(relativeCreated)12d [%(filename)s:%(funcName)20s():%(lineno)s]"\ - "[%(process)d] %(message)s", - level=logging.INFO) - -# %% def main(): - pass # For compatibility between running under IDE and the CLI - - # %% download and list all files to be processed - - # folder inside ./example_movies where files will be saved - fld_name = 'Mesoscope' - fnames = [] - fnames.append(download_demo('Tolias_mesoscope_1.hdf5', fld_name)) - fnames.append(download_demo('Tolias_mesoscope_2.hdf5', fld_name)) - fnames.append(download_demo('Tolias_mesoscope_3.hdf5', fld_name)) - - # your list of files should look something like this - logging.info(fnames) - - # %% Set up some parameters + cfg = handle_args() + + if cfg.logfile: + logging.basicConfig(format= + "%(relativeCreated)12d [%(filename)s:%(funcName)20s():%(lineno)s][%(process)d] %(message)s", + level=logging.WARNING, + filename=cfg.logfile) + # You can make the output more or less verbose by setting level to logging.DEBUG, logging.INFO, logging.WARNING, or logging.ERROR + else: + logging.basicConfig(format= + "%(relativeCreated)12d [%(filename)s:%(funcName)20s():%(lineno)s][%(process)d] %(message)s", + level=logging.WARNING) + + if cfg.input is None: + fnames = [] + for tolias_file in ['Tolias_mesoscope_1.hdf5', 'Tolias_mesoscope_2.hdf5', 'Tolias_mesoscope_3.hdf5']: + fnames.append(download_demo(tolias_file)) + else: + fnames = cfg.input + + # Set 
up some parameters fr = 15 # frame rate (Hz) decay_time = 0.5 # approximate length of transient event in seconds @@ -81,7 +73,7 @@ def main(): init_batch = 200 # number of frames for initialization (presumably from the first file) K = 2 # initial number of components epochs = 1 # number of passes over the data - show_movie = False # show the movie as the data gets processed + show_movie = not cfg.no_play # show the movie as the data gets processed params_dict = {'fnames': fnames, 'fr': fr, @@ -106,20 +98,20 @@ def main(): 'show_movie': show_movie} opts = cnmf.params.CNMFParams(params_dict=params_dict) - # %% fit online + # fit online cnm = cnmf.online_cnmf.OnACID(params=opts) cnm.fit_online() - # %% plot contours (this may take time) - logging.info('Number of components: ' + str(cnm.estimates.A.shape[-1])) + # plot contours (this may take time) + logging.info(f"Number of components: {cnm.estimates.A.shape[-1]}") images = cm.load(fnames) Cn = images.local_correlations(swap_dim=False, frames_per_chunk=500) cnm.estimates.plot_contours(img=Cn, display_numbers=False) - # %% view components + # view components cnm.estimates.view_components(img=Cn) - # %% plot timing performance (if a movie is generated during processing, timing + # plot timing performance (if a movie is generated during processing, timing # will be severely over-estimated) T_motion = 1e3*np.array(cnm.t_motion) @@ -133,9 +125,9 @@ def main(): plt.xlabel('Frame #') plt.ylabel('Processing time [ms]') - #%% RUN IF YOU WANT TO VISUALIZE THE RESULTS (might take time) + # Prepare result visualisations (might take time) c, dview, n_processes = \ - cm.cluster.setup_cluster(backend='multiprocessing', n_processes=None, + cm.cluster.setup_cluster(backend=cfg.cluster_backend, n_processes=None, single_thread=False) if opts.online['motion_correct']: shifts = cnm.estimates.shifts[-cnm.estimates.C.shape[-1]:] @@ -171,12 +163,17 @@ def main(): cnm.estimates.evaluate_components(images, cnm.params, dview=dview) cnm.estimates.Cn = Cn - cnm.save(os.path.splitext(fnames[0])[0]+'_results.hdf5') + cnm.save(os.path.splitext(fnames[0])[0] + '_results.hdf5') dview.terminate() -#%% -# This is to mask the differences between running this demo in IDE -# versus from the CLI -if __name__ == "__main__": - main() \ No newline at end of file +def handle_args(): + parser = argparse.ArgumentParser(description="Full OnACID Caiman demo") + parser.add_argument("--no_play", action="store_true", help="Do not display results") + parser.add_argument("--cluster_backend", default="multiprocessing", help="Specify multiprocessing, ipyparallel, or single to pick an engine") + parser.add_argument("--input", action="append", help="File(s) to work on, provide multiple times for more files") + parser.add_argument("--logfile", help="If specified, log to the named file") + return parser.parse_args() + +######## +main() \ No newline at end of file From 1315b9a6b09fa43c9310abd40780ea2eedf21b35 Mon Sep 17 00:00:00 2001 From: Pat Gunn Date: Wed, 15 Nov 2023 16:03:16 -0500 Subject: [PATCH 18/86] cli_demos work: convert more, regularise more --- demos/general/demo_pipeline.py | 10 +- demos/general/demo_pipeline_NWB.py | 287 +++++++++++++++++------------ 2 files changed, 173 insertions(+), 124 deletions(-) diff --git a/demos/general/demo_pipeline.py b/demos/general/demo_pipeline.py index 5d7f47e27..82c1070f5 100755 --- a/demos/general/demo_pipeline.py +++ b/demos/general/demo_pipeline.py @@ -94,21 +94,21 @@ def main(): opts = params.CNMFParams(params_dict=params_dict) - # start a cluster 
for parallel processing - c, dview, n_processes = cm.cluster.setup_cluster(backend=cfg.cluster_backend) + m_orig = cm.load_movie_chain(fnames) # play the movie (optional) # playing the movie using opencv. It requires loading the movie in memory. # To close the video press q if not cfg.no_play: - m_orig = cm.load_movie_chain(fnames) ds_ratio = 0.2 moviehandle = m_orig.resize(1, 1, ds_ratio) moviehandle.play(q_max=99.5, fr=60, magnification=2) + # start a cluster for parallel processing + c, dview, n_processes = cm.cluster.setup_cluster(backend=cfg.cluster_backend) - # % MOTION CORRECTION + # Motion Correction # first we create a motion correction object with the specified parameters mc = MotionCorrect(fnames, dview=dview, **opts.get_group('motion')) # note that the file is not loaded in memory @@ -235,7 +235,7 @@ def main(): idx=cnm2.estimates.idx_components) cnm2.estimates.view_components(images, img=Cn, idx=cnm2.estimates.idx_components_bad) - + # update object with selected components (optional) cnm2.estimates.select_components(use_object=True) diff --git a/demos/general/demo_pipeline_NWB.py b/demos/general/demo_pipeline_NWB.py index f1335adcd..0f18fc951 100644 --- a/demos/general/demo_pipeline_NWB.py +++ b/demos/general/demo_pipeline_NWB.py @@ -6,10 +6,12 @@ the output. It is meant as an example on how to use NWB files with CaImAn. authors: @agiovann and @epnev """ -#%% + +import argparse import cv2 +from datetime import datetime +from dateutil.tz import tzlocal import glob -from IPython import get_ipython import logging import matplotlib.pyplot as plt import numpy as np @@ -20,73 +22,110 @@ except: pass -try: - if __IPYTHON__: - ipython = get_ipython() - ipython.run_line_magic('load_ext', 'autoreload') - ipython.run_line_magic('autoreload', '2') - ipython.run_line_magic('matplotlib', 'qt') -except NameError: - pass - - -from datetime import datetime -from dateutil.tz import tzlocal - import caiman as cm from caiman.motion_correction import MotionCorrect +from caiman.paths import caiman_datadir from caiman.source_extraction.cnmf import cnmf as cnmf from caiman.source_extraction.cnmf import params as params from caiman.utils.utils import download_demo -from caiman.paths import caiman_datadir -# %% -# Set up the logger (optional); change this if you like. -# You can log to a file using the filename parameter, or make the output more -# or less verbose by setting level to logging.DEBUG, logging.INFO, -# logging.WARNING, or logging.ERROR -logging.basicConfig(format= - "%(relativeCreated)12d [%(filename)s:%(funcName)20s():%(lineno)s]"\ - "[%(process)d] %(message)s", - level=logging.WARNING) - -#%% +# TODO: Various code here isn't careful enough with paths and may produce files in +# places alongside their sources, rather than in temporary directories. Come back +# later and fix this once we've worked out where everything should go. 
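+# In rough outline, the conversion path below is just (a sketch; src and fr
+# stand in for a movie path and its frame rate):
+#     orig_movie = cm.load(src, fr=fr)
+#     orig_movie.save(os.path.splitext(src)[0] + '.nwb', sess_desc='...', indicator='...')
+# cm.load() reads the movie into memory and movie.save() picks the NWB writer
+# from the .nwb extension.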
+
 def main():
-    pass # For compatibility between running under an IDE and the CLI
+    cfg = handle_args()
+
+    if cfg.logfile:
+        logging.basicConfig(format=
+            "%(relativeCreated)12d [%(filename)s:%(funcName)20s():%(lineno)s][%(process)d] %(message)s",
+            level=logging.WARNING,
+            filename=cfg.logfile)
+        # You can make the output more or less verbose by setting level to logging.DEBUG, logging.INFO, logging.WARNING, or logging.ERROR
+    else:
+        logging.basicConfig(format=
+            "%(relativeCreated)12d [%(filename)s:%(funcName)20s():%(lineno)s][%(process)d] %(message)s",
+            level=logging.WARNING)
+
+    # This demo will either accept an nwb file or it'll convert an existing movie into one
+    if cfg.input is None:
+        # default path, no input was provided, so we'll take the "Sue" sample data, convert it to an nwb, and then ingest it.
+        # The convert target is the original fn, with the extension swapped to nwb.
+        fr = float(15)  # imaging rate in frames per second
+        pre_convert = download_demo('Sue_2x_3000_40_-46.tif')
+        convert_target = os.path.join(caiman_datadir(), 'Sue_2x_3000_40_-46.nwb')
+
+        orig_movie = cm.load(pre_convert, fr=fr)
+        # save file in NWB format with various additional info
+        orig_movie.save(convert_target,
+                        sess_desc="test",
+                        identifier="demo 1",
+                        imaging_plane_description='single plane',
+                        emission_lambda=520.0, indicator='GCAMP6f',
+                        location='parietal cortex',
+                        experimenter='Sue Ann Koay', lab_name='Tank Lab',
+                        institution='Princeton U',
+                        experiment_description='Experiment Description',
+                        session_id='Session 1',
+                        var_name_hdf5='TwoPhotonSeries')
+        fnames = [convert_target]
+    elif cfg.input[0].endswith('.nwb'):
+        if os.path.isfile(cfg.input[0]):
+            # We were handed at least one nwb file and it exists either right here or as an absolute path
+            for fn in cfg.input:
+                if not os.path.isfile(fn):
+                    raise Exception(f"Could not find input file {fn}")
+                if not fn.endswith('.nwb'):
+                    raise Exception("Cannot mix nwb and other file formats in input with this demo")
+            fnames = cfg.input
+        elif os.path.isfile(os.path.join(caiman_datadir(), cfg.input[0])):
+            # We were handed at least one nwb file and it exists, but in the caiman_datadir()
+            # So repeat the logic above except look in caiman_datadir()
+            for fn in cfg.input:
+                if not os.path.isfile(os.path.join(caiman_datadir(), fn)):
+                    raise Exception(f"Could not find input file {fn}")
+                if not fn.endswith('.nwb'):
+                    raise Exception("Cannot mix nwb and other file formats in input with this demo")
+            fnames = list(map(lambda n: os.path.join(caiman_datadir(), n), cfg.input))
+
+        else: # Someone mentioned an nwb file but we can't find it!
+            raise Exception(f"Could not find referenced input file {cfg.input[0]}")
+    else:
+        # We were handed a file in some other format, so let's make an nwb file out of it and
+        # then ingest that nwb file.
+        if len(cfg.input) != 1:
+            raise Exception("We're only willing to convert one file to nwb")
+        pre_convert = cfg.input[0]
+        post_convert = os.path.splitext(pre_convert)[0] + ".nwb"
+        fr = float(15) # TODO: Make this a cli parameter
+        orig_movie = cm.load(pre_convert, fr=fr)
+        # save file in NWB format with various additional info
+        orig_movie.save(post_convert,
+                        sess_desc="test",
+                        identifier="demo 1",
+                        imaging_plane_description='single plane',
+                        emission_lambda=520.0, indicator='GCAMP6f', # Entirely synthetic, but if we don't know, what can we do?
+                        location='unknown',
+                        experimenter='Unknown Name', lab_name='Unknown Lab',
+                        institution='Unknown University',
+                        experiment_description='Experiment Description',
+                        session_id='Session 1') # XXX do we need to provide var_name_hdf5?
+        fnames = [post_convert]

-    #%% Select file(s) to be processed (download if not present)
-    fnames = [os.path.join(caiman_datadir(), 'example_movies/Sue_2x_3000_40_-46.nwb')]
     # estimates save path can be same or different from raw data path
-    save_path = os.path.join(caiman_datadir(), 'example_movies/Sue_2x_3000_40_-46_CNMF_estimates.nwb')
-    # filename to be created or processed
-    # dataset dependent parameters
-    fr = 15.  # imaging rate in frames per second
-    decay_time = 0.4    # length of a typical transient in seconds
+    save_path = os.path.splitext(fnames[0])[0] + '_CNMF_estimates.nwb' # TODO: Make this a parameter?

-    #%% load the file and save it in the NWB format (if it doesn't exist already)
-    if not os.path.exists(fnames[0]):
-        fnames_orig = 'Sue_2x_3000_40_-46.tif'  # filename to be processed
-        if fnames_orig in ['Sue_2x_3000_40_-46.tif', 'demoMovie.tif']:
-            fnames_orig = [download_demo(fnames_orig)]
-        orig_movie = cm.load(fnames_orig, fr=fr)
+    # We're done with all the file input parts

-        # save file in NWB format with various additional info
-        orig_movie.save(fnames[0], sess_desc='test', identifier='demo 1',
-                        imaging_plane_description='single plane',
-                        emission_lambda=520.0, indicator='GCAMP6f',
-                        location='parietal cortex',
-                        experimenter='Sue Ann Koay', lab_name='Tank Lab',
-                        institution='Princeton U',
-                        experiment_description='Experiment Description',
-                        session_id='Session 1',
-                        var_name_hdf5='TwoPhotonSeries')
-
-    #%% First setup some parameters for data and motion correction
-    # motion correction parameters
-    dxy = (2., 2.)  # spatial resolution in x and y in (um per pixel)
+    # dataset dependent parameters
+    decay_time = 0.4    # length of a typical transient in seconds
+    dxy = (2., 2.)      # spatial resolution in x and y in (um per pixel)
     # note the lower than usual spatial resolution here
-    max_shift_um = (12., 12.)  # maximum shift in um
+    max_shift_um = (12., 12.)       # maximum shift in um
     patch_motion_um = (100., 100.)  # patch size for non-rigid correction in um
+
+    # First setup some parameters for data and motion correction
+    # motion correction parameters
     pw_rigid = True       # flag to select rigid vs pw_rigid motion correction
     # maximum allowed rigid shift in pixels
     max_shifts = [int(a/b) for a, b in zip(max_shift_um, dxy)]
@@ -97,46 +136,43 @@ def main():
     # maximum deviation allowed for patch with respect to rigid shifts
     max_deviation_rigid = 3

-    mc_dict = {
-        'fnames': fnames,
-        'fr': fr,
-        'decay_time': decay_time,
-        'dxy': dxy,
-        'pw_rigid': pw_rigid,
-        'max_shifts': max_shifts,
-        'strides': strides,
-        'overlaps': overlaps,
-        'max_deviation_rigid': max_deviation_rigid,
-        'border_nan': 'copy',
-        #'var_name_hdf5': 'acquisition/TwoPhotonSeries'
-        'var_name_hdf5': 'TwoPhotonSeries'
-    }
-    opts = params.CNMFParams(params_dict=mc_dict)
-
-    # %% play the movie (optional)
+    params_dict = {'fnames': fnames,
+                   'fr': fr,
+                   'decay_time': decay_time,
+                   'dxy': dxy,
+                   'pw_rigid': pw_rigid,
+                   'max_shifts': max_shifts,
+                   'strides': strides,
+                   'overlaps': overlaps,
+                   'max_deviation_rigid': max_deviation_rigid,
+                   'border_nan': 'copy',
+                   'var_name_hdf5': 'TwoPhotonSeries'}
+    opts = params.CNMFParams(params_dict=params_dict)
+
+    m_orig = cm.load_movie_chain(fnames, var_name_hdf5=opts.data['var_name_hdf5'])
+
+    # play the movie (optional)
     # playing the movie using opencv.
It requires loading the movie in memory. # To close the video press q - display_images = False - if display_images: - m_orig = cm.load_movie_chain(fnames, var_name_hdf5=opts.data['var_name_hdf5']) + + if not cfg.no_play: ds_ratio = 0.2 moviehandle = m_orig.resize(1, 1, ds_ratio) moviehandle.play(q_max=99.5, fr=60, magnification=2) - # %% start a cluster for parallel processing - c, dview, n_processes = cm.cluster.setup_cluster( - backend='multiprocessing', n_processes=None, single_thread=False) + # start a cluster for parallel processing + c, dview, n_processes = cm.cluster.setup_cluster(backend=cfg.cluster_backend) - # %%% MOTION CORRECTION + # Motion Correction # first we create a motion correction object with the specified parameters mc = MotionCorrect(fnames, dview=dview, var_name_hdf5=opts.data['var_name_hdf5'], **opts.get_group('motion')) # note that the file is not loaded in memory - # %% Run (piecewise-rigid motion) correction using NoRMCorre + # Run (piecewise-rigid motion) correction using NoRMCorre mc.motion_correct(save_movie=True) - # %% compare with original movie - if display_images: + # compare with original movie + if not cfg.no_play: m_orig = cm.load_movie_chain(fnames, var_name_hdf5=opts.data['var_name_hdf5']) m_els = cm.load(mc.mmap_file) ds_ratio = 0.2 @@ -144,7 +180,7 @@ def main(): m_els.resize(1, 1, ds_ratio)], axis=2) moviehandle.play(fr=60, q_max=99.5, magnification=2) # press q to exit - # %% MEMORY MAPPING + # MEMORY MAPPING border_to_0 = 0 if mc.border_nan == 'copy' else mc.border_to_0 # you can include the boundaries of the FOV if you used the 'copy' option # during motion correction, although be careful about the components near @@ -159,12 +195,12 @@ def main(): images = np.reshape(Yr.T, [T] + list(dims), order='F') # load frames in python format (T x X x Y) - # %% restart cluster to clean up memory + # restart cluster to clean up memory cm.stop_server(dview=dview) c, dview, n_processes = cm.cluster.setup_cluster( - backend='multiprocessing', n_processes=None, single_thread=False) + backend=cfg.cluster_backend, n_processes=None, single_thread=False) - # %% parameters for source extraction and deconvolution + # parameters for source extraction and deconvolution p = 1 # order of the autoregressive system gnb = 2 # number of global background components merge_thr = 0.85 # merging threshold, max correlation allowed @@ -196,7 +232,7 @@ def main(): opts.change_params(params_dict=opts_dict); - # %% RUN CNMF ON PATCHES + # RUN CNMF ON PATCHES # First extract spatial and temporal components on patches and combine them # for this step deconvolution is turned off (p=0) @@ -204,87 +240,100 @@ def main(): cnm = cnmf.CNMF(n_processes, params=opts, dview=dview) cnm = cnm.fit(images) - # %% ALTERNATE WAY TO RUN THE PIPELINE AT ONCE (optional) + # ALTERNATE WAY TO RUN THE PIPELINE AT ONCE (optional) # you can also perform the motion correction plus cnmf fitting steps # simultaneously after defining your parameters object using # cnm1 = cnmf.CNMF(n_processes, params=opts, dview=dview) # cnm1.fit_file(motion_correct=True) - # %% plot contours of found components + # plot contours of found components Cn = cm.local_correlations(images, swap_dim=False) Cn[np.isnan(Cn)] = 0 - cnm.estimates.plot_contours(img=Cn) + if not cfg.no_play: + cnm.estimates.plot_contours(img=Cn) plt.title('Contour plots of found components') - #%% save results in a separate file (just for demonstration purposes) + # save results in a separate file (just for demonstration purposes) cnm.estimates.Cn = Cn 
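+    # fname_new ends in '.mmap', so trimming the last four characters and
+    # appending 'hdf5' writes the results as a .hdf5 file next to the memmap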
cnm.save(fname_new[:-4]+'hdf5') #cm.movie(Cn).save(fname_new[:-5]+'_Cn.tif') - # %% RE-RUN seeded CNMF on accepted patches to refine and perform deconvolution + # RE-RUN seeded CNMF on accepted patches to refine and perform deconvolution cnm.params.change_params({'p': p}) cnm2 = cnm.refit(images, dview=dview) - # %% COMPONENT EVALUATION - # the components are evaluated in three ways: + # Component Evaluation + # Components are evaluated in three ways: # a) the shape of each component must be correlated with the data # b) a minimum peak SNR is required over the length of a transient # c) each shape passes a CNN based classifier + min_SNR = 2 # signal to noise ratio for accepting a component rval_thr = 0.85 # space correlation threshold for accepting a component + use_cnn = True cnn_thr = 0.99 # threshold for CNN based classifier cnn_lowest = 0.1 # neurons with cnn probability lower than this value are rejected cnm2.params.set('quality', {'decay_time': decay_time, 'min_SNR': min_SNR, 'rval_thr': rval_thr, - 'use_cnn': True, + 'use_cnn': use_cnn, 'min_cnn_thr': cnn_thr, - 'cnn_lowest': cnn_lowest}); + 'cnn_lowest': cnn_lowest}) cnm2.estimates.evaluate_components(images, cnm2.params, dview=dview) - #%% Save new estimates object (after adding correlation image) + # Save new estimates object (after adding correlation image) cnm2.estimates.Cn = Cn cnm2.save(fname_new[:-4] + 'hdf5') - # %% PLOT COMPONENTS - cnm2.estimates.plot_contours(img=Cn, idx=cnm2.estimates.idx_components) + if not cfg.no_play: + # Plot Components + cnm2.estimates.plot_contours(img=Cn, idx=cnm2.estimates.idx_components) - # %% VIEW TRACES (accepted and rejected) - if display_images: + # View Traces (accepted and rejected) cnm2.estimates.view_components(images, img=Cn, idx=cnm2.estimates.idx_components) cnm2.estimates.view_components(images, img=Cn, idx=cnm2.estimates.idx_components_bad) - #%% update object with selected components (optional) + + # update object with selected components (optional) # cnm2.estimates.select_components(use_object=True) - #%% Extract DF/F values + # Extract DF/F values cnm2.estimates.detrend_df_f(quantileMin=8, frames_window=250) - #%% Show final traces - cnm2.estimates.view_components(img=Cn) + # Show final traces + if not cfg.no_play: + cnm2.estimates.view_components(img=Cn) - #%% reconstruct denoised movie (press q to exit) - if display_images: + # reconstruct denoised movie (press q to exit) + if not cfg.no_play: cnm2.estimates.play_movie(images, q_max=99.9, gain_res=2, magnification=2, bpx=border_to_0, include_bck=False) # background not shown - #%% STOP CLUSTER and clean up log files + # Stop the cluster and clean up log files cm.stop_server(dview=dview) - log_files = glob.glob('*_LOG_*') - for log_file in log_files: - os.remove(log_file) - - #%% save the results in the original NWB file + + if not cfg.keep_logs: + log_files = glob.glob('*_LOG_*') + for log_file in log_files: + os.remove(log_file) + + # save the results in the original NWB file cnm2.estimates.save_NWB(save_path, imaging_rate=fr, session_start_time=datetime.now(tzlocal()), raw_data_file=fnames[0]) -# %% -# This is to mask the differences between running this demo in IDE -# versus from the CLI -if __name__ == "__main__": - main() +def handle_args(): + parser = argparse.ArgumentParser(description="Demonstrate 2P Pipeline using batch algorithm, with nwb files") + parser.add_argument("--keep_logs", action="store_true", help="Keep temporary logfiles") + parser.add_argument("--no_play", action="store_true", help="Do not display 
results") + parser.add_argument("--cluster_backend", default="multiprocessing", help="Specify multiprocessing, ipyparallel, or single to pick an engine") + parser.add_argument("--input", action="append", help="File(s) to work on, provide multiple times for more files") + parser.add_argument("--logfile", help="If specified, log to the named file") + return parser.parse_args() + +######## +main() From 285917474bc62f7e0ab75fb03eac46e985d9455b Mon Sep 17 00:00:00 2001 From: Pat Gunn Date: Wed, 15 Nov 2023 17:35:13 -0500 Subject: [PATCH 19/86] cli_demos: Initial conversion of demo_pipeline_cnmfE to "app" --- demos/general/demo_pipeline.py | 14 +-- demos/general/demo_pipeline_cnmfE.py | 147 +++++++++++++-------------- 2 files changed, 76 insertions(+), 85 deletions(-) diff --git a/demos/general/demo_pipeline.py b/demos/general/demo_pipeline.py index 82c1070f5..87fc87fc2 100755 --- a/demos/general/demo_pipeline.py +++ b/demos/general/demo_pipeline.py @@ -51,16 +51,11 @@ def main(): if cfg.input is None: # If no input is specified, use sample data, downloading if necessary - fnames = ['Sue_2x_3000_40_-46.tif'] # filename to be processed - if fnames[0] in ['Sue_2x_3000_40_-46.tif', 'demoMovie.tif']: - fnames = [download_demo(fnames[0])] + fnames = [download_demo('Sue_2x_3000_40_-46.tif')] else: fnames = cfg.input - # If you prefer to hardcode filenames, you could do something like this: - # fnames = ["/path/to/myfile1.avi", "/path/to/myfile2.avi"] - - # First setup some parameters for data and motion correction + # First set up some parameters for data and motion correction # dataset dependent parameters fr = 30 # imaging rate in frames per second @@ -80,6 +75,7 @@ def main(): overlaps = (24, 24) # maximum deviation allowed for patch with respect to rigid shifts max_deviation_rigid = 3 + border_nan = 'copy' params_dict = {'fnames': fnames, 'fr': fr, @@ -90,7 +86,7 @@ def main(): 'strides': strides, 'overlaps': overlaps, 'max_deviation_rigid': max_deviation_rigid, - 'border_nan': 'copy'} + 'border_nan': border_nan} opts = params.CNMFParams(params_dict=params_dict) @@ -145,7 +141,7 @@ def main(): c, dview, n_processes = cm.cluster.setup_cluster( backend=cfg.cluster_backend, n_processes=None, single_thread=False) - # parameters for source extraction and deconvolution + # Parameters for source extraction and deconvolution p = 1 # order of the autoregressive system gnb = 2 # number of global background components merge_thr = 0.85 # merging threshold, max correlation allowed diff --git a/demos/general/demo_pipeline_cnmfE.py b/demos/general/demo_pipeline_cnmfE.py index e61178876..7a6df3c85 100755 --- a/demos/general/demo_pipeline_cnmfE.py +++ b/demos/general/demo_pipeline_cnmfE.py @@ -14,57 +14,46 @@ Demo is also available as a jupyter notebook (see demo_pipeline_cnmfE.ipynb) """ -#%% -from IPython import get_ipython + +import argparse +import glob import logging import matplotlib.pyplot as plt import numpy as np -try: - if __IPYTHON__: - ipython = get_ipython() - ipython.run_line_magic('load_ext', 'autoreload') - ipython.run_line_magic('autoreload', '2') - ipython.run_line_magic('matplotlib', 'qt') -except NameError: - pass import caiman as cm +from caiman.motion_correction import MotionCorrect from caiman.source_extraction import cnmf +from caiman.source_extraction.cnmf import params as params from caiman.utils.utils import download_demo from caiman.utils.visualization import inspect_correlation_pnr -from caiman.motion_correction import MotionCorrect -from caiman.source_extraction.cnmf import params as 
params -#%% -# Set up the logger; change this if you like. -# You can log to a file using the filename parameter, or make the output more or less -# verbose by setting level to logging.DEBUG, logging.INFO, logging.WARNING, or logging.ERROR -logging.basicConfig(format= - "%(relativeCreated)12d [%(filename)s:%(funcName)20s():%(lineno)s]"\ - "[%(process)d] %(message)s", - level=logging.WARNING) - # filename="/tmp/caiman.log" - -#%% -def main(): - pass # For compatibility between running under an IDE and the CLI - - #%% start the cluster - try: - cm.stop_server() # stop it if it was running - except(): - pass - c, dview, n_processes = cm.cluster.setup_cluster(backend='multiprocessing', - n_processes=24, # number of process to use, if you go out of memory try to reduce this one - single_thread=False) - #%% First setup some parameters for motion correction +def main(): + cfg = handle_args() + + if cfg.logfile: + logging.basicConfig(format= + "%(relativeCreated)12d [%(filename)s:%(funcName)20s():%(lineno)s][%(process)d] %(message)s", + level=logging.WARNING, + filename=cfg.logfile) + # You can make the output more or less verbose by setting level to logging.DEBUG, logging.INFO, logging.WARNING, or logging.ERROR + else: + logging.basicConfig(format= + "%(relativeCreated)12d [%(filename)s:%(funcName)20s():%(lineno)s][%(process)d] %(message)s", + level=logging.WARNING) + + if cfg.input is None: + # If no input is specified, use sample data, downloading if necessary + fnames = [download_demo('data_endoscope.tif')] + else: + fnames = cfg.input + filename_reorder = fnames # XXX What is this? + + # First set up some parameters for data and motion correction # dataset dependent parameters - fnames = ['data_endoscope.tif'] # filename to be processed - fnames = [download_demo(fnames[0])] # download file if not already present - filename_reorder = fnames fr = 10 # movie frame rate decay_time = 0.4 # length of a typical transient in seconds @@ -81,22 +70,21 @@ def main(): max_deviation_rigid = 3 border_nan = 'copy' - mc_dict = { - 'fnames': fnames, - 'fr': fr, - 'decay_time': decay_time, - 'pw_rigid': pw_rigid, - 'max_shifts': max_shifts, - 'gSig_filt': gSig_filt, - 'strides': strides, - 'overlaps': overlaps, - 'max_deviation_rigid': max_deviation_rigid, - 'border_nan': border_nan - } - - opts = params.CNMFParams(params_dict=mc_dict) - - # %% MOTION CORRECTION + params_dict = {'fnames': fnames, + 'fr': fr, + 'decay_time': decay_time, + 'pw_rigid': pw_rigid, + 'max_shifts': max_shifts, + 'gSig_filt': gSig_filt, + 'strides': strides, + 'overlaps': overlaps, + 'max_deviation_rigid': max_deviation_rigid, + 'border_nan': border_nan} + + opts = params.CNMFParams(params_dict=params_dict) + + c, dview, n_processes = cm.cluster.setup_cluster(backend=cfg.cluster_backend) + # Motion Correction # The pw_rigid flag set above, determines where to use rigid or pw-rigid # motion correction if motion_correct: @@ -109,8 +97,8 @@ def main(): np.max(np.abs(mc.y_shifts_els)))).astype(int) else: bord_px = np.ceil(np.max(np.abs(mc.shifts_rig))).astype(int) - plt.subplot(1, 2, 1); plt.imshow(mc.total_template_rig) # % plot template - plt.subplot(1, 2, 2); plt.plot(mc.shifts_rig) # % plot rigid shifts + plt.subplot(1, 2, 1); plt.imshow(mc.total_template_rig) # plot template + plt.subplot(1, 2, 2); plt.plot(mc.shifts_rig) # plot rigid shifts plt.legend(['x shifts', 'y shifts']) plt.xlabel('frames') plt.ylabel('pixels') @@ -126,7 +114,7 @@ def main(): Yr, dims, T = cm.load_memmap(fname_new) images = Yr.T.reshape((T,) + dims, 
order='F') - # %% Parameters for source extraction and deconvolution (CNMF-E algorithm) + # Parameters for source extraction and deconvolution (CNMF-E algorithm) p = 1 # order of the autoregressive system K = None # upper bound on number of components per patch, in general None for 1p data gSig = (3, 3) # gaussian width of a 2D gaussian kernel, which approximates a neuron @@ -181,7 +169,7 @@ def main(): 'del_duplicates': True, # whether to remove duplicates from initialization 'border_pix': bord_px}) # number of pixels to not consider in the borders) - # %% compute some summary images (correlation and peak to noise) + # compute some summary images (correlation and peak to noise) # change swap dim if output looks weird, it is a problem with tiffile cn_filter, pnr = cm.summary_images.correlation_pnr(images[::1], gSig=gSig[0], swap_dim=False) # if your images file is too long this computation will take unnecessarily @@ -191,20 +179,20 @@ def main(): # inspect the summary images and set the parameters inspect_correlation_pnr(cn_filter, pnr) # print parameters set above, modify them if necessary based on summary images - print(min_corr) # min correlation of peak (from correlation image) - print(min_pnr) # min peak to noise ratio + print(f"Minimum correlation: {min_corr}") # min correlation of peak (from correlation image) + print(f"Minimum peak to noise ratio: {min_pnr}") # min peak to noise ratio - # %% RUN CNMF ON PATCHES + # Run CMNF in patches cnm = cnmf.CNMF(n_processes=n_processes, dview=dview, Ain=Ain, params=opts) cnm.fit(images) - # %% ALTERNATE WAY TO RUN THE PIPELINE AT ONCE (optional -- commented out) + # ALTERNATE WAY TO RUN THE PIPELINE AT ONCE (optional -- commented out) # you can also perform the motion correction plus cnmf fitting steps # simultaneously after defining your parameters object using # cnm1 = cnmf.CNMF(n_processes, params=opts, dview=dview) # cnm1.fit_file(motion_correct=True) - # %% Quality Control: DISCARD LOW QUALITY COMPONENTS + # Quality Control: DISCARD LOW QUALITY COMPONENTS min_SNR = 2.5 # adaptive way to set threshold on the transient size r_values_min = 0.85 # threshold on space consistency (if you lower more components # will be accepted, potentially with worst quality) @@ -217,16 +205,11 @@ def main(): print('Number of total components: ', len(cnm.estimates.C)) print('Number of accepted components: ', len(cnm.estimates.idx_components)) - # %% PLOT COMPONENTS - cnm.dims = dims - display_images = True # Set to true to show movies and images - if display_images: + # Play result movies + if not cfg.no_play: + cnm.dims = dims cnm.estimates.plot_contours(img=cn_filter, idx=cnm.estimates.idx_components) cnm.estimates.view_components(images, idx=cnm.estimates.idx_components) - - # %% MOVIES - display_images = False # Set to true to show movies and images - if display_images: # fully reconstructed movie cnm.estimates.play_movie(images, q_max=99.5, magnification=2, include_bck=True, gain_res=10, bpx=bord_px) @@ -234,11 +217,23 @@ def main(): cnm.estimates.play_movie(images, q_max=99.9, magnification=2, include_bck=False, gain_res=4, bpx=bord_px) - # %% STOP SERVER + # Stop the cluster and clean up log files cm.stop_server(dview=dview) -# %% This is to mask the differences between running this demo in IDE -# versus from the CLI -if __name__ == "__main__": - main() + if not cfg.keep_logs: + log_files = glob.glob('*_LOG_*') + for log_file in log_files: + os.remove(log_file) + +def handle_args(): + parser = argparse.ArgumentParser(description="Demonstrate CNMFE 
Pipeline") + parser.add_argument("--keep_logs", action="store_true", help="Keep temporary logfiles") + parser.add_argument("--no_play", action="store_true", help="Do not display results") + parser.add_argument("--cluster_backend", default="multiprocessing", help="Specify multiprocessing, ipyparallel, or single to pick an engine") + parser.add_argument("--input", action="append", help="File(s) to work on, provide multiple times for more files") + parser.add_argument("--logfile", help="If specified, log to the named file") + return parser.parse_args() + +######## +main() From 38436e1c5577a1981f95eb2d707a453070e159f0 Mon Sep 17 00:00:00 2001 From: Pat Gunn Date: Wed, 15 Nov 2023 17:53:17 -0500 Subject: [PATCH 20/86] cli demos: convert demo_pipeline_voltage_imaging to "app" --- demos/general/demo_pipeline.py | 3 +- .../general/demo_pipeline_voltage_imaging.py | 149 +++++++++--------- 2 files changed, 75 insertions(+), 77 deletions(-) diff --git a/demos/general/demo_pipeline.py b/demos/general/demo_pipeline.py index 87fc87fc2..1caf33c9a 100755 --- a/demos/general/demo_pipeline.py +++ b/demos/general/demo_pipeline.py @@ -138,8 +138,7 @@ def main(): # restart cluster to clean up memory cm.stop_server(dview=dview) - c, dview, n_processes = cm.cluster.setup_cluster( - backend=cfg.cluster_backend, n_processes=None, single_thread=False) + c, dview, n_processes = cm.cluster.setup_cluster(backend=cfg.cluster_backend) # Parameters for source extraction and deconvolution p = 1 # order of the autoregressive system diff --git a/demos/general/demo_pipeline_voltage_imaging.py b/demos/general/demo_pipeline_voltage_imaging.py index daa9f254e..d9560926b 100644 --- a/demos/general/demo_pipeline_voltage_imaging.py +++ b/demos/general/demo_pipeline_voltage_imaging.py @@ -7,11 +7,11 @@ Dataset courtesy of Karel Svoboda Lab (Janelia Research Campus). author: @caichangjia """ -#%% + +import argparse import cv2 import glob import h5py -from IPython import get_ipython import logging import matplotlib.pyplot as plt import numpy as np @@ -22,45 +22,41 @@ except: pass -try: - if __IPYTHON__: - ipython = get_ipython() - ipython.run_line_magic('load_ext', 'autoreload') - ipython.run_line_magic('autoreload', '2') - ipython.run_line_magic('matplotlib', 'qt') -except NameError: - pass - import caiman as cm from caiman.motion_correction import MotionCorrect from caiman.paths import caiman_datadir from caiman.source_extraction.volpy import utils from caiman.source_extraction.volpy.volparams import volparams from caiman.source_extraction.volpy.volpy import VOLPY -from caiman.summary_images import local_correlations_movie_offline -from caiman.summary_images import mean_image +from caiman.summary_images import local_correlations_movie_offline, mean_image from caiman.utils.utils import download_demo, download_model -# %% -# Set up the logger (optional); change this if you like. 
-# You can log to a file using the filename parameter, or make the output more -# or less verbose by setting level to logging.DEBUG, logging.INFO, -# logging.WARNING, or logging.ERROR -logging.basicConfig(format= - "%(relativeCreated)12d [%(filename)s:%(funcName)20s():%(lineno)s]" \ - "[%(process)d] %(message)s", - level=logging.INFO) -# %% def main(): - pass # For compatibility between running using an IDE and the CLI + cfg = handle_args() + + if cfg.logfile: + logging.basicConfig(format= + "%(relativeCreated)12d [%(filename)s:%(funcName)20s():%(lineno)s][%(process)d] %(message)s", + level=logging.WARNING, + filename=cfg.logfile) + # You can make the output more or less verbose by setting level to logging.DEBUG, logging.INFO, logging.WARNING, or logging.ERROR + else: + logging.basicConfig(format= + "%(relativeCreated)12d [%(filename)s:%(funcName)20s():%(lineno)s][%(process)d] %(message)s", + level=logging.WARNING) - # %% Load demo movie and ROIs - fnames = download_demo('demo_voltage_imaging.hdf5', 'volpy') # file path to movie file (will download if not present) - path_ROIs = download_demo('demo_voltage_imaging_ROIs.hdf5', 'volpy') # file path to ROIs file (will download if not present) + if cfg.input is None: + # If no input is specified, use sample data, downloading if necessary + fnames = [download_demo('demo_voltage_imaging.hdf5', 'volpy')] # XXX do we need to use a separate directory? + path_ROIs = [download_demo('demo_voltage_imaging_ROIs.hdf5', 'volpy')] + else: + fnames = cfg.input + path_ROIs = cfg.pathinput + + # Load demo movie and ROIs file_dir = os.path.split(fnames)[0] - #%% dataset dependent parameters # dataset dependent parameters fr = 400 # sample rate of the movie @@ -74,33 +70,29 @@ def main(): max_deviation_rigid = 3 # maximum deviation allowed for patch with respect to rigid shifts border_nan = 'copy' - opts_dict = { - 'fnames': fnames, - 'fr': fr, - 'pw_rigid': pw_rigid, - 'max_shifts': max_shifts, - 'gSig_filt': gSig_filt, - 'strides': strides, - 'overlaps': overlaps, - 'max_deviation_rigid': max_deviation_rigid, - 'border_nan': border_nan - } + params_dict = {'fnames': fnames, + 'fr': fr, + 'pw_rigid': pw_rigid, + 'max_shifts': max_shifts, + 'gSig_filt': gSig_filt, + 'strides': strides, + 'overlaps': overlaps, + 'max_deviation_rigid': max_deviation_rigid, + 'border_nan': border_nan} - opts = volparams(params_dict=opts_dict) + opts = volparams(params_dict=params_dict) - # %% play the movie (optional) + # play the movie (optional) # playing the movie using opencv. It requires loading the movie in memory. 
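+    # (resize(1, 1, ds_ratio) with ds_ratio=0.2 keeps every pixel but roughly
+    #  one frame in five, so the in-memory copy stays manageable)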
# To close the movie press q - display_images = False - if display_images: + if not cfg.no_play: m_orig = cm.load(fnames) ds_ratio = 0.2 moviehandle = m_orig.resize(1, 1, ds_ratio) moviehandle.play(q_max=99.5, fr=40, magnification=4) - # %% start a cluster for parallel processing - c, dview, n_processes = cm.cluster.setup_cluster( - backend='multiprocessing', n_processes=None, single_thread=False) + # start a cluster for parallel processing + c, dview, n_processes = cm.cluster.setup_cluster(backend=cfg.cluster_backend) # %%% MOTION CORRECTION # first we create a motion correction object with the specified parameters @@ -115,8 +107,8 @@ def main(): mc.mmap_file = [os.path.join(file_dir, mc_list[0])] print(f'reuse previously saved motion corrected file:{mc.mmap_file}') - # %% compare with original movie - if display_images: + # compare with original movie + if not cfg.no_play: m_orig = cm.load(fnames) m_rig = cm.load(mc.mmap_file) ds_ratio = 0.2 @@ -124,7 +116,7 @@ def main(): m_rig.resize(1, 1, ds_ratio)], axis=2) moviehandle.play(fr=40, q_max=99.5, magnification=4) # press q to exit - # %% MEMORY MAPPING + # MEMORY MAPPING do_memory_mapping = True if do_memory_mapping: border_to_0 = 0 if mc.border_nan == 'copy' else mc.border_to_0 @@ -139,9 +131,9 @@ def main(): mmap_list = [file for file in os.listdir(file_dir) if ('memmap_' + os.path.splitext(os.path.split(fnames)[-1])[0]) in file] fname_new = os.path.join(file_dir, mmap_list[0]) - print(f'reuse previously saved memory mapping file:{fname_new}') + print(f'reuse previously saved memory mapping file: {fname_new}') - # %% SEGMENTATION + # SEGMENTATION # create summary images img = mean_image(mc.mmap_file[0], window = 1000, dview=dview) img = (img-np.mean(img))/np.std(img) @@ -159,7 +151,7 @@ def main(): axs[0].imshow(summary_images[0]); axs[1].imshow(summary_images[2]) axs[0].set_title('mean image'); axs[1].set_title('corr image'); - #%% methods for segmentation + # methods for segmentation methods_list = ['manual_annotation', # manual annotations need prepared annotated datasets in the same format as demo_voltage_imaging_ROIs.hdf5 'maskrcnn', # Mask R-CNN is a convolutional neural network trained for detecting neurons in summary images 'gui_annotation'] # use VolPy GUI to correct outputs of Mask R-CNN or annotate new datasets @@ -177,7 +169,6 @@ def main(): elif method == 'gui_annotation': # run volpy_gui.py file in the caiman/source_extraction/volpy folder - # or run the following in the ipython: %run volpy_gui.py gui_ROIs = caiman_datadir() + '/example_movies/volpy/gui_roi.hdf5' with h5py.File(gui_ROIs, 'r') as fl: ROIs = fl['mov'][()] @@ -186,12 +177,12 @@ def main(): axs[0].imshow(summary_images[0]); axs[1].imshow(ROIs.sum(0)) axs[0].set_title('mean image'); axs[1].set_title('masks') - # %% restart cluster to clean up memory + # restart cluster to clean up memory cm.stop_server(dview=dview) - c, dview, n_processes = cm.cluster.setup_cluster( - backend='multiprocessing', n_processes=None, single_thread=False, maxtasksperchild=1) + c, dview, n_processes = cm.cluster.setup_cluster(backend=cfg.cluster_backend) + - # %% parameters for trace denoising and spike extraction + # parameters for trace denoising and spike extraction ROIs = ROIs # region of interests index = list(range(len(ROIs))) # index of neurons weights = None # if None, use ROIs for initialization; to reuse weights check reuse weights block @@ -232,27 +223,26 @@ def main(): 'weight_update': weight_update, 'n_iter': n_iter} - opts.change_params(params_dict=opts_dict); + 
opts.change_params(params_dict=params_dict); - #%% TRACE DENOISING AND SPIKE DETECTION + # TRACE DENOISING AND SPIKE DETECTION vpy = VOLPY(n_processes=n_processes, dview=dview, params=opts) vpy.fit(n_processes=n_processes, dview=dview); - #%% visualization - display_images = True - if display_images: + # visualization + if not cfg.no_play: print(np.where(vpy.estimates['locality'])[0]) # neurons that pass locality test idx = np.where(vpy.estimates['locality'] > 0)[0] utils.view_components(vpy.estimates, img_corr, idx) - #%% reconstructed movie + # reconstructed movie # note the negative spatial weights are cutoff - if display_images: + if not cfg.no_play: mv_all = utils.reconstructed_movie(vpy.estimates.copy(), fnames=mc.mmap_file, idx=idx, scope=(0,1000), flip_signal=flip_signal) mv_all.play(fr=40, magnification=3) - #%% save the result in .npy format + # save the result in .npy format save_result = True if save_result: vpy.estimates['ROIs'] = ROIs @@ -260,7 +250,7 @@ def main(): save_name = f'volpy_{os.path.split(fnames)[1][:-5]}_{threshold_method}' np.save(os.path.join(file_dir, save_name), vpy.estimates) - #%% reuse weights + # reuse weights # set weights = reuse_weights in opts_dict dictionary estimates = np.load(os.path.join(file_dir, save_name+'.npy'), allow_pickle=True).item() reuse_weights = [] @@ -270,15 +260,24 @@ def main(): plt.figure(); plt.imshow(w);plt.colorbar(); plt.show() reuse_weights.append(w) - # %% STOP CLUSTER and clean up log files + # Stop the cluster and clean up log files cm.stop_server(dview=dview) - log_files = glob.glob('*_LOG_*') - for log_file in log_files: - os.remove(log_file) -# %% -# This is to mask the differences between running this demo in an IDE -# versus from the CLI -if __name__ == "__main__": - main() + if not cfg.keep_logs: + log_files = glob.glob('*_LOG_*') + for log_file in log_files: + os.remove(log_file) + +def handle_args(): + parser = argparse.ArgumentParser(description="Demonstrate voltage imaging pipeline") + parser.add_argument("--keep_logs", action="store_true", help="Keep temporary logfiles") + parser.add_argument("--no_play", action="store_true", help="Do not display results") + parser.add_argument("--cluster_backend", default="multiprocessing", help="Specify multiprocessing, ipyparallel, or single to pick an engine") + parser.add_argument("--input", action="append", help="File(s) to work on, provide multiple times for more files") + parser.add_argument("--pathinput", action="append", help="Path input file(s) to work on, provide multiple times for more files") + parser.add_argument("--logfile", help="If specified, log to the named file") + return parser.parse_args() + +######## +main() From b882dddd1d003dea249c7a2d7aaa232c1f8acb43 Mon Sep 17 00:00:00 2001 From: Pat Gunn Date: Thu, 16 Nov 2023 13:44:27 -0500 Subject: [PATCH 21/86] rebasing --- demos/general/demo_OnACID.py | 11 ----------- demos/general/demo_OnACID_mesoscope.py | 6 +++--- demos/general/demo_caiman_basic.py | 2 -- 3 files changed, 3 insertions(+), 16 deletions(-) diff --git a/demos/general/demo_OnACID.py b/demos/general/demo_OnACID.py index ab65bf896..b6137c6f1 100755 --- a/demos/general/demo_OnACID.py +++ b/demos/general/demo_OnACID.py @@ -21,15 +21,6 @@ from caiman.source_extraction import cnmf as cnmf from caiman.paths import caiman_datadir -# Set up the logger; change this if you like. 
-# You can log to a file using the filename parameter, or make the output more or less -# verbose by setting level to logging.DEBUG, logging.INFO, logging.WARNING, or logging.ERROR - -logging.basicConfig(format= - "%(relativeCreated)12d [%(filename)s:%(funcName)20s():%(lineno)s]"\ - "[%(process)d] %(message)s", - level=logging.WARNING) - # filename="/tmp/caiman.log" def main(): cfg = handle_args() @@ -47,8 +38,6 @@ def main(): if cfg.input is None: fnames = [os.path.join(caiman_datadir(), 'example_movies', 'demoMovie.tif')] - if fnames[0] in ['Sue_2x_3000_40_-46.tif', 'demoMovie.tif']: - fnames = [download_demo(fnames[0])] else: fnames = cfg.input diff --git a/demos/general/demo_OnACID_mesoscope.py b/demos/general/demo_OnACID_mesoscope.py index a82433e37..3d7a3d873 100755 --- a/demos/general/demo_OnACID_mesoscope.py +++ b/demos/general/demo_OnACID_mesoscope.py @@ -47,9 +47,9 @@ def main(): level=logging.WARNING) if cfg.input is None: - fnames = [] - for tolias_file in ['Tolias_mesoscope_1.hdf5', 'Tolias_mesoscope_2.hdf5', 'Tolias_mesoscope_3.hdf5']: - fnames.append(download_demo(tolias_file)) + fnames = [download_demo('Tolias_mesoscope_1.hdf5'), + download_demo('Tolias_mesoscope_2.hdf5'), + download_demo('Tolias_mesoscope_3.hdf5')] else: fnames = cfg.input diff --git a/demos/general/demo_caiman_basic.py b/demos/general/demo_caiman_basic.py index 472a5461f..c037ce38d 100755 --- a/demos/general/demo_caiman_basic.py +++ b/demos/general/demo_caiman_basic.py @@ -50,8 +50,6 @@ def main(): if cfg.input is None: # If no input is specified, use sample data, downloading if necessary fnames = [os.path.join(caiman_datadir(), 'example_movies', 'demoMovie.tif')] # file(s) to be analyzed - if fnames[0] in ['Sue_2x_3000_40_-46.tif', 'demoMovie.tif']: - fnames = [download_demo(fnames[0])] else: fnames = cfg.input # If you prefer to hardcode filenames, you could do something like this: From 21eb57fc0ab293af8366575d59d31a0cfa7ecce7 Mon Sep 17 00:00:00 2001 From: Pat Gunn Date: Thu, 16 Nov 2023 14:36:31 -0500 Subject: [PATCH 22/86] cli demos: make amount of parallelism in the backend a CLI parameter --- demos/general/demo_OnACID_mesoscope.py | 5 ++--- demos/general/demo_caiman_basic.py | 3 ++- demos/general/demo_pipeline.py | 5 +++-- demos/general/demo_pipeline_NWB.py | 6 +++--- demos/general/demo_pipeline_cnmfE.py | 3 ++- demos/general/demo_pipeline_voltage_imaging.py | 5 +++-- 6 files changed, 15 insertions(+), 12 deletions(-) diff --git a/demos/general/demo_OnACID_mesoscope.py b/demos/general/demo_OnACID_mesoscope.py index 3d7a3d873..499b16d0a 100755 --- a/demos/general/demo_OnACID_mesoscope.py +++ b/demos/general/demo_OnACID_mesoscope.py @@ -126,9 +126,7 @@ def main(): plt.ylabel('Processing time [ms]') # Prepare result visualisations (might take time) - c, dview, n_processes = \ - cm.cluster.setup_cluster(backend=cfg.cluster_backend, n_processes=None, - single_thread=False) + c, dview, n_processes = cm.cluster.setup_cluster(backend=cfg.cluster_backend, n_processes=cfg.cluster_nproc) if opts.online['motion_correct']: shifts = cnm.estimates.shifts[-cnm.estimates.C.shape[-1]:] if not opts.motion['pw_rigid']: @@ -171,6 +169,7 @@ def handle_args(): parser = argparse.ArgumentParser(description="Full OnACID Caiman demo") parser.add_argument("--no_play", action="store_true", help="Do not display results") parser.add_argument("--cluster_backend", default="multiprocessing", help="Specify multiprocessing, ipyparallel, or single to pick an engine") + parser.add_argument("--cluster_nproc", type=int, default=None, 
help="Override automatic selection of number of workers to use") parser.add_argument("--input", action="append", help="File(s) to work on, provide multiple times for more files") parser.add_argument("--logfile", help="If specified, log to the named file") return parser.parse_args() diff --git a/demos/general/demo_caiman_basic.py b/demos/general/demo_caiman_basic.py index c037ce38d..8bc7651bd 100755 --- a/demos/general/demo_caiman_basic.py +++ b/demos/general/demo_caiman_basic.py @@ -92,7 +92,7 @@ def main(): opts = params.CNMFParams(params_dict=params_dict) # start a cluster for parallel processing - c, dview, n_processes = cm.cluster.setup_cluster(backend=cfg.cluster_backend) + c, dview, n_processes = cm.cluster.setup_cluster(backend=cfg.cluster_backend, n_processes=cfg.cluster_nproc) # Run CaImAn Batch (CNMF) @@ -170,6 +170,7 @@ def handle_args(): parser.add_argument("--no_patches", action="store_true", help="Do not use patches") parser.add_argument("--no_play", action="store_true", help="Do not display results") parser.add_argument("--cluster_backend", default="multiprocessing", help="Specify multiprocessing, ipyparallel, or single to pick an engine") + parser.add_argument("--cluster_nproc", type=int, default=None, help="Override automatic selection of number of workers to use") parser.add_argument("--input", action="append", help="File(s) to work on, provide multiple times for more files") parser.add_argument("--logfile", help="If specified, log to the named file") return parser.parse_args() diff --git a/demos/general/demo_pipeline.py b/demos/general/demo_pipeline.py index 1caf33c9a..12d5ec5b8 100755 --- a/demos/general/demo_pipeline.py +++ b/demos/general/demo_pipeline.py @@ -102,7 +102,7 @@ def main(): moviehandle.play(q_max=99.5, fr=60, magnification=2) # start a cluster for parallel processing - c, dview, n_processes = cm.cluster.setup_cluster(backend=cfg.cluster_backend) + c, dview, n_processes = cm.cluster.setup_cluster(backend=cfg.cluster_backend, n_processes=cfg.cluster_nproc) # Motion Correction # first we create a motion correction object with the specified parameters @@ -138,7 +138,7 @@ def main(): # restart cluster to clean up memory cm.stop_server(dview=dview) - c, dview, n_processes = cm.cluster.setup_cluster(backend=cfg.cluster_backend) + c, dview, n_processes = cm.cluster.setup_cluster(backend=cfg.cluster_backend, n_processes=cfg.cluster_nproc) # Parameters for source extraction and deconvolution p = 1 # order of the autoregressive system @@ -264,6 +264,7 @@ def handle_args(): parser.add_argument("--keep_logs", action="store_true", help="Keep temporary logfiles") parser.add_argument("--no_play", action="store_true", help="Do not display results") parser.add_argument("--cluster_backend", default="multiprocessing", help="Specify multiprocessing, ipyparallel, or single to pick an engine") + parser.add_argument("--cluster_nproc", type=int, default=None, help="Override automatic selection of number of workers to use") parser.add_argument("--input", action="append", help="File(s) to work on, provide multiple times for more files") parser.add_argument("--logfile", help="If specified, log to the named file") return parser.parse_args() diff --git a/demos/general/demo_pipeline_NWB.py b/demos/general/demo_pipeline_NWB.py index 0f18fc951..e5a5dfd66 100644 --- a/demos/general/demo_pipeline_NWB.py +++ b/demos/general/demo_pipeline_NWB.py @@ -161,7 +161,7 @@ def main(): moviehandle.play(q_max=99.5, fr=60, magnification=2) # start a cluster for parallel processing - c, dview, 
n_processes = cm.cluster.setup_cluster(backend=cfg.cluster_backend) + c, dview, n_processes = cm.cluster.setup_cluster(backend=cfg.cluster_backend, n_processes=cfg.cluster_nproc) # Motion Correction # first we create a motion correction object with the specified parameters @@ -197,8 +197,7 @@ def main(): # restart cluster to clean up memory cm.stop_server(dview=dview) - c, dview, n_processes = cm.cluster.setup_cluster( - backend=cfg.cluster_backend, n_processes=None, single_thread=False) + c, dview, n_processes = cm.cluster.setup_cluster(backend=cfg.cluster_backend, n_processes=cfg.cluster_nproc) # parameters for source extraction and deconvolution p = 1 # order of the autoregressive system @@ -331,6 +330,7 @@ def handle_args(): parser.add_argument("--keep_logs", action="store_true", help="Keep temporary logfiles") parser.add_argument("--no_play", action="store_true", help="Do not display results") parser.add_argument("--cluster_backend", default="multiprocessing", help="Specify multiprocessing, ipyparallel, or single to pick an engine") + parser.add_argument("--cluster_nproc", type=int, default=None, help="Override automatic selection of number of workers to use") parser.add_argument("--input", action="append", help="File(s) to work on, provide multiple times for more files") parser.add_argument("--logfile", help="If specified, log to the named file") return parser.parse_args() diff --git a/demos/general/demo_pipeline_cnmfE.py b/demos/general/demo_pipeline_cnmfE.py index 7a6df3c85..4b10d7d86 100755 --- a/demos/general/demo_pipeline_cnmfE.py +++ b/demos/general/demo_pipeline_cnmfE.py @@ -83,7 +83,7 @@ def main(): opts = params.CNMFParams(params_dict=params_dict) - c, dview, n_processes = cm.cluster.setup_cluster(backend=cfg.cluster_backend) + c, dview, n_processes = cm.cluster.setup_cluster(backend=cfg.cluster_backend, n_processes=cfg.cluster_nproc) # Motion Correction # The pw_rigid flag set above, determines where to use rigid or pw-rigid # motion correction @@ -230,6 +230,7 @@ def handle_args(): parser.add_argument("--keep_logs", action="store_true", help="Keep temporary logfiles") parser.add_argument("--no_play", action="store_true", help="Do not display results") parser.add_argument("--cluster_backend", default="multiprocessing", help="Specify multiprocessing, ipyparallel, or single to pick an engine") + parser.add_argument("--cluster_nproc", type=int, default=None, help="Override automatic selection of number of workers to use") parser.add_argument("--input", action="append", help="File(s) to work on, provide multiple times for more files") parser.add_argument("--logfile", help="If specified, log to the named file") return parser.parse_args() diff --git a/demos/general/demo_pipeline_voltage_imaging.py b/demos/general/demo_pipeline_voltage_imaging.py index d9560926b..ebca473a5 100644 --- a/demos/general/demo_pipeline_voltage_imaging.py +++ b/demos/general/demo_pipeline_voltage_imaging.py @@ -92,7 +92,7 @@ def main(): moviehandle.play(q_max=99.5, fr=40, magnification=4) # start a cluster for parallel processing - c, dview, n_processes = cm.cluster.setup_cluster(backend=cfg.cluster_backend) + c, dview, n_processes = cm.cluster.setup_cluster(backend=cfg.cluster_backend, n_processes=cfg.cluster_nproc) # %%% MOTION CORRECTION # first we create a motion correction object with the specified parameters @@ -179,7 +179,7 @@ def main(): # restart cluster to clean up memory cm.stop_server(dview=dview) - c, dview, n_processes = cm.cluster.setup_cluster(backend=cfg.cluster_backend) + c, dview, 
n_processes = cm.cluster.setup_cluster(backend=cfg.cluster_backend, n_processes=cfg.cluster_nproc) # parameters for trace denoising and spike extraction @@ -273,6 +273,7 @@ def handle_args(): parser.add_argument("--keep_logs", action="store_true", help="Keep temporary logfiles") parser.add_argument("--no_play", action="store_true", help="Do not display results") parser.add_argument("--cluster_backend", default="multiprocessing", help="Specify multiprocessing, ipyparallel, or single to pick an engine") + parser.add_argument("--cluster_nproc", type=int, default=None, help="Override automatic selection of number of workers to use") parser.add_argument("--input", action="append", help="File(s) to work on, provide multiple times for more files") parser.add_argument("--pathinput", action="append", help="Path input file(s) to work on, provide multiple times for more files") parser.add_argument("--logfile", help="If specified, log to the named file") From 7ee20e1bf6155c797743f060ba223bd6efe1d6ac Mon Sep 17 00:00:00 2001 From: Pat Gunn Date: Wed, 28 Feb 2024 12:32:52 -0500 Subject: [PATCH 23/86] demo_OnACID CLI: Use the new CNMFParams JSON API to let people specify configs as json --- demos/general/demo_OnACID.py | 73 +++++++++++++++------------ demos/general/params_demo_OnACID.json | 25 +++++++++ 2 files changed, 67 insertions(+), 31 deletions(-) create mode 100644 demos/general/params_demo_OnACID.json diff --git a/demos/general/demo_OnACID.py b/demos/general/demo_OnACID.py index b6137c6f1..121ff6a55 100755 --- a/demos/general/demo_OnACID.py +++ b/demos/general/demo_OnACID.py @@ -8,6 +8,7 @@ """ import argparse +#import code import logging import numpy as np import os @@ -41,38 +42,47 @@ def main(): else: fnames = cfg.input - # set up some parameters - fr = 10 # frame rate (Hz) - decay_time = .75 # approximate length of transient event in seconds - gSig = [6, 6] # expected half size of neurons - p = 1 # order of AR indicator dynamics - min_SNR = 1 # minimum SNR for accepting candidate components - thresh_CNN_noisy = 0.65 # CNN threshold for candidate components - gnb = 2 # number of background components - init_method = 'cnmf' # initialization method - - # set up CNMF initialization parameters - init_batch = 400 # number of frames for initialization - patch_size = 32 # size of patch - stride = 3 # amount of overlap between patches - K = 4 # max number of components in each patch - - params_dict = {'fr': fr, - 'fnames': fnames, - 'decay_time': decay_time, - 'gSig': gSig, - 'p': p, - 'min_SNR': min_SNR, - 'nb': gnb, - 'init_batch': init_batch, - 'init_method': init_method, - 'rf': patch_size//2, - 'stride': stride, - 'sniper_mode': True, - 'thresh_CNN_noisy': thresh_CNN_noisy, - 'K': K} + if cfg.configfile: + opts = cnmf.params.CNMFParams(params_from_file=cfg.configfile) + if opts.data['fnames'] is None: + opts.set("data", {"fnames": fnames}) + else: + # set up some parameters + fr = 10 # frame rate (Hz) + decay_time = .75 # approximate length of transient event in seconds + gSig = [6, 6] # expected half size of neurons + p = 1 # order of AR indicator dynamics + min_SNR = 1 # minimum SNR for accepting candidate components + thresh_CNN_noisy = 0.65 # CNN threshold for candidate components + gnb = 2 # number of background components + init_method = 'cnmf' # initialization method + + # set up CNMF initialization parameters + init_batch = 400 # number of frames for initialization + patch_size = 32 # size of patch + stride = 3 # amount of overlap between patches + K = 4 # max number of components in each 
patch + + params_dict = {'fr': fr, + 'fnames': fnames, + 'decay_time': decay_time, + 'gSig': gSig, + 'p': p, + 'min_SNR': min_SNR, + 'nb': gnb, + 'init_batch': init_batch, + 'init_method': init_method, + 'rf': patch_size//2, + 'stride': stride, + 'sniper_mode': True, + 'thresh_CNN_noisy': thresh_CNN_noisy, + 'K': K} + + opts = cnmf.params.CNMFParams(params_dict=params_dict) - opts = cnmf.params.CNMFParams(params_dict=params_dict) + # If you want to break into an interactive console session, move and uncomment this wherever you want in the code + # (and uncomment the code import at the top) + #code.interact(local=dict(globals(), **locals()) ) # fit with online object cnm = cnmf.online_cnmf.OnACID(params=opts) @@ -96,6 +106,7 @@ def main(): def handle_args(): parser = argparse.ArgumentParser(description="Demonstrate basic Caiman Online functionality with CNMF initialization") + parser.add_argument("--configfile", help="JSON Configfile for Caiman parameters") parser.add_argument("--input", action="append", help="File(s) to work on, provide multiple times for more files") parser.add_argument("--logfile", help="If specified, log to the named file") return parser.parse_args() diff --git a/demos/general/params_demo_OnACID.json b/demos/general/params_demo_OnACID.json new file mode 100644 index 000000000..d871c6f98 --- /dev/null +++ b/demos/general/params_demo_OnACID.json @@ -0,0 +1,25 @@ +{ +"data": { + "decay_time": 0.75, + "fr": 10 + }, +"init": { + "gSig": [6, 6], + "K": 4, + "nb": 2 + }, +"online": { + "init_batch": 400, + "init_method": "cnmf", + "min_SNR": 1, + "sniper_mode": true, + "thresh_CNN_noisy": 0.65 + }, +"patch":{ + "rf": 16, + "stride": 3 + }, +"preprocess": { + "p": 1 + } +} From 2ffe0705725707d89862db9617edac153ac07349 Mon Sep 17 00:00:00 2001 From: Pat Gunn Date: Wed, 28 Feb 2024 16:24:49 -0500 Subject: [PATCH 24/86] CLI demo_caiman_basic.py: only set params once --- demos/general/demo_caiman_basic.py | 61 +++++++++++++++++++----------- 1 file changed, 39 insertions(+), 22 deletions(-) diff --git a/demos/general/demo_caiman_basic.py b/demos/general/demo_caiman_basic.py index 8bc7651bd..4306137cd 100755 --- a/demos/general/demo_caiman_basic.py +++ b/demos/general/demo_caiman_basic.py @@ -78,16 +78,44 @@ def main(): p = 2 # order of the autoregressive system gnb = 2 # global background order - params_dict = {'fnames': fnames, - 'fr': fr, - 'decay_time': decay_time, - 'rf': rf, - 'stride': stride, - 'K': K, - 'gSig': gSig, - 'merge_thr': merge_thresh, - 'p': p, - 'nb': gnb} + min_SNR = 2 # peak SNR for accepted components (if above this, accept) + rval_thr = 0.85 # space correlation threshold (if above this, accept) + use_cnn = True # use the CNN classifier + min_cnn_thr = 0.99 # if cnn classifier predicts below this value, reject + cnn_lowest = 0.1 # neurons with cnn probability lower than this value are rejected + + params_dict = { + 'data': { + 'fnames': fnames, + 'fr': fr, + 'decay_time': decay_time, + }, + 'init': { + 'gSig': gSig, + 'K': K, + 'nb': gnb + }, + 'patch': { + 'rf': rf, + 'stride': stride, + }, + 'merging': { + 'merge_thr': merge_thresh, + }, + 'preprocess': { + 'p': p, + }, + 'temporal': { + 'p': p, + }, + 'quality': { + 'min_SNR': min_SNR, + 'rval_thr': rval_thr, + 'use_cnn': use_cnn, + 'min_cnn_thr': min_cnn_thr, + 'cnn_lowest': cnn_lowest + } + } opts = params.CNMFParams(params_dict=params_dict) @@ -124,19 +152,8 @@ def main(): # c) each shape passes a CNN based classifier (this will pick up only neurons # and filter out active processes) - min_SNR = 2 # peak 
SNR for accepted components (if above this, accept) - rval_thr = 0.85 # space correlation threshold (if above this, accept) - use_cnn = True # use the CNN classifier - min_cnn_thr = 0.99 # if cnn classifier predicts below this value, reject - cnn_lowest = 0.1 # neurons with cnn probability lower than this value are rejected - - cnm2.params.set('quality', {'min_SNR': min_SNR, - 'rval_thr': rval_thr, - 'use_cnn': use_cnn, - 'min_cnn_thr': min_cnn_thr, - 'cnn_lowest': cnn_lowest}) - cnm2.estimates.evaluate_components(images, cnm2.params, dview=dview) + cnm2.estimates.evaluate_components(images, opts, dview=dview) if not cfg.no_play: # visualize selected and rejected components From 860ea9f1472b65363f2be511c3c878aab7b1f44b Mon Sep 17 00:00:00 2001 From: Pat Gunn Date: Wed, 28 Feb 2024 16:50:00 -0500 Subject: [PATCH 25/86] demo_caiman_basic CLI demo: move to json config files, only provide one inbuilt config --- demos/general/demo_caiman_basic.py | 113 +++++++++--------- ...params_demo_caiman_basic_with_patches.json | 31 +++++ ...ams_demo_caiman_basic_without_patches.json | 27 +++++ 3 files changed, 112 insertions(+), 59 deletions(-) create mode 100644 demos/general/params_demo_caiman_basic_with_patches.json create mode 100644 demos/general/params_demo_caiman_basic_without_patches.json diff --git a/demos/general/demo_caiman_basic.py b/demos/general/demo_caiman_basic.py index 4306137cd..b658a0551 100755 --- a/demos/general/demo_caiman_basic.py +++ b/demos/general/demo_caiman_basic.py @@ -55,69 +55,64 @@ def main(): # If you prefer to hardcode filenames, you could do something like this: # fnames = ["/path/to/myfile1.avi", "/path/to/myfile2.avi"] - if cfg.no_patches: - is_patches = False # flag for processing in patches or not + if cfg.configfile: + opts = params.CNMFParams(params_from_file=cfg.configfile) + if opts.data['fnames'] is None: + opts.set("data", {"fnames": fnames}) else: - is_patches = True + # set up some parameters + fr = 10 # approximate frame rate of data + decay_time = 5.0 # length of transient - # set up some parameters - fr = 10 # approximate frame rate of data - decay_time = 5.0 # length of transient - - if is_patches: # PROCESS IN PATCHES AND THEN COMBINE + # See the without_patches json for an alternative rf = 10 # half size of each patch stride = 4 # overlap between patches K = 4 # number of components in each patch - else: # PROCESS THE WHOLE FOV AT ONCE - rf = None # setting these parameters to None - stride = None # will run CNMF on the whole FOV - K = 30 # number of neurons expected (in the whole FOV) - - gSig = [6, 6] # expected half size of neurons - merge_thresh = 0.80 # merging threshold, max correlation allowed - p = 2 # order of the autoregressive system - gnb = 2 # global background order - - min_SNR = 2 # peak SNR for accepted components (if above this, accept) - rval_thr = 0.85 # space correlation threshold (if above this, accept) - use_cnn = True # use the CNN classifier - min_cnn_thr = 0.99 # if cnn classifier predicts below this value, reject - cnn_lowest = 0.1 # neurons with cnn probability lower than this value are rejected - - params_dict = { - 'data': { - 'fnames': fnames, - 'fr': fr, - 'decay_time': decay_time, - }, - 'init': { - 'gSig': gSig, - 'K': K, - 'nb': gnb - }, - 'patch': { - 'rf': rf, - 'stride': stride, - }, - 'merging': { - 'merge_thr': merge_thresh, - }, - 'preprocess': { - 'p': p, - }, - 'temporal': { - 'p': p, - }, - 'quality': { - 'min_SNR': min_SNR, - 'rval_thr': rval_thr, - 'use_cnn': use_cnn, - 'min_cnn_thr': min_cnn_thr, - 
'cnn_lowest': cnn_lowest - } - } - opts = params.CNMFParams(params_dict=params_dict) + gSig = [6, 6] # expected half size of neurons + merge_thresh = 0.80 # merging threshold, max correlation allowed + p = 2 # order of the autoregressive system + gnb = 2 # global background order + + min_SNR = 2 # peak SNR for accepted components (if above this, accept) + rval_thr = 0.85 # space correlation threshold (if above this, accept) + use_cnn = True # use the CNN classifier + min_cnn_thr = 0.99 # if cnn classifier predicts below this value, reject + cnn_lowest = 0.1 # neurons with cnn probability lower than this value are rejected + + params_dict = { + 'data': { + 'fnames': fnames, + 'fr': fr, + 'decay_time': decay_time, + }, + 'init': { + 'gSig': gSig, + 'K': K, + 'nb': gnb + }, + 'patch': { + 'rf': rf, + 'stride': stride, + }, + 'merging': { + 'merge_thr': merge_thresh, + }, + 'preprocess': { + 'p': p, + }, + 'temporal': { + 'p': p, + }, + 'quality': { + 'min_SNR': min_SNR, + 'rval_thr': rval_thr, + 'use_cnn': use_cnn, + 'min_cnn_thr': min_cnn_thr, + 'cnn_lowest': cnn_lowest + } + } + opts = params.CNMFParams(params_dict=params_dict) # start a cluster for parallel processing c, dview, n_processes = cm.cluster.setup_cluster(backend=cfg.cluster_backend, n_processes=cfg.cluster_nproc) @@ -134,7 +129,7 @@ def main(): winSize_baseline=100, quantil_min_baseline=10, dview=dview) Cn = Cns.max(axis=0) - Cn[np.isnan(Cn)] = 0 + Cn[np.isnan(Cn)] = 0 # Convert NaNs to zero if not cfg.no_play: cnm.estimates.plot_contours(img=Cn) @@ -183,8 +178,8 @@ def main(): def handle_args(): parser = argparse.ArgumentParser(description="Demonstrate basic Caiman functionality") + parser.add_argument("--configfile", help="JSON Configfile for Caiman parameters") parser.add_argument("--keep_logs", action="store_true", help="Keep temporary logfiles") - parser.add_argument("--no_patches", action="store_true", help="Do not use patches") parser.add_argument("--no_play", action="store_true", help="Do not display results") parser.add_argument("--cluster_backend", default="multiprocessing", help="Specify multiprocessing, ipyparallel, or single to pick an engine") parser.add_argument("--cluster_nproc", type=int, default=None, help="Override automatic selection of number of workers to use") diff --git a/demos/general/params_demo_caiman_basic_with_patches.json b/demos/general/params_demo_caiman_basic_with_patches.json new file mode 100644 index 000000000..7cd7bae8e --- /dev/null +++ b/demos/general/params_demo_caiman_basic_with_patches.json @@ -0,0 +1,31 @@ +{ +"data": { + "fr": 10, + "decay_time": 5.0 + }, +"init": { + "gSig": [6, 6], + "K": 4, + "nb": 2 + }, +"patch": { + "rf": 10, + "stride": 4 + }, +"merging": { + "merge_thr": 0.80 + }, +"preprocess": { + "p": 2 + }, +"temporal": { + "p": 2 + }, +"quality": { + "min_SNR": 2, + "rval_thr": 0.85, + "use_cnn": true, + "min_cnn_thr": 0.99, + "cnn_lowest": 0.1 + } +} diff --git a/demos/general/params_demo_caiman_basic_without_patches.json b/demos/general/params_demo_caiman_basic_without_patches.json new file mode 100644 index 000000000..229fffe0e --- /dev/null +++ b/demos/general/params_demo_caiman_basic_without_patches.json @@ -0,0 +1,27 @@ +{ +"data": { + "fr": 10, + "decay_time": 5.0 + }, +"init": { + "gSig": [6, 6], + "K": 30, + "nb": 2 + }, +"merging": { + "merge_thr": 0.80 + }, +"preprocess": { + "p": 2 + }, +"temporal": { + "p": 2 + }, +"quality": { + "min_SNR": 2, + "rval_thr": 0.85, + "use_cnn": true, + "min_cnn_thr": 0.99, + "cnn_lowest": 0.1 + } +} From 
5a9825985661c6c3fbad62180a27babe662ca240 Mon Sep 17 00:00:00 2001 From: Pat Gunn Date: Thu, 29 Feb 2024 12:42:48 -0500 Subject: [PATCH 26/86] Demos/jsonconfigs: Fix misuse of params api --- demos/general/demo_OnACID.py | 2 +- demos/general/demo_caiman_basic.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/demos/general/demo_OnACID.py b/demos/general/demo_OnACID.py index 121ff6a55..522d6aa02 100755 --- a/demos/general/demo_OnACID.py +++ b/demos/general/demo_OnACID.py @@ -45,7 +45,7 @@ def main(): if cfg.configfile: opts = cnmf.params.CNMFParams(params_from_file=cfg.configfile) if opts.data['fnames'] is None: - opts.set("data", {"fnames": fnames}) + opts.change_params({"data": {"fnames": fnames}}) else: # set up some parameters fr = 10 # frame rate (Hz) diff --git a/demos/general/demo_caiman_basic.py b/demos/general/demo_caiman_basic.py index b658a0551..3a048dc3b 100755 --- a/demos/general/demo_caiman_basic.py +++ b/demos/general/demo_caiman_basic.py @@ -58,7 +58,7 @@ def main(): if cfg.configfile: opts = params.CNMFParams(params_from_file=cfg.configfile) if opts.data['fnames'] is None: - opts.set("data", {"fnames": fnames}) + opts.change_params({"data": {"fnames": fnames}}) else: # set up some parameters fr = 10 # approximate frame rate of data From 3862d714eb876e81c8c7c0c737708af6eabaf23a Mon Sep 17 00:00:00 2001 From: Pat Gunn Date: Fri, 1 Mar 2024 16:21:53 -0500 Subject: [PATCH 27/86] Revise the three CLI demos that have been refactored to do argument/file-input parsing in the right order --- demos/general/demo_OnACID.py | 13 ++- demos/general/demo_OnACID_mesoscope.py | 119 ++++++++++++------------- demos/general/demo_caiman_basic.py | 16 ++-- 3 files changed, 73 insertions(+), 75 deletions(-) diff --git a/demos/general/demo_OnACID.py b/demos/general/demo_OnACID.py index 522d6aa02..42e84d72c 100755 --- a/demos/general/demo_OnACID.py +++ b/demos/general/demo_OnACID.py @@ -37,15 +37,8 @@ def main(): "%(relativeCreated)12d [%(filename)s:%(funcName)20s():%(lineno)s][%(process)d] %(message)s", level=logging.WARNING) - if cfg.input is None: - fnames = [os.path.join(caiman_datadir(), 'example_movies', 'demoMovie.tif')] - else: - fnames = cfg.input - if cfg.configfile: opts = cnmf.params.CNMFParams(params_from_file=cfg.configfile) - if opts.data['fnames'] is None: - opts.change_params({"data": {"fnames": fnames}}) else: # set up some parameters fr = 10 # frame rate (Hz) @@ -80,6 +73,12 @@ def main(): opts = cnmf.params.CNMFParams(params_dict=params_dict) + if cfg.input is not None: + opts.change_params({"data": {"fnames": cfg.input}}) + + if not opts.data['fnames']: # Set neither by CLI arg nor through JSON, so use default data + fnames = [os.path.join(caiman_datadir(), 'example_movies', 'demoMovie.tif')] + # If you want to break into an interactive console session, move and uncomment this wherever you want in the code # (and uncomment the code import at the top) #code.interact(local=dict(globals(), **locals()) ) diff --git a/demos/general/demo_OnACID_mesoscope.py b/demos/general/demo_OnACID_mesoscope.py index 499b16d0a..24b619b7f 100755 --- a/demos/general/demo_OnACID_mesoscope.py +++ b/demos/general/demo_OnACID_mesoscope.py @@ -46,57 +46,65 @@ def main(): "%(relativeCreated)12d [%(filename)s:%(funcName)20s():%(lineno)s][%(process)d] %(message)s", level=logging.WARNING) - if cfg.input is None: + if cfg.configfile: + opts = cnmf.params.CNMFParams(params_from_file=cfg.configfile) + else: + # Set up parameters using builtin defaults + params_dict = { + 'data': { + 'fr': 15, 
+ 'decay_time': 0.5, + }, + 'init': { + 'gSig': (3, 3), # The notebooks have (4, 4) though + 'nb': 2, + 'K': 2, + }, + 'preprocess': { + 'p': 1, # Possibly redundant + }, + 'temporal': { + 'p': 1, # Possibly redundant + }, + 'online': { + 'min_SNR': 1, + 'rval_thr': 0.9, + 'ds_factor': 1, + 'motion_correct': True, + 'init_batch': 200, + 'init_method': 'bare', + 'normalize': True, + 'sniper_mode': True, + 'epochs': 1, + 'max_shifts_online': 10, + 'dist_shape_update': True, + 'min_num_trial': 10, + }, + 'motion': { + 'pw_rigid': False, + + }, + 'quality': { + 'min_SNR': 2, + 'rval_thr': 0.85, + 'use_cnn': True, + 'min_cnn_thr': 0.99, + 'cnn_lowest': 0.1 + }, + } + opts = cnmf.params.CNMFParams(params_dict=params_dict) + + opts.change_params({'online': {'show_movie': not cfg.no_play}}) # Override from CLI + + if cfg.input is not None: # CLI arg can override all other settings for fnames, although other data-centric commands still must match source data + opts.change_params({'data': {'fnames': cfg.input}}) + + if not opts.data['fnames']: # Set neither by CLI arg nor through JSON, so use default data fnames = [download_demo('Tolias_mesoscope_1.hdf5'), download_demo('Tolias_mesoscope_2.hdf5'), download_demo('Tolias_mesoscope_3.hdf5')] - else: - fnames = cfg.input - - # Set up some parameters - - fr = 15 # frame rate (Hz) - decay_time = 0.5 # approximate length of transient event in seconds - gSig = (3, 3) # expected half size of neurons - p = 1 # order of AR indicator dynamics - min_SNR = 1 # minimum SNR for accepting new components - ds_factor = 1 # spatial downsampling factor (increases speed but may lose some fine structure) - gnb = 2 # number of background components - gSig = tuple(np.ceil(np.array(gSig) / ds_factor).astype('int')) # recompute gSig if downsampling is involved - mot_corr = True # flag for online motion correction - pw_rigid = False # flag for pw-rigid motion correction (slower but potentially more accurate) - max_shifts_online = np.ceil(10.).astype('int') # maximum allowed shift during motion correction - sniper_mode = True # use a CNN to detect new neurons (o/w space correlation) - rval_thr = 0.9 # soace correlation threshold for candidate components - # set up some additional supporting parameters needed for the algorithm - # (these are default values but can change depending on dataset properties) - init_batch = 200 # number of frames for initialization (presumably from the first file) - K = 2 # initial number of components - epochs = 1 # number of passes over the data - show_movie = not cfg.no_play # show the movie as the data gets processed - - params_dict = {'fnames': fnames, - 'fr': fr, - 'decay_time': decay_time, - 'gSig': gSig, - 'p': p, - 'min_SNR': min_SNR, - 'rval_thr': rval_thr, - 'ds_factor': ds_factor, - 'nb': gnb, - 'motion_correct': mot_corr, - 'init_batch': init_batch, - 'init_method': 'bare', - 'normalize': True, - 'sniper_mode': sniper_mode, - 'K': K, - 'epochs': epochs, - 'max_shifts_online': max_shifts_online, - 'pw_rigid': pw_rigid, - 'dist_shape_update': True, - 'min_num_trial': 10, - 'show_movie': show_movie} - opts = cnmf.params.CNMFParams(params_dict=params_dict) + opts.change_params({'data': {'fnames': fnames}}) + # fit online cnm = cnmf.online_cnmf.OnACID(params=opts) @@ -117,7 +125,7 @@ def main(): T_motion = 1e3*np.array(cnm.t_motion) T_detect = 1e3*np.array(cnm.t_detect) T_shapes = 1e3*np.array(cnm.t_shapes) - T_track = 1e3*np.array(cnm.t_online) - T_motion - T_detect - T_shapes + T_track = 1e3*np.array(cnm.t_online) - T_motion - T_detect - 
T_shapes plt.figure() plt.stackplot(np.arange(len(T_motion)), T_motion, T_track, T_detect, T_shapes) plt.legend(labels=['motion', 'tracking', 'detect', 'shapes'], loc=2) @@ -147,18 +155,8 @@ def main(): Yr, dims, T = cm.load_memmap(memmap_file) images = np.reshape(Yr.T, [T] + list(dims), order='F') - min_SNR = 2 # peak SNR for accepted components (if above this, accept) - rval_thr = 0.85 # space correlation threshold (if above this, accept) - use_cnn = True # use the CNN classifier - min_cnn_thr = 0.99 # if cnn classifier predicts below this value, reject - cnn_lowest = 0.1 # neurons with cnn probability lower than this value are rejected - - cnm.params.set('quality', {'min_SNR': min_SNR, - 'rval_thr': rval_thr, - 'use_cnn': use_cnn, - 'min_cnn_thr': min_cnn_thr, - 'cnn_lowest': cnn_lowest}) + # evaluate_components uses parameters from the 'quality' category cnm.estimates.evaluate_components(images, cnm.params, dview=dview) cnm.estimates.Cn = Cn cnm.save(os.path.splitext(fnames[0])[0] + '_results.hdf5') @@ -167,6 +165,7 @@ def main(): def handle_args(): parser = argparse.ArgumentParser(description="Full OnACID Caiman demo") + parser.add_argument("--configfile", help="JSON Configfile for Caiman parameters") parser.add_argument("--no_play", action="store_true", help="Do not display results") parser.add_argument("--cluster_backend", default="multiprocessing", help="Specify multiprocessing, ipyparallel, or single to pick an engine") parser.add_argument("--cluster_nproc", type=int, default=None, help="Override automatic selection of number of workers to use") diff --git a/demos/general/demo_caiman_basic.py b/demos/general/demo_caiman_basic.py index 3a048dc3b..90bbe097a 100755 --- a/demos/general/demo_caiman_basic.py +++ b/demos/general/demo_caiman_basic.py @@ -47,14 +47,6 @@ def main(): "%(relativeCreated)12d [%(filename)s:%(funcName)20s():%(lineno)s][%(process)d] %(message)s", level=logging.WARNING) - if cfg.input is None: - # If no input is specified, use sample data, downloading if necessary - fnames = [os.path.join(caiman_datadir(), 'example_movies', 'demoMovie.tif')] # file(s) to be analyzed - else: - fnames = cfg.input - # If you prefer to hardcode filenames, you could do something like this: - # fnames = ["/path/to/myfile1.avi", "/path/to/myfile2.avi"] - if cfg.configfile: opts = params.CNMFParams(params_from_file=cfg.configfile) if opts.data['fnames'] is None: @@ -114,6 +106,14 @@ def main(): } opts = params.CNMFParams(params_dict=params_dict) + if cfg.input is not None: # CLI arg can override all other settings for fnames, although other data-centric commands still must match source data + opts.change_params({'data': {'fnames': cfg.input}}) + + if not opts.data['fnames']: # Set neither by CLI arg nor through JSON, so use default data + # If no input is specified, use sample data, downloading if necessary + fnames = [os.path.join(caiman_datadir(), 'example_movies', 'demoMovie.tif')] # file(s) to be analyzed + opts.change_params({'data': {'fnames': fnames}}) + # start a cluster for parallel processing c, dview, n_processes = cm.cluster.setup_cluster(backend=cfg.cluster_backend, n_processes=cfg.cluster_nproc) From 994773b9366badf98f04ae4971181004bfee5fb6 Mon Sep 17 00:00:00 2001 From: Pat Gunn Date: Fri, 1 Mar 2024 16:51:39 -0500 Subject: [PATCH 28/86] Demo revisions: remove inline parameters --- demos/general/demo_OnACID.py | 38 +--------- demos/general/demo_OnACID_mesoscope.py | 51 +------------ demos/general/demo_caiman_basic.py | 73 +++---------------- 
.../general/params_demo_OnACID_mesoscope.json | 41 +++++++++++ 4 files changed, 54 insertions(+), 149 deletions(-) create mode 100644 demos/general/params_demo_OnACID_mesoscope.json diff --git a/demos/general/demo_OnACID.py b/demos/general/demo_OnACID.py index 42e84d72c..cd68abbd7 100755 --- a/demos/general/demo_OnACID.py +++ b/demos/general/demo_OnACID.py @@ -37,41 +37,7 @@ def main(): "%(relativeCreated)12d [%(filename)s:%(funcName)20s():%(lineno)s][%(process)d] %(message)s", level=logging.WARNING) - if cfg.configfile: - opts = cnmf.params.CNMFParams(params_from_file=cfg.configfile) - else: - # set up some parameters - fr = 10 # frame rate (Hz) - decay_time = .75 # approximate length of transient event in seconds - gSig = [6, 6] # expected half size of neurons - p = 1 # order of AR indicator dynamics - min_SNR = 1 # minimum SNR for accepting candidate components - thresh_CNN_noisy = 0.65 # CNN threshold for candidate components - gnb = 2 # number of background components - init_method = 'cnmf' # initialization method - - # set up CNMF initialization parameters - init_batch = 400 # number of frames for initialization - patch_size = 32 # size of patch - stride = 3 # amount of overlap between patches - K = 4 # max number of components in each patch - - params_dict = {'fr': fr, - 'fnames': fnames, - 'decay_time': decay_time, - 'gSig': gSig, - 'p': p, - 'min_SNR': min_SNR, - 'nb': gnb, - 'init_batch': init_batch, - 'init_method': init_method, - 'rf': patch_size//2, - 'stride': stride, - 'sniper_mode': True, - 'thresh_CNN_noisy': thresh_CNN_noisy, - 'K': K} - - opts = cnmf.params.CNMFParams(params_dict=params_dict) + opts = cnmf.params.CNMFParams(params_from_file=cfg.configfile) if cfg.input is not None: opts.change_params({"data": {"fnames": cfg.input}}) @@ -105,7 +71,7 @@ def main(): def handle_args(): parser = argparse.ArgumentParser(description="Demonstrate basic Caiman Online functionality with CNMF initialization") - parser.add_argument("--configfile", help="JSON Configfile for Caiman parameters") + parser.add_argument("--configfile", default=os.path.join(caiman_datadir(), 'demos', 'general', 'params_demo_OnACID.json'), help="JSON Configfile for Caiman parameters") parser.add_argument("--input", action="append", help="File(s) to work on, provide multiple times for more files") parser.add_argument("--logfile", help="If specified, log to the named file") return parser.parse_args() diff --git a/demos/general/demo_OnACID_mesoscope.py b/demos/general/demo_OnACID_mesoscope.py index 24b619b7f..fc9894809 100755 --- a/demos/general/demo_OnACID_mesoscope.py +++ b/demos/general/demo_OnACID_mesoscope.py @@ -46,54 +46,7 @@ def main(): "%(relativeCreated)12d [%(filename)s:%(funcName)20s():%(lineno)s][%(process)d] %(message)s", level=logging.WARNING) - if cfg.configfile: - opts = cnmf.params.CNMFParams(params_from_file=cfg.configfile) - else: - # Set up parameters using builtin defaults - params_dict = { - 'data': { - 'fr': 15, - 'decay_time': 0.5, - }, - 'init': { - 'gSig': (3, 3), # The notebooks have (4, 4) though - 'nb': 2, - 'K': 2, - }, - 'preprocess': { - 'p': 1, # Possibly redundant - }, - 'temporal': { - 'p': 1, # Possibly redundant - }, - 'online': { - 'min_SNR': 1, - 'rval_thr': 0.9, - 'ds_factor': 1, - 'motion_correct': True, - 'init_batch': 200, - 'init_method': 'bare', - 'normalize': True, - 'sniper_mode': True, - 'epochs': 1, - 'max_shifts_online': 10, - 'dist_shape_update': True, - 'min_num_trial': 10, - }, - 'motion': { - 'pw_rigid': False, - - }, - 'quality': { - 'min_SNR': 2, - 
'rval_thr': 0.85, - 'use_cnn': True, - 'min_cnn_thr': 0.99, - 'cnn_lowest': 0.1 - }, - } - opts = cnmf.params.CNMFParams(params_dict=params_dict) - + opts = cnmf.params.CNMFParams(params_from_file=cfg.configfile) opts.change_params({'online': {'show_movie': not cfg.no_play}}) # Override from CLI if cfg.input is not None: # CLI arg can override all other settings for fnames, although other data-centric commands still must match source data @@ -165,7 +118,7 @@ def main(): def handle_args(): parser = argparse.ArgumentParser(description="Full OnACID Caiman demo") - parser.add_argument("--configfile", help="JSON Configfile for Caiman parameters") + parser.add_argument("--configfile", default=os.path.join(caiman_datadir(), 'demos', 'general', 'params_demo_OnACID_mesoscope.json'), help="JSON Configfile for Caiman parameters") parser.add_argument("--no_play", action="store_true", help="Do not display results") parser.add_argument("--cluster_backend", default="multiprocessing", help="Specify multiprocessing, ipyparallel, or single to pick an engine") parser.add_argument("--cluster_nproc", type=int, default=None, help="Override automatic selection of number of workers to use") diff --git a/demos/general/demo_caiman_basic.py b/demos/general/demo_caiman_basic.py index 90bbe097a..31fa265b0 100755 --- a/demos/general/demo_caiman_basic.py +++ b/demos/general/demo_caiman_basic.py @@ -25,7 +25,7 @@ except: pass -import caiman as cm +import caiman from caiman.paths import caiman_datadir from caiman.source_extraction.cnmf import cnmf as cnmf from caiman.source_extraction.cnmf import params as params @@ -47,64 +47,9 @@ def main(): "%(relativeCreated)12d [%(filename)s:%(funcName)20s():%(lineno)s][%(process)d] %(message)s", level=logging.WARNING) - if cfg.configfile: - opts = params.CNMFParams(params_from_file=cfg.configfile) - if opts.data['fnames'] is None: - opts.change_params({"data": {"fnames": fnames}}) - else: - # set up some parameters - fr = 10 # approximate frame rate of data - decay_time = 5.0 # length of transient - - # See the without_patches json for an alternative - rf = 10 # half size of each patch - stride = 4 # overlap between patches - K = 4 # number of components in each patch - - gSig = [6, 6] # expected half size of neurons - merge_thresh = 0.80 # merging threshold, max correlation allowed - p = 2 # order of the autoregressive system - gnb = 2 # global background order - - min_SNR = 2 # peak SNR for accepted components (if above this, accept) - rval_thr = 0.85 # space correlation threshold (if above this, accept) - use_cnn = True # use the CNN classifier - min_cnn_thr = 0.99 # if cnn classifier predicts below this value, reject - cnn_lowest = 0.1 # neurons with cnn probability lower than this value are rejected - - params_dict = { - 'data': { - 'fnames': fnames, - 'fr': fr, - 'decay_time': decay_time, - }, - 'init': { - 'gSig': gSig, - 'K': K, - 'nb': gnb - }, - 'patch': { - 'rf': rf, - 'stride': stride, - }, - 'merging': { - 'merge_thr': merge_thresh, - }, - 'preprocess': { - 'p': p, - }, - 'temporal': { - 'p': p, - }, - 'quality': { - 'min_SNR': min_SNR, - 'rval_thr': rval_thr, - 'use_cnn': use_cnn, - 'min_cnn_thr': min_cnn_thr, - 'cnn_lowest': cnn_lowest - } - } - opts = params.CNMFParams(params_dict=params_dict) + opts = params.CNMFParams(params_from_file=cfg.configfile) + if opts.data['fnames'] is None: + opts.change_params({"data": {"fnames": fnames}}) if cfg.input is not None: # CLI arg can override all other settings for fnames, although other data-centric commands still must match 
source data
         opts.change_params({'data': {'fnames': cfg.input}})
 
@@ -115,7 +60,7 @@ def main():
         opts.change_params({'data': {'fnames': fnames}})
 
     # start a cluster for parallel processing
-    c, dview, n_processes = cm.cluster.setup_cluster(backend=cfg.cluster_backend, n_processes=cfg.cluster_nproc)
+    c, dview, n_processes = caiman.cluster.setup_cluster(backend=cfg.cluster_backend, n_processes=cfg.cluster_nproc)
 
     # Run CaImAn Batch (CNMF)
 
@@ -134,7 +79,7 @@ def main():
         cnm.estimates.plot_contours(img=Cn)
 
     # load memory mapped file
-    Yr, dims, T = cm.load_memmap(cnm.mmap_file)
+    Yr, dims, T = caiman.load_memmap(cnm.mmap_file)
     images = np.reshape(Yr.T, [T] + list(dims), order='F')
 
     # refit
@@ -162,14 +107,14 @@ def main():
 
     # save results
     cnm2.estimates.Cn = Cn
-    cnm2.save(cnm2.mmap_file[:-4]+'hdf5')
+    cnm2.save(cnm2.mmap_file[:-4] + 'hdf5') # FIXME use the path library to do this the right way
 
     # play movie with results (original, reconstructed, amplified residual)
     if not cfg.no_play:
         cnm2.estimates.play_movie(images, magnification=4);
 
     # Stop the cluster and clean up log files
-    cm.stop_server(dview=dview)
+    caiman.stop_server(dview=dview)
 
     if not cfg.keep_logs:
         log_files = glob.glob('Yr*_LOG_*')
@@ -178,7 +123,7 @@ def main():
 
 def handle_args():
     parser = argparse.ArgumentParser(description="Demonstrate basic Caiman functionality")
-    parser.add_argument("--configfile", help="JSON Configfile for Caiman parameters")
+    parser.add_argument("--configfile", default=os.path.join(caiman_datadir(), 'demos', 'general', 'params_demo_caiman_basic_with_patches.json'), help="JSON Configfile for Caiman parameters")
     parser.add_argument("--keep_logs", action="store_true", help="Keep temporary logfiles")
     parser.add_argument("--no_play", action="store_true", help="Do not display results")
     parser.add_argument("--cluster_backend", default="multiprocessing", help="Specify multiprocessing, ipyparallel, or single to pick an engine")
diff --git a/demos/general/params_demo_OnACID_mesoscope.json b/demos/general/params_demo_OnACID_mesoscope.json
new file mode 100644
index 000000000..48aa536b8
--- /dev/null
+++ b/demos/general/params_demo_OnACID_mesoscope.json
@@ -0,0 +1,41 @@
+{
+    "data": {
+        "fr": 15,
+        "decay_time": 0.5
+    },
+    "init": {
+        "gSig": [4, 4],
+        "nb": 2,
+        "K": 2
+    },
+    "preprocess": {
+        "p": 1
+    },
+    "temporal": {
+        "p": 1
+    },
+    "online": {
+        "min_SNR": 1,
+        "rval_thr": 0.9,
+        "ds_factor": 1,
+        "motion_correct": true,
+        "init_batch": 200,
+        "init_method": "bare",
+        "normalize": true,
+        "sniper_mode": true,
+        "epochs": 1,
+        "max_shifts_online": 10,
+        "dist_shape_update": true,
+        "min_num_trial": 10
+    },
+    "motion": {
+        "pw_rigid": false
+    },
+    "quality": {
+        "min_SNR": 2,
+        "rval_thr": 0.85,
+        "use_cnn": true,
+        "min_cnn_thr": 0.99,
+        "cnn_lowest": 0.1
+    }
+}

From 13b2730555bf758c056390a44828688c33b58292 Mon Sep 17 00:00:00 2001
From: Pat Gunn
Date: Fri, 8 Mar 2024 11:47:30 -0500
Subject: [PATCH 29/86] demos_refactor: demo_onacid file load fix

---
 demos/general/demo_OnACID.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/demos/general/demo_OnACID.py b/demos/general/demo_OnACID.py
index cd68abbd7..741b98f03 100755
--- a/demos/general/demo_OnACID.py
+++ b/demos/general/demo_OnACID.py
@@ -44,6 +44,7 @@ def main():
 
     if not opts.data['fnames']: # Set neither by CLI arg nor through JSON, so use default data
         fnames = [os.path.join(caiman_datadir(), 'example_movies', 'demoMovie.tif')]
+        opts.change_params({"data": {"fnames": fnames}})
 
     # If you want to break into an interactive console session, move and uncomment this
wherever you want in the code # (and uncomment the code import at the top) From 32a642880abcb37e16cae775e79c5f0663021e45 Mon Sep 17 00:00:00 2001 From: Pat Gunn Date: Fri, 8 Mar 2024 11:48:07 -0500 Subject: [PATCH 30/86] demos_refactor: demo_pipeline rework to use json --- demos/general/demo_pipeline.py | 118 +++++------------------- demos/general/params_demo_pipeline.json | 46 +++++++++ 2 files changed, 67 insertions(+), 97 deletions(-) create mode 100644 demos/general/params_demo_pipeline.json diff --git a/demos/general/demo_pipeline.py b/demos/general/demo_pipeline.py index 12d5ec5b8..76914dc17 100755 --- a/demos/general/demo_pipeline.py +++ b/demos/general/demo_pipeline.py @@ -27,7 +27,7 @@ except: pass -import caiman as cm +import caiman from caiman.motion_correction import MotionCorrect from caiman.source_extraction.cnmf import cnmf as cnmf from caiman.source_extraction.cnmf import params as params @@ -49,48 +49,16 @@ def main(): "%(relativeCreated)12d [%(filename)s:%(funcName)20s():%(lineno)s][%(process)d] %(message)s", level=logging.WARNING) - if cfg.input is None: - # If no input is specified, use sample data, downloading if necessary - fnames = [download_demo('Sue_2x_3000_40_-46.tif')] - else: - fnames = cfg.input - # First set up some parameters for data and motion correction + opts = params.CNMFParams(params_from_file=cfg.input) + + if cfg.input is not None: + opts.change_params({"data": {"fnames": cfg.input}}) + if not opts.data['fnames']: # Set neither by CLI arg nor through JSON, so use default data + fnames = [download_demo('Sue_2x_3000_40_-46.tif')] + opts.change_params({"data": {"fnames": fnames}}) - # dataset dependent parameters - fr = 30 # imaging rate in frames per second - decay_time = 0.4 # length of a typical transient in seconds - dxy = (2., 2.) # spatial resolution in x and y in (um per pixel) - # note the lower than usual spatial resolution here - max_shift_um = (12., 12.) # maximum shift in um - patch_motion_um = (100., 100.) # patch size for non-rigid correction in um - - # motion correction parameters - pw_rigid = True # flag to select rigid vs pw_rigid motion correction - # maximum allowed rigid shift in pixels - max_shifts = [int(a/b) for a, b in zip(max_shift_um, dxy)] - # start a new patch for pw-rigid motion correction every x pixels - strides = tuple([int(a/b) for a, b in zip(patch_motion_um, dxy)]) - # overlap between patches (size of patch in pixels: strides+overlaps) - overlaps = (24, 24) - # maximum deviation allowed for patch with respect to rigid shifts - max_deviation_rigid = 3 - border_nan = 'copy' - - params_dict = {'fnames': fnames, - 'fr': fr, - 'decay_time': decay_time, - 'dxy': dxy, - 'pw_rigid': pw_rigid, - 'max_shifts': max_shifts, - 'strides': strides, - 'overlaps': overlaps, - 'max_deviation_rigid': max_deviation_rigid, - 'border_nan': border_nan} - - opts = params.CNMFParams(params_dict=params_dict) - - m_orig = cm.load_movie_chain(fnames) + m_orig = caiman.load_movie_chain(opts.data['fnames']) # play the movie (optional) # playing the movie using opencv. It requires loading the movie in memory. 
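The config-handling flow that this patch (and the other refactored demos) converges on is compact enough to sketch on its own. The sketch below is illustrative, not part of the patch: the JSON path and movie filename are placeholders, while CNMFParams(params_from_file=...), opts.data['fnames'], and change_params(...) are the calls used in the surrounding diff.

    from caiman.source_extraction.cnmf import params

    # Load parameters from a JSON config file, then apply the same precedence
    # the demos use: a CLI --input list beats the config, and the config beats
    # the built-in default dataset.
    opts = params.CNMFParams(params_from_file='params_demo_pipeline.json')  # placeholder path
    cli_input = None  # stand-in for cfg.input from handle_args()
    if cli_input is not None:
        opts.change_params({'data': {'fnames': cli_input}})
    if not opts.data['fnames']:
        opts.change_params({'data': {'fnames': ['demoMovie.tif']}})  # placeholder default
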
@@ -102,11 +70,11 @@ def main(): moviehandle.play(q_max=99.5, fr=60, magnification=2) # start a cluster for parallel processing - c, dview, n_processes = cm.cluster.setup_cluster(backend=cfg.cluster_backend, n_processes=cfg.cluster_nproc) + c, dview, n_processes = caiman.cluster.setup_cluster(backend=cfg.cluster_backend, n_processes=cfg.cluster_nproc) # Motion Correction # first we create a motion correction object with the specified parameters - mc = MotionCorrect(fnames, dview=dview, **opts.get_group('motion')) + mc = MotionCorrect(opts.data['fnames'], dview=dview, **opts.get_group('motion')) # note that the file is not loaded in memory # Run (piecewise-rigid motion) correction using NoRMCorre @@ -114,10 +82,10 @@ def main(): # compare with original movie if not cfg.no_play: - m_orig = cm.load_movie_chain(fnames) - m_els = cm.load(mc.mmap_file) + m_orig = caiman.load_movie_chain(opts.data['fnames']) + m_els = caiman.load(mc.mmap_file) ds_ratio = 0.2 - moviehandle = cm.concatenate([m_orig.resize(1, 1, ds_ratio) - mc.min_mov*mc.nonneg_movie, + moviehandle = caiman.concatenate([m_orig.resize(1, 1, ds_ratio) - mc.min_mov*mc.nonneg_movie, m_els.resize(1, 1, ds_ratio)], axis=2) moviehandle.play(fr=60, q_max=99.5, magnification=2) # press q to exit @@ -128,50 +96,17 @@ def main(): # the boundaries # memory map the file in order 'C' - fname_new = cm.save_memmap(mc.mmap_file, base_name='memmap_', order='C', + fname_new = caiman.save_memmap(mc.mmap_file, base_name='memmap_', order='C', border_to_0=border_to_0) # exclude borders # now load the file - Yr, dims, T = cm.load_memmap(fname_new) + Yr, dims, T = caiman.load_memmap(fname_new) images = np.reshape(Yr.T, [T] + list(dims), order='F') # load frames in python format (T x X x Y) # restart cluster to clean up memory - cm.stop_server(dview=dview) - c, dview, n_processes = cm.cluster.setup_cluster(backend=cfg.cluster_backend, n_processes=cfg.cluster_nproc) - - # Parameters for source extraction and deconvolution - p = 1 # order of the autoregressive system - gnb = 2 # number of global background components - merge_thr = 0.85 # merging threshold, max correlation allowed - rf = 15 - # half-size of the patches in pixels. 
e.g., if rf=25, patches are 50x50 - stride_cnmf = 6 # amount of overlap between the patches in pixels - K = 4 # number of components per patch - gSig = [4, 4] # expected half size of neurons in pixels - # initialization method (if analyzing dendritic data using 'sparse_nmf') - method_init = 'greedy_roi' - ssub = 2 # spatial subsampling during initialization - tsub = 2 # temporal subsampling during initialization - - # parameters for component evaluation - opts_dict = {'fnames': fnames, - 'p': p, - 'fr': fr, - 'nb': gnb, - 'rf': rf, - 'K': K, - 'gSig': gSig, - 'stride': stride_cnmf, - 'method_init': method_init, - 'rolling_sum': True, - 'merge_thr': merge_thr, - 'n_processes': n_processes, - 'only_init': True, - 'ssub': ssub, - 'tsub': tsub} - - opts.change_params(params_dict=opts_dict); + caiman.stop_server(dview=dview) + c, dview, n_processes = caiman.cluster.setup_cluster(backend=cfg.cluster_backend, n_processes=cfg.cluster_nproc) # RUN CNMF ON PATCHES # First extract spatial and temporal components on patches and combine them @@ -196,7 +131,7 @@ def main(): # save results cnm.estimates.Cn = Cn - cnm.save(fname_new[:-5]+'_init.hdf5') + cnm.save(fname_new[:-5] + '_init.hdf5') # FIXME # RE-RUN seeded CNMF on accepted patches to refine and perform deconvolution cnm2 = cnm.refit(images, dview=dview) @@ -207,18 +142,6 @@ def main(): # b) a minimum peak SNR is required over the length of a transient # c) each shape passes a CNN based classifier - min_SNR = 2 # signal to noise ratio for accepting a component - rval_thr = 0.85 # space correlation threshold for accepting a component - use_cnn = True - cnn_thr = 0.99 # threshold for CNN based classifier - cnn_lowest = 0.1 # neurons with cnn probability lower than this value are rejected - - cnm2.params.set('quality', {'decay_time': decay_time, - 'min_SNR': min_SNR, - 'rval_thr': rval_thr, - 'use_cnn': use_cnn, - 'min_cnn_thr': cnn_thr, - 'cnn_lowest': cnn_lowest}) cnm2.estimates.evaluate_components(images, cnm2.params, dview=dview) if not cfg.no_play: @@ -252,7 +175,7 @@ def main(): include_bck=False) # background not shown # Stop the cluster and clean up log files - cm.stop_server(dview=dview) + caiman.stop_server(dview=dview) if not cfg.keep_logs: log_files = glob.glob('*_LOG_*') @@ -261,6 +184,7 @@ def main(): def handle_args(): parser = argparse.ArgumentParser(description="Demonstrate 2P Pipeline using batch algorithm") + parser.add_argument("--configfile", default=os.path.join(caiman.paths.caiman_datadir(), 'demos', 'general', 'params_demo_pipeline.json'), help="JSON Configfile for Caiman parameters") parser.add_argument("--keep_logs", action="store_true", help="Keep temporary logfiles") parser.add_argument("--no_play", action="store_true", help="Do not display results") parser.add_argument("--cluster_backend", default="multiprocessing", help="Specify multiprocessing, ipyparallel, or single to pick an engine") diff --git a/demos/general/params_demo_pipeline.json b/demos/general/params_demo_pipeline.json new file mode 100644 index 000000000..2e5241bd0 --- /dev/null +++ b/demos/general/params_demo_pipeline.json @@ -0,0 +1,46 @@ +{ + "data": { + "fr": 30, + "decay_time": 0.4, + "dxy": [2.0, 2.0], + "nb": 2 + }, + "init": { + "K": 4, + "gSig": [4, 4], + "method_init": "greedy_roi", + "rolling_sum": true, + "ssub": 2, + "tsub": 2 + }, + "motion": { + "pw_rigid": true, + "max_shifts": [6, 6], + "strides": [50, 50], + "overlaps": [24, 24], + "max_deviation_rigid": 3, + "border_nan": "copy" + }, + "preprocess": { + "p": 1 + }, + "temporal": { + "p": 
1 + }, + "patch": { + "rf": 15, + "stride": 6, + "only_init": true + }, + "merging": { + "merge_thr": 0.85 + }, + "quality": { + "decay_time": 0.4, + "min_SNR": 2, + "rval_thr": 0.85, + "use_cnn": true, + "min_cnn_thr": 0.99, + "cnn_lowest": 0.1 + } +} From fa065d6cde68e777c8445f67ff96a587ca24cbfd Mon Sep 17 00:00:00 2001 From: Pat Gunn Date: Tue, 12 Mar 2024 18:15:14 -0400 Subject: [PATCH 31/86] demo_pipeline_NWB conversion work --- demos/general/demo_pipeline.py | 2 +- demos/general/demo_pipeline_NWB.py | 151 ++++---------------- demos/general/params_demo_pipeline.json | 4 +- demos/general/params_demo_pipeline_NWB.json | 46 ++++++ 4 files changed, 78 insertions(+), 125 deletions(-) create mode 100644 demos/general/params_demo_pipeline_NWB.json diff --git a/demos/general/demo_pipeline.py b/demos/general/demo_pipeline.py index 76914dc17..1b8119178 100755 --- a/demos/general/demo_pipeline.py +++ b/demos/general/demo_pipeline.py @@ -50,7 +50,7 @@ def main(): level=logging.WARNING) # First set up some parameters for data and motion correction - opts = params.CNMFParams(params_from_file=cfg.input) + opts = params.CNMFParams(params_from_file=cfg.configfile) if cfg.input is not None: opts.change_params({"data": {"fnames": cfg.input}}) diff --git a/demos/general/demo_pipeline_NWB.py b/demos/general/demo_pipeline_NWB.py index e5a5dfd66..efd7ccc93 100644 --- a/demos/general/demo_pipeline_NWB.py +++ b/demos/general/demo_pipeline_NWB.py @@ -4,7 +4,8 @@ This script follows closely the demo_pipeline.py script but uses the Neurodata Without Borders (NWB) file format for loading the input and saving the output. It is meant as an example on how to use NWB files with CaImAn. -authors: @agiovann and @epnev + +If you provide a filename in the config json, it must be an nwb file. """ import argparse @@ -22,7 +23,7 @@ except: pass -import caiman as cm +import caiman from caiman.motion_correction import MotionCorrect from caiman.paths import caiman_datadir from caiman.source_extraction.cnmf import cnmf as cnmf @@ -47,15 +48,19 @@ def main(): "%(relativeCreated)12d [%(filename)s:%(funcName)20s():%(lineno)s][%(process)d] %(message)s", level=logging.WARNING) - # This demo will either accept a nwb file or it'll convert an existing movie into one - if cfg.input is None: + opts = params.CNMFParams(params_from_file=cfg.configfile) + if cfg.input is not None: + opts.change_params({"data": {"fnames": cfg.input}}) + # TODO throw error if it is not an nwb file + if not opts.data['fnames']: # Set neither by CLI arg nor through JSON, so use default data + # This demo will either accept a nwb file or it'll convert an existing movie into one # default path, no input was provide, so we'll take the "Sue" sample data, convert it to a nwb, and then ingest it. # The convert target is the original fn, with the extension swapped to nwb. 
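A minimal sketch (not part of this patch) of reading the converted container back, assuming the TwoPhotonSeries name the demo writes below:

    import os
    import caiman
    from caiman.paths import caiman_datadir
    # hypothetical round-trip check on the file the demo is about to create
    movie = caiman.load(os.path.join(caiman_datadir(), 'Sue_2x_3000_40_-46.nwb'),
                        var_name_hdf5='TwoPhotonSeries')
    print(movie.shape)  # (n_frames, height, width)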
fr = float(15) # imaging rate in frames per second pre_convert = download_demo('Sue_2x_3000_40_-46.tif') convert_target = os.path.join(caiman_datadir(), 'Sue_2x_3000_40_-46.nwb') - orig_movie = cm.load(pre_convert, fr=fr) + orig_movie = caiman.load(pre_convert, fr=fr) # save file in NWB format with various additional info orig_movie.save(convert_target, sess_desc="test", @@ -69,6 +74,7 @@ def main(): session_id='Session 1', var_name_hdf5='TwoPhotonSeries') fnames = [convert_target] + opts.change_params({'data': {'fnames': fnames, 'var_names_hdf5': 'TwoPhotonSeries'}}) elif cfg.input[0].endswith('.nwb'): if os.path.isfile(cfg.input[0]): # We were handed at least one nwb file and it exists either right here or as an absolute path @@ -91,65 +97,14 @@ def main(): else: # Someone mentioned an nwb file but we can't find it! raise Exception(f"Could not find referenced input file {cfg.input[0]}") else: - # We were handed a file in some other format, so let's make an nwb file out of it and - # then ingest that nwb file. - if len(cfg.input) != 1: - raise Exception("We're only willing to convert one file to nwb") - pre_convert = cfg.input[0] - post_convert = os.path.splitext(pre_convert)[0] + ".nwb" - fr = float(15) # TODO: Make this a cli parameter - orig_movie = cm.load(pre_convert, fr=fr) - # save file in NWB format with various additional info - orig_movie.save(convert_target, - sess_desc="test", - identifier="demo 1", - imaging_plane_description='single plane', - emission_lambda=520.0, indicator='GCAMP6f', # Entirely synthetic, but if we don't know, what can we do? - location='unknown', - experimenter='Unknown Name', lab_name='Unknown Lab' - institution='Unknown University', - experiment_description='Experiment Description', - session_id='Session 1') # XXX do we need to provide var_name_hdf5? - fnames = [post_convert] + raise Exception("If you're providing your own data to this demo, you must provide it in nwb format or pre-convert it yourself") # estimates save path can be same or different from raw data path save_path = os.path.splitext(fnames[0])[0] + '_CNMF_estimates.nwb' # TODO: Make this a parameter? # We're done with all the file input parts - # dataset dependent parameters - decay_time = 0.4 # length of a typical transient in seconds - dxy = (2., 2.) # spatial resolution in x and y in (um per pixel) - # note the lower than usual spatial resolution here - max_shift_um = (12., 12.) # maximum shift in um - patch_motion_um = (100., 100.) 
# patch size for non-rigid correction in um - - # First setup some parameters for data and motion correction - # motion correction parameters - pw_rigid = True # flag to select rigid vs pw_rigid motion correction - # maximum allowed rigid shift in pixels - max_shifts = [int(a/b) for a, b in zip(max_shift_um, dxy)] - # start a new patch for pw-rigid motion correction every x pixels - strides = tuple([int(a/b) for a, b in zip(patch_motion_um, dxy)]) - # overlap between patches (size of patch in pixels: strides+overlaps) - overlaps = (24, 24) - # maximum deviation allowed for patch with respect to rigid shifts - max_deviation_rigid = 3 - - params_dict = {'fnames': fnames, - 'fr': fr, - 'decay_time': decay_time, - 'dxy': dxy, - 'pw_rigid': pw_rigid, - 'max_shifts': max_shifts, - 'strides': strides, - 'overlaps': overlaps, - 'max_deviation_rigid': max_deviation_rigid, - 'border_nan': 'copy', - 'var_name_hdf5': 'TwoPhotonSeries'} - opts = params.CNMFParams(params_dict=params_dict) - - m_orig = cm.load_movie_chain(fnames, var_name_hdf5=opts.data['var_name_hdf5']) + m_orig = caiman.load_movie_chain(fnames, var_name_hdf5=opts.data['var_name_hdf5']) # play the movie (optional) # playing the movie using opencv. It requires loading the movie in memory. @@ -161,7 +116,7 @@ def main(): moviehandle.play(q_max=99.5, fr=60, magnification=2) # start a cluster for parallel processing - c, dview, n_processes = cm.cluster.setup_cluster(backend=cfg.cluster_backend, n_processes=cfg.cluster_nproc) + c, dview, n_processes = caiman.cluster.setup_cluster(backend=cfg.cluster_backend, n_processes=cfg.cluster_nproc) # Motion Correction # first we create a motion correction object with the specified parameters @@ -173,10 +128,10 @@ def main(): # compare with original movie if not cfg.no_play: - m_orig = cm.load_movie_chain(fnames, var_name_hdf5=opts.data['var_name_hdf5']) - m_els = cm.load(mc.mmap_file) + m_orig = caiman.load_movie_chain(fnames, var_name_hdf5=opts.data['var_name_hdf5']) + m_els = caiman.load(mc.mmap_file) ds_ratio = 0.2 - moviehandle = cm.concatenate([m_orig.resize(1, 1, ds_ratio) - mc.min_mov*mc.nonneg_movie, + moviehandle = caiman.concatenate([m_orig.resize(1, 1, ds_ratio) - mc.min_mov*mc.nonneg_movie, m_els.resize(1, 1, ds_ratio)], axis=2) moviehandle.play(fr=60, q_max=99.5, magnification=2) # press q to exit @@ -187,55 +142,24 @@ def main(): # the boundaries # memory map the file in order 'C' - fname_new = cm.save_memmap(mc.mmap_file, base_name='memmap_', order='C', + fname_new = caiman.save_memmap(mc.mmap_file, base_name='memmap_', order='C', border_to_0=border_to_0) # exclude borders # now load the file - Yr, dims, T = cm.load_memmap(fname_new) + Yr, dims, T = caiman.load_memmap(fname_new) images = np.reshape(Yr.T, [T] + list(dims), order='F') # load frames in python format (T x X x Y) # restart cluster to clean up memory - cm.stop_server(dview=dview) - c, dview, n_processes = cm.cluster.setup_cluster(backend=cfg.cluster_backend, n_processes=cfg.cluster_nproc) - - # parameters for source extraction and deconvolution - p = 1 # order of the autoregressive system - gnb = 2 # number of global background components - merge_thr = 0.85 # merging threshold, max correlation allowed - rf = 15 - # half-size of the patches in pixels. 
e.g., if rf=25, patches are 50x50 - stride_cnmf = 6 # amount of overlap between the patches in pixels - K = 4 # number of components per patch - gSig = [4, 4] # expected half size of neurons in pixels - # initialization method (if analyzing dendritic data using 'sparse_nmf') - method_init = 'greedy_roi' - ssub = 2 # spatial subsampling during initialization - tsub = 2 # temporal subsampling during initialization - - # parameters for component evaluation - opts_dict = {'fnames': fnames, - 'fr': fr, - 'nb': gnb, - 'rf': rf, - 'K': K, - 'gSig': gSig, - 'stride': stride_cnmf, - 'method_init': method_init, - 'rolling_sum': True, - 'merge_thr': merge_thr, - 'n_processes': n_processes, - 'only_init': True, - 'ssub': ssub, - 'tsub': tsub} - - opts.change_params(params_dict=opts_dict); + caiman.stop_server(dview=dview) + c, dview, n_processes = caiman.cluster.setup_cluster(backend=cfg.cluster_backend, n_processes=cfg.cluster_nproc) # RUN CNMF ON PATCHES # First extract spatial and temporal components on patches and combine them # for this step deconvolution is turned off (p=0) - opts.change_params({'p': 0}) + saved_p = opts.preprocess['p'] # Save the deconvolution parameter for later restoration + opts.change_params({'preprocess': {'p': 0}, 'temporal': {'p': 0}}) cnm = cnmf.CNMF(n_processes, params=opts, dview=dview) cnm = cnm.fit(images) @@ -247,7 +171,7 @@ def main(): # cnm1.fit_file(motion_correct=True) # plot contours of found components - Cn = cm.local_correlations(images, swap_dim=False) + Cn = caiman.summary_images.local_correlations(images, swap_dim=False) Cn[np.isnan(Cn)] = 0 if not cfg.no_play: cnm.estimates.plot_contours(img=Cn) @@ -255,31 +179,12 @@ def main(): # save results in a separate file (just for demonstration purposes) cnm.estimates.Cn = Cn - cnm.save(fname_new[:-4]+'hdf5') - #cm.movie(Cn).save(fname_new[:-5]+'_Cn.tif') + cnm.save(fname_new[:-4] + 'hdf5') # FIXME # RE-RUN seeded CNMF on accepted patches to refine and perform deconvolution - cnm.params.change_params({'p': p}) + cnm.params.change_params({'preprocess': {'p': saved_p}, 'temporal': {'p': saved_p}}) # Restore deconvolution cnm2 = cnm.refit(images, dview=dview) - # Component Evaluation - # Components are evaluated in three ways: - # a) the shape of each component must be correlated with the data - # b) a minimum peak SNR is required over the length of a transient - # c) each shape passes a CNN based classifier - - min_SNR = 2 # signal to noise ratio for accepting a component - rval_thr = 0.85 # space correlation threshold for accepting a component - use_cnn = True - cnn_thr = 0.99 # threshold for CNN based classifier - cnn_lowest = 0.1 # neurons with cnn probability lower than this value are rejected - - cnm2.params.set('quality', {'decay_time': decay_time, - 'min_SNR': min_SNR, - 'rval_thr': rval_thr, - 'use_cnn': use_cnn, - 'min_cnn_thr': cnn_thr, - 'cnn_lowest': cnn_lowest}) cnm2.estimates.evaluate_components(images, cnm2.params, dview=dview) # Save new estimates object (after adding correlation image) @@ -314,7 +219,7 @@ def main(): include_bck=False) # background not shown # Stop the cluster and clean up log files - cm.stop_server(dview=dview) + caiman.stop_server(dview=dview) if not cfg.keep_logs: log_files = glob.glob('*_LOG_*') @@ -327,6 +232,7 @@ def main(): def handle_args(): parser = argparse.ArgumentParser(description="Demonstrate 2P Pipeline using batch algorithm, with nwb files") + parser.add_argument("--configfile", default=os.path.join(caiman.paths.caiman_datadir(), 'demos', 'general', 
'params_demo_pipeline_NWB.json'), help="JSON Configfile for Caiman parameters") parser.add_argument("--keep_logs", action="store_true", help="Keep temporary logfiles") parser.add_argument("--no_play", action="store_true", help="Do not display results") parser.add_argument("--cluster_backend", default="multiprocessing", help="Specify multiprocessing, ipyparallel, or single to pick an engine") @@ -337,3 +243,4 @@ def handle_args(): ######## main() + diff --git a/demos/general/params_demo_pipeline.json b/demos/general/params_demo_pipeline.json index 2e5241bd0..152faad25 100644 --- a/demos/general/params_demo_pipeline.json +++ b/demos/general/params_demo_pipeline.json @@ -2,10 +2,10 @@ "data": { "fr": 30, "decay_time": 0.4, - "dxy": [2.0, 2.0], - "nb": 2 + "dxy": [2.0, 2.0] }, "init": { + "nb": 2, "K": 4, "gSig": [4, 4], "method_init": "greedy_roi", diff --git a/demos/general/params_demo_pipeline_NWB.json b/demos/general/params_demo_pipeline_NWB.json new file mode 100644 index 000000000..768d4ff5d --- /dev/null +++ b/demos/general/params_demo_pipeline_NWB.json @@ -0,0 +1,46 @@ +{ + "data": { + "fr": 15.0, + "decay_time": 0.4, + "dxy": [2.0, 2.0] + }, + "init": { + "nb": 2, + "K": 4, + "gSig": [4, 4], + "method_init": "greedy_roi", + "rolling_sum": true, + "ssub": 2, + "tsub": 2 + }, + "motion": { + "pw_rigid": true, + "max_shifts": [6, 6], + "strides": [50, 50], + "overlaps": [24, 24], + "max_deviation_rigid": 3, + "border_nan": "copy" + }, + "preprocess": { + "p": 1 + }, + "temporal": { + "p": 1 + }, + "patch": { + "rf": 15, + "stride": 6, + "only_init": true + }, + "merging": { + "merge_thr": 0.85 + }, + "quality": { + "decay_time": 0.4, + "min_SNR": 2, + "rval_thr": 0.85, + "use_cnn": true, + "min_cnn_thr": 0.99, + "cnn_lowest": 0.1 + } +} From 7cfc771a1c3d0c99ce7da23ac70cb38ab22de190 Mon Sep 17 00:00:00 2001 From: Pat Gunn Date: Tue, 12 Mar 2024 18:19:45 -0400 Subject: [PATCH 32/86] cli demos json format fix whoops --- demos/general/params_demo_OnACID_mesoscope.json | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/demos/general/params_demo_OnACID_mesoscope.json b/demos/general/params_demo_OnACID_mesoscope.json index 48aa536b8..f423dbc45 100644 --- a/demos/general/params_demo_OnACID_mesoscope.json +++ b/demos/general/params_demo_OnACID_mesoscope.json @@ -4,7 +4,7 @@ "decay_time": 0.5 }, "init": { - "gSig": (4, 4), + "gSig": [4, 4], "nb": 2, "K": 2 }, From 01362a7417a61d2fc632eecaba51d4742c4a23f6 Mon Sep 17 00:00:00 2001 From: Pat Gunn Date: Wed, 13 Mar 2024 14:46:09 -0400 Subject: [PATCH 33/86] demos refactor: fixing demos as testing --- demos/general/demo_caiman_basic.py | 2 -- demos/general/demo_pipeline_NWB.py | 9 ++++++--- demos/general/demo_pipeline_voltage_imaging.py | 0 3 files changed, 6 insertions(+), 5 deletions(-) mode change 100644 => 100755 demos/general/demo_pipeline_NWB.py mode change 100644 => 100755 demos/general/demo_pipeline_voltage_imaging.py diff --git a/demos/general/demo_caiman_basic.py b/demos/general/demo_caiman_basic.py index 31fa265b0..dc582b8d4 100755 --- a/demos/general/demo_caiman_basic.py +++ b/demos/general/demo_caiman_basic.py @@ -48,8 +48,6 @@ def main(): level=logging.WARNING) opts = params.CNMFParams(params_from_file=cfg.configfile) - if opts.data['fnames'] is None: - opts.change_params({"data": {"fnames": fnames}}) if cfg.input is not None: # CLI arg can override all other settings for fnames, although other data-centric commands still must match source data opts.change_params({'data': {'fnames': cfg.input}}) diff --git 
a/demos/general/demo_pipeline_NWB.py b/demos/general/demo_pipeline_NWB.py old mode 100644 new mode 100755 index efd7ccc93..cd30d1e59 --- a/demos/general/demo_pipeline_NWB.py +++ b/demos/general/demo_pipeline_NWB.py @@ -74,7 +74,7 @@ def main(): session_id='Session 1', var_name_hdf5='TwoPhotonSeries') fnames = [convert_target] - opts.change_params({'data': {'fnames': fnames, 'var_names_hdf5': 'TwoPhotonSeries'}}) + opts.change_params({'data': {'fnames': fnames, 'var_name_hdf5': 'TwoPhotonSeries'}}) elif cfg.input[0].endswith('.nwb'): if os.path.isfile(cfg.input[0]): # We were handed at least one nwb file and it exists either right here or as an absolute path @@ -100,7 +100,7 @@ def main(): raise Exception("If you're providing your own data to this demo, you must provide it in nwb format or pre-convert it yourself") # estimates save path can be same or different from raw data path - save_path = os.path.splitext(fnames[0])[0] + '_CNMF_estimates.nwb' # TODO: Make this a parameter? + save_path = os.path.splitext(fnames[0])[0] + '_CNMF_estimates.nwb' # We're done with all the file input parts @@ -136,7 +136,10 @@ def main(): moviehandle.play(fr=60, q_max=99.5, magnification=2) # press q to exit # MEMORY MAPPING - border_to_0 = 0 if mc.border_nan == 'copy' else mc.border_to_0 + if mc.border_nan == 'copy': + border_to_0 = 0 + else: + border_to_0 = mc.border_to_0 # you can include the boundaries of the FOV if you used the 'copy' option # during motion correction, although be careful about the components near # the boundaries diff --git a/demos/general/demo_pipeline_voltage_imaging.py b/demos/general/demo_pipeline_voltage_imaging.py old mode 100644 new mode 100755 From d04b0121f738529157fa853d7483577c9fc02c4c Mon Sep 17 00:00:00 2001 From: Pat Gunn Date: Wed, 13 Mar 2024 14:49:28 -0400 Subject: [PATCH 34/86] demos_refactor: params files should not try to set quality.decay_time (nothing reads it, not a valid parameter) --- demos/general/params_demo_pipeline.json | 1 - demos/general/params_demo_pipeline_NWB.json | 1 - 2 files changed, 2 deletions(-) diff --git a/demos/general/params_demo_pipeline.json b/demos/general/params_demo_pipeline.json index 152faad25..567f1d255 100644 --- a/demos/general/params_demo_pipeline.json +++ b/demos/general/params_demo_pipeline.json @@ -36,7 +36,6 @@ "merge_thr": 0.85 }, "quality": { - "decay_time": 0.4, "min_SNR": 2, "rval_thr": 0.85, "use_cnn": true, diff --git a/demos/general/params_demo_pipeline_NWB.json b/demos/general/params_demo_pipeline_NWB.json index 768d4ff5d..6cca1d199 100644 --- a/demos/general/params_demo_pipeline_NWB.json +++ b/demos/general/params_demo_pipeline_NWB.json @@ -36,7 +36,6 @@ "merge_thr": 0.85 }, "quality": { - "decay_time": 0.4, "min_SNR": 2, "rval_thr": 0.85, "use_cnn": true, From 29dcdcfc233d4ccdf3dccf01f748006f9b82b1e0 Mon Sep 17 00:00:00 2001 From: Pat Gunn Date: Thu, 14 Mar 2024 10:27:17 -0400 Subject: [PATCH 35/86] CLI demos rework: fix demo_onacid_mesoscope's visualisation defaults, match old vals --- demos/general/demo_OnACID_mesoscope.py | 25 +++++++++++-------- .../general/params_demo_OnACID_mesoscope.json | 2 +- 2 files changed, 15 insertions(+), 12 deletions(-) diff --git a/demos/general/demo_OnACID_mesoscope.py b/demos/general/demo_OnACID_mesoscope.py index fc9894809..79bd9d585 100755 --- a/demos/general/demo_OnACID_mesoscope.py +++ b/demos/general/demo_OnACID_mesoscope.py @@ -16,9 +16,9 @@ import argparse import glob +import logging import numpy as np import os -import logging import matplotlib.pyplot as plt try: @@ -37,28 
+37,31 @@ def main(): if cfg.logfile: logging.basicConfig(format= - "%(relativeCreated)12d [%(filename)s:%(funcName)20s():%(lineno)s][%(process)d] %(message)s", - level=logging.WARNING, + "[%(filename)s:%(funcName)20s():%(lineno)s] %(message)s", + level=logging.INFO, filename=cfg.logfile) # You can make the output more or less verbose by setting level to logging.DEBUG, logging.INFO, logging.WARNING, or logging.ERROR else: logging.basicConfig(format= - "%(relativeCreated)12d [%(filename)s:%(funcName)20s():%(lineno)s][%(process)d] %(message)s", - level=logging.WARNING) + "[%(filename)s:%(funcName)20s():%(lineno)s] %(message)s", + level=logging.INFO) opts = cnmf.params.CNMFParams(params_from_file=cfg.configfile) - opts.change_params({'online': {'show_movie': not cfg.no_play}}) # Override from CLI - if cfg.input is not None: # CLI arg can override all other settings for fnames, although other data-centric commands still must match source data + if cfg.input is not None and len(cfg.input) != 0: # CLI arg can override all other settings for fnames, although other data-centric commands still must match source data opts.change_params({'data': {'fnames': cfg.input}}) if not opts.data['fnames']: # Set neither by CLI arg nor through JSON, so use default data - fnames = [download_demo('Tolias_mesoscope_1.hdf5'), - download_demo('Tolias_mesoscope_2.hdf5'), - download_demo('Tolias_mesoscope_3.hdf5')] + fld_name = 'Mesoscope' + print("Downloading demos...") + fnames = [download_demo('Tolias_mesoscope_1.hdf5', fld_name), + download_demo('Tolias_mesoscope_2.hdf5', fld_name), + download_demo('Tolias_mesoscope_3.hdf5', fld_name)] opts.change_params({'data': {'fnames': fnames}}) + opts.change_params({'online': {'show_movie': cfg.show_movie}}) # Override from CLI + print(f"Params: {opts.to_json()}") # fit online cnm = cnmf.online_cnmf.OnACID(params=opts) cnm.fit_online() @@ -119,7 +122,7 @@ def main(): def handle_args(): parser = argparse.ArgumentParser(description="Full OnACID Caiman demo") parser.add_argument("--configfile", default=os.path.join(caiman_datadir(), 'demos', 'general', 'params_demo_OnACID_mesoscope.json'), help="JSON Configfile for Caiman parameters") - parser.add_argument("--no_play", action="store_true", help="Do not display results") + parser.add_argument("--show_movie", action="store_true", help="Display results as movie") parser.add_argument("--cluster_backend", default="multiprocessing", help="Specify multiprocessing, ipyparallel, or single to pick an engine") parser.add_argument("--cluster_nproc", type=int, default=None, help="Override automatic selection of number of workers to use") parser.add_argument("--input", action="append", help="File(s) to work on, provide multiple times for more files") diff --git a/demos/general/params_demo_OnACID_mesoscope.json b/demos/general/params_demo_OnACID_mesoscope.json index f423dbc45..623501a18 100644 --- a/demos/general/params_demo_OnACID_mesoscope.json +++ b/demos/general/params_demo_OnACID_mesoscope.json @@ -4,7 +4,7 @@ "decay_time": 0.5 }, "init": { - "gSig": [4, 4], + "gSig": [3, 3], "nb": 2, "K": 2 }, From 2f6291e7c9690fde42b9c91fc0320d955dfc28c8 Mon Sep 17 00:00:00 2001 From: Pat Gunn Date: Tue, 16 Apr 2024 11:25:41 -0400 Subject: [PATCH 36/86] demo_behaviour:rebasing --- demos/general/demo_OnACID.py | 8 +- demos/general/demo_behavior.py | 117 ++++++++++++++++++ demos/general/demo_caiman_basic.py | 8 +- demos/general/demo_pipeline.py | 8 +- demos/general/demo_pipeline_NWB.py | 8 +- demos/general/demo_pipeline_cnmfE.py | 8 +- 
.../general/demo_pipeline_voltage_imaging.py | 8 +- 7 files changed, 141 insertions(+), 24 deletions(-) create mode 100755 demos/general/demo_behavior.py diff --git a/demos/general/demo_OnACID.py b/demos/general/demo_OnACID.py index 741b98f03..2232fefa5 100755 --- a/demos/general/demo_OnACID.py +++ b/demos/general/demo_OnACID.py @@ -28,14 +28,14 @@ def main(): if cfg.logfile: logging.basicConfig(format= - "%(relativeCreated)12d [%(filename)s:%(funcName)20s():%(lineno)s][%(process)d] %(message)s", - level=logging.WARNING, + "[%(filename)s:%(funcName)20s():%(lineno)s] %(message)s", + level=logging.INFO, filename=cfg.logfile) # You can make the output more or less verbose by setting level to logging.DEBUG, logging.INFO, logging.WARNING, or logging.ERROR else: logging.basicConfig(format= - "%(relativeCreated)12d [%(filename)s:%(funcName)20s():%(lineno)s][%(process)d] %(message)s", - level=logging.WARNING) + "[%(filename)s:%(funcName)20s():%(lineno)s] %(message)s", + level=logging.INFO) opts = cnmf.params.CNMFParams(params_from_file=cfg.configfile) diff --git a/demos/general/demo_behavior.py b/demos/general/demo_behavior.py new file mode 100755 index 000000000..9547d4e39 --- /dev/null +++ b/demos/general/demo_behavior.py @@ -0,0 +1,117 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- + +""" +Created on Sun Aug 6 11:43:39 2017 + +@author: agiovann +""" + +import cv2 +from IPython import get_ipython +import logging +import numpy as np +import matplotlib.pyplot as plt + +try: + cv2.setNumThreads(0) +except: + pass + +try: + if __IPYTHON__: + print("Detected iPython") + ipython = get_ipython() + ipython.run_line_magic('load_ext', 'autoreload') + ipython.run_line_magic('autoreload', '2') + ipython.run_line_magic('matplotlib', 'qt') +except NameError: + pass + +import caiman as cm +from caiman.behavior import behavior +from caiman.utils.utils import download_demo + +#%% +# Set up the logger; change this if you like. 
+# You can log to a file using the filename parameter, or make the output more or less
+# verbose by setting level to logging.DEBUG, logging.INFO, logging.WARNING, or logging.ERROR
+
+logging.basicConfig(format=
+    "%(relativeCreated)12d [%(filename)s:%(funcName)20s():%(lineno)s] [%(process)d] %(message)s",
+    # filename="/tmp/caiman.log",
+    level=logging.WARNING)
+
+#%%
+def main():
+    pass  # For compatibility between running under IDE and the CLI
+
+    #%%
+    plt.ion()
+
+    fname = [u'demo_behavior.h5']
+    if fname[0] in ['demo_behavior.h5']:
+        # TODO: todocument
+        fname = [download_demo(fname[0])]
+    # TODO: todocument
+    m = cm._load_behavior(fname[0])
+
+    #%% load, rotate and eliminate useless pixels
+    m = m.transpose([0, 2, 1])
+    m = m[:, 150:, :]
+
+    #%% visualize movie
+    m.play()
+
+    #%% select interesting portion of the FOV (draw a polygon on the figure that pops up, when done press enter)
+    # TODO: Put the message below into the image
+    print("Please draw a polygon delimiting the ROI on the image that will be displayed next; press enter when done")
+    mask = np.array(behavior.select_roi(np.median(m[::100], 0), 1)[0], np.float32)
+
+    #%%
+    n_components = 4  # number of movements looked for
+    resize_fact = 0.5  # for computational efficiency, movies are downsampled
+    # number of standard deviations above mean for the magnitude that are considered enough to measure the angle in polar coordinates
+    num_std_mag_for_angle = .6
+    only_magnitude = False  # if only interested in factorizing over the magnitude
+    method_factorization = 'dict_learn'  # could also use nmf
+    # number of iterations for the dictionary learning algorithm (Mairal et al., 2010)
+    max_iter_DL = -30
+
+    spatial_filter_, time_trace_, of_or = cm.behavior.behavior.extract_motor_components_OF(m, n_components, mask=mask,
+        resize_fact=resize_fact, only_magnitude=only_magnitude, verbose=True, method_factorization=method_factorization, max_iter_DL=max_iter_DL)
+
+    #%%
+    mags, dircts, dircts_thresh, spatial_masks_thrs = cm.behavior.behavior.extract_magnitude_and_angle_from_OF(
+        spatial_filter_, time_trace_, of_or, num_std_mag_for_angle=num_std_mag_for_angle, sav_filter_size=3, only_magnitude=only_magnitude)
+
+    #%%
+    idd = 0
+    axlin = plt.subplot(n_components, 2, 2)
+    for mag, dirct, spatial_filter in zip(mags, dircts_thresh, spatial_filter_):
+        plt.subplot(n_components, 2, 1 + idd * 2)
+        min_x, min_y = np.min(np.where(mask), 1)
+
+        spfl = spatial_filter
+        spfl = cm.movie(spfl[None, :, :]).resize(
+            1 / resize_fact, 1 / resize_fact, 1).squeeze()
+        max_x, max_y = np.add((min_x, min_y), np.shape(spfl))
+
+        mask[min_x:max_x, min_y:max_y] = spfl
+        mask[mask < np.nanpercentile(spfl, 70)] = np.nan
+        plt.imshow(m[0], cmap='gray')
+        plt.imshow(mask, alpha=.5)
+        plt.axis('off')
+
+        axelin = plt.subplot(n_components, 2, 2 + idd * 2, sharex=axlin)
+        plt.plot(mag / 10, 'k')
+        dirct[mag < 0.5 * np.std(mag)] = np.nan
+        plt.plot(dirct, 'r-', linewidth=2)
+
+        idd += 1
+
+#%%
+# This is to mask the differences between running this demo in an IDE
+# versus from the CLI
+if __name__ == "__main__":
+    main()
\ No newline at end of file
diff --git a/demos/general/demo_caiman_basic.py b/demos/general/demo_caiman_basic.py
index dc582b8d4..255e5fb00 100755
--- a/demos/general/demo_caiman_basic.py
+++ b/demos/general/demo_caiman_basic.py
@@ -38,14 +38,14 @@ def main():
     if cfg.logfile:
         logging.basicConfig(format=
-                            "%(relativeCreated)12d [%(filename)s:%(funcName)20s():%(lineno)s][%(process)d] %(message)s",
-                            level=logging.WARNING,
+
"[%(filename)s:%(funcName)20s():%(lineno)s] %(message)s", + level=logging.INFO, filename=cfg.logfile) # You can make the output more or less verbose by setting level to logging.DEBUG, logging.INFO, logging.WARNING, or logging.ERROR else: logging.basicConfig(format= - "%(relativeCreated)12d [%(filename)s:%(funcName)20s():%(lineno)s][%(process)d] %(message)s", - level=logging.WARNING) + "[%(filename)s:%(funcName)20s():%(lineno)s] %(message)s", + level=logging.INFO) opts = params.CNMFParams(params_from_file=cfg.configfile) diff --git a/demos/general/demo_pipeline.py b/demos/general/demo_pipeline.py index 1b8119178..9a8d45c50 100755 --- a/demos/general/demo_pipeline.py +++ b/demos/general/demo_pipeline.py @@ -40,14 +40,14 @@ def main(): if cfg.logfile: logging.basicConfig(format= - "%(relativeCreated)12d [%(filename)s:%(funcName)20s():%(lineno)s][%(process)d] %(message)s", - level=logging.WARNING, + "[%(filename)s:%(funcName)20s():%(lineno)s] %(message)s", + level=logging.INFO, filename=cfg.logfile) # You can make the output more or less verbose by setting level to logging.DEBUG, logging.INFO, logging.WARNING, or logging.ERROR else: logging.basicConfig(format= - "%(relativeCreated)12d [%(filename)s:%(funcName)20s():%(lineno)s][%(process)d] %(message)s", - level=logging.WARNING) + "[%(filename)s:%(funcName)20s():%(lineno)s] %(message)s", + level=logging.INFO) # First set up some parameters for data and motion correction opts = params.CNMFParams(params_from_file=cfg.configfile) diff --git a/demos/general/demo_pipeline_NWB.py b/demos/general/demo_pipeline_NWB.py index cd30d1e59..22e66f139 100755 --- a/demos/general/demo_pipeline_NWB.py +++ b/demos/general/demo_pipeline_NWB.py @@ -39,14 +39,14 @@ def main(): if cfg.logfile: logging.basicConfig(format= - "%(relativeCreated)12d [%(filename)s:%(funcName)20s():%(lineno)s][%(process)d] %(message)s", - level=logging.WARNING, + "[%(filename)s:%(funcName)20s():%(lineno)s] %(message)s", + level=logging.INFO, filename=cfg.logfile) # You can make the output more or less verbose by setting level to logging.DEBUG, logging.INFO, logging.WARNING, or logging.ERROR else: logging.basicConfig(format= - "%(relativeCreated)12d [%(filename)s:%(funcName)20s():%(lineno)s][%(process)d] %(message)s", - level=logging.WARNING) + "[%(filename)s:%(funcName)20s():%(lineno)s] %(message)s", + level=logging.INFO) opts = params.CNMFParams(params_from_file=cfg.configfile) if cfg.input is not None: diff --git a/demos/general/demo_pipeline_cnmfE.py b/demos/general/demo_pipeline_cnmfE.py index 4b10d7d86..e89997414 100755 --- a/demos/general/demo_pipeline_cnmfE.py +++ b/demos/general/demo_pipeline_cnmfE.py @@ -36,14 +36,14 @@ def main(): if cfg.logfile: logging.basicConfig(format= - "%(relativeCreated)12d [%(filename)s:%(funcName)20s():%(lineno)s][%(process)d] %(message)s", - level=logging.WARNING, + "[%(filename)s:%(funcName)20s():%(lineno)s] %(message)s", + level=logging.INFO, filename=cfg.logfile) # You can make the output more or less verbose by setting level to logging.DEBUG, logging.INFO, logging.WARNING, or logging.ERROR else: logging.basicConfig(format= - "%(relativeCreated)12d [%(filename)s:%(funcName)20s():%(lineno)s][%(process)d] %(message)s", - level=logging.WARNING) + "[%(filename)s:%(funcName)20s():%(lineno)s] %(message)s", + level=logging.INFO) if cfg.input is None: # If no input is specified, use sample data, downloading if necessary diff --git a/demos/general/demo_pipeline_voltage_imaging.py b/demos/general/demo_pipeline_voltage_imaging.py index ebca473a5..ad1d210bc 
100755 --- a/demos/general/demo_pipeline_voltage_imaging.py +++ b/demos/general/demo_pipeline_voltage_imaging.py @@ -37,14 +37,14 @@ def main(): if cfg.logfile: logging.basicConfig(format= - "%(relativeCreated)12d [%(filename)s:%(funcName)20s():%(lineno)s][%(process)d] %(message)s", - level=logging.WARNING, + "[%(filename)s:%(funcName)20s():%(lineno)s] %(message)s", + level=logging.INFO, filename=cfg.logfile) # You can make the output more or less verbose by setting level to logging.DEBUG, logging.INFO, logging.WARNING, or logging.ERROR else: logging.basicConfig(format= - "%(relativeCreated)12d [%(filename)s:%(funcName)20s():%(lineno)s][%(process)d] %(message)s", - level=logging.WARNING) + "[%(filename)s:%(funcName)20s():%(lineno)s] %(message)s", + level=logging.INFO) if cfg.input is None: # If no input is specified, use sample data, downloading if necessary From a6bfde276011844d7326badbbc694dde6b0659d5 Mon Sep 17 00:00:00 2001 From: Pat Gunn Date: Thu, 14 Mar 2024 16:19:48 -0400 Subject: [PATCH 37/86] demo refactor: demo_pipeline_voltage_imaging: make method a commandline argument --- demos/general/demo_pipeline_voltage_imaging.py | 13 ++++--------- 1 file changed, 4 insertions(+), 9 deletions(-) diff --git a/demos/general/demo_pipeline_voltage_imaging.py b/demos/general/demo_pipeline_voltage_imaging.py index ad1d210bc..2956ba2b0 100755 --- a/demos/general/demo_pipeline_voltage_imaging.py +++ b/demos/general/demo_pipeline_voltage_imaging.py @@ -151,23 +151,17 @@ def main(): axs[0].imshow(summary_images[0]); axs[1].imshow(summary_images[2]) axs[0].set_title('mean image'); axs[1].set_title('corr image'); - # methods for segmentation - methods_list = ['manual_annotation', # manual annotations need prepared annotated datasets in the same format as demo_voltage_imaging_ROIs.hdf5 - 'maskrcnn', # Mask R-CNN is a convolutional neural network trained for detecting neurons in summary images - 'gui_annotation'] # use VolPy GUI to correct outputs of Mask R-CNN or annotate new datasets - - method = methods_list[0] - if method == 'manual_annotation': + if cfg.method == 'manual_annotation': with h5py.File(path_ROIs, 'r') as fl: ROIs = fl['mov'][()] - elif method == 'maskrcnn': + elif cfg.method == 'maskrcnn': weights_path = download_model('mask_rcnn') ROIs = utils.mrcnn_inference(img=summary_images.transpose([1, 2, 0]), size_range=[5, 22], weights_path=weights_path, display_result=True) # size parameter decides size range of masks to be selected cm.movie(ROIs).save(fnames[:-5] + '_mrcnn_ROIs.hdf5') - elif method == 'gui_annotation': + elif cfg.method == 'gui_annotation': # run volpy_gui.py file in the caiman/source_extraction/volpy folder gui_ROIs = caiman_datadir() + '/example_movies/volpy/gui_roi.hdf5' with h5py.File(gui_ROIs, 'r') as fl: @@ -277,6 +271,7 @@ def handle_args(): parser.add_argument("--input", action="append", help="File(s) to work on, provide multiple times for more files") parser.add_argument("--pathinput", action="append", help="Path input file(s) to work on, provide multiple times for more files") parser.add_argument("--logfile", help="If specified, log to the named file") + parser.add_argument("--method", default="manual_annotation", choices=["manual_annotation", "maskrcnn", "gui_annotation"], help="Segmentation method") return parser.parse_args() ######## From c27f1a0e2fbecaf91986540d862048f69ffbb1e3 Mon Sep 17 00:00:00 2001 From: Pat Gunn Date: Fri, 15 Mar 2024 14:24:23 -0400 Subject: [PATCH 38/86] demos_refactor: volpy demo import tweaks --- 
.../general/demo_pipeline_voltage_imaging.py | 25 ++++++++++--------- 1 file changed, 13 insertions(+), 12 deletions(-) diff --git a/demos/general/demo_pipeline_voltage_imaging.py b/demos/general/demo_pipeline_voltage_imaging.py index 2956ba2b0..20ad228e1 100755 --- a/demos/general/demo_pipeline_voltage_imaging.py +++ b/demos/general/demo_pipeline_voltage_imaging.py @@ -1,4 +1,5 @@ #!/usr/bin/env python + """ Demo pipeline for processing voltage imaging data. The processing pipeline includes motion correction, memory mapping, segmentation, denoising and source @@ -22,7 +23,7 @@ except: pass -import caiman as cm +import caiman from caiman.motion_correction import MotionCorrect from caiman.paths import caiman_datadir from caiman.source_extraction.volpy import utils @@ -86,13 +87,13 @@ def main(): # playing the movie using opencv. It requires loading the movie in memory. # To close the movie press q if not cfg.no_play: - m_orig = cm.load(fnames) + m_orig = caiman.load(fnames) ds_ratio = 0.2 moviehandle = m_orig.resize(1, 1, ds_ratio) moviehandle.play(q_max=99.5, fr=40, magnification=4) # start a cluster for parallel processing - c, dview, n_processes = cm.cluster.setup_cluster(backend=cfg.cluster_backend, n_processes=cfg.cluster_nproc) + c, dview, n_processes = caiman.cluster.setup_cluster(backend=cfg.cluster_backend, n_processes=cfg.cluster_nproc) # %%% MOTION CORRECTION # first we create a motion correction object with the specified parameters @@ -109,10 +110,10 @@ def main(): # compare with original movie if not cfg.no_play: - m_orig = cm.load(fnames) - m_rig = cm.load(mc.mmap_file) + m_orig = caiman.load(fnames) + m_rig = caiman.load(mc.mmap_file) ds_ratio = 0.2 - moviehandle = cm.concatenate([m_orig.resize(1, 1, ds_ratio), + moviehandle = caiman.concatenate([m_orig.resize(1, 1, ds_ratio), m_rig.resize(1, 1, ds_ratio)], axis=2) moviehandle.play(fr=40, q_max=99.5, magnification=4) # press q to exit @@ -125,7 +126,7 @@ def main(): # the boundaries # memory map the file in order 'C' - fname_new = cm.save_memmap_join(mc.mmap_file, base_name='memmap_' + os.path.splitext(os.path.split(fnames)[-1])[0], + fname_new = caiman.save_memmap_join(mc.mmap_file, base_name='memmap_' + os.path.splitext(os.path.split(fnames)[-1])[0], add_to_mov=border_to_0, dview=dview) # exclude border else: mmap_list = [file for file in os.listdir(file_dir) if @@ -146,7 +147,7 @@ def main(): img_corr = (Cn-np.mean(Cn))/np.std(Cn) summary_images = np.stack([img, img, img_corr], axis=0).astype(np.float32) # save summary images which are used in the VolPy GUI - cm.movie(summary_images).save(fnames[:-5] + '_summary_images.tif') + caiman.movie(summary_images).save(fnames[:-5] + '_summary_images.tif') fig, axs = plt.subplots(1, 2) axs[0].imshow(summary_images[0]); axs[1].imshow(summary_images[2]) axs[0].set_title('mean image'); axs[1].set_title('corr image'); @@ -159,7 +160,7 @@ def main(): weights_path = download_model('mask_rcnn') ROIs = utils.mrcnn_inference(img=summary_images.transpose([1, 2, 0]), size_range=[5, 22], weights_path=weights_path, display_result=True) # size parameter decides size range of masks to be selected - cm.movie(ROIs).save(fnames[:-5] + '_mrcnn_ROIs.hdf5') + caiman.movie(ROIs).save(fnames[:-5] + '_mrcnn_ROIs.hdf5') elif cfg.method == 'gui_annotation': # run volpy_gui.py file in the caiman/source_extraction/volpy folder @@ -172,8 +173,8 @@ def main(): axs[0].set_title('mean image'); axs[1].set_title('masks') # restart cluster to clean up memory - cm.stop_server(dview=dview) - c, dview, n_processes = 
cm.cluster.setup_cluster(backend=cfg.cluster_backend, n_processes=cfg.cluster_nproc) + caiman.stop_server(dview=dview) + c, dview, n_processes = caiman.cluster.setup_cluster(backend=cfg.cluster_backend, n_processes=cfg.cluster_nproc) # parameters for trace denoising and spike extraction @@ -255,7 +256,7 @@ def main(): reuse_weights.append(w) # Stop the cluster and clean up log files - cm.stop_server(dview=dview) + caiman.stop_server(dview=dview) if not cfg.keep_logs: log_files = glob.glob('*_LOG_*') From ad2533a003d808d12281b775352e131b0418b41f Mon Sep 17 00:00:00 2001 From: Pat Gunn Date: Fri, 15 Mar 2024 14:47:40 -0400 Subject: [PATCH 39/86] CNMF: Fix some issues from recent CNMFParams refactor --- demos/general/demo_pipeline_cnmfE.py | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/demos/general/demo_pipeline_cnmfE.py b/demos/general/demo_pipeline_cnmfE.py index e89997414..7e44220c4 100755 --- a/demos/general/demo_pipeline_cnmfE.py +++ b/demos/general/demo_pipeline_cnmfE.py @@ -22,7 +22,7 @@ import numpy as np -import caiman as cm +import caiman from caiman.motion_correction import MotionCorrect from caiman.source_extraction import cnmf from caiman.source_extraction.cnmf import params as params @@ -83,7 +83,7 @@ def main(): opts = params.CNMFParams(params_dict=params_dict) - c, dview, n_processes = cm.cluster.setup_cluster(backend=cfg.cluster_backend, n_processes=cfg.cluster_nproc) + c, dview, n_processes = caiman.cluster.setup_cluster(backend=cfg.cluster_backend, n_processes=cfg.cluster_nproc) # Motion Correction # The pw_rigid flag set above, determines where to use rigid or pw-rigid # motion correction @@ -104,14 +104,14 @@ def main(): plt.ylabel('pixels') bord_px = 0 if border_nan == 'copy' else bord_px - fname_new = cm.save_memmap(fname_mc, base_name='memmap_', order='C', + fname_new = caiman.save_memmap(fname_mc, base_name='memmap_', order='C', border_to_0=bord_px) else: # if no motion correction just memory map the file - fname_new = cm.save_memmap(filename_reorder, base_name='memmap_', + fname_new = caiman.save_memmap(filename_reorder, base_name='memmap_', order='C', border_to_0=0, dview=dview) # load memory mappable file - Yr, dims, T = cm.load_memmap(fname_new) + Yr, dims, T = caiman.load_memmap(fname_new) images = Yr.T.reshape((T,) + dims, order='F') # Parameters for source extraction and deconvolution (CNMF-E algorithm) @@ -171,7 +171,7 @@ def main(): # compute some summary images (correlation and peak to noise) # change swap dim if output looks weird, it is a problem with tiffile - cn_filter, pnr = cm.summary_images.correlation_pnr(images[::1], gSig=gSig[0], swap_dim=False) + cn_filter, pnr = caiman.summary_images.correlation_pnr(images[::1], gSig=gSig[0], swap_dim=False) # if your images file is too long this computation will take unnecessarily # long time and consume a lot of memory. 
Consider changing images[::1] to # images[::5] or something similar to compute on a subset of the data @@ -218,7 +218,7 @@ def main(): include_bck=False, gain_res=4, bpx=bord_px) # Stop the cluster and clean up log files - cm.stop_server(dview=dview) + caiman.stop_server(dview=dview) if not cfg.keep_logs: log_files = glob.glob('*_LOG_*') From 896389a861ee9e90c79422aea2a489c9c6899788 Mon Sep 17 00:00:00 2001 From: Pat Gunn Date: Fri, 12 Apr 2024 15:06:08 -0400 Subject: [PATCH 40/86] demo_pipeline_cnmfE.py: ready for testing --- demos/general/demo_pipeline_cnmfE.py | 133 +++--------------- demos/general/params_demo_pipeline_cnmfE.json | 56 ++++++++ 2 files changed, 78 insertions(+), 111 deletions(-) create mode 100644 demos/general/params_demo_pipeline_cnmfE.json diff --git a/demos/general/demo_pipeline_cnmfE.py b/demos/general/demo_pipeline_cnmfE.py index 7e44220c4..871d48c41 100755 --- a/demos/general/demo_pipeline_cnmfE.py +++ b/demos/general/demo_pipeline_cnmfE.py @@ -9,10 +9,7 @@ CNMF-E algorithm for source extraction (as opposed to plain CNMF). Check the companion paper for more details. -You can also run a large part of the pipeline with a single method -(cnmf.fit_file) See inside for details. - -Demo is also available as a jupyter notebook (see demo_pipeline_cnmfE.ipynb) +See also the jupyter notebook demo_pipeline_cnmfE.ipynb """ import argparse @@ -45,54 +42,28 @@ def main(): "[%(filename)s:%(funcName)20s():%(lineno)s] %(message)s", level=logging.INFO) - if cfg.input is None: + opts = params.CNMFParams(params_from_file=cfg.configfile) + + if cfg.input is not None: # If no input is specified, use sample data, downloading if necessary + opts.change_params({"data": {"fnames": cfg.input}}) + if not opts.data['fnames']: # Set neither by CLI arg nor through JSON, so use default data fnames = [download_demo('data_endoscope.tif')] - else: - fnames = cfg.input - filename_reorder = fnames # XXX What is this? 
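Taken together, the configuration pattern these demo hunks converge on looks like this as a standalone sketch (same CNMFParams calls used throughout this patch series; the JSON path is hypothetical):

    from caiman.source_extraction.cnmf import params
    from caiman.utils.utils import download_demo

    opts = params.CNMFParams(params_from_file='params_demo_pipeline_cnmfE.json')  # JSON supplies defaults
    cli_input = None  # stand-in for cfg.input gathered by argparse
    if cli_input is not None:
        opts.change_params({'data': {'fnames': cli_input}})  # a CLI argument overrides the JSON
    if not opts.data['fnames']:  # set neither on the CLI nor in the JSON: fall back to demo data
        opts.change_params({'data': {'fnames': [download_demo('data_endoscope.tif')]}})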
+ opts.change_params({"data": {"fnames": fnames}}) # First set up some parameters for data and motion correction # dataset dependent parameters - fr = 10 # movie frame rate - decay_time = 0.4 # length of a typical transient in seconds - - # motion correction parameters - motion_correct = True # flag for motion correction - pw_rigid = False # flag for pw-rigid motion correction - - gSig_filt = (3, 3) # size of filter, in general gSig (see below), - # change this one if algorithm does not work - max_shifts = (5, 5) # maximum allowed rigid shift - strides = (48, 48) # start a new patch for pw-rigid motion correction every x pixels - overlaps = (24, 24) # overlap between patches (size of patch strides+overlaps) - # maximum deviation allowed for patch with respect to rigid shifts - max_deviation_rigid = 3 - border_nan = 'copy' - - params_dict = {'fnames': fnames, - 'fr': fr, - 'decay_time': decay_time, - 'pw_rigid': pw_rigid, - 'max_shifts': max_shifts, - 'gSig_filt': gSig_filt, - 'strides': strides, - 'overlaps': overlaps, - 'max_deviation_rigid': max_deviation_rigid, - 'border_nan': border_nan} - - opts = params.CNMFParams(params_dict=params_dict) c, dview, n_processes = caiman.cluster.setup_cluster(backend=cfg.cluster_backend, n_processes=cfg.cluster_nproc) # Motion Correction - # The pw_rigid flag set above, determines where to use rigid or pw-rigid + # The motion:pw_rigid parameter determines where to use rigid or pw-rigid # motion correction - if motion_correct: + if not cfg.no_motion_correct: # do motion correction rigid - mc = MotionCorrect(fnames, dview=dview, **opts.get_group('motion')) + mc = MotionCorrect(opts.data['fnames'], dview=dview, **opts.get_group('motion')) mc.motion_correct(save_movie=True) - fname_mc = mc.fname_tot_els if pw_rigid else mc.fname_tot_rig - if pw_rigid: + fname_mc = mc.fname_tot_els if opts.motion['pw_rigid'] else mc.fname_tot_rig + if opts.motion['pw_rigid']: bord_px = np.ceil(np.maximum(np.max(np.abs(mc.x_shifts_els)), np.max(np.abs(mc.y_shifts_els)))).astype(int) else: @@ -107,7 +78,7 @@ def main(): fname_new = caiman.save_memmap(fname_mc, base_name='memmap_', order='C', border_to_0=bord_px) else: # if no motion correction just memory map the file - fname_new = caiman.save_memmap(filename_reorder, base_name='memmap_', + fname_new = caiman.save_memmap(opts.data['fnames'], base_name='memmap_', order='C', border_to_0=0, dview=dview) # load memory mappable file @@ -115,95 +86,33 @@ def main(): images = Yr.T.reshape((T,) + dims, order='F') # Parameters for source extraction and deconvolution (CNMF-E algorithm) - p = 1 # order of the autoregressive system - K = None # upper bound on number of components per patch, in general None for 1p data - gSig = (3, 3) # gaussian width of a 2D gaussian kernel, which approximates a neuron - gSiz = (13, 13) # average diameter of a neuron, in general 4*gSig+1 Ain = None # possibility to seed with predetermined binary masks - merge_thr = .7 # merging threshold, max correlation allowed - rf = 40 # half-size of the patches in pixels. 
e.g., if rf=40, patches are 80x80 - stride_cnmf = 20 # amount of overlap between the patches in pixels - # (keep it at least large as gSiz, i.e 4 times the neuron size gSig) - tsub = 2 # downsampling factor in time for initialization, - # increase if you have memory problems - ssub = 1 # downsampling factor in space for initialization, - # increase if you have memory problems - # you can pass them here as boolean vectors - low_rank_background = None # None leaves background of each patch intact, - # True performs global low-rank approximation if gnb>0 - gnb = 0 # number of background components (rank) if positive, - # else exact ring model with following settings - # gnb= 0: Return background as b and W - # gnb=-1: Return full rank background B - # gnb<-1: Don't return background - nb_patch = 0 # number of background components (rank) per patch if gnb>0, - # else it is set automatically - min_corr = .8 # min peak value from correlation image - min_pnr = 10 # min peak to noise ration from PNR image - ssub_B = 2 # additional downsampling factor in space for background - ring_size_factor = 1.4 # radius of ring is gSiz*ring_size_factor - - opts.change_params(params_dict={'dims': dims, - 'method_init': 'corr_pnr', # use this for 1 photon - 'K': K, - 'gSig': gSig, - 'gSiz': gSiz, - 'merge_thr': merge_thr, - 'p': p, - 'tsub': tsub, - 'ssub': ssub, - 'rf': rf, - 'stride': stride_cnmf, - 'only_init': True, # set it to True to run CNMF-E - 'nb': gnb, - 'nb_patch': nb_patch, - 'method_deconvolution': 'oasis', # could use 'cvxpy' alternatively - 'low_rank_background': low_rank_background, - 'update_background_components': True, # sometimes setting to False improve the results - 'min_corr': min_corr, - 'min_pnr': min_pnr, - 'normalize_init': False, # just leave as is - 'center_psf': True, # leave as is for 1 photon - 'ssub_B': ssub_B, - 'ring_size_factor': ring_size_factor, - 'del_duplicates': True, # whether to remove duplicates from initialization + + opts.change_params(params_dict={'dims': dims, # we rework the source files 'border_pix': bord_px}) # number of pixels to not consider in the borders) # compute some summary images (correlation and peak to noise) # change swap dim if output looks weird, it is a problem with tiffile - cn_filter, pnr = caiman.summary_images.correlation_pnr(images[::1], gSig=gSig[0], swap_dim=False) + cn_filter, pnr = caiman.summary_images.correlation_pnr(images[::1], gSig=opts.init['gSig'][0], swap_dim=False) # if your images file is too long this computation will take unnecessarily # long time and consume a lot of memory. 
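    # For instance, the subsampling suggested in the next comment would look like this
    # (a sketch reusing the names above; the step of 5 is arbitrary):
    #     cn_filter, pnr = caiman.summary_images.correlation_pnr(images[::5], gSig=opts.init['gSig'][0], swap_dim=False)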
Consider changing images[::1] to # images[::5] or something similar to compute on a subset of the data # inspect the summary images and set the parameters inspect_correlation_pnr(cn_filter, pnr) - # print parameters set above, modify them if necessary based on summary images - print(f"Minimum correlation: {min_corr}") # min correlation of peak (from correlation image) - print(f"Minimum peak to noise ratio: {min_pnr}") # min peak to noise ratio + print(f"Minimum correlation: {opts.init['min_corr']}") # min correlation of peak (from correlation image) + print(f"Minimum peak to noise ratio: {opts.init['min_pnr']}") # min peak to noise ratio # Run CMNF in patches cnm = cnmf.CNMF(n_processes=n_processes, dview=dview, Ain=Ain, params=opts) cnm.fit(images) - # ALTERNATE WAY TO RUN THE PIPELINE AT ONCE (optional -- commented out) - # you can also perform the motion correction plus cnmf fitting steps - # simultaneously after defining your parameters object using - # cnm1 = cnmf.CNMF(n_processes, params=opts, dview=dview) - # cnm1.fit_file(motion_correct=True) - # Quality Control: DISCARD LOW QUALITY COMPONENTS - min_SNR = 2.5 # adaptive way to set threshold on the transient size - r_values_min = 0.85 # threshold on space consistency (if you lower more components - # will be accepted, potentially with worst quality) - cnm.params.set('quality', {'min_SNR': min_SNR, - 'rval_thr': r_values_min, - 'use_cnn': False}) cnm.estimates.evaluate_components(images, cnm.params, dview=dview) print(' ***** ') - print('Number of total components: ', len(cnm.estimates.C)) - print('Number of accepted components: ', len(cnm.estimates.idx_components)) + print(f"Number of total components: {len(cnm.estimates.C)}") + print(f"Number of accepted components: {len(cnm.estimates.idx_components)}") # Play result movies if not cfg.no_play: @@ -227,6 +136,8 @@ def main(): def handle_args(): parser = argparse.ArgumentParser(description="Demonstrate CNMFE Pipeline") + parser.add_argument("--configfile", default=os.path.join(caiman.paths.caiman_datadir(), 'demos', 'general', 'params_demo_pipeline_cnmfE.json'), help="JSON Configfile for Caiman parameters") + parser.add_argument("--no_motion_correct", action='store_true', help="Set to disable motion correction") parser.add_argument("--keep_logs", action="store_true", help="Keep temporary logfiles") parser.add_argument("--no_play", action="store_true", help="Do not display results") parser.add_argument("--cluster_backend", default="multiprocessing", help="Specify multiprocessing, ipyparallel, or single to pick an engine") diff --git a/demos/general/params_demo_pipeline_cnmfE.json b/demos/general/params_demo_pipeline_cnmfE.json new file mode 100644 index 000000000..8f68a2cea --- /dev/null +++ b/demos/general/params_demo_pipeline_cnmfE.json @@ -0,0 +1,56 @@ +{ + "data": { + "fr": 10, + "decay_time": 0.4 + }, + "init": { + "method_init": "corr_pnr", + "K": null, + "gSig": [3, 3], + "gSiz": [13, 13], + "ssub": 1, + "tsub": 2, + "nb": 0, + "min_corr": 0.8, + "min_pnr": 10, + "normalize_init": false, + "center_psf": true, + "ssub_B": 2, + "ring_size_factor": 1.4 + }, + "motion": { + "pw_rigid": false, + "max_shifts": [5, 5], + "gSig_filt": [3, 3], + "strides": [48, 48], + "overlaps": [24, 24], + "max_deviation_rigid": 3, + "border_nan": "copy" + }, + "preprocess": { + "p": 1 + }, + "temporal": { + "p": 1, + "method_deconvolution": "oasis" + }, + "spatial": { + "update_background_components": true + }, + "patch": { + "rf": 40, + "stride": 20, + "only_init": true, + "nb_patch": 0, + 
"low_rank_background": null, + "del_duplicates": true + }, + "merging": { + "merge_thr": 0.7 + }, + "quality": { + "min_SNR": 2.5, + "rval_thr": 0.85, + "use_cnn": false + } +} From 1dd6e2ae531a29a903b18feaea5510fe5d544e19 Mon Sep 17 00:00:00 2001 From: Pat Gunn Date: Fri, 12 Apr 2024 15:08:48 -0400 Subject: [PATCH 41/86] remove param temporal.memory_efficient which doesn't seem to do anything --- caiman/source_extraction/cnmf/params.py | 4 ---- caiman/source_extraction/cnmf/temporal.py | 3 --- 2 files changed, 7 deletions(-) diff --git a/caiman/source_extraction/cnmf/params.py b/caiman/source_extraction/cnmf/params.py index 595c8313d..99859d8f1 100644 --- a/caiman/source_extraction/cnmf/params.py +++ b/caiman/source_extraction/cnmf/params.py @@ -361,9 +361,6 @@ def __init__(self, fnames=None, dims=None, dxy=(1, 1), optimize_g: bool, default: False flag for optimizing time constants - memory_efficient: - (undocumented) - method_deconvolution: 'oasis'|'cvxpy'|'oasis', default: 'oasis' method for solving the constrained deconvolution problem ('oasis','cvx' or 'cvxpy') if method cvxpy, primary and secondary (if problem unfeasible for approx solution) @@ -793,7 +790,6 @@ def __init__(self, fnames=None, dims=None, dxy=(1, 1), # number of autocovariance lags to be considered for time constant estimation 'lags': 5, 'optimize_g': False, # flag for optimizing time constants - 'memory_efficient': False, # method for solving the constrained deconvolution problem ('oasis','cvx' or 'cvxpy') # if method cvxpy, primary and secondary (if problem unfeasible for approx # solution) solvers to be used with cvxpy, can be 'ECOS','SCS' or 'CVXOPT' diff --git a/caiman/source_extraction/cnmf/temporal.py b/caiman/source_extraction/cnmf/temporal.py index 549a4d01b..b94b24c4a 100644 --- a/caiman/source_extraction/cnmf/temporal.py +++ b/caiman/source_extraction/cnmf/temporal.py @@ -114,9 +114,6 @@ def update_temporal_components(Y, A, b, Cin, fin, bl=None, c1=None, g=None, sn=N You should start the cluster (install ipyparallel and then type ipcluster -n 6, where 6 is the number of processes). - memory_efficient: Bool - whether or not to optimize for memory usage (longer running times). necessary with very large datasets - kwargs: dict all parameters passed to constrained_foopsi except bl,c1,g,sn (see documentation). Some useful parameters are From 28606d44a284f112b9c5a872758bd8bb2c2387ce Mon Sep 17 00:00:00 2001 From: Pat Gunn Date: Fri, 12 Apr 2024 15:10:37 -0400 Subject: [PATCH 42/86] Params: Remove doc for temporal.memory_efficient which was documented but not present in code --- caiman/source_extraction/cnmf/temporal.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/caiman/source_extraction/cnmf/temporal.py b/caiman/source_extraction/cnmf/temporal.py index b94b24c4a..b5bdde791 100644 --- a/caiman/source_extraction/cnmf/temporal.py +++ b/caiman/source_extraction/cnmf/temporal.py @@ -284,9 +284,6 @@ def update_iteration(parrllcomp, len_parrllcomp, nb, C, S, bl, nr, You should start the cluster (install ipyparallel and then type ipcluster -n 6, where 6 is the number of processes). - memory_efficient: Bool - whether or not to optimize for memory usage (longer running times). necessary with very large datasets - **kwargs: dict all parameters passed to constrained_foopsi except bl,c1,g,sn (see documentation). 
Some useful parameters are From d607a7c0527496f919c48e37528fcc34277c82f3 Mon Sep 17 00:00:00 2001 From: Pat Gunn Date: Fri, 12 Apr 2024 16:08:36 -0400 Subject: [PATCH 43/86] CNMFParams: Remove documented but never-implemented data.mmap_C and data.mmap_F (verified they never were filled in) --- caiman/source_extraction/cnmf/params.py | 10 +--------- 1 file changed, 1 insertion(+), 9 deletions(-) diff --git a/caiman/source_extraction/cnmf/params.py b/caiman/source_extraction/cnmf/params.py index 99859d8f1..e178d6338 100644 --- a/caiman/source_extraction/cnmf/params.py +++ b/caiman/source_extraction/cnmf/params.py @@ -97,12 +97,6 @@ def __init__(self, fnames=None, dims=None, dxy=(1, 1), last_commit: str hash of last commit in the caiman repo. Pleaes do not override this. - mmap_F: list[str] - paths to F-order memory mapped files after motion correction - - mmap_C: str - path to C-order memory mapped file after motion correction - CNMFParams.patch (these control how the data is divided into patches): border_pix: int, default: 0 Number of pixels to exclude around each border. @@ -674,9 +668,7 @@ def __init__(self, fnames=None, dims=None, dxy=(1, 1), 'dxy': dxy, 'var_name_hdf5': var_name_hdf5, 'caiman_version': pkg_resources.get_distribution('caiman').version, - 'last_commit': None, - 'mmap_F': None, - 'mmap_C': None + 'last_commit': None } self.patch = { From 0c00f4cc925f8cdc6c5d8716479c9407dbcace60 Mon Sep 17 00:00:00 2001 From: Pat Gunn Date: Tue, 16 Apr 2024 10:23:09 -0400 Subject: [PATCH 44/86] Fix bug from 2019 with #030bc6d35 --- caiman/source_extraction/cnmf/initialization.py | 2 +- caiman/source_extraction/cnmf/params.py | 8 ++++---- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/caiman/source_extraction/cnmf/initialization.py b/caiman/source_extraction/cnmf/initialization.py index 4a1528cd7..cabb3c196 100644 --- a/caiman/source_extraction/cnmf/initialization.py +++ b/caiman/source_extraction/cnmf/initialization.py @@ -657,7 +657,7 @@ def graphNMF(Y_ds, nr, max_iter_snmf=500, lambda_gnmf=1, C = mdl.fit_transform(yr).T A = mdl.components_.T W = caiman.source_extraction.cnmf.utilities.fast_graph_Laplacian_patches( - [np.reshape(m, [T, d], order='F').T, [], 'heat', SC_sigma, SC_thr, + [np.reshape(m, [T, d], order='F').T, [], SC_kernel, SC_sigma, SC_thr, SC_nnn, SC_normalize, SC_use_NN]) D = scipy.sparse.spdiags(W.sum(0), 0, W.shape[0], W.shape[0]) for it in range(max_iter_snmf): diff --git a/caiman/source_extraction/cnmf/params.py b/caiman/source_extraction/cnmf/params.py index e178d6338..cee9090d2 100644 --- a/caiman/source_extraction/cnmf/params.py +++ b/caiman/source_extraction/cnmf/params.py @@ -136,7 +136,7 @@ def __init__(self, fnames=None, dims=None, dxy=(1, 1), If list, it should be a list of two elements corresponding to the height and width of patches skip_refinement: bool, default: False - Whether to skip refinement of components (deprecated?) 
+ Whether to skip refinement of components p_ssub: float, default: 2 Spatial downsampling factor @@ -151,7 +151,7 @@ def __init__(self, fnames=None, dims=None, dxy=(1, 1), check_nan: bool, default: True whether to check for NaNs - compute_g': bool, default: False + compute_g: bool, default: False whether to estimate global time constant include_noise: bool, default: False @@ -185,7 +185,7 @@ def __init__(self, fnames=None, dims=None, dxy=(1, 1), K: int, default: 30 number of components to be found (per patch or whole FOV depending on whether rf=None) - SC_kernel: {'heat', 'cos', binary'}, default: 'heat' + SC_kernel: {'heat', 'cos', 'binary'}, default: 'heat' kernel for graph affinity matrix SC_sigma: float, default: 1 @@ -705,7 +705,7 @@ def __init__(self, fnames=None, dims=None, dxy=(1, 1), } self.init = { - 'K': k, # number of components, + 'K': k, # number of components, 'SC_kernel': 'heat', # kernel for graph affinity matrix 'SC_sigma' : 1, # std for SC kernel 'SC_thr': 0, # threshold for affinity matrix From bc95a7f8a910fdfde11d3c0d9eb76db4cfddd2c2 Mon Sep 17 00:00:00 2001 From: Pat Gunn Date: Tue, 16 Apr 2024 10:32:16 -0400 Subject: [PATCH 45/86] CNMF: "is True" -> "" --- caiman/source_extraction/cnmf/initialization.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/caiman/source_extraction/cnmf/initialization.py b/caiman/source_extraction/cnmf/initialization.py index cabb3c196..172f4e57b 100644 --- a/caiman/source_extraction/cnmf/initialization.py +++ b/caiman/source_extraction/cnmf/initialization.py @@ -288,7 +288,7 @@ def initialize_components(Y, K=30, gSig=[5, 5], gSiz=None, ssub=1, tsub=1, nIter gSig = np.asarray(gSig, dtype=float) / ssub gSiz = np.round(np.asarray(gSiz) / ssub).astype(int) - if normalize_init is True: + if normalize_init: logging.info('Variance Normalization') if img is None: img = np.mean(Y, axis=-1) @@ -403,7 +403,7 @@ def initialize_components(Y, K=30, gSig=[5, 5], gSiz=None, ssub=1, tsub=1, nIter Cin = np.empty((K, T), dtype=np.float32) center = [] - if normalize_init is True: + if normalize_init: if Ain.size > 0: Ain = Ain * np.reshape(img, (np.prod(d), -1), order='F') if sparse_b: From dbd8e50ba284b0ce9391c5445eb7f37d73e22142 Mon Sep 17 00:00:00 2001 From: Pat Gunn Date: Tue, 16 Apr 2024 10:44:56 -0400 Subject: [PATCH 46/86] Remove spatial.block_size_spat This tuned memory usage when it was added in #c55b95 but later refactors removed its use --- caiman/source_extraction/cnmf/cnmf.py | 5 ++--- caiman/source_extraction/cnmf/params.py | 5 +---- caiman/source_extraction/cnmf/spatial.py | 2 +- caiman/tests/comparison_humans.py | 1 - caiman/tests/test_toydata.py | 1 - 5 files changed, 4 insertions(+), 10 deletions(-) diff --git a/caiman/source_extraction/cnmf/cnmf.py b/caiman/source_extraction/cnmf/cnmf.py index 28f8718b5..28893af91 100644 --- a/caiman/source_extraction/cnmf/cnmf.py +++ b/caiman/source_extraction/cnmf/cnmf.py @@ -71,7 +71,7 @@ def __init__(self, n_processes, k=5, gSig=[4, 4], gSiz=None, merge_thresh=0.8, p ssub=2, tsub=2, p_ssub=1, p_tsub=1, method_init='greedy_roi', alpha_snmf=0.5, rf=None, stride=None, memory_fact=1, gnb=1, nb_patch=1, only_init_patch=False, method_deconvolution='oasis', n_pixels_per_process=4000, block_size_temp=5000, num_blocks_per_run_temp=20, - block_size_spat=5000, num_blocks_per_run_spat=20, + num_blocks_per_run_spat=20, check_nan=True, skip_refinement=False, normalize_init=True, options_local_NMF=None, minibatch_shape=100, minibatch_suff_stat=3, update_num_comps=True, rval_thr=0.9, 
thresh_fitness_delta=-20, @@ -272,7 +272,7 @@ def __init__(self, n_processes, k=5, gSig=[4, 4], gSiz=None, merge_thresh=0.8, p gnb=gnb, normalize_init=normalize_init, options_local_NMF=options_local_NMF, ring_size_factor=ring_size_factor, rolling_length=rolling_length, rolling_sum=rolling_sum, ssub=ssub, ssub_B=ssub_B, tsub=tsub, - block_size_spat=block_size_spat, num_blocks_per_run_spat=num_blocks_per_run_spat, + num_blocks_per_run_spat=num_blocks_per_run_spat, block_size_temp=block_size_temp, num_blocks_per_run_temp=num_blocks_per_run_temp, update_background_components=update_background_components, method_deconvolution=method_deconvolution, p=p, s_min=s_min, @@ -479,7 +479,6 @@ def fit(self, images, indices=(slice(None), slice(None))): self.params.set('spatial', {'n_pixels_per_process': self.params.get('preprocess', 'n_pixels_per_process')}) logging.info('using ' + str(self.params.get('preprocess', 'n_pixels_per_process')) + ' pixels per process') - logging.info('using ' + str(self.params.get('spatial', 'block_size_spat')) + ' block_size_spat') logging.info('using ' + str(self.params.get('temporal', 'block_size_temp')) + ' block_size_temp') if self.params.get('patch', 'rf') is None: # no patches diff --git a/caiman/source_extraction/cnmf/params.py b/caiman/source_extraction/cnmf/params.py index cee9090d2..8e1817256 100644 --- a/caiman/source_extraction/cnmf/params.py +++ b/caiman/source_extraction/cnmf/params.py @@ -29,7 +29,7 @@ def __init__(self, fnames=None, dims=None, dxy=(1, 1), min_pnr=20, gnb=1, normalize_init=True, options_local_NMF=None, ring_size_factor=1.5, rolling_length=100, rolling_sum=True, ssub=2, ssub_B=2, tsub=2, - block_size_spat=5000, num_blocks_per_run_spat=20, + num_blocks_per_run_spat=20, block_size_temp=5000, num_blocks_per_run_temp=20, update_background_components=True, method_deconvolution='oasis', p=2, s_min=None, @@ -282,8 +282,6 @@ def __init__(self, fnames=None, dims=None, dxy=(1, 1), temporal downsampling factor CNMFParams.spatial (these control how the algorithms handle spatial components): - block_size_spat : int, default: 5000 - Number of pixels to process at the same time for dot product. 
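A minimal sketch, assuming the post-removal CNMFParams API: with block_size_spat gone, spatial memory use is steered only through the knobs that remain; the values below are illustrative, not recommendations.

from caiman.source_extraction.cnmf import params

# Minimal sketch: tune the surviving spatial memory knobs.
opts = params.CNMFParams()
opts.change_params({'spatial': {'num_blocks_per_run_spat': 10,
                                'n_pixels_per_process': 4000}})
print(opts.spatial['num_blocks_per_run_spat'])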
Reduce if you face memory problems dist: float, default: 3 expansion factor of ellipse @@ -745,7 +743,6 @@ def __init__(self, fnames=None, dims=None, dxy=(1, 1), } self.spatial = { - 'block_size_spat': block_size_spat, # number of pixels to parallelize residual computation ** DECREASE IF MEMORY ISSUES 'dist': 3, # expansion factor of ellipse 'expandCore': iterate_structure(generate_binary_structure(2, 1), 2).astype(int), # Flag to extract connected components (might want to turn to False for dendritic imaging) diff --git a/caiman/source_extraction/cnmf/spatial.py b/caiman/source_extraction/cnmf/spatial.py index 38451ea87..f8b1a833d 100644 --- a/caiman/source_extraction/cnmf/spatial.py +++ b/caiman/source_extraction/cnmf/spatial.py @@ -40,7 +40,7 @@ def update_spatial_components(Y, C=None, f=None, A_in=None, sn=None, dims=None, se=np.ones((3, 3), dtype=int), ss=np.ones((3, 3), dtype=int), nb=1, method_ls='lasso_lars', update_background_components=True, - low_rank_background=True, block_size_spat=1000, + low_rank_background=True, num_blocks_per_run_spat=20): """update spatial footprints and background through Basis Pursuit Denoising diff --git a/caiman/tests/comparison_humans.py b/caiman/tests/comparison_humans.py index ea12f1fd4..5b955e26b 100644 --- a/caiman/tests/comparison_humans.py +++ b/caiman/tests/comparison_humans.py @@ -347,7 +347,6 @@ 'p': global_params['p'], }, 'spatial': { - 'block_size_spat': block_size, 'nb': global_params['gnb'], 'num_blocks_per_run_spat': num_blocks_per_run, 'n_pixels_per_process': n_pixels_per_process, diff --git a/caiman/tests/test_toydata.py b/caiman/tests/test_toydata.py index f55316f83..2a0f2a8e0 100644 --- a/caiman/tests/test_toydata.py +++ b/caiman/tests/test_toydata.py @@ -44,7 +44,6 @@ def pipeline(D): gSig=[2, 2, 2][:D], p=1, n_pixels_per_process=np.prod(dims), - block_size_spat=np.prod(dims), block_size_temp=np.prod(dims)) params.spatial['thr_method'] = 'nrg' params.spatial['extract_cc'] = False From 378b7324bdfd366edad3d5545c2910fcdae4d160 Mon Sep 17 00:00:00 2001 From: Pat Gunn Date: Tue, 16 Apr 2024 11:11:58 -0400 Subject: [PATCH 47/86] Remove merging.max_merge_area This once was functional, with #52a0c4b1, but became vestigial with a later refactor of cnmf.merging.merge_components() --- caiman/source_extraction/cnmf/cnmf.py | 15 +++++---------- caiman/source_extraction/cnmf/estimates.py | 5 ++--- caiman/source_extraction/cnmf/merging.py | 6 +----- caiman/source_extraction/cnmf/params.py | 7 +------ 4 files changed, 9 insertions(+), 24 deletions(-) diff --git a/caiman/source_extraction/cnmf/cnmf.py b/caiman/source_extraction/cnmf/cnmf.py index 28893af91..b58e2e773 100644 --- a/caiman/source_extraction/cnmf/cnmf.py +++ b/caiman/source_extraction/cnmf/cnmf.py @@ -86,7 +86,7 @@ def __init__(self, n_processes, k=5, gSig=[4, 4], gSiz=None, merge_thresh=0.8, p max_num_added=3, min_num_trial=2, thresh_CNN_noisy=0.5, fr=30, decay_time=0.4, min_SNR=2.5, ssub_B=2, init_iter=2, sniper_mode=False, use_peak_max=False, test_both=False, - expected_comps=500, max_merge_area=None, params=None): + expected_comps=500, params=None): """ Constructor of the CNMF method @@ -247,8 +247,6 @@ def __init__(self, n_processes, k=5, gSig=[4, 4], gSiz=None, merge_thresh=0.8, p init_iter: int, optional number of iterations for 1-photon imaging initialization - max_merge_area: int, optional - maximum area (in pixels) of merged components, used to determine whether to merge components during fitting process """ self.dview = dview @@ -284,9 +282,7 @@ def __init__(self, 
n_processes, k=5, gSig=[4, 4], gSiz=None, merge_thresh=0.8, p n_refit=n_refit, num_times_comp_updated=num_times_comp_updated, simultaneously=simultaneously, sniper_mode=sniper_mode, test_both=test_both, thresh_CNN_noisy=thresh_CNN_noisy, thresh_fitness_delta=thresh_fitness_delta, thresh_fitness_raw=thresh_fitness_raw, thresh_overlap=thresh_overlap, - update_num_comps=update_num_comps, use_dense=use_dense, use_peak_max=use_peak_max, alpha_snmf=alpha_snmf, - max_merge_area=max_merge_area - ) + update_num_comps=update_num_comps, use_dense=use_dense, use_peak_max=use_peak_max, alpha_snmf=alpha_snmf) else: self.params = params params.set('patch', {'n_processes': n_processes}) @@ -539,7 +535,7 @@ def fit(self, images, indices=(slice(None), slice(None))): logging.info('refinement...') if self.params.get('merging', 'do_merge'): logging.info('merging components ...') - self.merge_comps(Yr, mx=50, fast_merge=True, max_merge_area=self.params.get('merging', 'max_merge_area')) + self.merge_comps(Yr, mx=50, fast_merge=True) logging.info('Updating spatial ...') @@ -921,7 +917,7 @@ def update_spatial(self, Y, use_init=True, **kwargs): return self - def merge_comps(self, Y, mx=50, fast_merge=True, max_merge_area=None): + def merge_comps(self, Y, mx=50, fast_merge=True): """merges components """ self.estimates.A, self.estimates.C, self.estimates.nr, self.estimates.merged_ROIs, self.estimates.S, \ @@ -932,8 +928,7 @@ def merge_comps(self, Y, mx=50, fast_merge=True, max_merge_area=None): self.params.get_group('spatial'), dview=self.dview, bl=self.estimates.bl, c1=self.estimates.c1, sn=self.estimates.neurons_sn, g=self.estimates.g, thr=self.params.get('merging', 'merge_thr'), mx=mx, - fast_merge=fast_merge, merge_parallel=self.params.get('merging', 'merge_parallel'), - max_merge_area=max_merge_area) + fast_merge=fast_merge, merge_parallel=self.params.get('merging', 'merge_parallel')) return self diff --git a/caiman/source_extraction/cnmf/estimates.py b/caiman/source_extraction/cnmf/estimates.py index c5889eb6e..1959a2e05 100644 --- a/caiman/source_extraction/cnmf/estimates.py +++ b/caiman/source_extraction/cnmf/estimates.py @@ -1235,7 +1235,7 @@ def deconvolve(self, params, dview=None, dff_flag=False): self.S_dff = np.stack([results[1][i] for i in order]) def merge_components(self, Y, params, mx=50, fast_merge=True, - dview=None, max_merge_area=None): + dview=None): """merges components """ # FIXME This method shares its name with a function elsewhere in the codebase (which it wraps) @@ -1247,8 +1247,7 @@ def merge_components(self, Y, params, mx=50, fast_merge=True, params.get_group('spatial'), dview=dview, bl=self.bl, c1=self.c1, sn=self.neurons_sn, g=self.g, thr=params.get('merging', 'merge_thr'), mx=mx, - fast_merge=fast_merge, merge_parallel=params.get('merging', 'merge_parallel'), - max_merge_area=max_merge_area) + fast_merge=fast_merge, merge_parallel=params.get('merging', 'merge_parallel')) def manual_merge(self, components, params): ''' merge a given list of components. 
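A minimal sketch of the simplified merge call after this removal, assuming a hypothetical fitted CNMF object cnm and memory-mapped data Yr from an earlier fit: merging is now governed by merge_thr together with the mx and fast_merge arguments, with no area cap anywhere in the chain.

# Minimal sketch (cnm and Yr are assumed to exist from an earlier fit):
cnm.params.set('merging', {'merge_thr': 0.85})
cnm.merge_comps(Yr, mx=50, fast_merge=True)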
The indices diff --git a/caiman/source_extraction/cnmf/merging.py b/caiman/source_extraction/cnmf/merging.py index baabb9723..5e6452232 100644 --- a/caiman/source_extraction/cnmf/merging.py +++ b/caiman/source_extraction/cnmf/merging.py @@ -19,7 +19,7 @@ def merge_components(Y, A, b, C, R, f, S, sn_pix, temporal_params, spatial_params, dview=None, thr=0.85, fast_merge=True, mx=1000, bl=None, c1=None, sn=None, g=None, - merge_parallel=False, max_merge_area=None) -> tuple[scipy.sparse.csc_matrix, np.ndarray, int, list, np.ndarray, float, float, float, float, list, np.ndarray]: + merge_parallel=False) -> tuple[scipy.sparse.csc_matrix, np.ndarray, int, list, np.ndarray, float, float, float, float, list, np.ndarray]: """ Merging of spatially overlapping components that have highly correlated temporal activity @@ -83,10 +83,6 @@ def merge_components(Y, A, b, C, R, f, S, sn_pix, temporal_params, merge_parallel: bool perform merging in parallel - max_merge_area: int - maximum area (in pixels) of merged components, - used to determine whether to merge - Returns: A: sparse matrix matrix of merged spatial components (d x K) diff --git a/caiman/source_extraction/cnmf/params.py b/caiman/source_extraction/cnmf/params.py index 8e1817256..5f8536cc3 100644 --- a/caiman/source_extraction/cnmf/params.py +++ b/caiman/source_extraction/cnmf/params.py @@ -282,7 +282,6 @@ def __init__(self, fnames=None, dims=None, dxy=(1, 1), temporal downsampling factor CNMFParams.spatial (these control how the algorithms handle spatial components): - dist: float, default: 3 expansion factor of ellipse @@ -391,9 +390,6 @@ def __init__(self, fnames=None, dims=None, dxy=(1, 1), merge_parallel: bool, default: False Perform merging in parallel - max_merge_area: int or None, default: None - maximum area (in pixels) of merged components, used to determine whether to merge components during fitting process - CNMFParams.quality (these control how quality of traces are evaluated): SNR_lowest: float, default: 0.5 minimum required trace SNR. 
Traces with SNR below this will get rejected @@ -796,8 +792,7 @@ def __init__(self, fnames=None, dims=None, dxy=(1, 1), self.merging = { 'do_merge': do_merge, 'merge_thr': merge_thresh, - 'merge_parallel': False, - 'max_merge_area': max_merge_area + 'merge_parallel': False } self.quality = { From f8f924bbb52a04718a752ba0bb88c168fe90c193 Mon Sep 17 00:00:00 2001 From: Pat Gunn Date: Tue, 16 Apr 2024 11:26:12 -0400 Subject: [PATCH 48/86] demo_behavior: more rebase nonsense --- demos/general/demo_behavior.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/demos/general/demo_behavior.py b/demos/general/demo_behavior.py index 9547d4e39..afeca70e7 100755 --- a/demos/general/demo_behavior.py +++ b/demos/general/demo_behavior.py @@ -56,9 +56,16 @@ def main(): # TODO: todocument m = cm._load_behavior(fname[0]) +<<<<<<< HEAD #%% load, rotate and eliminate useless pixels m = m.transpose([0, 2, 1]) m = m[:, 150:, :] +======= + plt.ion() + + # TODO: todocument + m = cm._load_behavior(fnames[0]) +>>>>>>> 7f972213 (demo_behavior: pull some changes from dev) #%% visualize movie m.play() From 8f296f62fed362760f790c8791695d7869f7cb3d Mon Sep 17 00:00:00 2001 From: Pat Gunn Date: Tue, 16 Apr 2024 11:26:57 -0400 Subject: [PATCH 49/86] demo_behavior: rebase nonsense --- demos/general/demo_behavior.py | 124 --------------------------------- 1 file changed, 124 deletions(-) delete mode 100755 demos/general/demo_behavior.py diff --git a/demos/general/demo_behavior.py b/demos/general/demo_behavior.py deleted file mode 100755 index afeca70e7..000000000 --- a/demos/general/demo_behavior.py +++ /dev/null @@ -1,124 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- - -""" -Created on Sun Aug 6 11:43:39 2017 - -@author: agiovann -""" - -import cv2 -from IPython import get_ipython -import logging -import numpy as np -import matplotlib.pyplot as plt - -try: - cv2.setNumThreads(0) -except: - pass - -try: - if __IPYTHON__: - print("Detected iPython") - ipython = get_ipython() - ipython.run_line_magic('load_ext', 'autoreload') - ipython.run_line_magic('autoreload', '2') - ipython.run_line_magic('matplotlib', 'qt') -except NameError: - pass - -import caiman as cm -from caiman.behavior import behavior -from caiman.utils.utils import download_demo - -#%% -# Set up the logger; change this if you like. 
-# You can log to a file using the filename parameter, or make the output more or less -# verbose by setting level to logging.DEBUG, logging.INFO, logging.WARNING, or logging.ERROR - -logging.basicConfig(format= - "%(relativeCreated)12d [%(filename)s:%(funcName)20s():%(lineno)s] [%(process)d] %(message)s", - # filename="/tmp/caiman.log", - level=logging.WARNING) - -#%% -def main(): - pass # For compatibility between running under IDE and the CLI - - #%% - plt.ion() - - fname = [u'demo_behavior.h5'] - if fname[0] in ['demo_behavior.h5']: - # TODO: todocument - fname = [download_demo(fname[0])] - # TODO: todocument - m = cm._load_behavior(fname[0]) - -<<<<<<< HEAD - #%% load, rotate and eliminate useless pixels - m = m.transpose([0, 2, 1]) - m = m[:, 150:, :] -======= - plt.ion() - - # TODO: todocument - m = cm._load_behavior(fnames[0]) ->>>>>>> 7f972213 (demo_behavior: pull some changes from dev) - - #%% visualize movie - m.play() - - #%% select interesting portion of the FOV (draw a polygon on the figure that pops up, when done press enter) - # TODO: Put the message below into the image - print("Please draw a polygon delimiting the ROI on the image that will be displayed after the image; press enter when done") - mask = np.array(behavior.select_roi(np.median(m[::100], 0), 1)[0], np.float32) - - #%% - n_components = 4 # number of movement looked for - resize_fact = 0.5 # for computational efficiency movies are downsampled - # number of standard deviations above mean for the magnitude that are considered enough to measure the angle in polar coordinates - num_std_mag_for_angle = .6 - only_magnitude = False # if onlu interested in factorizing over the magnitude - method_factorization = 'dict_learn' # could also use nmf - # number of iterations for the dictionary learning algorithm (Marial et al, 2010) - max_iter_DL = -30 - - spatial_filter_, time_trace_, of_or = cm.behavior.behavior.extract_motor_components_OF(m, n_components, mask=mask, - resize_fact=resize_fact, only_magnitude=only_magnitude, verbose=True, method_factorization='dict_learn', max_iter_DL=max_iter_DL) - - #%% - mags, dircts, dircts_thresh, spatial_masks_thrs = cm.behavior.behavior.extract_magnitude_and_angle_from_OF( - spatial_filter_, time_trace_, of_or, num_std_mag_for_angle=num_std_mag_for_angle, sav_filter_size=3, only_magnitude=only_magnitude) - - #%% - idd = 0 - axlin = plt.subplot(n_components, 2, 2) - for mag, dirct, spatial_filter in zip(mags, dircts_thresh, spatial_filter_): - plt.subplot(n_components, 2, 1 + idd * 2) - min_x, min_y = np.min(np.where(mask), 1) - - spfl = spatial_filter - spfl = cm.movie(spfl[None, :, :]).resize( - 1 / resize_fact, 1 / resize_fact, 1).squeeze() - max_x, max_y = np.add((min_x, min_y), np.shape(spfl)) - - mask[min_x:max_x, min_y:max_y] = spfl - mask[mask < np.nanpercentile(spfl, 70)] = np.nan - plt.imshow(m[0], cmap='gray') - plt.imshow(mask, alpha=.5) - plt.axis('off') - - axelin = plt.subplot(n_components, 2, 2 + idd * 2, sharex=axlin) - plt.plot(mag / 10, 'k') - dirct[mag < 0.5 * np.std(mag)] = np.nan - plt.plot(dirct, 'r-', linewidth=2) - - idd += 1 - -#%% -# This is to mask the differences between running this demo in an IDE -# versus from the CLI -if __name__ == "__main__": - main() \ No newline at end of file From d87162f711a4dc59b60922c848f472b7b253d84b Mon Sep 17 00:00:00 2001 From: Pat Gunn Date: Tue, 16 Apr 2024 11:27:22 -0400 Subject: [PATCH 50/86] After rebase, add demo_behavior back in --- demos/general/demo_behavior.py | 111 +++++++++++++++++++++++++++++++++ 1 file 
changed, 111 insertions(+)
 create mode 100755 demos/general/demo_behavior.py

diff --git a/demos/general/demo_behavior.py b/demos/general/demo_behavior.py
new file mode 100755
index 000000000..bb835f61e
--- /dev/null
+++ b/demos/general/demo_behavior.py
@@ -0,0 +1,111 @@
+#!/usr/bin/env python
+
+"""
+Demonstrate Caiman functions relating to behavioral experiments and optical flow
+
+This demo requires a GUI; it does not make sense to run it noninteractively.
+"""
+
+
+import argparse
+import cv2
+import logging
+import numpy as np
+import matplotlib.pyplot as plt
+
+try:
+    cv2.setNumThreads(0)
+except:
+    pass
+
+import caiman as cm
+from caiman.behavior import behavior
+from caiman.utils.utils import download_demo
+
+
+def main():
+    cfg = handle_args()
+
+    if cfg.logfile:
+        logging.basicConfig(format=
+            "[%(filename)s:%(funcName)20s():%(lineno)s] %(message)s",
+            level=logging.INFO,
+            filename=cfg.logfile)
+        # You can make the output more or less verbose by setting level to logging.DEBUG, logging.INFO, logging.WARNING, or logging.ERROR
+    else:
+        logging.basicConfig(format=
+            "[%(filename)s:%(funcName)20s():%(lineno)s] %(message)s",
+            level=logging.INFO)
+
+    if cfg.input is None:
+        # If no input is specified, use sample data, downloading if necessary
+        fnames = [download_demo('demo_behavior.h5')]
+    else:
+        fnames = cfg.input
+    # If you prefer to hardcode filenames, you could do something like this:
+    # fnames = ["/path/to/myfile1.avi", "/path/to/myfile2.avi"]
+
+    plt.ion()
+
+    # TODO: todocument
+    m = cm._load_behavior(fnames[0])
+
+    # load, rotate and eliminate useless pixels
+    m = m.transpose([0, 2, 1]) # XXX Does this really work outside this dataset?
+    m = m[:, 150:, :] # TODO adopt some syntax for clipping, or make this optional and tell the user to clip before running
+
+    # visualize movie
+    m.play()
+
+    # select interesting portion of the FOV (draw a polygon on the figure that pops up, when done press enter)
+    print("Please draw a polygon delimiting the ROI on the image that will be displayed after the image; press enter when done")
+    mask = np.array(behavior.select_roi(np.median(m[::100], 0), 1)[0], np.float32)
+
+    n_components = 4 # number of movements looked for
+    resize_fact = 0.5 # for computational efficiency movies are downsampled
+    # number of standard deviations above mean for the magnitude that are considered enough to measure the angle in polar coordinates
+    num_std_mag_for_angle = .6
+    only_magnitude = False # if only interested in factorizing over the magnitude
+    method_factorization = 'dict_learn' # could also use nmf
+    # number of iterations for the dictionary learning algorithm (Mairal et al., 2010)
+    max_iter_DL = -30
+
+    spatial_filter_, time_trace_, of_or = cm.behavior.behavior.extract_motor_components_OF(m, n_components, mask=mask,
+        resize_fact=resize_fact, only_magnitude=only_magnitude, verbose=True, method_factorization='dict_learn', max_iter_DL=max_iter_DL)
+
+
+    mags, dircts, dircts_thresh, spatial_masks_thrs = cm.behavior.behavior.extract_magnitude_and_angle_from_OF(
+        spatial_filter_, time_trace_, of_or, num_std_mag_for_angle=num_std_mag_for_angle, sav_filter_size=3, only_magnitude=only_magnitude)
+
+    idd = 0
+    axlin = plt.subplot(n_components, 2, 2)
+    for mag, dirct, spatial_filter in zip(mags, dircts_thresh, spatial_filter_):
+        plt.subplot(n_components, 2, 1 + idd * 2)
+        min_x, min_y = np.min(np.where(mask), 1)
+
+        spfl = spatial_filter
+        spfl = cm.movie(spfl[None, :, :]).resize(
+            1 / resize_fact, 1 / resize_fact, 1).squeeze()
+        max_x, max_y = np.add((min_x, min_y), np.shape(spfl))
+
+        mask[min_x:max_x, min_y:max_y] = spfl
+        mask[mask < np.nanpercentile(spfl, 70)] = np.nan
+        plt.imshow(m[0], cmap='gray')
+        plt.imshow(mask, alpha=.5)
+        plt.axis('off')
+
+        axelin = plt.subplot(n_components, 2, 2 + idd * 2, sharex=axlin)
+        plt.plot(mag / 10, 'k')
+        dirct[mag < 0.5 * np.std(mag)] = np.nan
+        plt.plot(dirct, 'r-', linewidth=2)
+
+        idd += 1
+
+def handle_args():
+    parser = argparse.ArgumentParser(description="Demonstrate behavioural/optic flow functions")
+    parser.add_argument("--input", action="append", help="File(s) to work on, provide multiple times for more files")
+    parser.add_argument("--logfile", help="If specified, log to the named file")
+    return parser.parse_args()
+
+########
+main()
\ No newline at end of file

From 9c36719c31090979e33d0b06ad25f75d33647189 Mon Sep 17 00:00:00 2001
From: Pat Gunn
Date: Thu, 18 Apr 2024 11:53:12 -0400
Subject: [PATCH 51/86] CLI demo refactor: forgot to import os

---
 caiman/source_extraction/cnmf/params.py | 2 +-
 demos/general/demo_pipeline_cnmfE.py    | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/caiman/source_extraction/cnmf/params.py b/caiman/source_extraction/cnmf/params.py
index 5f8536cc3..bba3df291 100644
--- a/caiman/source_extraction/cnmf/params.py
+++ b/caiman/source_extraction/cnmf/params.py
@@ -804,7 +804,7 @@ def __init__(self, fnames=None, dims=None, dxy=(1, 1),
             'rval_lowest': -1,  # minimum accepted space correlation
             'rval_thr': rval_thr,  # space correlation threshold
             'use_cnn': True,  # use CNN based classifier
-            'use_ecc': False,  # flag for eccentricity based filtering
+            'use_ecc': False,  # flag for eccentricity based filtering (2D only)
             'max_ecc': 3
         }

diff --git a/demos/general/demo_pipeline_cnmfE.py b/demos/general/demo_pipeline_cnmfE.py
index 871d48c41..d1f2ba37f 100755
--- a/demos/general/demo_pipeline_cnmfE.py
+++ b/demos/general/demo_pipeline_cnmfE.py
@@ -17,7 +17,7 @@
 import logging
 import matplotlib.pyplot as plt
 import numpy as np
-
+import os
 import caiman
 from caiman.motion_correction import MotionCorrect

From 126ca4ace5cc6364449f026b901632659a389735 Mon Sep 17 00:00:00 2001
From: Pat Gunn
Date: Thu, 18 Apr 2024 13:58:55 -0400
Subject: [PATCH 52/86] CLI demos refactor: demo_pipeline_cnmfE: Fix a mistake in move to params

---
 demos/general/demo_pipeline_cnmfE.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/demos/general/demo_pipeline_cnmfE.py b/demos/general/demo_pipeline_cnmfE.py
index d1f2ba37f..3b3d102bf 100755
--- a/demos/general/demo_pipeline_cnmfE.py
+++ b/demos/general/demo_pipeline_cnmfE.py
@@ -74,7 +74,7 @@ def main():
         plt.xlabel('frames')
         plt.ylabel('pixels')

-        bord_px = 0 if border_nan == 'copy' else bord_px
+        bord_px = 0 if opts.motion['border_nan'] == 'copy' else bord_px
         fname_new = caiman.save_memmap(fname_mc, base_name='memmap_', order='C',
                                        border_to_0=bord_px)
     else:  # if no motion correction just memory map the file

From b6077691aa31fc8b731ff0af991b449a008d5fcb Mon Sep 17 00:00:00 2001
From: Pat Gunn
Date: Thu, 18 Apr 2024 13:59:47 -0400
Subject: [PATCH 53/86] Fix paths for more temporary files over a run to land in caiman_data/temp/

---
 caiman/mmapping.py          | 4 ++--
 caiman/motion_correction.py | 9 ++++++---
 2 files changed, 8 insertions(+), 5 deletions(-)

diff --git a/caiman/mmapping.py b/caiman/mmapping.py
index 65dd10cf4..ce4b040e8 100644
--- a/caiman/mmapping.py
+++ b/caiman/mmapping.py
@@ -405,7 +405,7 @@ def save_memmap(filenames:list[str],
         recompute_each_memmap = True

-    if recompute_each_memmap or (remove_init>0) or (idx_xy is not
None)\ + if recompute_each_memmap or (remove_init > 0) or (idx_xy is not None)\ or (xy_shifts is not None) or (add_to_movie != 0) or (border_to_0>0)\ or slices is not None: @@ -527,7 +527,7 @@ def save_memmap(filenames:list[str], sys.stdout.flush() Ttot = Ttot + T - fname_new = caiman.paths.fn_relocated(fname_tot + f'_frames_{Ttot}.mmap') + fname_new = os.path.join(caiman.paths.get_tempdir(), caiman.paths.fn_relocated(f'{fname_tot}_frames_{Ttot}.mmap')) try: # need to explicitly remove destination on windows os.unlink(fname_new) diff --git a/caiman/motion_correction.py b/caiman/motion_correction.py index a819fff95..d2e9b6bef 100644 --- a/caiman/motion_correction.py +++ b/caiman/motion_correction.py @@ -167,9 +167,12 @@ def __init__(self, fname, min_mov=None, dview=None, max_shifts=(6, 6), niter_rig """ if 'ndarray' in str(type(fname)) or isinstance(fname, caiman.base.movies.movie): - logging.info('Creating file for motion correction "tmp_mov_mot_corr.hdf5"') - caiman.movie(fname).save('tmp_mov_mot_corr.hdf5') - fname = ['tmp_mov_mot_corr.hdf5'] + mc_tempfile = os.path.join(caiman.paths.get_tempdir(), 'tmp_mov_mot_corr.hdf5') + if os.path.isfile(mc_tempfile): + os.remove(mc_tempfile) # Eventually get_tempdir() will keep jobs separate and make this safer + logging.info(f"Creating file for motion correction: {mc_tempfile}") + caiman.movie(fname).save(mc_tempfile) + fname = [mc_tempfile] if not isinstance(fname, list): fname = [fname] From 97451bac06990fc3447e320dd6d34168982638ae Mon Sep 17 00:00:00 2001 From: Pat Gunn Date: Fri, 19 Apr 2024 15:17:14 -0400 Subject: [PATCH 54/86] Adjust gating, path handling to try harder not to drop temporary files anywhere but caiman_data/temp/ --- caiman/motion_correction.py | 9 ++------- caiman/paths.py | 11 ++++++----- caiman/source_extraction/cnmf/online_cnmf.py | 6 ++++-- demos/general/demo_pipeline_NWB.py | 4 ++-- 4 files changed, 14 insertions(+), 16 deletions(-) diff --git a/caiman/motion_correction.py b/caiman/motion_correction.py index d2e9b6bef..04283c527 100644 --- a/caiman/motion_correction.py +++ b/caiman/motion_correction.py @@ -167,9 +167,9 @@ def __init__(self, fname, min_mov=None, dview=None, max_shifts=(6, 6), niter_rig """ if 'ndarray' in str(type(fname)) or isinstance(fname, caiman.base.movies.movie): - mc_tempfile = os.path.join(caiman.paths.get_tempdir(), 'tmp_mov_mot_corr.hdf5') + mc_tempfile = caiman.paths.fn_relocated('tmp_mov_mot_corr.hdf5') if os.path.isfile(mc_tempfile): - os.remove(mc_tempfile) # Eventually get_tempdir() will keep jobs separate and make this safer + os.remove(mc_tempfile) logging.info(f"Creating file for motion correction: {mc_tempfile}") caiman.movie(fname).save(mc_tempfile) fname = [mc_tempfile] @@ -3123,12 +3123,7 @@ def motion_correction_piecewise(fname, splits, strides, overlaps, add_to_movie=0 if base_name is None: base_name = os.path.splitext(os.path.split(fname)[1])[0] base_name = caiman.paths.fn_relocated(base_name) - fname_tot:Optional[str] = caiman.paths.memmap_frames_filename(base_name, dims, T, order) - if isinstance(fname, tuple): - fname_tot = os.path.join(os.path.split(fname[0])[0], fname_tot) - else: - fname_tot = os.path.join(os.path.split(fname)[0], fname_tot) np.memmap(fname_tot, mode='w+', dtype=np.float32, shape=caiman.mmapping.prepare_shape(shape_mov), order=order) diff --git a/caiman/paths.py b/caiman/paths.py index ae5edbbe7..05abfb336 100644 --- a/caiman/paths.py +++ b/caiman/paths.py @@ -45,7 +45,7 @@ def get_tempdir() -> str: os.makedirs(temp_under_data) return temp_under_data -def 
fn_relocated(fn:str) -> str: +def fn_relocated(fn:str, force_temp:bool=False) -> str: """ If the provided filename does not contain any path elements, this returns what would be its absolute pathname as located in get_tempdir(). Otherwise it just returns what it is passed. @@ -53,10 +53,10 @@ def fn_relocated(fn:str) -> str: but if all they think about is filenames, they go under CaImAn's notion of its temporary dir. This is under the principle of "sensible defaults, but users can override them". """ - if not 'CAIMAN_NEW_TEMPFILE' in os.environ: # XXX We will ungate this in a future version of caiman - return fn - if str(os.path.basename(fn)) == str(fn): # No path stuff + if os.path.split(fn)[0] == '': # No path stuff return os.path.join(get_tempdir(), fn) + elif force_temp: + return os.path.join(get_tempdir(), os.path.split(fn)[1]) else: return fn @@ -124,4 +124,5 @@ def generate_fname_tot(base_name:str, dims:list[int], order:str) -> str: d1, d2, d3 = dims[0], dims[1], dims[2] ret = '_'.join([base_name, 'd1', str(d1), 'd2', str(d2), 'd3', str(d3), 'order', order]) ret = re.sub(r'(_)+', '_', ret) # Turn repeated underscores into just one - return ret + return fn_relocated(ret, force_temp=True) + diff --git a/caiman/source_extraction/cnmf/online_cnmf.py b/caiman/source_extraction/cnmf/online_cnmf.py index fefb1f100..2a1aede6c 100644 --- a/caiman/source_extraction/cnmf/online_cnmf.py +++ b/caiman/source_extraction/cnmf/online_cnmf.py @@ -20,6 +20,7 @@ from math import sqrt from multiprocessing import cpu_count import numpy as np +import os from scipy.ndimage import percentile_filter from scipy.sparse import coo_matrix, csc_matrix, spdiags, hstack from scipy.stats import norm @@ -958,8 +959,9 @@ def initialize_online(self, model_LN=None, T=None): self.estimates = cnm.estimates else: - Y.save(caiman.paths.fn_relocated('init_file.hdf5')) - f_new = caiman.mmapping.save_memmap(['init_file.hdf5'], base_name='Yr', order='C', + temp_init_file = os.path.join(caiman.paths.get_tempdir(), 'init_file.hdf5') + Y.save(temp_init_file) + f_new = caiman.mmapping.save_memmap([temp_init_file], base_name='Yr', order='C', slices=[slice(0, opts['init_batch']), None, None]) Yrm, dims_, T_ = caiman.mmapping.load_memmap(f_new) diff --git a/demos/general/demo_pipeline_NWB.py b/demos/general/demo_pipeline_NWB.py index 22e66f139..956f632b1 100755 --- a/demos/general/demo_pipeline_NWB.py +++ b/demos/general/demo_pipeline_NWB.py @@ -25,7 +25,7 @@ import caiman from caiman.motion_correction import MotionCorrect -from caiman.paths import caiman_datadir +from caiman.paths import caiman_datadir, get_tempdir from caiman.source_extraction.cnmf import cnmf as cnmf from caiman.source_extraction.cnmf import params as params from caiman.utils.utils import download_demo @@ -58,7 +58,7 @@ def main(): # The convert target is the original fn, with the extension swapped to nwb. fr = float(15) # imaging rate in frames per second pre_convert = download_demo('Sue_2x_3000_40_-46.tif') - convert_target = os.path.join(caiman_datadir(), 'Sue_2x_3000_40_-46.nwb') + convert_target = os.path.join(get_tempdir(), 'Sue_2x_3000_40_-46.nwb') orig_movie = caiman.load(pre_convert, fr=fr) # save file in NWB format with various additional info From 3ed88c779d343e4a87326866b5ca51c70e97f3e4 Mon Sep 17 00:00:00 2001 From: Pat Gunn Date: Fri, 19 Apr 2024 15:45:58 -0400 Subject: [PATCH 55/86] Note in docs that caiman.movie.save() returns the filename. 
Fix a test suite's sloppy path code --- caiman/base/timeseries.py | 4 ++++ caiman/tests/test_motion_correction.py | 3 +-- 2 files changed, 5 insertions(+), 2 deletions(-) diff --git a/caiman/base/timeseries.py b/caiman/base/timeseries.py index f8868cb1c..d8bad42ed 100644 --- a/caiman/base/timeseries.py +++ b/caiman/base/timeseries.py @@ -147,6 +147,7 @@ def save(self, Args: file_name: str name of file. Possible formats are tif, avi, npz, mmap and hdf5 + If a path is not part of the filename, it will be saved into a temporary directory under caiman_data to32: Bool whether to transform to 32 bits @@ -165,6 +166,9 @@ def save(self, if saving as .tif, specifies the compression level if saving as .avi or .mkv, compress=0 uses the IYUV codec, otherwise the FFV1 codec is used + Returns: + generated_filename: The full filename, path included, where the data was saved + Raises: Exception 'Extension Unknown' diff --git a/caiman/tests/test_motion_correction.py b/caiman/tests/test_motion_correction.py index 98a72d5dd..af56a3050 100644 --- a/caiman/tests/test_motion_correction.py +++ b/caiman/tests/test_motion_correction.py @@ -126,8 +126,7 @@ def gen_data(D=2, noise=.01, T=300, framerate=30, firerate=2., motion=True): def _test_motion_correct_rigid(D): Y, C, S, A, centers, dims, shifts = gen_data(D) - fname = 'testMovie.tif' - cm.movie(Y).save(fname) + fname = cm.movie(Y).save('testMovie.tif') params_dict = { 'motion': { 'border_nan': True, From 66ede9712532b9bee5ccf91eef1e0d2de53495ef Mon Sep 17 00:00:00 2001 From: Pat Gunn Date: Fri, 19 Apr 2024 16:45:56 -0400 Subject: [PATCH 56/86] Movie.save(): Always return filename (before it only did it for some target file formats) --- caiman/base/timeseries.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/caiman/base/timeseries.py b/caiman/base/timeseries.py index d8bad42ed..55aa127d5 100644 --- a/caiman/base/timeseries.py +++ b/caiman/base/timeseries.py @@ -201,6 +201,8 @@ def foo(i): if to32 and not ('float32' in str(self.dtype)): curfr = curfr.astype(np.float32) tif.save(curfr, compress=compress) + return file_name + elif extension == '.npz': if to32 and not ('float32' in str(self.dtype)): input_arr = self.astype(np.float32) @@ -213,6 +215,8 @@ def foo(i): fr=self.fr, meta_data=self.meta_data, file_name=self.file_name) + return file_name + elif extension in ('.avi', '.mkv'): codec = None if compress == 0: @@ -245,6 +249,7 @@ def foo(i): for d in data: vw.write(cv2.cvtColor(d, cv2.COLOR_GRAY2BGR)) vw.release() + return file_name elif extension == '.mat': if self.file_name[0] is not None: @@ -275,6 +280,7 @@ def foo(i): 'meta_data': self.meta_data, 'file_name': f_name }) + return file_name elif extension in ('.hdf5', '.h5'): with h5py.File(file_name, "w") as f: @@ -293,6 +299,7 @@ def foo(i): if self.meta_data[0] is not None: logging.debug("Metadata for saved file: " + str(self.meta_data)) dset.attrs["meta_data"] = cpk.dumps(self.meta_data) + return file_name elif extension == '.mmap': base_name = name From f78791c15433ad84fafd8010c0c61c45c2e3277f Mon Sep 17 00:00:00 2001 From: Pat Gunn Date: Tue, 23 Apr 2024 19:00:00 -0400 Subject: [PATCH 57/86] environment.yml: be smarter about alternate builds of conda, set channel-priority to handle if user/distro has set it to something else --- environment-minimal.yml | 2 ++ environment.yml | 2 ++ 2 files changed, 4 insertions(+) diff --git a/environment-minimal.yml b/environment-minimal.yml index 5f16cbd69..9368d45ed 100644 --- a/environment-minimal.yml +++ b/environment-minimal.yml @@ -1,5 +1,7 @@ 
+channel_priority: true channels: - conda-forge +- defaults dependencies: - python <=3.12 - av diff --git a/environment.yml b/environment.yml index 4f087c33d..3c4bc4481 100644 --- a/environment.yml +++ b/environment.yml @@ -1,5 +1,7 @@ +channel_priority: true channels: - conda-forge +- defaults dependencies: - python <=3.12 - av From 7d5824d7e564164d4ead987bdb958561d6815338 Mon Sep 17 00:00:00 2001 From: Pat Gunn Date: Tue, 23 Apr 2024 21:40:12 -0400 Subject: [PATCH 58/86] environment.yml: can't actually set channel priority in environment.yml --- environment-minimal.yml | 1 - environment.yml | 1 - 2 files changed, 2 deletions(-) diff --git a/environment-minimal.yml b/environment-minimal.yml index 9368d45ed..03c4a76b3 100644 --- a/environment-minimal.yml +++ b/environment-minimal.yml @@ -1,4 +1,3 @@ -channel_priority: true channels: - conda-forge - defaults diff --git a/environment.yml b/environment.yml index 3c4bc4481..e3499db00 100644 --- a/environment.yml +++ b/environment.yml @@ -1,4 +1,3 @@ -channel_priority: true channels: - conda-forge - defaults From 53ea964797882b47dbade0966daa341d1c9644d4 Mon Sep 17 00:00:00 2001 From: Pat Gunn Date: Wed, 24 Apr 2024 10:04:21 -0400 Subject: [PATCH 59/86] Try adjusting CI to use environment-minimal.yml --- .github/workflows/run_tests.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/run_tests.yml b/.github/workflows/run_tests.yml index 7a10415ad..2904dacc1 100644 --- a/.github/workflows/run_tests.yml +++ b/.github/workflows/run_tests.yml @@ -26,7 +26,7 @@ jobs: with: auto-update-conda: true python-version: ${{ matrix.python-version }} - environment-file: environment.yml + environment-file: environment-minimal.yml activate-environment: caiman - name: Install OS Dependencies From 8b177d5c5b7519da79273e1d10e9149c20260b7e Mon Sep 17 00:00:00 2001 From: Pat Gunn Date: Wed, 24 Apr 2024 10:09:47 -0400 Subject: [PATCH 60/86] minimal environment needs a few more things --- environment-minimal.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/environment-minimal.yml b/environment-minimal.yml index 03c4a76b3..dc322e192 100644 --- a/environment-minimal.yml +++ b/environment-minimal.yml @@ -9,6 +9,7 @@ dependencies: - h5py - holoviews >=1.16.2 - ipyparallel +- ipywidgets - matplotlib - moviepy - numpy <2.0.0 From 701c9af9989a8542eea0bd8d0f7364ba1e4c3f72 Mon Sep 17 00:00:00 2001 From: Pat Gunn Date: Wed, 24 Apr 2024 10:14:43 -0400 Subject: [PATCH 61/86] environment-minimal hasn't seen enough testing over the years. 
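Since CI now builds from environment-minimal.yml, a quick smoke test of a fresh environment might look like the following; this is a hypothetical check, with module names taken from the yml (opencv imports as cv2).

import importlib

# Hypothetical smoke test: confirm the minimal environment's imports resolve.
for mod in ['av', 'h5py', 'holoviews', 'ipyparallel', 'ipywidgets',
            'matplotlib', 'moviepy', 'numpy', 'cv2', 'caiman']:
    importlib.import_module(mod)
    print(f'{mod}: ok')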
--- environment-minimal.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/environment-minimal.yml b/environment-minimal.yml index dc322e192..e3b5aafce 100644 --- a/environment-minimal.yml +++ b/environment-minimal.yml @@ -16,6 +16,7 @@ dependencies: - numpydoc - opencv - peakutils +- pims - psutil - pynwb - scikit-image >=0.19.0 From 50829835b3774be50da28817a438b1d423a2563e Mon Sep 17 00:00:00 2001 From: Pat Gunn Date: Wed, 24 Apr 2024 10:33:53 -0400 Subject: [PATCH 62/86] If we're to use environment-minimal for CI we need to include nose --- environment-minimal.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/environment-minimal.yml b/environment-minimal.yml index e3b5aafce..e017b2b39 100644 --- a/environment-minimal.yml +++ b/environment-minimal.yml @@ -5,13 +5,14 @@ dependencies: - python <=3.12 - av - cython -- future - h5py - holoviews >=1.16.2 +- ipython - ipyparallel - ipywidgets - matplotlib - moviepy +- nose - numpy <2.0.0 - numpydoc - opencv From b6e67c554b9e996415b5072306891efd2f2dd2c7 Mon Sep 17 00:00:00 2001 From: Pat Gunn Date: Wed, 24 Apr 2024 11:01:31 -0400 Subject: [PATCH 63/86] prevent old numpy breaking typing --- environment-minimal.yml | 2 +- environment.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/environment-minimal.yml b/environment-minimal.yml index e017b2b39..b3ab85cc5 100644 --- a/environment-minimal.yml +++ b/environment-minimal.yml @@ -13,7 +13,7 @@ dependencies: - matplotlib - moviepy - nose -- numpy <2.0.0 +- numpy <2.0.0,>=1.20 - numpydoc - opencv - peakutils diff --git a/environment.yml b/environment.yml index e3499db00..65ea3dfe7 100644 --- a/environment.yml +++ b/environment.yml @@ -18,7 +18,7 @@ dependencies: - moviepy - mypy - nose -- numpy <2.0.0 +- numpy <2.0.0,>=1.20 - numpydoc - opencv - panel >=1.0.2 From 0b81678d4996105ca299c5d8f000c6f615be057b Mon Sep 17 00:00:00 2001 From: Pat Gunn Date: Wed, 24 Apr 2024 11:34:11 -0400 Subject: [PATCH 64/86] yaml: Prevent old versions of numpy and associated problems --- environment-minimal.yml | 2 +- environment.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/environment-minimal.yml b/environment-minimal.yml index b3ab85cc5..8b14a7c58 100644 --- a/environment-minimal.yml +++ b/environment-minimal.yml @@ -13,7 +13,7 @@ dependencies: - matplotlib - moviepy - nose -- numpy <2.0.0,>=1.20 +- numpy <2.0.0,>=1.26 - numpydoc - opencv - peakutils diff --git a/environment.yml b/environment.yml index 65ea3dfe7..6e1d1878a 100644 --- a/environment.yml +++ b/environment.yml @@ -18,7 +18,7 @@ dependencies: - moviepy - mypy - nose -- numpy <2.0.0,>=1.20 +- numpy <2.0.0,>=1.26 - numpydoc - opencv - panel >=1.0.2 From 14af8d5a7137d0eeef4376a9b29259818a45a2d9 Mon Sep 17 00:00:00 2001 From: Pat Gunn Date: Wed, 24 Apr 2024 11:55:59 -0400 Subject: [PATCH 65/86] CI's way of managing updating the test environment means lots of minimal version pins --- environment-minimal.yml | 2 +- environment.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/environment-minimal.yml b/environment-minimal.yml index 8b14a7c58..6ff26de47 100644 --- a/environment-minimal.yml +++ b/environment-minimal.yml @@ -5,7 +5,7 @@ dependencies: - python <=3.12 - av - cython -- h5py +- h5py >=3.4.0 - holoviews >=1.16.2 - ipython - ipyparallel diff --git a/environment.yml b/environment.yml index 6e1d1878a..6ae3e04e5 100644 --- a/environment.yml +++ b/environment.yml @@ -7,7 +7,7 @@ dependencies: - bokeh >=3.1.1 - coverage - cython -- h5py +- h5py >=3.4.0 - holoviews 
>=1.16.2 - ipykernel - ipython From 73d57219b021aa7f907adf1e7ffccc62f078746e Mon Sep 17 00:00:00 2001 From: Pat Gunn Date: Wed, 24 Apr 2024 13:58:00 -0400 Subject: [PATCH 66/86] Hotfix: Fix issue in demo_caiman_cnmf_3D from a past refactor to visualization.py --- caiman/utils/visualization.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/caiman/utils/visualization.py b/caiman/utils/visualization.py index d1a5832e4..39e0db78f 100644 --- a/caiman/utils/visualization.py +++ b/caiman/utils/visualization.py @@ -528,7 +528,7 @@ def nb_view_patches3d(Y_r, A, C, dims, image_type='mean', Yr=None, if max_projection: if image_type == 'corr': - tmp = [(local_correlations( + tmp = [(caiman.summary_images.local_correlations( Yr[index_permut].reshape(dims + (-1,), order='F'))[:, ::-1]).max(i) for i in range(3)] From fd24f25662806e4c71aa0a3c6dd8368814c7041f Mon Sep 17 00:00:00 2001 From: Pat Gunn Date: Wed, 24 Apr 2024 13:59:13 -0400 Subject: [PATCH 67/86] cherry-pick 73d572 --- caiman/utils/visualization.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/caiman/utils/visualization.py b/caiman/utils/visualization.py index d1a5832e4..39e0db78f 100644 --- a/caiman/utils/visualization.py +++ b/caiman/utils/visualization.py @@ -528,7 +528,7 @@ def nb_view_patches3d(Y_r, A, C, dims, image_type='mean', Yr=None, if max_projection: if image_type == 'corr': - tmp = [(local_correlations( + tmp = [(caiman.summary_images.local_correlations( Yr[index_permut].reshape(dims + (-1,), order='F'))[:, ::-1]).max(i) for i in range(3)] From 41cd982b70e1b9ff004ace23b80d5624bd0dcc5b Mon Sep 17 00:00:00 2001 From: Pat Gunn Date: Fri, 26 Apr 2024 14:21:05 -0400 Subject: [PATCH 68/86] params: better handle if user provides ring_CNN (compensating for a misdesign of CNMFParams) --- caiman/motion_correction.py | 4 ++++ caiman/source_extraction/cnmf/params.py | 2 +- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/caiman/motion_correction.py b/caiman/motion_correction.py index 04283c527..6ccbe570e 100644 --- a/caiman/motion_correction.py +++ b/caiman/motion_correction.py @@ -2714,6 +2714,10 @@ def compute_metrics_motion_correction(fname, final_size_x, final_size_y, swap_di if play_flow and opencv: cv2.destroyAllWindows() + # FIXME: This generates a metrics dump potentially right next to the datafiles it was generated from; + # We will need to fix this in some future revision of the code, ideally returning the filename we used to the caller + # or abstracting the path handling logic into some kind of a policy-aware getpath function for specific uses. + # This should be fixed with future work to have separate runs have separate workdirs. 
np.savez(os.path.splitext(fname)[0] + '_metrics', flows=flows, norms=norms, correlations=correlations, smoothness=smoothness, tmpl=tmpl, smoothness_corr=smoothness_corr, img_corr=img_corr) return tmpl, correlations, flows, norms, smoothness diff --git a/caiman/source_extraction/cnmf/params.py b/caiman/source_extraction/cnmf/params.py index bba3df291..bc468d88c 100644 --- a/caiman/source_extraction/cnmf/params.py +++ b/caiman/source_extraction/cnmf/params.py @@ -1124,7 +1124,7 @@ def change_params(self, params_dict, allow_legacy:bool=True, warn_unused:bool=Tr consumed = {} # Keep track of what parameters in params_dict were used to set something in params (just for legacy API) nagged_once = False # So we don't nag people multiple times in the same call - for paramkey in params_dict: + for paramkey in params_dict and isinstance(params_dict[paramkey], dict): # Latter half is because of scoped keys with the same name as categories, because we apparently have those. ring_CNN is an example. if paramkey in list(self.__dict__.keys()): # Proper pathed part cat_handle = getattr(self, paramkey) for k, v in params_dict[paramkey].items(): From 75c95b270eeb88c87a533925f549f5ab0fbf02c0 Mon Sep 17 00:00:00 2001 From: Pat Gunn Date: Fri, 26 Apr 2024 14:21:05 -0400 Subject: [PATCH 69/86] params: better handle if user provides ring_CNN (compensating for a misdesign of CNMFParams) --- caiman/motion_correction.py | 4 ++++ caiman/source_extraction/cnmf/params.py | 2 +- 2 files changed, 5 insertions(+), 1 deletion(-) diff --git a/caiman/motion_correction.py b/caiman/motion_correction.py index a819fff95..4feb54a13 100644 --- a/caiman/motion_correction.py +++ b/caiman/motion_correction.py @@ -2711,6 +2711,10 @@ def compute_metrics_motion_correction(fname, final_size_x, final_size_y, swap_di if play_flow and opencv: cv2.destroyAllWindows() + # FIXME: This generates a metrics dump potentially right next to the datafiles it was generated from; + # We will need to fix this in some future revision of the code, ideally returning the filename we used to the caller + # or abstracting the path handling logic into some kind of a policy-aware getpath function for specific uses. + # This should be fixed with future work to have separate runs have separate workdirs. np.savez(os.path.splitext(fname)[0] + '_metrics', flows=flows, norms=norms, correlations=correlations, smoothness=smoothness, tmpl=tmpl, smoothness_corr=smoothness_corr, img_corr=img_corr) return tmpl, correlations, flows, norms, smoothness diff --git a/caiman/source_extraction/cnmf/params.py b/caiman/source_extraction/cnmf/params.py index 595c8313d..86c397160 100644 --- a/caiman/source_extraction/cnmf/params.py +++ b/caiman/source_extraction/cnmf/params.py @@ -1144,7 +1144,7 @@ def change_params(self, params_dict, allow_legacy:bool=True, warn_unused:bool=Tr consumed = {} # Keep track of what parameters in params_dict were used to set something in params (just for legacy API) nagged_once = False # So we don't nag people multiple times in the same call - for paramkey in params_dict: + for paramkey in params_dict and isinstance(params_dict[paramkey], dict): # Latter half is because of scoped keys with the same name as categories, because we apparently have those. ring_CNN is an example. 
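To make the intent concrete, here is a minimal sketch of the call pattern this gating is meant to support, assuming opts is a CNMFParams instance; the specific ring_CNN keys are assumptions for illustration. 'ring_CNN' names both a top-level params category and a scoped key, so change_params must only recurse into values that are themselves dicts.

# Minimal sketch (key names assumed):
opts.change_params({
    'ring_CNN': {'width': 5, 'loss_fn': 'pct'},  # category-style (a dict)
    'online': {'ring_CNN': True},                # scoped flag with the same name
})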
if paramkey in list(self.__dict__.keys()): # Proper pathed part cat_handle = getattr(self, paramkey) for k, v in params_dict[paramkey].items(): From c45b9aa5f91081f245883258fac72aa955fb1bd8 Mon Sep 17 00:00:00 2001 From: Pat Gunn Date: Fri, 26 Apr 2024 14:28:07 -0400 Subject: [PATCH 70/86] Fix goof in last hotfix to CNMFParams --- caiman/source_extraction/cnmf/params.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/caiman/source_extraction/cnmf/params.py b/caiman/source_extraction/cnmf/params.py index bc468d88c..398fc754b 100644 --- a/caiman/source_extraction/cnmf/params.py +++ b/caiman/source_extraction/cnmf/params.py @@ -1124,8 +1124,8 @@ def change_params(self, params_dict, allow_legacy:bool=True, warn_unused:bool=Tr consumed = {} # Keep track of what parameters in params_dict were used to set something in params (just for legacy API) nagged_once = False # So we don't nag people multiple times in the same call - for paramkey in params_dict and isinstance(params_dict[paramkey], dict): # Latter half is because of scoped keys with the same name as categories, because we apparently have those. ring_CNN is an example. - if paramkey in list(self.__dict__.keys()): # Proper pathed part + for paramkey in params_dict: + if paramkey in list(self.__dict__.keys()) and isinstance(params_dict[paramkey], dict): # Handle proper pathed part. Latter half of the conditional is because of scoped keys with the same name as categories, because we apparently have those. ring_CNN is an example. cat_handle = getattr(self, paramkey) for k, v in params_dict[paramkey].items(): if k == 'nb' and paramkey != 'init': From 9ea5587e81682f569d248afdfa92c46a87c247d1 Mon Sep 17 00:00:00 2001 From: Pat Gunn Date: Fri, 26 Apr 2024 14:28:07 -0400 Subject: [PATCH 71/86] Fix goof in last hotfix to CNMFParams --- caiman/source_extraction/cnmf/params.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/caiman/source_extraction/cnmf/params.py b/caiman/source_extraction/cnmf/params.py index 86c397160..04614e63e 100644 --- a/caiman/source_extraction/cnmf/params.py +++ b/caiman/source_extraction/cnmf/params.py @@ -1144,8 +1144,8 @@ def change_params(self, params_dict, allow_legacy:bool=True, warn_unused:bool=Tr consumed = {} # Keep track of what parameters in params_dict were used to set something in params (just for legacy API) nagged_once = False # So we don't nag people multiple times in the same call - for paramkey in params_dict and isinstance(params_dict[paramkey], dict): # Latter half is because of scoped keys with the same name as categories, because we apparently have those. ring_CNN is an example. - if paramkey in list(self.__dict__.keys()): # Proper pathed part + for paramkey in params_dict: + if paramkey in list(self.__dict__.keys()) and isinstance(params_dict[paramkey], dict): # Handle proper pathed part. Latter half of the conditional is because of scoped keys with the same name as categories, because we apparently have those. ring_CNN is an example. 
cat_handle = getattr(self, paramkey) for k, v in params_dict[paramkey].items(): if k == 'nb' and paramkey != 'init': From b8e97f84ea2a48ebeeec747d4ba5a6957fbdc287 Mon Sep 17 00:00:00 2001 From: Pat Gunn Date: Fri, 26 Apr 2024 16:51:09 -0400 Subject: [PATCH 72/86] Remove (broken) support for the old non-hdf5 format of .mat files --- caiman/base/movies.py | 30 +++++------------------------- 1 file changed, 5 insertions(+), 25 deletions(-) diff --git a/caiman/base/movies.py b/caiman/base/movies.py index 4994020b4..44b921b13 100644 --- a/caiman/base/movies.py +++ b/caiman/base/movies.py @@ -1244,7 +1244,8 @@ def load(file_name: Union[str, list[str]], dimension of the movie along x and y if loading from a two dimensional numpy array var_name_hdf5: str - if loading from hdf5/n5 name of the dataset inside the file to load (ignored if the file only has one dataset) + if loading from hdf5/n5 name of the dataset inside the file to load (ignored if the file only has one dataset). + This is also used for (new-style) mat files in_memory: bool=False This changes the behaviour of the function for npy files to be a readwrite rather than readonly memmap, @@ -1314,17 +1315,6 @@ def load(file_name: Union[str, list[str]], basename, extension = os.path.splitext(file_name) extension = extension.lower() - if extension == '.mat': - logging.warning('Loading a *.mat file. x- and y- dimensions ' + - 'might have been swapped.') - try: # scipy >= 1.8 - byte_stream, file_opened = scipy.io.matlab._mio._open_file(file_name, appendmat=False) - mjv, mnv = scipy.io.matlab.miobase.get_matfile_version(byte_stream) - except: # scipy <= 1.7 - byte_stream, file_opened = scipy.io.matlab.mio._open_file(file_name, appendmat=False) - mjv, mnv = scipy.io.matlab.mio.get_matfile_version(byte_stream) - if mjv == 2: - extension = '.h5' if extension in ['.tif', '.tiff', '.btf']: # load tif file with tifffile.TiffFile(file_name) as tffl: @@ -1512,19 +1502,13 @@ def load(file_name: Union[str, list[str]], else: input_arr = input_arr[np.newaxis, :, :] - elif extension == '.mat': # load npy file - input_arr = scipy.io.loadmat(file_name)['data'] - input_arr = np.rollaxis(input_arr, 2, -3) - if subindices is not None: - input_arr = input_arr[subindices] - elif extension == '.npz': # load movie from saved file if subindices is not None: raise Exception('Subindices not implemented') with np.load(file_name) as f: return movie(**f).astype(outtype) - elif extension in ('.hdf5', '.h5', '.nwb', 'n5', 'zarr'): + elif extension in ('.hdf5', '.h5', '.mat', '.nwb', 'n5', 'zarr'): if extension in ('n5', 'zarr'): # Thankfully, the zarr library lines up closely with h5py past the initial open f = zarr.open(file_name, "r") else: @@ -2010,11 +1994,7 @@ def get_file_size(file_name, var_name_hdf5:str='mov') -> tuple[tuple, Union[int, if os.path.exists(file_name): _, extension = os.path.splitext(file_name)[:2] extension = extension.lower() - if extension == '.mat': - byte_stream, file_opened = scipy.io.matlab.mio._open_file(file_name, appendmat=False) - mjv, mnv = scipy.io.matlab.mio.get_matfile_version(byte_stream) - if mjv == 2: - extension = '.h5' + if extension in ['.tif', '.tiff', '.btf']: tffl = tifffile.TiffFile(file_name) siz = tffl.series[0].shape @@ -2042,7 +2022,7 @@ def get_file_size(file_name, var_name_hdf5:str='mov') -> tuple[tuple, Union[int, filename = os.path.split(file_name)[-1] Yr, dims, T = caiman.mmapping.load_memmap(os.path.join( os.path.split(file_name)[0], filename)) - elif extension in ('.h5', '.hdf5', '.nwb', 'n5', 'zarr'): + elif 
extension in ('.h5', '.hdf5', '.mat', '.nwb', 'n5', 'zarr'): # FIXME this doesn't match the logic in load() if extension in ('n5', 'zarr'): # Thankfully, the zarr library lines up closely with h5py past the initial open f = zarr.open(file_name, "r") From fafffce77802c63d473f7329f4e0369fb764c28c Mon Sep 17 00:00:00 2001 From: Pat Gunn Date: Fri, 26 Apr 2024 17:17:15 -0400 Subject: [PATCH 73/86] .mat files: give users better messages if they try to open a v1 file --- caiman/base/movies.py | 24 +++++++++++++++++++++--- 1 file changed, 21 insertions(+), 3 deletions(-) diff --git a/caiman/base/movies.py b/caiman/base/movies.py index 44b921b13..da9c0fe3c 100644 --- a/caiman/base/movies.py +++ b/caiman/base/movies.py @@ -1512,7 +1512,13 @@ def load(file_name: Union[str, list[str]], if extension in ('n5', 'zarr'): # Thankfully, the zarr library lines up closely with h5py past the initial open f = zarr.open(file_name, "r") else: - f = h5py.File(file_name, "r") + try: + f = h5py.File(file_name, "r") + except: + if extension == '.mat': + raise Exception(f"Problem loading {file_name}: Unknown format. This may be in the original version 1 (non-hdf5) mat format; please convert it first") + else: + raise Exception(f"Problem in loading {file_name}: Unknown format.") ignore_keys = ['__DATA_TYPES__'] # Known metadata that tools provide, add to this as needed. Sync with get_file_size() !! fkeys = list(filter(lambda x: x not in ignore_keys, f.keys())) if len(fkeys) == 1: # If the file we're parsing has only one dataset inside it, @@ -1939,7 +1945,13 @@ def load_iter(file_name: Union[str, list[str]], subindices=None, var_name_hdf5: if extension in ('.n5', '.zarr'): # Thankfully, the zarr library lines up closely with h5py past the initial open f = zarr.open(file_name, "r") else: - f = h5py.File(file_name, "r") + try: + f = h5py.File(file_name, "r") + except: + if extension == '.mat': + raise Exception(f"Problem loading {file_name}: Unknown format. This may be in the original version 1 (non-hdf5) mat format; please convert it first") + else: + raise Exception(f"Problem in loading {file_name}: Unknown format.") ignore_keys = ['__DATA_TYPES__'] # Known metadata that tools provide, add to this as needed. fkeys = list(filter(lambda x: x not in ignore_keys, f.keys())) if len(fkeys) == 1: # If the hdf5 file we're parsing has only one dataset inside it, @@ -2027,7 +2039,13 @@ def get_file_size(file_name, var_name_hdf5:str='mov') -> tuple[tuple, Union[int, if extension in ('n5', 'zarr'): # Thankfully, the zarr library lines up closely with h5py past the initial open f = zarr.open(file_name, "r") else: - f = h5py.File(file_name, "r") + try: + f = h5py.File(file_name, "r") + except: + if extension == '.mat': + raise Exception(f"Problem loading {file_name}: Unknown format. This may be in the original version 1 (non-hdf5) mat format; please convert it first") + else: + raise Exception(f"Problem in loading {file_name}: Unknown format.") ignore_keys = ['__DATA_TYPES__'] # Known metadata that tools provide, add to this as needed. Sync with movies.my:load() !! 
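For users who hit this error on an old-style .mat, a hypothetical one-off conversion (assuming the movie is stored under the variable name 'data') rewrites it as HDF5 so Caiman can read it:

import h5py
import scipy.io

# Hypothetical converter: read a pre-v7.3 .mat with scipy, then store the
# array under the dataset name 'mov' in an HDF5 file caiman.load() accepts.
arr = scipy.io.loadmat('old_movie.mat')['data']
with h5py.File('old_movie.h5', 'w') as f:
    f.create_dataset('mov', data=arr)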
             kk = list(filter(lambda x: x not in ignore_keys, f.keys()))
             if len(kk) == 1: # TODO: Consider recursing into a group to find a dataset

From 25818155e47c0c30eabf979cc8dafef5bd98883c Mon Sep 17 00:00:00 2001
From: Pat Gunn
Date: Fri, 26 Apr 2024 17:20:59 -0400
Subject: [PATCH 74/86] mat file revamp: Use consistent phrasing

---
 caiman/base/movies.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/caiman/base/movies.py b/caiman/base/movies.py
index da9c0fe3c..325ef840c 100644
--- a/caiman/base/movies.py
+++ b/caiman/base/movies.py
@@ -1518,7 +1518,7 @@ def load(file_name: Union[str, list[str]],
                     if extension == '.mat':
                         raise Exception(f"Problem loading {file_name}: Unknown format. This may be in the original version 1 (non-hdf5) mat format; please convert it first")
                     else:
-                        raise Exception(f"Problem in loading {file_name}: Unknown format.")
+                        raise Exception(f"Problem loading {file_name}: Unknown format.")
             ignore_keys = ['__DATA_TYPES__'] # Known metadata that tools provide, add to this as needed. Sync with get_file_size() !!
             fkeys = list(filter(lambda x: x not in ignore_keys, f.keys()))
             if len(fkeys) == 1: # If the file we're parsing has only one dataset inside it,
@@ -1951,7 +1951,7 @@ def load_iter(file_name: Union[str, list[str]], subindices=None, var_name_hdf5:
                 if extension == '.mat':
                     raise Exception(f"Problem loading {file_name}: Unknown format. This may be in the original version 1 (non-hdf5) mat format; please convert it first")
                 else:
-                    raise Exception(f"Problem in loading {file_name}: Unknown format.")
+                    raise Exception(f"Problem loading {file_name}: Unknown format.")
         ignore_keys = ['__DATA_TYPES__'] # Known metadata that tools provide, add to this as needed.
         fkeys = list(filter(lambda x: x not in ignore_keys, f.keys()))
         if len(fkeys) == 1: # If the hdf5 file we're parsing has only one dataset inside it,
@@ -2045,7 +2045,7 @@ def get_file_size(file_name, var_name_hdf5:str='mov') -> tuple[tuple, Union[int,
                     if extension == '.mat':
                         raise Exception(f"Problem loading {file_name}: Unknown format. This may be in the original version 1 (non-hdf5) mat format; please convert it first")
                     else:
-                        raise Exception(f"Problem in loading {file_name}: Unknown format.")
+                        raise Exception(f"Problem loading {file_name}: Unknown format.")
             ignore_keys = ['__DATA_TYPES__'] # Known metadata that tools provide, add to this as needed. Sync with movies.py:load() !!
             kk = list(filter(lambda x: x not in ignore_keys, f.keys()))
             if len(kk) == 1: # TODO: Consider recursing into a group to find a dataset

From 29a3a6794d31a29a289af0c1d1651f94e09ff95e Mon Sep 17 00:00:00 2001
From: Pat Gunn
Date: Fri, 26 Apr 2024 17:32:45 -0400
Subject: [PATCH 75/86] Update docs on mat file support

---
 docs/source/file_formats.md | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/docs/source/file_formats.md b/docs/source/file_formats.md
index 6e1a94246..4eeef523b 100644
--- a/docs/source/file_formats.md
+++ b/docs/source/file_formats.md
@@ -15,7 +15,7 @@ Notes for specific formats follow:
 Matlab (\*.mat)
 ---------------
 
-Caiman may be able to handle Matlab's mat files, although it is not a desirable format. We rely on the scipy.io.matlab functions to handle files of this type, except for certain versions of the format which are actually hdf5 files
+Caiman can handle Matlab's V2 mat files (the v7.3 format, which is HDF5 underneath); as with any other HDF5 file, you can select the variable to load with var_name_hdf5. Support for the older, pre-HDF5 mat format has been removed; if you need it, reach out and we can consider adding it back.
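(As an aside, not part of the patch: loading one of these HDF5-based mat files with Caiman might look like the sketch below. The filename 'recording.mat' and the variable name 'data' are placeholders, not names from the codebase.)

```python
import caiman as cm

# 'recording.mat' and 'data' are hypothetical names. A v7.3 (HDF5-based) mat
# file stores each Matlab variable as an HDF5 dataset, so the dataset to read
# is selected with var_name_hdf5, just as for .h5/.hdf5 files; if the file
# holds only a single dataset, var_name_hdf5 is ignored.
movie = cm.load('recording.mat', var_name_hdf5='data')
print(movie.shape)   # typically (frames, height, width)
```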
 TIFF (\*.tiff, \*.tif, \*.btf)
 ------------------------------

From 7943ede75442fdb6d7b0c9181c748e9e80198661 Mon Sep 17 00:00:00 2001
From: Pat Gunn
Date: Tue, 30 Apr 2024 10:42:47 -0400
Subject: [PATCH 76/86] CNMFParams: remove some dead code (really just a commit to make CI run, but still a good commit)

---
 caiman/source_extraction/cnmf/params.py | 6 ++----
 1 file changed, 2 insertions(+), 4 deletions(-)

diff --git a/caiman/source_extraction/cnmf/params.py b/caiman/source_extraction/cnmf/params.py
index 398fc754b..7ca69fa04 100644
--- a/caiman/source_extraction/cnmf/params.py
+++ b/caiman/source_extraction/cnmf/params.py
@@ -309,7 +309,7 @@ def __init__(self, fnames=None, dims=None, dxy=(1, 1),
                 number of pixels to be processed by each worker

             nb: int, default: 1
-                number of global background components
+                number of global background components. Do not set this directly; set it in the init group instead.

             normalize_yyt_one: bool, default: True
                 Whether to normalize the C and A matrices so that diag(C*C.T) = 1 during update spatial
@@ -357,7 +357,7 @@ def __init__(self, fnames=None, dims=None, dxy=(1, 1),
                 if method cvxpy, primary and secondary (if problem unfeasible for approx solution)

             nb: int, default: 1
-                number of global background components
+                number of global background components. Do not set this directly; set it in the init group instead.

             noise_method: 'mean'|'median'|'logmexp', default: 'mean'
                 PSD averaging method for computing the noise std
@@ -937,8 +937,6 @@ def check_consistency(self):
         if self.patch['rf'] is not None:
             if np.any(np.array(self.patch['rf']) <= self.init['gSiz'][0]):
                 logging.warning(f"Changing rf from {self.patch['rf']} to {2 * self.init['gSiz'][0]} because the constraint rf > gSiz was not satisfied.")
-#        if self.motion['gSig_filt'] is None:
-#            self.motion['gSig_filt'] = self.init['gSig']

         if self.init['nb'] <= 0 and (self.patch['nb_patch'] != self.init['nb'] or self.patch['low_rank_background'] is not None):
             logging.warning(f"gnb={self.init['nb']}, hence setting keys nb_patch and low_rank_background in group patch automatically.")

From ac1e2aa67ff9ec5f57f99bc34958b97dc61093af Mon Sep 17 00:00:00 2001
From: Pat Gunn
Date: Tue, 30 Apr 2024 16:09:17 -0400
Subject: [PATCH 77/86] CLI OnACID demos: correctly handle display of results

---
 demos/general/demo_OnACID.py           | 4 ++++
 demos/general/demo_OnACID_mesoscope.py | 4 ++++
 2 files changed, 8 insertions(+)

diff --git a/demos/general/demo_OnACID.py b/demos/general/demo_OnACID.py
index 2232fefa5..1951e6375 100755
--- a/demos/general/demo_OnACID.py
+++ b/demos/general/demo_OnACID.py
@@ -10,6 +10,7 @@
 import argparse
 #import code
 import logging
+import matplotlib
 import numpy as np
 import os

@@ -69,11 +70,14 @@ def main():
     # plot results
     cnm.estimates.view_components(img=Cn, idx=cnm.estimates.idx_components)

+    if not cfg.no_play:
+        matplotlib.pyplot.show(block=True)
+
 def handle_args():
     parser = argparse.ArgumentParser(description="Demonstrate basic Caiman Online functionality with CNMF initialization")
     parser.add_argument("--configfile", default=os.path.join(caiman_datadir(), 'demos', 'general', 'params_demo_OnACID.json'), help="JSON Configfile for Caiman parameters")
     parser.add_argument("--input", action="append", help="File(s) to work on, provide multiple times for more files")
+    parser.add_argument("--no_play", action="store_true", help="Do not display results")
     parser.add_argument("--logfile", help="If specified, log to the named file")
     return parser.parse_args()

diff --git a/demos/general/demo_OnACID_mesoscope.py b/demos/general/demo_OnACID_mesoscope.py
index 79bd9d585..620a8b1b9 100755
--- a/demos/general/demo_OnACID_mesoscope.py
+++ b/demos/general/demo_OnACID_mesoscope.py
@@ -17,6 +17,7 @@
 import argparse
 import glob
 import logging
+import matplotlib
 import numpy as np
 import os
 import matplotlib.pyplot as plt
@@ -118,6 +119,8 @@ def main():
         cnm.save(os.path.splitext(fnames[0])[0] + '_results.hdf5')

     dview.terminate()
+    if not cfg.no_play:
+        matplotlib.pyplot.show(block=True)

 def handle_args():
     parser = argparse.ArgumentParser(description="Full OnACID Caiman demo")
@@ -126,6 +129,7 @@ def handle_args():
     parser.add_argument("--cluster_backend", default="multiprocessing", help="Specify multiprocessing, ipyparallel, or single to pick an engine")
     parser.add_argument("--cluster_nproc", type=int, default=None, help="Override automatic selection of number of workers to use")
     parser.add_argument("--input", action="append", help="File(s) to work on, provide multiple times for more files")
+    parser.add_argument("--no_play", action="store_true", help="Do not display results")
     parser.add_argument("--logfile", help="If specified, log to the named file")
     return parser.parse_args()

From 0fa78a355162568f7e8d274743261292ddd8e61f Mon Sep 17 00:00:00 2001
From: Pat Gunn
Date: Tue, 30 Apr 2024 16:16:46 -0400
Subject: [PATCH 78/86] CLI demo pipeline: get rid of qt errors

---
 demos/general/demo_pipeline.py | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/demos/general/demo_pipeline.py b/demos/general/demo_pipeline.py
index 9a8d45c50..028b9c55a 100755
--- a/demos/general/demo_pipeline.py
+++ b/demos/general/demo_pipeline.py
@@ -19,6 +19,7 @@
 import cv2
 import glob
 import logging
+import matplotlib
 import numpy as np
 import os

@@ -176,6 +177,7 @@ def main():

     # Stop the cluster and clean up log files
     caiman.stop_server(dview=dview)
+    matplotlib.pyplot.show(block=True)

     if not cfg.keep_logs:
         log_files = glob.glob('*_LOG_*')

From a5886c43d767356de7c753739aa1885a580c6088 Mon Sep 17 00:00:00 2001
From: Pat Gunn
Date: Tue, 30 Apr 2024 16:58:11 -0400
Subject: [PATCH 79/86] Add note to demo_behaviour that it needs a qt6 build of opencv

---
 demos/general/demo_behavior.py | 3 +++
 1 file changed, 3 insertions(+)

diff --git a/demos/general/demo_behavior.py b/demos/general/demo_behavior.py
index bb835f61e..c9d2d6f32 100755
--- a/demos/general/demo_behavior.py
+++ b/demos/general/demo_behavior.py
@@ -6,6 +6,9 @@
 This demo requires a GUI; it does not make sense to run it noninteractively.
 """

+# If this demo crashes right after making it past the initial play of the mouse movie,
+# you may need to switch your build of opencv+libopencv+py-opencv to a qt6 build of the
+# same (in conda, these have a qt6_ prefix to their build id).

 import argparse
 import cv2
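(Aside: the display-handling pattern that patches 77 and 78 introduce, i.e. import matplotlib at the top, accept a --no_play flag, and finish with a blocking show(), reduces to the minimal sketch below; the real demos accept more options and do real work before the final show.)

```python
import argparse
import matplotlib

def handle_args():
    # Only the flag relevant to display handling is shown here.
    parser = argparse.ArgumentParser(description="Minimal sketch of the demo display pattern")
    parser.add_argument("--no_play", action="store_true", help="Do not display results")
    return parser.parse_args()

def main():
    cfg = handle_args()
    # ... run the pipeline and draw figures with matplotlib here ...
    if not cfg.no_play:
        # Block so that figures stay open until the user closes them; without
        # this, a noninteractive run exits and closes all windows immediately.
        matplotlib.pyplot.show(block=True)

main()
```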
From fe1ad0cb28038a023fb1d923512237a17424e878 Mon Sep 17 00:00:00 2001
From: Pat Gunn
Date: Tue, 30 Apr 2024 17:20:00 -0400
Subject: [PATCH 80/86] demo_behavior and caiman.behavior.behavior: fix ancient calls

---
 caiman/behavior/behavior.py    | 4 ++--
 demos/general/demo_behavior.py | 4 ++--
 2 files changed, 4 insertions(+), 4 deletions(-)

diff --git a/caiman/behavior/behavior.py b/caiman/behavior/behavior.py
index ce9fc87be..04dfece82 100644
--- a/caiman/behavior/behavior.py
+++ b/caiman/behavior/behavior.py
@@ -130,8 +130,8 @@ def extract_magnitude_and_angle_from_OF(spatial_filter_,
     x, y = scipy.signal.medfilt(time_trace, kernel_size=[1, 1]).T
     x = scipy.signal.savgol_filter(x.squeeze(), sav_filter_size, 1)
     y = scipy.signal.savgol_filter(y.squeeze(), sav_filter_size, 1)
-    mag, dirct = to_polar(x - caiman.components_evaluation.mode_robust(x),
-                          y - caiman.components_evaluation.mode_robust(y))
+    mag, dirct = to_polar(x - caiman.utils.stats.mode_robust(x),
+                          y - caiman.utils.stats.mode_robust(y))
     dirct = scipy.signal.medfilt(dirct.squeeze(), kernel_size=1).T

     # normalize to pixel units
diff --git a/demos/general/demo_behavior.py b/demos/general/demo_behavior.py
index c9d2d6f32..8572d66a0 100755
--- a/demos/general/demo_behavior.py
+++ b/demos/general/demo_behavior.py
@@ -69,12 +69,12 @@ def main():
     # number of standard deviations above mean for the magnitude that are considered enough to measure the angle in polar coordinates
     num_std_mag_for_angle = .6
     only_magnitude = False # if only interested in factorizing over the magnitude
-    method_factorization = 'dict_learn' # could also use nmf
+    method_factorization = 'nmf'
     # number of iterations for the dictionary learning algorithm (Mairal et al, 2010)
     max_iter_DL = -30

     spatial_filter_, time_trace_, of_or = cm.behavior.behavior.extract_motor_components_OF(m, n_components, mask=mask,
-                                                                                           resize_fact=resize_fact, only_magnitude=only_magnitude, verbose=True, method_factorization='dict_learn', max_iter_DL=max_iter_DL)
+                                                                                           resize_fact=resize_fact, only_magnitude=only_magnitude, verbose=True, method_factorization='nmf', max_iter_DL=max_iter_DL)

     mags, dircts, dircts_thresh, spatial_masks_thrs = cm.behavior.behavior.extract_magnitude_and_angle_from_OF(

From 0c80201a71ca5f9174dee6fae21cc303a6be60b2 Mon Sep 17 00:00:00 2001
From: Pat Gunn
Date: Tue, 30 Apr 2024 17:28:42 -0400
Subject: [PATCH 81/86] CLI demo: fix demo_behavior

---
 demos/general/demo_behavior.py | 7 ++++---
 1 file changed, 4 insertions(+), 3 deletions(-)

diff --git a/demos/general/demo_behavior.py b/demos/general/demo_behavior.py
index 8572d66a0..19c46224e 100755
--- a/demos/general/demo_behavior.py
+++ b/demos/general/demo_behavior.py
@@ -14,7 +14,7 @@
 import cv2
 import logging
 import numpy as np
-import matplotlib.pyplot as plt
+import matplotlib.pyplot as pl

 try:
     cv2.setNumThreads(0)
@@ -48,7 +48,7 @@ def main():
     # If you prefer to hardcode filenames, you could do something like this:
     # fnames = ["/path/to/myfile1.avi", "/path/to/myfile2.avi"]

-    plt.ion()
+    pl.ion()

     # TODO: todocument
     m = cm._load_behavior(fnames[0])
@@ -103,6 +103,7 @@ def main():
             pl.plot(dirct, 'r-', linewidth=2)

         idd += 1
+    pl.show(block=True)

 def handle_args():
     parser = argparse.ArgumentParser(description="Demonstrate behavioural/optic flow functions")
@@ -111,4 +112,4 @@ def handle_args():
     return parser.parse_args()

 ########
-main()
\ No newline at end of file
+main()

From 862dba878f36ba48159036725c4e63f61378665b Mon Sep 17 00:00:00 2001
From: Pat Gunn
Date: Wed, 1 May 2024 15:51:43 -0400
Subject: [PATCH 82/86] Remove caiman.base.movies.online_NMF() which is unused (and needs the spams library, now hard to package)

---
 caiman/base/movies.py | 63 -------------------------------------------
 1 file changed, 63 deletions(-)

diff --git a/caiman/base/movies.py b/caiman/base/movies.py
index 4994020b4..a73f4d412 100644
--- a/caiman/base/movies.py
+++ b/caiman/base/movies.py
@@ -673,69 +673,6 @@ def NonnegativeMatrixFactorization(self,

         return space_components, time_components

-    def online_NMF(self,
-                   n_components: int = 30,
-                   method: str = 'nnsc',
-                   lambda1: int = 100,
-                   iterations: int = -5,
-                   model=None,
-                   **kwargs) -> tuple[np.ndarray, np.ndarray]:
-        """ Method performing online matrix factorization and using the spams
-
-        (http://spams-devel.gforge.inria.fr/doc-python/html/index.html) package from Inria.
-        Implements bith the nmf and nnsc methods
-
-        Args:
-            n_components: int
-
-            method: 'nnsc' or 'nmf' (see http://spams-devel.gforge.inria.fr/doc-python/html/index.html)
-
-            lambda1: see http://spams-devel.gforge.inria.fr/doc-python/html/index.html
-
-            iterations: see http://spams-devel.gforge.inria.fr/doc-python/html/index.html
-
-            batchsize: see http://spams-devel.gforge.inria.fr/doc-python/html/index.html
-
-            model: see http://spams-devel.gforge.inria.fr/doc-python/html/index.html
-
-            **kwargs: more arguments to be passed to nmf or nnsc
-
-        Returns:
-            time_comps
-
-            space_comps
-        """
-        try:
-            import spams   # XXX consider moving this to the head of the file
-        except:
-            logging.error("You need to install the SPAMS package")
-            raise
-
-        T, d1, d2 = np.shape(self)
-        d = d1 * d2
-        X = np.asfortranarray(np.reshape(self, [T, d], order='F'))
-
-        if method == 'nmf':
-            (time_comps, V) = spams.nmf(X, return_lasso=True, K=n_components, numThreads=4, iter=iterations, **kwargs)
-
-        elif method == 'nnsc':
-            (time_comps, V) = spams.nnsc(X,
-                                         return_lasso=True,
-                                         K=n_components,
-                                         lambda1=lambda1,
-                                         iter=iterations,
-                                         model=model,
-                                         **kwargs)
-        else:
-            raise Exception('Method unknown')
-
-        space_comps = []
-
-        for _, mm in enumerate(V):
-            space_comps.append(np.reshape(mm.todense(), (d1, d2), order='F'))
-
-        return time_comps, np.array(space_comps)
-
     def IPCA(self, components: int = 50, batch: int = 1000) -> tuple[np.ndarray, np.ndarray, np.ndarray]:
         """ Iterative Principal Component analysis, see sklearn.decomposition.incremental_pca

From 887e99e2649771d877b1c9f2d4264c318dcbf665 Mon Sep 17 00:00:00 2001
From: Pat Gunn
Date: Wed, 1 May 2024 15:57:53 -0400
Subject: [PATCH 83/86] caiman.behavior.extract_components(): remove method_factorization of dict_learn (which needed spams)

---
 caiman/behavior/behavior.py | 23 +++++++----------------
 1 file changed, 7 insertions(+), 16 deletions(-)

diff --git a/caiman/behavior/behavior.py b/caiman/behavior/behavior.py
index 04dfece82..9863f5603 100644
--- a/caiman/behavior/behavior.py
+++ b/caiman/behavior/behavior.py
@@ -37,6 +37,10 @@ def select_roi(img: np.ndarray, n_rois: int = 1) -> list:
         each element is a mask considered an ROI
     """

+    # FIXME This function depends on particular builds of OpenCV
+    # and may be difficult to support moving forward; would be good to
+    # move this kind of code out of the core and find more portable ways
+    # to do it
     masks = []
     for _ in range(n_rois):
         fig = plt.figure()
@@ -325,25 +329,12 @@ def extract_components(mov_tot,

     if method_factorization == 'nmf':
         nmf = NMF(n_components=n_components, **kwargs)
         time_trace = nmf.fit_transform(newm)
         spatial_filter = nmf.components_
         spatial_filter = np.concatenate([np.reshape(sp, (d1, d2))[np.newaxis, :, :] for sp in spatial_filter],
                                         axis=0)
-
-    elif method_factorization == 'dict_learn':
-        import spams
-        newm = np.asfortranarray(newm, dtype=np.float32)
-        time_trace = spams.trainDL(newm, K=n_components, mode=0, lambda1=1, posAlpha=True, iter=max_iter_DL)
-
-        spatial_filter = spams.lasso(newm,
-                                     D=time_trace,
-                                     return_reg_path=False,
-                                     lambda1=0.01,
-                                     mode=spams.spams_wrap.PENALTY,
-                                     pos=True)
-
-        spatial_filter = np.concatenate([np.reshape(sp, (d1, d2))[np.newaxis, :, :] for sp in spatial_filter.toarray()],
-                                        axis=0)
+    else:
+        # Caiman used to support a method_factorization called dict_learn, implemented using spams.lasso
+        raise Exception(f"Unknown or unsupported method_factorization: {method_factorization}")

     time_trace = [np.reshape(ttr, (c, T)).T for ttr in time_trace.T]

From b45633ad6f4d612e8daf5bb8d81837216846b988 Mon Sep 17 00:00:00 2001
From: Pat Gunn
Date: Wed, 1 May 2024 16:55:45 -0400
Subject: [PATCH 84/86] Add note about commandline demos to the main README

---
 README.md | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/README.md b/README.md
index 7c850c993..36d57acc6 100644
--- a/README.md
+++ b/README.md
@@ -62,6 +62,8 @@ The main use cases and notebooks are listed in the following table:

 A comprehensive list of references, where you can find detailed discussion of the methods and their development, can be found [here](https://caiman.readthedocs.io/en/master/CaImAn_features_and_references.html#references).

+# CLI demos
+Caiman also provides command-line demos, similar to the notebooks, that demonstrate how to use the codebase outside of Jupyter. They take their configuration primarily from JSON files, which you will want to adjust for your data and its specifics, and they should be reasonably easy to modify if they don't already do what you need (particularly around saving results; a standard output format for Caiman is planned for a future release). To run them, activate your environment and find the demos in demos/general under your caiman data directory; run them like any other Python application, or edit them with your code editor. Each demo comes with a JSON configuration file that you can customise, and the README in the demos directory covers this in more detail.

 # How to get help
 - [Online documentation](https://caiman.readthedocs.io/en/latest/) contains a lot of general information about Caiman, the parameters, how to interpret its outputs, and more.

From 8e9ad9fab55d8a4c604057fd7b8136ecf3c8319c Mon Sep 17 00:00:00 2001
From: Pat Gunn
Date: Wed, 1 May 2024 18:00:55 -0400
Subject: [PATCH 85/86] caimanmanager: don't copy __pycache__ dirs out of the library bundle

---
 caiman/caimanmanager.py | 10 +++++-----
 1 file changed, 5 insertions(+), 5 deletions(-)

diff --git a/caiman/caimanmanager.py b/caiman/caimanmanager.py
index 457cb5082..c2d57b8c0 100755
--- a/caiman/caimanmanager.py
+++ b/caiman/caimanmanager.py
@@ -1,7 +1,6 @@
 #!/usr/bin/env python

 import argparse
-import distutils.dir_util
 import filecmp
 import glob
 import os
@@ -53,20 +52,21 @@ def do_install_to(targdir: str, inplace: bool = False, force: bool = False) -> None:
     global sourcedir_base
+    ignore_pycache = shutil.ignore_patterns('__pycache__')
     if os.path.isdir(targdir) and not force:
         raise Exception(targdir + " already exists. You may move it out of the way, remove it, or use --force")
     if not inplace: # In this case we rely on what setup.py put in the share directory for the module
         if not force:
-            shutil.copytree(sourcedir_base, targdir)
+            shutil.copytree(sourcedir_base, targdir, ignore=ignore_pycache)
         else:
-            distutils.dir_util.copy_tree(sourcedir_base, targdir)
+            shutil.copytree(sourcedir_base, targdir, ignore=ignore_pycache, dirs_exist_ok=True)
         os.makedirs(os.path.join(targdir, 'temp' ), exist_ok=True)
     else: # here we recreate the other logical path. Maintenance concern: Keep these reasonably in sync with what's in setup.py
         for copydir in extra_dirs:
             if not force:
-                shutil.copytree(copydir, os.path.join(targdir, copydir))
+                shutil.copytree(copydir, os.path.join(targdir, copydir), ignore=ignore_pycache)
             else:
-                distutils.dir_util.copy_tree(copydir, os.path.join(targdir, copydir))
+                shutil.copytree(copydir, os.path.join(targdir, copydir), ignore=ignore_pycache, dirs_exist_ok=True)
         os.makedirs(os.path.join(targdir, 'example_movies'), exist_ok=True)
         os.makedirs(os.path.join(targdir, 'temp' ), exist_ok=True)
         for stdmovie in standard_movies:

From 63c19326acfdecd0845cb12264a60cd8acb02179 Mon Sep 17 00:00:00 2001
From: Pat Gunn
Date: Fri, 3 May 2024 11:43:56 -0400
Subject: [PATCH 86/86] VERSION: next release will be 1.11.0

---
 VERSION | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/VERSION b/VERSION
index 2ee4474a1..1cac385c6 100644
--- a/VERSION
+++ b/VERSION
@@ -1 +1 @@
-1.10.10
+1.11.0
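(Aside on patch 85: the shutil.copytree calls it switches to can be sketched in isolation as below; 'source_dir' and 'target_dir' are placeholders, and dirs_exist_ok requires Python 3.8 or newer.)

```python
import shutil

# Skip compiled-bytecode caches when copying a data directory.
ignore_pycache = shutil.ignore_patterns('__pycache__')

# Fresh install: the destination must not exist yet.
shutil.copytree('source_dir', 'target_dir', ignore=ignore_pycache)

# Forced install: merge into the (now existing) destination. dirs_exist_ok
# replaces the merging behaviour of the removed distutils.dir_util.copy_tree.
shutil.copytree('source_dir', 'target_dir', ignore=ignore_pycache, dirs_exist_ok=True)
```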