Merge pull request #240 from scottclowe/enh_extra-verbosity-level
ENH: New verbosity=2: argument description and progress bars
scottclowe committed Jul 12, 2021
2 parents 4179f2a + 7c4bcbc commit de1dfc0
Showing 2 changed files with 41 additions and 24 deletions.
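
For context, a minimal usage sketch (not part of the diff) of the verbosity levels this commit rearranges. The paths are hypothetical placeholders; core.Experiment, its verbosity argument, and separate() are the API exercised by fissa/tests/test_core.py below. Per those tests, verbosity=1 shows only tqdm progress bars, the new verbosity=2 additionally prints a description of the experiment's arguments (e.g. "nRegions: ", "method: "), and verbosity>=3 switches to per-trial log messages instead of progress bars.

# Usage sketch only -- hypothetical data paths; behaviour per level is taken
# from the tests added in this commit.
from fissa import core

exp = core.Experiment(
    "exampleData/20150529",   # hypothetical folder of trial TIFF images
    "exampleData/rois.zip",   # hypothetical zip of ImageJ ROIs
    verbosity=2,              # new level: argument description + progress bars
)
exp.separate()
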
16 changes: 8 additions & 8 deletions fissa/core.py
@@ -936,13 +936,13 @@ def separation_prep(self, redo=False):
)
msg += "\n Images:"
for image in self.images:
if self.verbosity >= 3 or isinstance(image, basestring):
if self.verbosity >= 4 or isinstance(image, basestring):
msg += "\n {}".format(image)
else:
msg += "\n {}".format(image.__class__)
msg += "\n ROI sets:"
for roiset in self.rois:
if self.verbosity >= 3 or isinstance(roiset, basestring):
if self.verbosity >= 4 or isinstance(roiset, basestring):
msg += "\n {}".format(roiset)
else:
msg += "\n {}".format(roiset.__class__)
@@ -957,12 +957,12 @@ def separation_prep(self, redo=False):
nRegions=self.nRegions,
expansion=self.expansion,
datahandler=self.datahandler,
verbosity=self.verbosity - 1,
verbosity=self.verbosity - 2,
total=n_trial,
)

# check whether we should show progress bars
disable_progressbars = self.verbosity != 1
disable_progressbars = self.verbosity <= 0 or 3 <= self.verbosity

# Check how many workers to spawn.
# Map the behaviour of ncores=None to one job per CPU core, like for
@@ -984,7 +984,7 @@ def separation_prep(self, redo=False):
else:
# Use multiprocessing
outputs = Parallel(
n_jobs=n_jobs, backend="threading", verbose=max(0, self.verbosity - 4)
n_jobs=n_jobs, backend="threading", verbose=max(0, self.verbosity - 5)
)(
delayed(_extract_cfg)(image, rois, label=i)
for i, (image, rois) in tqdm(
@@ -1154,12 +1154,12 @@ def separate(self, redo_prep=False, redo_sep=False):
tol=self.tol,
max_tries=self.max_tries,
method=self.method,
verbosity=self.verbosity - 1,
verbosity=self.verbosity - 2,
total=n_roi,
)

# check whether we should show progress bars
disable_progressbars = self.verbosity != 1
disable_progressbars = self.verbosity <= 0 or 3 <= self.verbosity

# Check how many workers to spawn.
# Map the behaviour of ncores=None to one job per CPU core, like for
@@ -1182,7 +1182,7 @@ def separate(self, redo_prep=False, redo_sep=False):
else:
# Use multiprocessing
outputs = Parallel(
n_jobs=n_jobs, backend="threading", verbose=max(0, self.verbosity - 4)
n_jobs=n_jobs, backend="threading", verbose=max(0, self.verbosity - 5)
)(
delayed(_separate_cfg)(X, label=i)
for i, X in tqdm(
49 changes: 33 additions & 16 deletions fissa/tests/test_core.py
@@ -375,6 +375,23 @@ def test_verbosity_1(self):
self.assertTrue("{0}/{0}".format(len(self.image_names)) in capture_post.err)
self.assertTrue("{0}/{0}".format(len(self.roi_paths)) in capture_post.err)
self.assertFalse("Doing region growing and data extraction" in capture_post.out)
self.assertFalse("nRegions: " in capture_post.out)
self.assertFalse("method: " in capture_post.out)
self.compare_output(exp)

def test_verbosity_2(self):
capture_pre = self.capsys.readouterr() # Clear stdout
exp = core.Experiment(self.images_dir, self.roi_zip_path, verbosity=2)
exp.separate()
capture_post = self.recapsys(capture_pre) # Capture and then re-output
self.assertTrue("Finished separating" in capture_post.out)
self.assertTrue("Extracting traces: 100%" in capture_post.err)
self.assertTrue("Separating data: 100%" in capture_post.err)
self.assertTrue("{0}/{0}".format(len(self.image_names)) in capture_post.err)
self.assertTrue("{0}/{0}".format(len(self.roi_paths)) in capture_post.err)
self.assertTrue("Doing region growing and data extraction" in capture_post.out)
self.assertTrue("nRegions: " in capture_post.out)
self.assertTrue("method: " in capture_post.out)
self.assertFalse(
"[Extraction 1/{}]".format(len(self.image_names)) in capture_post.out
)
@@ -383,14 +400,14 @@ def test_verbosity_1(self):
)
self.compare_output(exp)

def test_verbosity_2(self):
def test_verbosity_3(self):
capture_pre = self.capsys.readouterr() # Clear stdout
exp = core.Experiment(
self.images_dir,
self.roi_zip_path,
ncores_preparation=1,
ncores_separation=1,
verbosity=2,
verbosity=3,
)
exp.separate()
capture_post = self.recapsys(capture_pre) # Capture and then re-output
@@ -405,14 +422,14 @@ def test_verbosity_2(self):
self.assertFalse("] Signal separation finished" in capture_post.out)
self.compare_output(exp)

def test_verbosity_3(self):
def test_verbosity_4(self):
capture_pre = self.capsys.readouterr() # Clear stdout
exp = core.Experiment(
self.images_dir,
self.roi_zip_path,
ncores_preparation=1,
ncores_separation=1,
verbosity=3,
verbosity=4,
)
exp.separate()
capture_post = self.recapsys(capture_pre) # Capture and then re-outputs
@@ -422,54 +439,54 @@ def test_verbosity_3(self):
self.assertFalse("NMF converged after" in capture_post.out)
self.compare_output(exp)

def test_verbosity_4(self):
def test_verbosity_5(self):
capture_pre = self.capsys.readouterr() # Clear stdout
exp = core.Experiment(
self.images_dir,
self.roi_zip_path,
ncores_preparation=1,
ncores_separation=1,
verbosity=4,
verbosity=5,
)
exp.separate()
capture_post = self.recapsys(capture_pre) # Capture and then re-outputs
self.assertTrue("Loading image" in capture_post.out)
self.assertTrue("NMF converged after" in capture_post.out)
self.compare_output(exp)

def test_verbosity_2_imagesloaded(self):
def test_verbosity_3_imagesloaded(self):
# Load images as np.ndarrays
image_paths = [os.path.join(self.images_dir, img) for img in self.image_names]
datahandler = extraction.DataHandlerTifffile()
images = [datahandler.image2array(pth) for pth in image_paths]
# Run FISSA on pre-loaded images with verbosity=2
# Run FISSA on pre-loaded images
capture_pre = self.capsys.readouterr() # Clear stdout
exp = core.Experiment(
images,
self.roi_zip_path,
ncores_preparation=1,
ncores_separation=1,
verbosity=2,
verbosity=3,
)
exp.separation_prep()
capture_post = self.recapsys(capture_pre) # Capture and then re-output
# Check FISSA lists the arguments are arrays
self.assertTrue("numpy.ndarray" in capture_post.out)
self.compare_output(exp, separated=False)

def test_verbosity_3_imagesloaded(self):
def test_verbosity_4_imagesloaded(self):
# Load images as np.ndarrays
image_paths = [os.path.join(self.images_dir, img) for img in self.image_names]
datahandler = extraction.DataHandlerTifffile()
images = [datahandler.image2array(pth) for pth in image_paths]
# Run FISSA on pre-loaded images with verbosity=2
# Run FISSA on pre-loaded images
capture_pre = self.capsys.readouterr() # Clear stdout
exp = core.Experiment(
images,
self.roi_zip_path,
ncores_preparation=1,
ncores_separation=1,
verbosity=3,
verbosity=4,
)
exp.separation_prep()
capture_post = self.recapsys(capture_pre) # Capture and then re-output
@@ -652,7 +669,7 @@ def test_prepfirst(self):
def test_redo(self):
"""Test whether experiment redoes work when requested."""
exp = core.Experiment(
self.images_dir, self.roi_zip_path, self.output_dir, verbosity=2
self.images_dir, self.roi_zip_path, self.output_dir, verbosity=3
)
capture_pre = self.capsys.readouterr() # Clear stdout
exp.separate()
@@ -705,7 +722,7 @@ def test_load_cache_piecemeal(self):
image_path = self.images_dir
roi_path = self.roi_zip_path
# Run an experiment to generate the cache
exp1 = core.Experiment(image_path, roi_path, self.output_dir, verbosity=2)
exp1 = core.Experiment(image_path, roi_path, self.output_dir, verbosity=3)
exp1.separate()
# Make a new experiment we will test; this should load the cache
capture_pre = self.capsys.readouterr() # Clear stdout
@@ -736,7 +753,7 @@ def test_load_cached_prep(self):
exp1.separation_prep()
# Make a new experiment we will test; this should load the cache
capture_pre = self.capsys.readouterr() # Clear stdout
exp = core.Experiment(image_path, roi_path, self.output_dir, verbosity=2)
exp = core.Experiment(image_path, roi_path, self.output_dir, verbosity=3)
capture_post = self.recapsys(capture_pre) # Capture and then re-output
self.assertTrue("Reloading data" in capture_post.out)
# Ensure previous cache is loaded again when we run separation_prep
@@ -898,7 +915,7 @@ def test_badprepcache(self):
roi_path = self.roi_zip_path
if not os.path.exists(self.output_dir):
os.makedirs(self.output_dir)
exp = core.Experiment(image_path, roi_path, self.output_dir, verbosity=2)
exp = core.Experiment(image_path, roi_path, self.output_dir, verbosity=3)
# Make a bad cache
with open(os.path.join(self.output_dir, "preparation.npz"), "w") as f:
f.write("badfilecontents")
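
For reference, a sketch (not part of the commit) of how the Experiment-level verbosity now maps onto the behaviours changed in fissa/core.py above. The helper function is hypothetical, but each threshold is read directly from the hunks in this diff.

# Hypothetical helper summarising the verbosity thresholds touched by this commit.
def verbosity_effects(verbosity):
    return {
        # tqdm progress bars are disabled outside verbosity levels 1 and 2
        "progress_bars": not (verbosity <= 0 or 3 <= verbosity),
        # pre-loaded images/ROI sets are listed in full only at verbosity >= 4;
        # below that only their class (e.g. numpy.ndarray) is reported
        "list_loaded_inputs_in_full": verbosity >= 4,
        # per-trial worker configs now receive verbosity - 2 (previously - 1)
        "worker_verbosity": verbosity - 2,
        # joblib.Parallel's own reporting now starts at verbosity 6 (previously 5)
        "joblib_verbose": max(0, verbosity - 5),
    }

for level in range(6):
    print(level, verbosity_effects(level))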
