diff --git a/napari/layers/base/base.py b/napari/layers/base/base.py
index 54d54045c8f..cd3d36dc9d7 100644
--- a/napari/layers/base/base.py
+++ b/napari/layers/base/base.py
@@ -869,9 +869,7 @@ def thumbnail(self, thumbnail):
         if 0 in thumbnail.shape:
             thumbnail = np.zeros(self._thumbnail_shape, dtype=np.uint8)
         if thumbnail.dtype != np.uint8:
-            with warnings.catch_warnings():
-                warnings.simplefilter("ignore")
-                thumbnail = convert_to_uint8(thumbnail)
+            thumbnail = convert_to_uint8(thumbnail)
 
         padding_needed = np.subtract(self._thumbnail_shape, thumbnail.shape)
         pad_amounts = [(p // 2, (p + 1) // 2) for p in padding_needed]
diff --git a/napari/layers/image/image.py b/napari/layers/image/image.py
index bc69f117304..3806628b481 100644
--- a/napari/layers/image/image.py
+++ b/napari/layers/image/image.py
@@ -901,12 +901,9 @@ def _update_thumbnail(self):
         )
         zoom_factor = tuple(new_shape / image.shape[:2])
         if self.rgb:
-            # warning filter can be removed with scipy 1.4
-            with warnings.catch_warnings():
-                warnings.simplefilter("ignore")
-                downsampled = ndi.zoom(
-                    image, zoom_factor + (1,), prefilter=False, order=0
-                )
+            downsampled = ndi.zoom(
+                image, zoom_factor + (1,), prefilter=False, order=0
+            )
             if image.shape[2] == 4:  # image is RGBA
                 colormapped = np.copy(downsampled)
                 colormapped[..., 3] = downsampled[..., 3] * self.opacity
@@ -923,12 +920,9 @@ def _update_thumbnail(self):
                     alpha = np.full(downsampled.shape[:2] + (1,), self.opacity)
                 colormapped = np.concatenate([downsampled, alpha], axis=2)
         else:
-            # warning filter can be removed with scipy 1.4
-            with warnings.catch_warnings():
-                warnings.simplefilter("ignore")
-                downsampled = ndi.zoom(
-                    image, zoom_factor, prefilter=False, order=0
-                )
+            downsampled = ndi.zoom(
+                image, zoom_factor, prefilter=False, order=0
+            )
             low, high = self.contrast_limits
             downsampled = np.clip(downsampled, low, high)
             color_range = high - low
diff --git a/napari/layers/utils/layer_utils.py b/napari/layers/utils/layer_utils.py
index 4fb62832324..2935854abe3 100644
--- a/napari/layers/utils/layer_utils.py
+++ b/napari/layers/utils/layer_utils.py
@@ -278,18 +278,20 @@ def segment_normal(a, b, p=(0, 0, 1)):
 
 def convert_to_uint8(data: np.ndarray) -> np.ndarray:
     """
-    Convert array content to uint8.
+    Convert array content to uint8, always returning a copy.
 
-    If all negative values are changed on 0.
+    Based on skimage.util.dtype._convert but limited to an output type uint8,
+    so should be equivalent to skimage.util.dtype.img_as_ubyte.
 
-    If values are integer and bellow 256 it is simple casting otherwise maximum value for this data type is picked
-    and values are scaled by 255/maximum type value.
+    Negative values are clipped to 0.
 
-    Binary images ar converted to [0,255] images.
+    If values are integers and below 256, this simply casts.
+    Otherwise the maximum value for the input data type is determined and
+    output values are proportionally scaled by this value.
 
-    float images are multiply by 255 and then casted to uint8.
+    Binary images are converted so that False -> 0 and True -> 255.
 
-    Based on skimage.util.dtype.convert but limited to output type uint8
+    Float images are multiplied by 255 and then cast to uint8.
     """
     out_dtype = np.dtype(np.uint8)
     out_max = np.iinfo(out_dtype).max
@@ -302,6 +304,7 @@ def convert_to_uint8(data: np.ndarray) -> np.ndarray:
         image_out = np.multiply(data, out_max, dtype=data.dtype)
         np.rint(image_out, out=image_out)
         np.clip(image_out, 0, out_max, out=image_out)
+        image_out = np.nan_to_num(image_out, copy=False)
        return image_out.astype(out_dtype)
    if in_kind in "ui":