documentation/docs/guide/backproject.md (1 addition, 1 deletion)
@@ -38,7 +38,7 @@ The offset of the minimum bounding box is stored in the output tiff's description
 - 📁 `Slice Options File` - Path to the `-slice-options.json` file which includes the information needed for backprojection.
 - 📁 `Output File Folder` - The folder to save all the resulting files into.
 - `Output File Name` - Base name for all output files.
-- `Output MIP Level` - The MIP level to output the backprojection in (essentially an upsample option). Use this if you downsampled in the slicing step.
+- `Output MIP Level` - The MIP level to output the backprojection in (essentially an upsample option; see the sketch after this diff). Use this if you downsampled in the slicing step; MIP changes that would downsample the output are currently ignored.
 - `Upsample Order` - The interpolation order Ouroboros uses to interpolate values from a lower MIP level. If you check the binary option, feel free to set this to 0.
 - `Backprojection Compression` - The compression option to use for the backprojected tiff(s). Recommended options: `none`, `zlib`, `zstd`.
 - `Output Single File` - Whether to output one tiff stack file or a folder of files.
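As a rough intuition for the `Output MIP Level` option (a sketch assuming the conventional power-of-two MIP pyramid; `upsample_factor` is a hypothetical helper, not part of Ouroboros):

def upsample_factor(sliced_mip: int, output_mip: int) -> float:
    # Each MIP level halves X/Y resolution, so outputting at a lower MIP
    # than the slicing MIP upsamples by 2 ** (sliced_mip - output_mip) per axis.
    if output_mip > sliced_mip:
        return 1.0  # downsampling requests are currently ignored
    return float(2 ** (sliced_mip - output_mip))

assert upsample_factor(2, 0) == 4.0  # sliced at MIP 2, output at MIP 0: 4x in X and Y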
python/ouroboros/helpers/files.py (18 additions, 4 deletions)
@@ -5,6 +5,7 @@
 import numpy as np
 from numpy.typing import ArrayLike
 from pathlib import Path
+import cv2
 from tifffile import TiffWriter, TiffFile
 import time
 
@@ -218,12 +219,25 @@ def volume_from_intermediates(path: Path, shape: DataShape, thread_count: int =
     return vol[0]
 
 
-def write_conv_vol(writer: callable, source_path, shape, dtype, *args, **kwargs):
+def write_conv_vol(writer: callable, source_path, shape, dtype, scaling, target_folder, index, interpolation):
     perf = {}
     start = time.perf_counter()
     vol = volume_from_intermediates(source_path, shape)
     perf["Merge Volume"] = time.perf_counter() - start
-    start = time.perf_counter()
-    writer(*args, data=np_convert(dtype, vol.reshape(shape.Y, shape.X), normalize=False, safe_bool=True), **kwargs)
-    perf["Write Merged"] = time.perf_counter() - start
+    if scaling is not None:
+        start = time.perf_counter()
+        # cv2.resize is 2D-only, but we are resizing a single 2D slice here anyway (fx scales X, fy scales Y).
+        new_volume = cv2.resize(
+            np_convert(dtype, vol.reshape(shape.Y, shape.X), normalize=False, safe_bool=True),
+            None, fx=scaling[2], fy=scaling[1], interpolation=interpolation)
+        perf["Zoom"] = time.perf_counter() - start
+        start = time.perf_counter()
+        for i in range(round(scaling[0])):
+            writer(target_folder.joinpath(f"{(index * round(scaling[0]) + i):05}.tif"), data=new_volume)
+        perf["Write Merged"] = time.perf_counter() - start
+    else:
+        start = time.perf_counter()
+        writer(target_folder.joinpath(f"{index:05}.tif"),
+               data=np_convert(dtype, vol.reshape(shape.Y, shape.X), normalize=False, safe_bool=True))
+        perf["Write Merged"] = time.perf_counter() - start
     return perf
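For intuition, the scaling branch of `write_conv_vol` boils down to this standalone sketch (my own illustration, assuming `scaling` holds (Z, Y, X) upsample factors; not Ouroboros code):

import cv2
import numpy as np

scaling = (2.0, 2.0, 2.0)  # assumed (Z, Y, X) factors for one MIP step
slice_2d = np.zeros((512, 512), np.uint16)  # stand-in for a merged slice

# fx is cv2's horizontal (X) factor, fy its vertical (Y) factor.
upsampled = cv2.resize(slice_2d, None, fx=scaling[2], fy=scaling[1],
                       interpolation=cv2.INTER_LINEAR)
# Each input slice is written round(scaling[0]) times to fill out the Z axis.
stack = [upsampled] * round(scaling[0])
assert upsampled.shape == (1024, 1024) and len(stack) == 2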
python/ouroboros/pipeline/backproject_pipeline.py (26 additions, 262 deletions)
@@ -12,16 +12,12 @@
 import traceback
 
 import numpy as np
-import scipy
-import scipy.ndimage
 
 from ouroboros.helpers.memory_usage import (
     calculate_chunk_size,
     calculate_gigabytes_from_dimensions
 )
 from ouroboros.helpers.slice import (  # noqa: F401
-    detect_color_channels_shape,
-    make_volume_binary,
     FrontProjStack,
     backproject_box,
     BackProjectIter
@@ -32,11 +28,8 @@
 from ouroboros.helpers.options import BackprojectOptions
 from ouroboros.helpers.files import (
     format_backproject_resave_volume,
-    format_tiff_name,
     get_sorted_tif_files,
     join_path,
-    num_digits_for_n_files,
-    parse_tiff_name,
     generate_tiff_write,
     write_conv_vol,
     write_small_intermediate
@@ -163,10 +156,26 @@ def _process(self, input_data: any) -> tuple[any, None] | tuple[None, any]:
             micron_resolution=volume_cache.get_resolution_um(),
             backprojection_offset=bp_offset)
 
+        if pipeline_input.slice_options.output_mip_level != config.output_mip_level:
+            scaling_factors, _ = calculate_scaling_factors(
+                pipeline_input.source_url,
+                pipeline_input.slice_options.output_mip_level,
+                config.output_mip_level,
+                write_shape
+            )
+        else:
+            scaling_factors = None
+        print(f"SF: {scaling_factors}")
+
+        # Allocate procs equally between BP math and writing if we're rescaling, otherwise 3-1 favoring
+        # the BP calculation.
+        exec_procs = self.num_processes // 4 * (2 if scaling_factors is not None else 3)
+        write_procs = self.num_processes // 4 * (2 if scaling_factors is not None else 1)
+
         # Process each bounding box in parallel, writing the results to the backprojected volume
         try:
-            with (concurrent.futures.ProcessPoolExecutor((self.num_processes // 4) * 3) as executor,
-                  concurrent.futures.ProcessPoolExecutor(self.num_processes // 4) as write_executor):
+            with (concurrent.futures.ProcessPoolExecutor(exec_procs) as executor,
+                  concurrent.futures.ProcessPoolExecutor(write_procs) as write_executor):
                 bp_futures = []
                 write_futures = []
 
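Numerically, the executor split above works out as in this restatement (`split_procs` is a hypothetical name; it assumes `num_processes` is a multiple of 4, as the integer division implies):

def split_procs(num_processes: int, rescaling: bool) -> tuple[int, int]:
    # Returns (backprojection workers, writer workers).
    quarter = num_processes // 4
    return (quarter * 2, quarter * 2) if rescaling else (quarter * 3, quarter)

assert split_procs(16, True) == (8, 8)    # rescaling: even 2-2 split
assert split_procs(16, False) == (12, 4)  # no rescaling: 3-1 split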
@@ -222,10 +231,14 @@ def note_written(write_future):
                 for index in write:
                     write_futures.append(write_executor.submit(
                         write_conv_vol,
-                        tif_write(tifffile.imwrite), i_path.joinpath(f"i_{index:05}"),
+                        tif_write(tifffile.imwrite),
+                        i_path.joinpath(f"i_{index:05}"),
                         ImgSlice(*write_shape[1:]),
                         bool if config.make_backprojection_binary else np.uint16,
-                        folder_path.joinpath(f"{index:05}.tif")
+                        scaling_factors,
+                        folder_path,
+                        index,
+                        config.upsample_order
                     ))
                     write_futures[-1].add_done_callback(note_written)
 
@@ -249,45 +262,15 @@ def note_written(write_future):
             for fname in get_sorted_tif_files(folder_path):
                 writer(tifffile.imread(folder_path.joinpath(fname)))
 
-        # Rescale the backprojected volume to the output mip level
-        if pipeline_input.slice_options.output_mip_level != config.output_mip_level:
-            output_name = f"{folder_path}-temp"
-
-            error = rescale_mip_volume(
-                pipeline_input.source_url,
-                pipeline_input.slice_options.output_mip_level,
-                config.output_mip_level,
-                single_path=(None if config.make_single_file is False else folder_path + ".tif"),
-                folder_path=(folder_path if config.make_single_file is False else None),
-                output_name=output_name,
-                compression=config.backprojection_compression,
-                max_ram_gb=config.max_ram_gb,
-                order=config.upsample_order,
-                binary=config.make_backprojection_binary,
-            )
-
-            if error is not None:
-                return error
-
-            # Remove the original backprojected volume
-            if config.make_single_file:
-                os.remove(folder_path + ".tif")
-            else:
-                shutil.rmtree(folder_path)
-
-            # Rename the rescaled volume
-            if config.make_single_file:
-                os.rename(output_name + ".tif", folder_path + ".tif")
-            else:
-                os.rename(output_name, folder_path)
-
         # Update the pipeline input with the output file path
         pipeline_input.backprojected_folder_path = folder_path
 
         self.add_timing("export", time.perf_counter() - start)
 
         if config.make_single_file:
             shutil.rmtree(folder_path)
+            shutil.rmtree(Path(config.output_file_folder,
+                               f"{config.output_file_name}_t_{'_'.join(map(str, full_bounding_box.get_min(np.uint32)))}"))
 
         return None
 
@@ -387,225 +370,6 @@ def write_z(i, z_slice):
     return durations, index, z_stack + offset[0]
 
 
-def rescale_mip_volume(
-    source_url: str,
-    current_mip: int,
-    target_mip: int,
-    single_path: str = None,
-    folder_path: str = None,
-    output_name: str = "out",
-    compression: str = None,
-    max_ram_gb: int = 0,
-    order: int = 2,
-    binary: bool = False,
-) -> str | None:
-    """
-    Rescale the volume to the mip level.
-
-    Parameters
-    ----------
-    source_url : str
-        The URL of the volume.
-    current_mip : int
-        The current mip level of the volume.
-    target_mip : int
-        The target mip level of the volume.
-    single_path : str
-        The path to the single tif file.
-    folder_path : str
-        The path to the folder containing the tif files.
-    output_name : str
-        The path to the output.
-    compression : str, optional
-        The compression to use for the resulting tif file.
-        The default is None.
-    max_ram_gb : int, optional
-        The maximum amount of RAM to use in GB.
-        The default is 0.
-    order : int, optional
-        The order of the interpolation.
-        The default is 2.
-    binary : bool, optional
-        Whether to make the backprojected volume binary.
-
-    Returns
-    -------
-    str | None
-        Error message if an error occurred.
-    """
-
-    if single_path is None and folder_path is None:
-        return "Either single_path or folder_path must be provided."
-
-    if target_mip == current_mip:
-        return None
-
-    if single_path is not None:
-        return rescale_single_tif(
-            source_url,
-            current_mip,
-            target_mip,
-            single_path,
-            compression=compression,
-            file_name=output_name + ".tif",
-            max_ram_gb=max_ram_gb,
-            order=order,
-            binary=binary,
-        )
-
-    return rescale_folder_tif(
-        source_url,
-        current_mip,
-        target_mip,
-        folder_path,
-        compression=compression,
-        folder_name=output_name,
-        max_ram_gb=max_ram_gb,
-        order=order,
-        binary=binary,
-    )
-
-
-def rescale_single_tif(
-    source_url: str,
-    current_mip: int,
-    target_mip: int,
-    single_path: str,
-    file_name: str = "out.tif",
-    compression: str = None,
-    max_ram_gb: int = 0,
-    order: int = 1,
-    binary: bool = False,
-) -> str | None:
-    with tifffile.TiffFile(single_path) as tif:
-        tif_shape = (len(tif.pages),) + tif.pages[0].shape
-
-        scaling_factors, _ = calculate_scaling_factors(
-            source_url, current_mip, target_mip, tif_shape
-        )
-
-        # Calculate the output tiff shape
-        output_shape = tuple(
-            int(tif_shape[i] * scaling_factors[i]) for i in range(len(tif_shape))
-        )
-
-        # Note: The chunk size is divided by the scaling factor to account for the
-        # number of slices that need to be loaded to produce chunk_size slices in the output volume
-        chunk_size = max(
-            int(
-                calculate_chunk_size(
-                    output_shape, tif.pages[0].dtype, max_ram_gb=max_ram_gb
-                )
-                / scaling_factors[0]
-            ),
-            1,
-        )
-
-        with tifffile.TiffWriter(file_name) as output_volume:
-            for i in range(0, tif_shape[0], chunk_size):
-                # Stack the tif layers along the first axis (chunk_size)
-                tif_layer = np.array(
-                    [
-                        tif.pages[j].asarray()
-                        for j in range(i, min(i + chunk_size, tif_shape[0]))
-                    ]
-                )
-
-                layers = scipy.ndimage.zoom(tif_layer, scaling_factors, order=order)
-
-                if binary:
-                    layers = make_volume_binary(layers)
-
-                size = layers.shape[0]
-
-                # Save the layers to the tif file
-                for j in range(size):
-                    output_volume.write(
-                        layers[j],
-                        contiguous=compression is None or compression == "none",
-                        compression=compression,
-                        software="ouroboros",
-                    )
-
-    return None
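A worked example of the chunk-size note in `rescale_single_tif` above (numbers illustrative): if the RAM budget allows 64 output slices per chunk and the Z scaling factor is 4, only 16 input slices are loaded per iteration, because each input slice expands into 4 output slices.

output_chunk = 64  # output slices the RAM budget allows per chunk
z_scale = 4.0      # scaling_factors[0]
input_chunk = max(int(output_chunk / z_scale), 1)
assert input_chunk == 16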


-def rescale_folder_tif(
-    source_url: str,
-    current_mip: int,
-    target_mip: int,
-    folder_path: str,
-    folder_name: str = "out",
-    compression: str = None,
-    max_ram_gb: int = 0,
-    order: int = 1,
-    binary: bool = False,
-) -> str | None:
-    # Create output folder if it doesn't exist
-    output_folder = folder_name
-    os.makedirs(output_folder, exist_ok=True)
-
-    tifs = get_sorted_tif_files(folder_path)
-
-    if len(tifs) == 0:
-        return "No tif files found in the folder."
-
-    sample_tif = tifffile.imread(join_path(folder_path, tifs[0]))
-
-    # Determine the shape of the tif stack
-    new_shape = (len(tifs), *sample_tif.shape)
-
-    scaling_factors, resolution_factors = calculate_scaling_factors(
-        source_url, current_mip, target_mip, new_shape
-    )
-
-    # Note: The chunk size is divided by the scaling factor to account for the
-    # number of slices that need to be loaded to produce chunk_size slices in the output volume
-    chunk_size = max(
-        int(
-            calculate_chunk_size(new_shape, sample_tif.dtype, max_ram_gb=max_ram_gb)
-            / scaling_factors[0]
-        ),
-        1,
-    )
-
-    num_digits = num_digits_for_n_files(len(tifs))
-
-    first_index = parse_tiff_name(tifs[0])
-
-    output_index = int(first_index * resolution_factors[0])
-
-    # Resize the volume
-    for i in range(0, len(tifs), chunk_size):
-        # Stack the tif layers along the first axis (chunk_size)
-        tif = np.array(
-            [
-                tifffile.imread(join_path(folder_path, tifs[j]))
-                for j in range(i, min(i + chunk_size, len(tifs)))
-            ]
-        )
-
-        layers = scipy.ndimage.zoom(tif, scaling_factors, order=order)
-
-        if binary:
-            layers = make_volume_binary(layers)
-
-        size = layers.shape[0]
-
-        # Write the layers to new tif files
-        for j in range(size):
-            tifffile.imwrite(
-                join_path(output_folder, format_tiff_name(output_index, num_digits)),
-                layers[j],
-                contiguous=True if compression is None else False,
-                compression=compression,
-                software="ouroboros",
-            )
-            output_index += 1
-
-    return None
-
-
 def calculate_scaling_factors(
     source_url: str, current_mip: int, target_mip: int, tif_shape: tuple
 ) -> tuple[tuple, tuple]:
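For reference, a heavily simplified sketch of what `calculate_scaling_factors` plausibly returns for a power-of-two pyramid; the real implementation (elided above) derives the factors from the resolutions advertised at `source_url`, so treat every line below as an assumption:

def calculate_scaling_factors_sketch(current_mip: int, target_mip: int,
                                     tif_shape: tuple) -> tuple[tuple, tuple]:
    factor = 2.0 ** (current_mip - target_mip)  # > 1 when upsampling
    resolution_factors = (factor,) * len(tif_shape)
    scaling_factors = resolution_factors  # per-axis zoom applied to the volume
    return scaling_factors, resolution_factors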