diff --git a/docs/examples/livestream_napari.py b/docs/examples/livestream_napari.py index ef4b1f9e..0b8d461f 100644 --- a/docs/examples/livestream_napari.py +++ b/docs/examples/livestream_napari.py @@ -64,11 +64,12 @@ def is_not_done() -> bool: def next_frame(): #-> Optional[npt.NDArray[Any]]: """Get the next frame from the current stream.""" if nframes[stream_id] < config.video[stream_id].max_frame_count: - if packet := runtime.get_available_data(stream_id): - n = packet.get_frame_count() - nframes[stream_id] += n - f = next(packet.frames()) - return f.data().squeeze().copy() + with runtime.get_available_data(stream_id) as data: + if packet := data: + n = packet.get_frame_count() + nframes[stream_id] += n + f = next(packet.frames()) + return f.data().squeeze().copy() return None stream = 1 diff --git a/docs/examples/sample_props.json b/docs/examples/sample_props.json index 69ae862f..abef590f 100644 --- a/docs/examples/sample_props.json +++ b/docs/examples/sample_props.json @@ -1,200 +1,186 @@ { - "video": [ - { - "camera": { - "identifier": { - "id": [ - 0, - 1 - ], - "kind": "Camera", - "name": "simulated: radial sin" - }, - "settings": { - "binning": 1, - "exposure_time_us": 0.0, - "input_triggers": { - "acquisition_start": { - "edge": "Rising", - "enable": false, - "kind": "Input", - "line": 0 - }, - "exposure": { - "edge": "Rising", - "enable": false, - "kind": "Input", - "line": 0 - }, - "frame_start": { - "edge": "Rising", - "enable": false, - "kind": "Input", - "line": 0 - } - }, - "line_interval_us": 0.0, - "offset": [ - 0, - 0 - ], - "output_triggers": { - "exposure": { - "edge": "Rising", - "enable": false, - "kind": "Input", - "line": 0 + "video": [ + { + "camera": { + "identifier": { + "id": [ + 0, + 1 + ], + "kind": "Camera", + "name": "simulated: radial sin" + }, + "settings": { + "binning": 1, + "exposure_time_us": 0.0, + "input_triggers": { + "acquisition_start": { + "edge": "Rising", + "enable": false, + "kind": "Input", + "line": 0 + }, + "exposure": { + "edge": "Rising", + "enable": false, + "kind": "Input", + "line": 0 + }, + "frame_start": { + "edge": "Rising", + "enable": false, + "kind": "Input", + "line": 0 + } + }, + "line_interval_us": 0.0, + "offset": [ + 0, + 0 + ], + "output_triggers": { + "exposure": { + "edge": "Rising", + "enable": false, + "kind": "Input", + "line": 0 + }, + "frame_start": { + "edge": "Rising", + "enable": false, + "kind": "Input", + "line": 0 + }, + "trigger_wait": { + "edge": "Rising", + "enable": false, + "kind": "Input", + "line": 0 + } + }, + "pixel_type": "U16", + "readout_direction": "Forward", + "shape": [ + 1, + 1 + ] + } }, - "frame_start": { - "edge": "Rising", - "enable": false, - "kind": "Input", - "line": 0 - }, - "trigger_wait": { - "edge": "Rising", - "enable": false, - "kind": "Input", - "line": 0 - } - }, - "pixel_type": "U16", - "readout_direction": "Forward", - "shape": [ - 1, - 1 - ] - } - }, - "frame_average_count": 0, - "max_frame_count": 18446744073709551615, - "storage": { - "identifier": { - "id": [ - 0, - 0 - ], - "kind": "NONE", - "name": "" - }, - "settings": { - "chunking": { - "max_bytes_per_chunk": 16777216, - "tile": { - "height": 0, - "planes": 0, - "width": 0 + "frame_average_count": 0, + "max_frame_count": 18446744073709551615, + "storage": { + "identifier": { + "id": [ + 0, + 5 + ], + "kind": "Storage", + "name": "trash" + }, + "settings": { + "acquisition_dimensions": [], + "enable_multiscale": false, + "external_metadata_json": "", + "filename": "", + "first_frame_id": 0, + "pixel_scale_um": [ + 0.0, 
+ 0.0 + ] + }, + "write_delay_ms": 0.0 } - }, - "enable_multiscale": false, - "external_metadata_json": "", - "filename": "", - "first_frame_id": 0, - "pixel_scale_um": [ - 0.0, - 0.0 - ] }, - "write_delay_ms": 0.0 - } - }, - { - "camera": { - "identifier": { - "id": [ - 0, - 0 - ], - "kind": "NONE", - "name": "" - }, - "settings": { - "binning": 1, - "exposure_time_us": 0.0, - "input_triggers": { - "acquisition_start": { - "edge": "Rising", - "enable": false, - "kind": "Input", - "line": 0 - }, - "exposure": { - "edge": "Rising", - "enable": false, - "kind": "Input", - "line": 0 - }, - "frame_start": { - "edge": "Rising", - "enable": false, - "kind": "Input", - "line": 0 - } - }, - "line_interval_us": 0.0, - "offset": [ - 0, - 0 - ], - "output_triggers": { - "exposure": { - "edge": "Rising", - "enable": false, - "kind": "Input", - "line": 0 + { + "camera": { + "identifier": { + "id": [ + 0, + 0 + ], + "kind": "NONE", + "name": "" + }, + "settings": { + "binning": 1, + "exposure_time_us": 0.0, + "input_triggers": { + "acquisition_start": { + "edge": "Rising", + "enable": false, + "kind": "Input", + "line": 0 + }, + "exposure": { + "edge": "Rising", + "enable": false, + "kind": "Input", + "line": 0 + }, + "frame_start": { + "edge": "Rising", + "enable": false, + "kind": "Input", + "line": 0 + } + }, + "line_interval_us": 0.0, + "offset": [ + 0, + 0 + ], + "output_triggers": { + "exposure": { + "edge": "Rising", + "enable": false, + "kind": "Input", + "line": 0 + }, + "frame_start": { + "edge": "Rising", + "enable": false, + "kind": "Input", + "line": 0 + }, + "trigger_wait": { + "edge": "Rising", + "enable": false, + "kind": "Input", + "line": 0 + } + }, + "pixel_type": "U16", + "readout_direction": "Forward", + "shape": [ + 0, + 0 + ] + } }, - "frame_start": { - "edge": "Rising", - "enable": false, - "kind": "Input", - "line": 0 - }, - "trigger_wait": { - "edge": "Rising", - "enable": false, - "kind": "Input", - "line": 0 + "frame_average_count": 0, + "max_frame_count": 18446744073709551615, + "storage": { + "identifier": { + "id": [ + 0, + 0 + ], + "kind": "NONE", + "name": "" + }, + "settings": { + "acquisition_dimensions": [], + "enable_multiscale": false, + "external_metadata_json": "", + "filename": "", + "first_frame_id": 0, + "pixel_scale_um": [ + 0.0, + 0.0 + ] + }, + "write_delay_ms": 0.0 } - }, - "pixel_type": "U16", - "readout_direction": "Forward", - "shape": [ - 0, - 0 - ] } - }, - "frame_average_count": 0, - "max_frame_count": 18446744073709551615, - "storage": { - "identifier": { - "id": [ - 0, - 0 - ], - "kind": "NONE", - "name": "" - }, - "settings": { - "chunking": { - "max_bytes_per_chunk": 16777216, - "tile": { - "height": 0, - "planes": 0, - "width": 0 - } - }, - "enable_multiscale": false, - "external_metadata_json": "", - "filename": "", - "first_frame_id": 0, - "pixel_scale_um": [ - 0.0, - 0.0 - ] - }, - "write_delay_ms": 0.0 - } - } - ] + ] } diff --git a/docs/get_started.md b/docs/get_started.md index f700c7fe..eba7d64b 100644 --- a/docs/get_started.md +++ b/docs/get_started.md @@ -49,31 +49,27 @@ Acquire also supports the following output file formats: - [Tiff](https://en.wikipedia.org/wiki/TIFF) - [Zarr](https://zarr.dev/) -For testing and demonstration purposes, Acquire provides a few simulated cameras, as well as raw and trash output devices. 
-To see all the devices that Acquire supports, you can run the following script:
-
-```python
-import acquire
-
-for device in acquire.Runtime().device_manager().devices():
-    print(device)
-```
+Acquire also provides a few simulated cameras, as well as raw byte storage and "trash," which discards all data written to it.

## Tutorial Prerequisites

-We will be writing to and reading from the [Zarr format](https://zarr.readthedocs.io/en/stable/), using the [Dask library](https://www.dask.org/) to load and inspect the data, and visualizing the data using [napari](https://napari.org/stable/).
+We will be streaming to [TIFF](http://bigtiff.org/), using [scikit-image](https://scikit-image.org/) to load and inspect the data, and visualizing the data using [napari](https://napari.org/stable/).

-You can install these prerequisites with:
+You can install the prerequisites with:

```
-python -m pip install dask "napari[all]" zarr
+python -m pip install "napari[all]" scikit-image
```

## Setup for Acquisition

-We will use one of Acquire's simulated cameras to generate data and use Zarr for our output file format, which is called "storage device" in `Acquire`.
+In Acquire parlance, the combination of a source (camera), filter, and sink (output) is called a **video stream**.
+We will generate data using simulated cameras (our source) and output to TIFF on the filesystem (our sink).
+(For this tutorial, we will not use a filter.)
+Acquire supports up to two such video streams.

-To begin, instantiate `Runtime` and `DeviceManager` and list the currently supported devices.
+Sources are implemented as **Camera** devices, and sinks are implemented as **Storage** devices.
+We'll start by listing all the devices that Acquire supports:

```python
import acquire

@@ -84,22 +80,23 @@ dm = runtime.device_manager()

for device in dm.devices():
    print(device)
```
+
The **runtime** is the main entry point in Acquire. Through the runtime, you configure your devices, start acquisition, check acquisition status, inspect data as it streams from your cameras, and terminate acquisition.

Let's configure our devices now. To do this, we'll get a copy of the current runtime configuration.
-We can update the configuration with identifiers from the the runtime's **device manager**, but these devices won't instantiate until we start acquisition.
+We can update the configuration with identifiers from the runtime's **device manager**, but these devices won't be created until we start acquisition.

-Acquire supports up to two video streams.
-These streams consist of a **source** (i.e., a camera), optionally a **filter**, and a **sink** (an output, like a Zarr dataset or a Tiff file).
Before configuring the streams, grab the current configuration of the `Runtime` object with:

```python
config = runtime.get_configuration()
```

-Video streams are configured independently. Configure the first video stream by setting properties on `config.video[0]` and the second video stream with `config.video[1]`. We'll be using simulated cameras, one generating a radial sine pattern and one generating a random pattern.
+Video streams are configured independently.
+Configure the first video stream by setting properties on `config.video[0]` and the second video stream with `config.video[1]`.
+We'll be using simulated cameras, one generating a radial sine pattern and one generating a random pattern.
```python
config.video[0].camera.identifier = dm.select(acquire.DeviceKind.Camera, "simulated: radial sin")
@@ -137,34 +134,28 @@ config.video[1].camera.settings.shape = (1280, 720)
```

Now we'll configure each output, or sink device.
-For both simulated cameras, we'll be writing to Zarr, a format which supports chunked arrays.
-
+For both simulated cameras, we'll be writing to [TIFF](http://bigtiff.org/), a well-known format for storing image data.
+For now, we'll simply specify the output file name.

```python
-config.video[0].storage.identifier = dm.select(acquire.DeviceKind.Storage, "Zarr")
+config.video[0].storage.identifier = dm.select(acquire.DeviceKind.Storage, "Tiff")

# what file or directory to write the data to
-config.video[0].storage.settings.filename = "output1.zarr"
-
-# where applicable, how large should a chunk file get before opening the next chunk file
-config.video[0].storage.settings.chunking.max_bytes_per_chunk = 32 * 2**20 # 32 MiB chunk sizes
+config.video[0].storage.settings.filename = "output1.tif"
```

```python
-config.video[1].storage.identifier = dm.select(acquire.DeviceKind.Storage, "Zarr")
+config.video[1].storage.identifier = dm.select(acquire.DeviceKind.Storage, "Tiff")

# what file or directory to write the data to
-config.video[1].storage.settings.filename = "output2.zarr"
-
-# where applicable, how large should a chunk file get before opening the next chunk file
-config.video[1].storage.settings.chunking.max_bytes_per_chunk = 64 * 2**20 # 64 MiB chunk sizes
+config.video[1].storage.settings.filename = "output2.tif"
```

Finally, let's specify how many frames to generate for each camera before stopping our simulated acquisition.
We also need to register our configuration with the runtime using the `set_configuration` method.
-If you want to let the runtime just keep acquiring effectively forever, you can set `max_frame_count` to `2**64 - 1`.
+If you want to let the runtime acquire effectively forever, you can set `max_frame_count` to `2**64 - 1`.

```python
config.video[0].max_frame_count = 100 # collect 100 frames
@@ -178,19 +169,15 @@ config = runtime.set_configuration(config)

If you run this tutorial multiple times, you can clear output from previous runs with:

```python
-    import os
-    import shutil
-
-    if config.video[0].storage.settings.filename in os.listdir("."):
-        shutil.rmtree(config.video[0].storage.settings.filename)
+    from pathlib import Path

-    if config.video[1].storage.settings.filename in os.listdir("."):
-        shutil.rmtree(config.video[1].storage.settings.filename)
+    # remove the TIFF files written by a previous run
+    Path(config.video[0].storage.settings.filename).unlink(missing_ok=True)
+    Path(config.video[1].storage.settings.filename).unlink(missing_ok=True)
```

## Acquire Data

-To start aquiring data:
+To start acquiring data:

```python
runtime.start()
```

Acquisition happens in a separate thread, so at any point we can check on the status by calling the `get_state` method.
-
```python
runtime.get_state()
```

This method will wait until you've reached the number of frames to collect specified above.
```python
runtime.stop()
```

-## Visualizing the Data with napari
+## Visualizing the data with napari

Let's take a look at what we've written.
-We'll load each Zarr dataset as a Dask array and inspect its dimensions, then we'll use napari to view it.
+We'll load each TIFF stack with scikit-image and inspect its dimensions, then we'll use napari to view it.
```python
-import dask.array as da
+from skimage.io import imread
import napari

-data1 = da.from_zarr(config.video[0].storage.settings.filename, component="0")
-data1
-
-data2 = da.from_zarr(config.video[1].storage.settings.filename, component="0")
+data1 = imread(config.video[0].storage.settings.filename)
+data2 = imread(config.video[1].storage.settings.filename)

viewer1 = napari.view_image(data1)

@@ -232,3 +216,8 @@ viewer2 = napari.view_image(data2)
```

## Conclusion

For more examples of using Acquire, check out our [tutorials page](tutorials/index.md).
diff --git a/docs/tutorials/access_data/framedata.md b/docs/tutorials/access_data/framedata.md
index 23040dfa..129bce7c 100644
--- a/docs/tutorials/access_data/framedata.md
+++ b/docs/tutorials/access_data/framedata.md
@@ -27,6 +27,7 @@ config.video[0].storage.identifier = dm.select(acquire.DeviceKind.Storage, "Tras
-# Set the time for collecting data for a each frame
+# Set the time for collecting data for each frame
config.video[0].camera.settings.exposure_time_us = 5e4 # 50 ms

+# Set the shape of the region of interest on the camera chip
config.video[0].camera.settings.shape = (1024, 768)

-# Set the max frame count to 2**(64-1) the largest number supported by Uint64 for essentially infinite acquisition
+# Set the max frame count to 2**64 - 1, the largest value supported by uint64, for essentially infinite acquisition
@@ -37,8 +38,13 @@ config = runtime.set_configuration(config)
```

## Working with `AvailableData` objects
-During Acquisition, the `AvailableData` object is the streaming interface, and this class has a `frames` method which iterates over the `VideoFrame` objects in `AvailableData`. Once we start acquisition, we'll utilize this iterator method to list the frames. To increase the likelihood of `AvailableData` containing data, we'll utilize the time python package to introduce a delay before we create our `AvailableData` object
+During acquisition, the `AvailableData` object is the streaming interface. We create an `AvailableData` object by calling `get_available_data` in a `with` statement and work with it inside the `with` block. The data is invalidated when the `with` block exits, so make a copy of anything you need to use outside the block. In this example, we'll simply use the `AvailableData` object inside the `with` block.

+There may not be data available. To increase the likelihood of `AvailableData` containing data, we'll use Python's `time` module to introduce a delay before we create our `AvailableData` object.
+
+If there is data, we'll use the `AvailableData` `frames` method, which iterates over the `VideoFrame` objects in `AvailableData`, together with the built-in `list` to create a variable `video_frames`, a list of the `VideoFrame` objects in the packet.
+
+`VideoFrame` has a `data` method which provides the frame as an `NDArray`. The shape of this NDArray corresponds to the image dimensions used internally by Acquire, namely (planes, height, width, channels). Since we have a single channel, both the first and the last dimensions will be 1. The interior dimensions are height and width, respectively. We can use the `numpy.squeeze` method to grab the desired NDArray image data since the other dimensions are 1. This is equivalent to `image = first_frame[0][:, :, 0]`.

```python
# package for introducing time delays
import time

# start acquisition
runtime.start()

# time delay of 0.5 seconds
time.sleep(0.5)

# grab the packet of data available on disk for video stream 0.
# This is an AvailableData object.
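+# Note: get_available_data() returns a context manager. The AvailableData
+# object it yields is only valid inside the `with` block below; copy any
+# frame data you need before the block exits.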
-available_data = runtime.get_available_data(0)
-```
-
-There may not be data available, in which case our variable `available_data` would be `None`. To avoid errors associated with this circumstance, we'll only grab data if `available_data` is not `None`.
-
-Once `get_available_data()` is called the `AvailableData` object will be locked into memory, so the circular buffer that stores the available data will overflow if `AvailableData` isn't released, so we'll delete the object with `del available_data` if there is no data available.
-
-
-```python
-# NoneType if there is no available data.
-# We can only grab frames if data is available.
-if available_data is not None:
-
-
-    # frames is an iterator over available_data
-    # we'll use this iterator to make a list of the frames
-    video_frames = list(available_data.frames())
-
-else:
-    # delete the available_data variable
-    # if there is no data in the packet to free up RAM
-    del available_data
+with runtime.get_available_data(0) as available_data:
+
+    # the packet may be empty;
+    # we can only grab frames if data is available
+    if available_data.get_frame_count() > 0:
+
+        # frames is an iterator over available_data
+        # we'll use this iterator to make a list of the frames
+        video_frames = list(available_data.frames())
+
+        # grab the first VideoFrame object in video_frames and convert it to an NDArray
+        first_frame = video_frames[0].data()
+
+        # inspect the dimensions of first_frame
+        print(first_frame.shape)
+
+        # select the image data; equivalent to image = first_frame[0][:, :, 0]
+        image = first_frame.squeeze()
+
+        # inspect the dimensions of the squeezed first_frame
+        print(image.shape)
+
+# stop runtime
+runtime.stop()
```

-`video_frames` is a list with each element being an instance of the `VideoFrame` class. `VideoFrame` has a `data` method which provides the frame as an `NDArray`. The shape of this NDArray corresponds to the image dimensions used internally by Acquire with (planes, height, width, channels). Since we have a single channel, both the first and the last dimensions will be 1. The interior dimensions are height and width, respectively.
-
-
-```python
-# grab the first VideoStream object in frames and convert it to an NDArray
-first_frame = video_frames[0].data()
-print(first_frame.shape)
-```
-Output:
+The output will be:
```
(1, 768, 1024, 1)
-```
-
-We can use the `numpy.squeeze` method to grab the desired NDArray image data from `first_frame` since the other dimensions are 1. This is equivalent to `image = first_frame[0][:, :, 0]`.
-
-```python
-image = first_frame.squeeze()
-
-
-print(image.shape)
-```
-Output:
-```
(768, 1024)
```

-Finally, delete the `available_data` to unlock the region in the circular buffer.
-
-
-```python
-# delete the available_data to free up disk space
-del available_data
-
-# stop runtime
-runtime.stop()
-```

[Download this tutorial as a Python script](framedata.py){ .md-button .md-button-center }
diff --git a/docs/tutorials/access_data/index.md b/docs/tutorials/access_data/index.md
index 5fad37b4..e4cd7020 100644
--- a/docs/tutorials/access_data/index.md
+++ b/docs/tutorials/access_data/index.md
@@ -5,3 +5,6 @@ Please [submit an issue on GitHub](https://github.com/acquire-project/acquire-do
if you'd like to request a tutorial. If you are also interested
in contributing to this documentation, please visit our [contribution guide](https://acquire-project.github.io/acquire-docs/dev/for_contributors/).
+
+- [Accessing Data during Acquisition](./framedata.md)
+- [Livestream to napari](./livestream.md)
diff --git a/docs/tutorials/setup_acquisition/configure.md b/docs/tutorials/setup_acquisition/configure.md
index dd084bd6..88e9c136 100644
--- a/docs/tutorials/setup_acquisition/configure.md
+++ b/docs/tutorials/setup_acquisition/configure.md
@@ -58,14 +58,14 @@ config.video[0].camera.settings.exposure_time_us = 5e4 # 50 ms
# (x, y) size of the image in pixels
config.video[0].camera.settings.shape = (1024, 768)

-# Specify the pixel type as Uint32
-config.video[0].camera.settings.pixel_type = acquire.SampleType.U32
+# Specify the pixel type as uint16
+config.video[0].camera.settings.pixel_type = acquire.SampleType.U16
```

## Configure `Storage`

`Storage` objects have 2 attributes, `settings`, a `StorageProperties` object, and an optional attribute `identifier`, which is an instance of the `DeviceIdentifier` class described above.

-`StorageProperties` has 2 attributes `external_metadata_json` and `filename` which are strings of the filename or filetree of the output metadata in JSON format and image data in whatever format corresponds to the selected storage device, respectively. `first_frame_id` is an integer ID that corresponds to the first frame of the current acquisition and is typically 0. `pixel_scale_um` is the camera pixel size in microns. `enable_multiscale` is a boolean used to specify if the data should be saved as an image pyramid. See [Multiscale Data Acqusition](../zarr/multiscale.md) for more information. The `chunking` attribute is an instance of the `ChunkingProperties` class, used for Zarr storage. See [Chunking Data for Zarr Storage](../zarr/chunked.md) for more information.
+`StorageProperties` has two string attributes: `filename`, the file or directory to write the image data to, in whatever format corresponds to the selected storage device, and `external_metadata_json`, the destination of the output metadata in JSON format. `first_frame_id` is an integer ID that corresponds to the first frame of the current acquisition and is typically 0. `pixel_scale_um` is the camera pixel size in microns. `acquisition_dimensions` is a list of `StorageDimension`, one for each acquisition dimension, ordered from fastest changing to slowest changing. `enable_multiscale` is a boolean used to specify if the data should be saved as an image pyramid. For more information on `StorageDimension` and multiscale storage, see [the Acquire Zarr documentation](https://github.com/acquire-project/acquire-driver-zarr/blob/main/README.md).

We'll specify the name of the output image file below.
diff --git a/docs/tutorials/setup_acquisition/drivers.md b/docs/tutorials/setup_acquisition/drivers.md
index 2109abed..8122e00a 100644
--- a/docs/tutorials/setup_acquisition/drivers.md
+++ b/docs/tutorials/setup_acquisition/drivers.md
@@ -47,6 +47,9 @@ The output of this code is below. All discovered devices, both cameras and stora
+
+
+
```

For cameras that weren't discovered you will see an error like the one below. These errors will not affect performance and can be ignored.
diff --git a/docs/tutorials/setup_acquisition/index.md b/docs/tutorials/setup_acquisition/index.md
index 8d231115..4817b223 100644
--- a/docs/tutorials/setup_acquisition/index.md
+++ b/docs/tutorials/setup_acquisition/index.md
@@ -5,3 +5,11 @@ Please [submit an issue on GitHub](https://github.com/acquire-project/acquire-do
if you'd like to request a tutorial.
If you are also interested in contributing to this documentation, please visit our [contribution guide](https://acquire-project.github.io/acquire-docs/dev/for_contributors/).
+
+- [Configure an Acquisition](./configure.md)
+- [Test Camera Drivers](./drivers.md)
+- [Device Selection](./select.md)
+- [Utilizing the Setup Method](./setup.md)
+- [Multiple Acquisitions](./start_stop.md)
+- [Storage Device Selection](./storage.md)
+- [Finite Triggered Acquisition](./trigger.md)
diff --git a/docs/tutorials/setup_acquisition/select.md b/docs/tutorials/setup_acquisition/select.md
index b1e35da4..11d6ee71 100644
--- a/docs/tutorials/setup_acquisition/select.md
+++ b/docs/tutorials/setup_acquisition/select.md
@@ -31,9 +31,12 @@ Output of the above code is below:
+
+
+
```

-All identified devices will be listed, and in the case of this tutorial, no cameras were connected to the machine, so only simulated cameras were found. Note that discovered storage devices will also print.
+All identified devices will be listed, and in the case of this tutorial, none of the vendor-provided camera drivers were installed on the machine, so only simulated cameras were found. Note that discovered storage devices will also print.

The order of those printed devices matters. Below are two examples of how the `select` method works. In the first, without a specific device name provided, `select` will choose the first device of the specified kind in the list of discovered devices. In the second example, a specific device name is provided, so `select` will grab that device if it was discovered by `Runtime`.
diff --git a/docs/tutorials/setup_acquisition/start_stop.md b/docs/tutorials/setup_acquisition/start_stop.md
index c62b5919..88200556 100644
--- a/docs/tutorials/setup_acquisition/start_stop.md
+++ b/docs/tutorials/setup_acquisition/start_stop.md
@@ -82,11 +82,8 @@ DeviceState.Armed
```

1. The first time we print is immediately after starting acquisition, so no time has elapsed for data collection as compared to the camera exposure time, so while the camera is running, `Running`, there is no data available.
-
-3. The next print happens after waiting 0.5 seconds, so acquisition is still runnning and now there is acquired data available.
-
-5. The subsequent print is following calling `runtime.stop()` which waits until the specified max number of frames is collected and then terminates acquisition. Thus, the device is no longer running and there is no available data, since all objects were deleted by calling the `stop` method. The device is in an `Armed` state ready for the next acquisition.
-
-7. The final print occurs after waiting 5 seconds following the start of acquisition. This waiting period is longer than the 1 second acqusition time (0.1 seconds/frame and 10 frames), so the device is no longer collecting data. However, `runtime.stop()` hasn't been called, so the `AvailableData` object has not yet been deleted.
+2. The next print happens after waiting 0.5 seconds, so acquisition is still running and now there is acquired data available.
+3. The subsequent print follows the call to `runtime.stop()`, which waits until the specified max number of frames is collected and then terminates acquisition. Thus, the device is no longer running and there is no available data, since all objects were deleted by calling the `stop` method. The device is in an `Armed` state, ready for the next acquisition.
+4. The final print occurs after waiting 5 seconds following the start of acquisition.
This waiting period is longer than the 1 second acquisition time (0.1 seconds/frame and 10 frames), so the device is no longer collecting data. However, `runtime.stop()` hasn't been called, so the `AvailableData` object has not yet been deleted.

[Download this tutorial as a Python script](start_stop.py){ .md-button .md-button-center }
diff --git a/docs/tutorials/setup_acquisition/storage.md b/docs/tutorials/setup_acquisition/storage.md
index 35994ae1..52aab9ee 100644
--- a/docs/tutorials/setup_acquisition/storage.md
+++ b/docs/tutorials/setup_acquisition/storage.md
@@ -30,9 +30,12 @@ The output of that script will be:
+
+
+
```

-`Acquire` supports streaming data to [bigtiff](http://bigtiff.org/) and [Zarr V2](https://zarr.readthedocs.io/en/stable/spec/v2.html).
+`Acquire` supports streaming data to [bigtiff](http://bigtiff.org/), [Zarr V2](https://zarr-specs.readthedocs.io/en/latest/v2/v2.0.html), and [Zarr V3](https://zarr-specs.readthedocs.io/en/latest/specs.html). For both Zarr V2 and Zarr V3, Acquire provides OME metadata.

Zarr has additional capabilities relative to the basic storage devices, namely _chunking_, _compression_, and _multiscale storage_. You can learn more about the Zarr capabilities in `Acquire` in [the Acquire Zarr documentation](https://github.com/acquire-project/acquire-driver-zarr/blob/main/README.md).
@@ -50,6 +53,12 @@
- **ZarrBlosc1Lz4ByteShuffle** - Streams compressed data (_lz4_ codec) to a [Zarr V2](https://zarr.readthedocs.io/en/stable/spec/v2.html) file with associated metadata.

+- **ZarrV3** - Streams data to a [Zarr V3](https://zarr-specs.readthedocs.io/en/latest/specs.html) file with associated metadata.
+
+- **ZarrV3Blosc1ZstdByteShuffle** - Streams compressed data (_zstd_ codec) to a [Zarr V3](https://zarr-specs.readthedocs.io/en/latest/specs.html) file with associated metadata.
+
+- **ZarrV3Blosc1Lz4ByteShuffle** - Streams compressed data (_lz4_ codec) to a [Zarr V3](https://zarr-specs.readthedocs.io/en/latest/specs.html) file with associated metadata.
+
## Configure the Storage Device

-In the example below, the the `tiff` storage device is selected, and the data from one video source will be streamed to a file `out.tif`.
+In the example below, the `tiff` storage device is selected, and the data from one video source will be streamed to a file `out.tif`.
diff --git a/docs/tutorials/setup_acquisition/trigger.md b/docs/tutorials/setup_acquisition/trigger.md
index 75a10e5d..e615c001 100644
--- a/docs/tutorials/setup_acquisition/trigger.md
+++ b/docs/tutorials/setup_acquisition/trigger.md
@@ -69,7 +69,7 @@ Output triggers can be set to begin exposure, start a new frame, or wait before

```python
config.video[0].camera.settings.output_triggers.exposure = acquire.Trigger(
-    enable=True, line=1, edge="Rising"
+    edge="Rising", enable=True, line=1, kind="Output"
)
```
diff --git a/docs/tutorials/using_json/index.md b/docs/tutorials/using_json/index.md
index 736c8772..4e1336e9 100644
--- a/docs/tutorials/using_json/index.md
+++ b/docs/tutorials/using_json/index.md
@@ -5,3 +5,6 @@ settings. Please [submit an issue on GitHub](https://github.com/acquire-project/
if you'd like to request a tutorial.
If you are also interested in contributing to this documentation, please visit our [contribution guide](https://acquire-project.github.io/acquire-docs/dev/for_contributors/).
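+
+As a quick sketch of the round trip these tutorials cover (an illustration, not the tutorials' exact code — it assumes `acquire.Properties` can be serialized with its `dict()` method and rebuilt from keyword arguments, as the tutorials below show, and uses a placeholder filename `props.json`):
+
+```python
+import json
+
+import acquire
+
+runtime = acquire.Runtime()
+props = runtime.get_configuration()
+
+# save the current properties to a JSON file ("props.json" is a placeholder name)
+with open("props.json", "w") as f:
+    json.dump(props.dict(), f, indent=4)
+
+# later, rebuild the properties from the JSON file and apply them
+with open("props.json", "r") as f:
+    props = acquire.Properties(**json.load(f))
+
+runtime.set_configuration(props)
+```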
+ +- [Loading Properties from a JSON file](./props_json.md) +- [Loading Triggers from a JSON file](./trig_json.md) diff --git a/docs/tutorials/zarr/chunked.md b/docs/tutorials/zarr/chunked.md deleted file mode 100644 index 69f1cfc6..00000000 --- a/docs/tutorials/zarr/chunked.md +++ /dev/null @@ -1,90 +0,0 @@ -# Chunking Data for Zarr Storage - -This tutorial will provide an example of writing chunked data to a Zarr storage device. - -Zarr has additional capabilities relative to the basic storage devices, namely _chunking_, _compression_, and _multiscale storage_. To enable _chunking_, set the attributes in an instance of the `ChunkingProperties` class. You can learn more about the Zarr capabilities in `Acquire` in [the Acquire Zarr documentation](https://github.com/acquire-project/acquire-driver-zarr/blob/main/README.md). - -## Configure `Runtime` -To start, we'll create a `Runtime` object and configure the streaming process, selecting `Zarr` as the storage device to enable chunking the data. - -```python -import acquire - -# Initialize a Runtime object -runtime = acquire.Runtime() - -# Initialize the device manager -dm = runtime.device_manager() - -# Grab the current configuration -config = runtime.get_configuration() - -# Select the radial sine simulated camera as the video source -config.video[0].camera.identifier = dm.select(acquire.DeviceKind.Camera, "simulated: radial sin") - -# Set the storage to Zarr to take advantage of chunking -config.video[0].storage.identifier = dm.select(acquire.DeviceKind.Storage, "Zarr") - -# Set the time for collecting data for a each frame -config.video[0].camera.settings.exposure_time_us = 5e4 # 50 ms - -# size of image region of interest on the camera (x, y) -config.video[0].camera.settings.shape = (1920, 1080) - -# specify the pixel datatype as a uint8 -config.video[0].camera.settings.pixel_type = acquire.SampleType.U8 - -# Set the max frame count -config.video[0].max_frame_count = 10 # collect 10 frames - -# Set the output file to out.zarr -config.video[0].storage.settings.filename = "out.zarr" -``` -Below we'll configure the chunking specific settings and update all settings with the `set_configuration` method. - -```python -# Chunk size may need to be optimized for each acquisition. -# See Zarr documentation for further guidance: -# https://zarr.readthedocs.io/en/stable/tutorial.html#chunk-optimizations -config.video[0].storage.settings.chunking.max_bytes_per_chunk = 32 * 2**20 # 32 MB - -# x, y dimensions of each chunk -# 1/2 of the width and height of the image, generating 4 chunks -config.video[0].storage.settings.chunking.tile.width = 1920 // 2 -config.video[0].storage.settings.chunking.tile.height = 1080 // 2 - -# Update the configuration with the chosen parameters -config = runtime.set_configuration(config) -``` - -## Collect and Inspect the Data - -```python -# collect data -runtime.start() -runtime.stop() -``` - -You can inspect the Zarr file directory to check that the data saved as expected. Alternatively, you can inspect the data programmatically with: - -```python -# Utilize the zarr library to open the data -import zarr - -# create a zarr Group object -group = zarr.open(config.video[0].storage.settings.filename) - -# check for the expected # of directories in the zarr container -assert len(group) == 1 - -# inspect the characteristics of the data -group["0"] -``` - -The output will be: -``` - -``` -As expected, we have only 1 top level directory, corresponding to the single array in the group. 
We would expect more than 1 array only if we were writing [multiscale data](multiscale.md). The overall array shape is (10, 1, 1080, 1920), corresponding to 10 frames, 1 channel, and a height and width of 1080 and 1920, respectively, per frame.
-
-[Download this tutorial as a Python script](chunked.py){ .md-button .md-button-center }
diff --git a/docs/tutorials/zarr/compressed.md b/docs/tutorials/zarr/compressed.md
index f79b4cbc..1504f5b5 100644
--- a/docs/tutorials/zarr/compressed.md
+++ b/docs/tutorials/zarr/compressed.md
@@ -28,14 +28,15 @@ config.video[0].camera.identifier = dm.select(acquire.DeviceKind.Camera, "simula
config.video[0].storage.identifier = dm.select(acquire.DeviceKind.Storage, "ZarrBlosc1ZstdByteShuffle")

-# Set the time for collecting data for a each frame
-config.video[0].camera.settings.exposure_time_us = 5e4 # 50 ms
+# Set the time for collecting data for each frame
+config.video[0].camera.settings.exposure_time_us = 7e4 # 70 ms

+# Set the size in pixels of the image region of interest on the camera
config.video[0].camera.settings.shape = (1024, 768)

# Set the max frame count
config.video[0].max_frame_count = 100 # collect 100 frames

-# Set the output file to out.zarr
+# Set the output location to out.zarr
config.video[0].storage.settings.filename = "out.zarr"

# Update the configuration with the chosen parameters
@@ -52,10 +53,10 @@ runtime.start()
runtime.stop()
```

-We'll use the [Zarr Python package](https://zarr.readthedocs.io/en/stable/) to read the data in `out.zarr` file.
+We'll use the [zarr-python package](https://zarr.readthedocs.io/en/stable/) to read the data in the `out.zarr` directory.

```python
-# We'll utilize the Zarr python package to read the data
+# We'll utilize the zarr-python package to read the data
import zarr

# load from Zarr
diff --git a/docs/tutorials/zarr/index.md b/docs/tutorials/zarr/index.md
index 613abcfa..dedec464 100644
--- a/docs/tutorials/zarr/index.md
+++ b/docs/tutorials/zarr/index.md
@@ -1,7 +1,9 @@
# Zarr

-These tutorials will help you learn about using OME-Zarr with Acquire. Please
+These tutorials will help you learn about using Zarr with Acquire. Please
[submit an issue on GitHub](https://github.com/acquire-project/acquire-docs/issues/new)
if you'd like to request a tutorial. If you are also interested in contributing to
this documentation, please visit our [contribution guide](https://acquire-project.github.io/acquire-docs/dev/for_contributors/).
+
+- [Writing to Compressed Zarr Files](./compressed.md)
diff --git a/docs/tutorials/zarr/multiscale.md b/docs/tutorials/zarr/multiscale.md
deleted file mode 100644
index ef189889..00000000
--- a/docs/tutorials/zarr/multiscale.md
+++ /dev/null
@@ -1,102 +0,0 @@
-# Multiscale Data Acqusition
-
-This tutorial will provide an example of writing multiscale data to a Zarr file.
-
-Zarr has additional capabilities relative to Acquire's basic storage devices, namely _chunking_, _compression_, and _multiscale storage_. To enable _chunking_ and _multiscale storage_, set those attributes in instances of the `ChunkingProperties` and `StorageProperties` classes, respectively. You can learn more about the Zarr capabilities in `Acquire` in [the Acquire Zarr documentation](https://github.com/acquire-project/acquire-driver-zarr/blob/main/README.md).
-
-## Configure `Runtime`
-To start, we'll create a `Runtime` object and begin to configure the streaming process, selecting `Zarr` as the storage device so that writing multiscale data is possible.
- -```python -import acquire - -# Initialize a Runtime object -runtime = acquire.Runtime() - -# Initialize the device manager -dm = runtime.device_manager() - -# Grab the current configuration -config = runtime.get_configuration() - -# Select the radial sine simulated camera as the video source -config.video[0].camera.identifier = dm.select(acquire.DeviceKind.Camera, "simulated: radial sin") - -# Set the storage to Zarr to have the option to save multiscale data -config.video[0].storage.identifier = dm.select(acquire.DeviceKind.Storage, "Zarr") - -# Set the time for collecting data for a each frame -config.video[0].camera.settings.exposure_time_us = 5e4 # 50 ms - -# Set the size of image region of interest on the camera (x, y) -config.video[0].camera.settings.shape = (1920, 1080) - -# Set the max frame count -config.video[0].max_frame_count = 5 # collect 5 frames - -# Set the image data type as a Uint8 -config.video[0].camera.settings.pixel_type = acquire.SampleType.U8 - -# Set the scale of the pixels -config.video[0].storage.settings.pixel_scale_um = (1, 1) # 1 micron by 1 micron - -# Set the output file to out.zarr -config.video[0].storage.settings.filename = "out.zarr" -``` - -To complete configuration, we'll configure the multiscale specific settings and update all settings with the `set_configuration` method. - -```python -# Chunk size may need to be optimized for each acquisition. -# See Zarr documentation for further guidance: -# https://zarr.readthedocs.io/en/stable/tutorial.html#chunk-optimizations -config.video[0].storage.settings.chunking.max_bytes_per_chunk = 16 * 2**20 # 16 MB - -# x, y dimensions of each chunk -# 1/3 of the width and height of the image, generating 9 chunks -config.video[0].storage.settings.chunking.tile.width = (config.video[0].camera.settings.shape[0] // 3) -config.video[0].storage.settings.chunking.tile.height = (config.video[0].camera.settings.shape[1] // 3) - -# turn on multiscale mode -config.video[0].storage.settings.enable_multiscale = True - -# Update the configuration with the chosen parameters -config = runtime.set_configuration(config) -``` -## Collect and Inspect the Data - -```python - -# collect data -runtime.start() -runtime.stop() -``` - -You can inspect the Zarr file directory to check that the data saved as expected. This zarr file should have multiple subdirectories, one for each resolution in the multiscale data. Alternatively, you can inspect the data programmatically with: - -```python -# Utilize the zarr python library to read the data -import zarr - -# Open the data to create a zarr Group -group = zarr.open("out.zarr") -``` -With multiscale mode enabled, an image pyramid will be formed by rescaling the data by a factor of 2 progressively until the rescaled image is smaller than the specified zarr chunk size in both dimensions. In this example, the original image dimensions are (1920, 1080), and we chunked the data using tiles 1/3 of the size of the image, namely (640, 360). To illustrate this point, we'll inspect the sizes of the various levels in the multiscale data and compare it to our specified chunk size. - -```python -group["0"], group["1"], group["2"] -``` - -The output will be: - -``` -(, - , - ) -``` - -Here, the `"0"` directory contains the full-resolution array of frames of size 1920 x 1080, with a single channel, saving all 10 frames. -The `"1"` directory contains the first rescaled array of frames of size 960 x 540, averaging every two frames, taking the frame count from 10 to 5. 
-The `"2"` directory contains a further rescaled array of frames of size 480 x 270, averaging every four frames, taking the frame count from 10 to 2. Notice that both the frame width and frame height are now smaller than the chunk width and chunk height of 640 and 360, respectively, so this should be the last array in the group. - -[Download this tutorial as a Python script](multiscale.py){ .md-button .md-button-center }