diff --git a/.cspell.json b/.cspell.json new file mode 100644 index 0000000..74024db --- /dev/null +++ b/.cspell.json @@ -0,0 +1,62 @@ +// cSpell Settings +//https://github.com/streetsidesoftware/vscode-spell-checker +{ + "version": "0.2", // Version of the setting file. Always 0.2 + "language": "en", // language - current active spelling language + "enabledLanguageIds": [ + "markdown", + "yaml", + "python" + ], + // flagWords - list of words to be always considered incorrect + // This is useful for offensive words and common spelling errors. + // For example "hte" should be "the" + "flagWords": [], + "allowCompoundWords": true, + "ignorePaths": [ + "./element_*.egg-info/*", + "./images/*" + ], + "words": [ + "asarray", + "astype", + "Berens", + "bossdb", + "CICD", + "connectome", + "Connectomics", + "DBURLs", + "djarchive", + "DJARCHIVE", + "Ecker", + "elif", + "Ephys", + "genotyping", + "Hoenselaar", + "IACUC", + "inlinehilite", + "Kasthuri", + "linenums", + "mkdocs", + "mkdocstrings", + "numpy", + "pymdownx", + "pyproject", + "pytest", + "Reimer", + "Roboto", + "RRID", + "Rxiv", + "Sasaki", + "segmentations", + "Shen", + "Siapas", + "Sinz", + "Sitonic", + "Tolias", + "voxel", + "witvliet", + "Yatsenko", + "Zuckerman" + ] +} diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md new file mode 100644 index 0000000..31fe9fc --- /dev/null +++ b/.github/ISSUE_TEMPLATE/bug_report.md @@ -0,0 +1,39 @@ +--- +name: Bug report +about: Create a report to help us improve +title: '' +labels: 'bug' +assignees: '' + +--- + +## Bug Report + +### Description + +A clear and concise description of what is the overall operation that is intended to be +performed that resulted in an error. + +### Reproducibility +Include: +- OS (WIN | MACOS | Linux) +- DataJoint Element Version +- MySQL Version +- MySQL Deployment Strategy (local-native | local-docker | remote) +- Minimum number of steps to reliably reproduce the issue +- Complete error stack as a result of evaluating the above steps + +### Expected Behavior +A clear and concise description of what you expected to happen. + +### Screenshots +If applicable, add screenshots to help explain your problem. + +### Additional Research and Context +Add any additional research or context that was conducted in creating this report. + +For example: +- Related GitHub issues and PR's either within this repository or in other relevant + repositories. +- Specific links to specific lines or a focus within source code. +- Relevant summary of Maintainers development meetings, milestones, projects, etc. diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml new file mode 100644 index 0000000..b3d197d --- /dev/null +++ b/.github/ISSUE_TEMPLATE/config.yml @@ -0,0 +1,5 @@ +blank_issues_enabled: false +contact_links: + - name: DataJoint Contribution Guideline + url: https://datajoint.com/docs/community/contribute/ + about: Please make sure to review the DataJoint Contribution Guidelines \ No newline at end of file diff --git a/.github/ISSUE_TEMPLATE/feature_request.md b/.github/ISSUE_TEMPLATE/feature_request.md new file mode 100644 index 0000000..1f2b784 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/feature_request.md @@ -0,0 +1,57 @@ +--- +name: Feature request +about: Suggest an idea for a new feature +title: '' +labels: 'enhancement' +assignees: '' + +--- + +## Feature Request + +### Problem + +A clear and concise description how this idea has manifested and the context. 
Elaborate +on the need for this feature and/or what could be improved. Ex. I'm always frustrated +when [...] + +### Requirements + +A clear and concise description of the requirements to satisfy the new feature. Detail +what you expect from a successful implementation of the feature. Ex. When using this +feature, it should [...] + +### Justification + +Provide the key benefits in making this a supported feature. Ex. Adding support for this +feature would ensure [...] + +### Alternative Considerations + +Do you currently have a work-around for this? Provide any alternative solutions or +features you've considered. + +### Related Errors +Add any errors as a direct result of not exposing this feature. + +Please include steps to reproduce provided errors as follows: +- OS (WIN | MACOS | Linux) +- DataJoint Element Version +- MySQL Version +- MySQL Deployment Strategy (local-native | local-docker | remote) +- Minimum number of steps to reliably reproduce the issue +- Complete error stack as a result of evaluating the above steps + +### Screenshots +If applicable, add screenshots to help explain your feature. + +### Additional Research and Context +Add any additional research or context that was conducted in creating this feature request. + +For example: +- Related GitHub issues and PR's either within this repository or in other relevant + repositories. +- Specific links to specific lines or a focus within source code. +- Relevant summary of Maintainers development meetings, milestones, projects, etc. +- Any additional supplemental web references or links that would further justify this + feature request. diff --git a/.github/workflows/u24_workflow_before_release.yaml b/.github/workflows/u24_workflow_before_release.yaml new file mode 100644 index 0000000..28a5ff5 --- /dev/null +++ b/.github/workflows/u24_workflow_before_release.yaml @@ -0,0 +1,18 @@ +name: u24_workflow_before_release_0.0.1 +on: + pull_request: + push: + branches: + - '**' + tags-ignore: + - '**' + workflow_dispatch: +jobs: + call_context_check: + uses: dj-sciops/djsciops-cicd/.github/workflows/context_check.yaml@main + call_u24_workflow_build_debian: + uses: dj-sciops/djsciops-cicd/.github/workflows/u24_workflow_build.yaml@main + with: + jhub_ver: 1.4.2 + py_ver: 3.9 + dist: debian diff --git a/.github/workflows/u24_workflow_release_call.yaml b/.github/workflows/u24_workflow_release_call.yaml new file mode 100644 index 0000000..8196673 --- /dev/null +++ b/.github/workflows/u24_workflow_release_call.yaml @@ -0,0 +1,20 @@ +name: u24_workflow_release_call_0.0.1 +on: + workflow_run: + workflows: ["u24_workflow_tag_to_release_0.0.1"] + types: + - completed +jobs: + call_context_check: + uses: dj-sciops/djsciops-cicd/.github/workflows/context_check.yaml@main + call_u24_workflow_release_debian: + if: >- + github.event.workflow_run.conclusion == 'success' && github.repository_owner == 'datajoint' + uses: dj-sciops/djsciops-cicd/.github/workflows/u24_workflow_release.yaml@main + with: + jhub_ver: 1.4.2 + py_ver: 3.9 + dist: debian + secrets: + REGISTRY_USERNAME: ${{secrets.DOCKER_USERNAME}} + REGISTRY_PASSWORD: ${{secrets.DOCKER_PASSWORD}} diff --git a/.github/workflows/u24_workflow_tag_to_release.yaml b/.github/workflows/u24_workflow_tag_to_release.yaml new file mode 100644 index 0000000..3a6ce58 --- /dev/null +++ b/.github/workflows/u24_workflow_tag_to_release.yaml @@ -0,0 +1,15 @@ +name: u24_workflow_tag_to_release_0.0.1 +on: + push: + tags: + - '*.*.*' + - 'test*.*.*' +jobs: + call_context_check: + uses: 
dj-sciops/djsciops-cicd/.github/workflows/context_check.yaml@main + call_u24_workflow_build_debian: + uses: dj-sciops/djsciops-cicd/.github/workflows/u24_workflow_build.yaml@main + with: + jhub_ver: 1.4.2 + py_ver: 3.9 + dist: debian diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..b2ec45f --- /dev/null +++ b/.gitignore @@ -0,0 +1,80 @@ +# User files and data directories +example_data/ +test.ipynb + +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# Distribution, packaging, PyInstaller +.Python +env/ +build/ +*egg*/ +*dist/ +downloads/ +lib*/ +parts/ +var/ +wheels/ +.installed.cfg +*.egg +*.manifest +*.spec +pip-*.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +*.cov* +.cache +nosetests.xml +coverage.xml +.hypothesis/ +.pytest_cache/ + +# C extension, Translations +# editors: vscode, emacs, Mac +*.so +*.mo +*.pot +.vscode +**/*~ +**/#*# +**/.#* +.DS_Store + +# Django, Flask, Scrapy, Sphinx, mkdocs +# PyBuilder, Jupyter, SageMath, celery beat +*.log +local_settings.py +instance/ +.webassets-cache +.scrapy +scratchpaper.* +docs/_build/ +/site +target/ +.*checkpoints +celerybeat-schedule +*.sage.py + +# dotenv, virtualenv, pyenv, mypy +.*env +venv/ +ENV/ +.python-version +.mypy_cache/ + +# Spyder/Rope project settings +.spy*project +.ropeproject + +# datajoint, notes, nwb export +dj_local_c*.json +temp* +*nwb + +# vscode +*.code-workspace \ No newline at end of file diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 0000000..29eab3b --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,58 @@ +default_stages: [commit, push] +exclude: (^.github/|^docs/|^images/|^notebooks/py_scripts/) + +repos: + - repo: https://github.com/pre-commit/pre-commit-hooks + rev: v4.4.0 + hooks: + - id: trailing-whitespace + - id: end-of-file-fixer + - id: check-yaml + - id: check-added-large-files # prevent giant files from being committed + - id: requirements-txt-fixer + - id: mixed-line-ending + args: ["--fix=lf"] + description: Forces to replace line ending by the UNIX 'lf' character. + + # black + - repo: https://github.com/psf/black + rev: 22.12.0 + hooks: + - id: black + - id: black-jupyter + args: + - --line-length=88 + + # isort + - repo: https://github.com/pycqa/isort + rev: 5.12.0 + hooks: + - id: isort + args: ["--profile", "black"] + description: Sorts imports in an alphabetical order + + # flake8 + - repo: https://github.com/pycqa/flake8 + rev: 4.0.1 + hooks: + - id: flake8 + args: # arguments to configure flake8 + # making isort line length compatible with black + - "--max-line-length=88" + - "--max-complexity=18" + - "--select=B,C,E,F,W,T4,B9" + + # these are errors that will be ignored by flake8 + # https://www.flake8rules.com/rules/{code}.html + - "--ignore=E203,E501,W503,W605,E402" + # E203 - Colons should not have any space before them. + # Needed for list indexing + # E501 - Line lengths are recommended to be no greater than 79 characters. + # Needed as we conform to 88 + # W503 - Line breaks should occur after the binary operator. + # Needed because not compatible with black + # W605 - a backslash-character pair that is not a valid escape sequence now + # generates a DeprecationWarning. This will eventually become a SyntaxError. + # Needed because we use \d as an escape sequence + # E402 - Place module level import at the top. 
+ # Needed to prevent circular import error diff --git a/CHANGELOG.md b/CHANGELOG.md new file mode 100644 index 0000000..76b45af --- /dev/null +++ b/CHANGELOG.md @@ -0,0 +1,10 @@ +# Changelog + +Observes [Semantic Versioning](https://semver.org/spec/v2.0.0.html) standard and +[Keep a Changelog](https://keepachangelog.com/en/1.0.0/) convention. + +## [0.1.0] - Unreleased + ++ Add - Workflow pipeline, pytests, CICD + +[0.1.0]: https://github.com/datajoint/workflow-zstack/releases/tag/0.1.0 diff --git a/CODE_OF_CONDUCT.md b/CODE_OF_CONDUCT.md new file mode 100644 index 0000000..0502528 --- /dev/null +++ b/CODE_OF_CONDUCT.md @@ -0,0 +1,132 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +We as members, contributors, and leaders pledge to make participation in our +community a harassment-free experience for everyone, regardless of age, body +size, visible or invisible disability, ethnicity, sex characteristics, gender +identity and expression, level of experience, education, socio-economic status, +nationality, personal appearance, race, caste, color, religion, or sexual +identity and orientation. + +We pledge to act and interact in ways that contribute to an open, welcoming, +diverse, inclusive, and healthy community. + +## Our Standards + +Examples of behavior that contributes to a positive environment for our +community include: + +* Demonstrating empathy and kindness toward other people +* Being respectful of differing opinions, viewpoints, and experiences +* Giving and gracefully accepting constructive feedback +* Accepting responsibility and apologizing to those affected by our mistakes, + and learning from the experience +* Focusing on what is best not just for us as individuals, but for the overall + community + +Examples of unacceptable behavior include: + +* The use of sexualized language or imagery, and sexual attention or advances of + any kind +* Trolling, insulting or derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or email address, + without their explicit permission +* Other conduct which could reasonably be considered inappropriate in a + professional setting + +## Enforcement Responsibilities + +Community leaders are responsible for clarifying and enforcing our standards of +acceptable behavior and will take appropriate and fair corrective action in +response to any behavior that they deem inappropriate, threatening, offensive, +or harmful. + +Community leaders have the right and responsibility to remove, edit, or reject +comments, commits, code, wiki edits, issues, and other contributions that are +not aligned to this Code of Conduct, and will communicate reasons for moderation +decisions when appropriate. + +## Scope + +This Code of Conduct applies within all community spaces, and also applies when +an individual is officially representing the community in public spaces. +Examples of representing our community include using an official e-mail address, +posting via an official social media account, or acting as an appointed +representative at an online or offline event. + +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be +reported to the community leaders responsible for enforcement at +[Support@DataJoint.com](mailto:support@datajoint.com). +All complaints will be reviewed and investigated promptly and fairly. + +All community leaders are obligated to respect the privacy and security of the +reporter of any incident. 
+ +## Enforcement Guidelines + +Community leaders will follow these Community Impact Guidelines in determining +the consequences for any action they deem in violation of this Code of Conduct: + +### 1. Correction + +**Community Impact**: Use of inappropriate language or other behavior deemed +unprofessional or unwelcome in the community. + +**Consequence**: A private, written warning from community leaders, providing +clarity around the nature of the violation and an explanation of why the +behavior was inappropriate. A public apology may be requested. + +### 2. Warning + +**Community Impact**: A violation through a single incident or series of +actions. + +**Consequence**: A warning with consequences for continued behavior. No +interaction with the people involved, including unsolicited interaction with +those enforcing the Code of Conduct, for a specified period of time. This +includes avoiding interactions in community spaces as well as external channels +like social media. Violating these terms may lead to a temporary or permanent +ban. + +### 3. Temporary Ban + +**Community Impact**: A serious violation of community standards, including +sustained inappropriate behavior. + +**Consequence**: A temporary ban from any sort of interaction or public +communication with the community for a specified period of time. No public or +private interaction with the people involved, including unsolicited interaction +with those enforcing the Code of Conduct, is allowed during this period. +Violating these terms may lead to a permanent ban. + +### 4. Permanent Ban + +**Community Impact**: Demonstrating a pattern of violation of community +standards, including sustained inappropriate behavior, harassment of an +individual, or aggression toward or disparagement of classes of individuals. + +**Consequence**: A permanent ban from any sort of public interaction within the +community. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], +version 2.1, available at +[https://www.contributor-covenant.org/version/2/1/code_of_conduct.html][v2.1]. + +Community Impact Guidelines were inspired by +[Mozilla's code of conduct enforcement ladder][Mozilla CoC]. + +For answers to common questions about this code of conduct, see the FAQ at +[https://www.contributor-covenant.org/faq][FAQ]. Translations are available at +[https://www.contributor-covenant.org/translations][translations]. + +[homepage]: https://www.contributor-covenant.org +[v2.1]: https://www.contributor-covenant.org/version/2/1/code_of_conduct.html +[Mozilla CoC]: https://github.com/mozilla/diversity +[FAQ]: https://www.contributor-covenant.org/faq +[translations]: https://www.contributor-covenant.org/translations diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md new file mode 100644 index 0000000..e04d170 --- /dev/null +++ b/CONTRIBUTING.md @@ -0,0 +1,5 @@ +# Contribution Guidelines + +This project follows the +[DataJoint Contribution Guidelines](https://datajoint.com/docs/community/contribute/). +Please reference the link for more full details. diff --git a/README.md b/README.md new file mode 100644 index 0000000..072f73d --- /dev/null +++ b/README.md @@ -0,0 +1,68 @@ +# DataJoint Workflow for ZStack Imaging + +The DataJoint Workflow for ZStack Imaging combines five DataJoint Elements for +volume cell segmentation - Elements Lab, Animal, Session, Calcium Imaging, and +ZStack. DataJoint Elements collectively standardize and automate data collection +and analysis for neuroscience experiments. 
Each Element is a modular pipeline for data +storage and processing with corresponding database tables that can be combined with +other Elements to assemble a fully functional pipeline. This repository also provides +a tutorial environment and notebook to learn the pipeline. + +## Experiment Flowchart + +![flowchart](https://raw.githubusercontent.com/datajoint/element-zstack/main/images/flowchart.svg) + +## Data Pipeline Diagram + +![pipeline](https://raw.githubusercontent.com/datajoint/element-zstack/main/images/pipeline.svg) + +## Getting Started + ++ [Interactive tutorial](#interactive-tutorial) + ++ Install Element ZStack from PyPI + + ```bash + pip install element-zstack + ``` + ++ [Documentation](https://datajoint.com/docs/elements/element-zstack) + +## Support + ++ If you need help getting started or run into any errors, please contact our team by email at support@datajoint.com. + +## Interactive Tutorial + +### Launch Environment + ++ Local Environment + + Install [Git](https://git-scm.com/book/en/v2/Getting-Started-Installing-Git) + + Install [VSCode](https://code.visualstudio.com/) + + Install [Conda](https://docs.conda.io/en/latest/miniconda.html) + + Configure a database. See [here](https://tutorials.datajoint.org/setting-up/local-database.html) for details. + + `git clone` the code repository and open it in VSCode + + Install the repository with `pip install -e .` + + Setup a `dj_local_conf.json` with the `database.prefix` and `volume_root_data_dir`. See [User Guide](https://datajoint.com/docs/elements/user-guide/) for details. + + Add your example data to the `volume_root_data_dir`. + +### Instructions + +1. To upload data to BossDB, [create an account](https://api.bossdb.io) to + access the BossDB API and generate an API token. Please contact the team at [BossDB](https://bossdb.org) + to ensure you have `resource-manager` permissions for your account. +2. Follow the instructions below to set up the + [intern](https://github.com/jhuapl-boss/intern) REST API locally. + + Create a new folder `.intern` in your root directory. + + Create a configuration file `intern.cfg` within the `.intern` folder. The + path to this file should be `~/.intern/intern.cfg`. + + The `intern.cfg` file should contain the following exactly as shown below: + ```bash + # ~/.intern/intern.cfg + [Default] + protocol = https + host = api.bossdb.io + token = + ``` +3. Use the instructions above to set up a local environment. +4. Navigate to the `notebooks` directory. Execute the cells in the notebooks to begin your walk through of the tutorial. 
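+
+### Example `dj_local_conf.json`
+
+The local environment setup above asks for a `dj_local_conf.json` in the directory
+from which you run the code (the repository root for this tutorial). A minimal
+example is shown below; all values are placeholders, so substitute your own database
+credentials, prefix, and data path. The `database.prefix` and `volume_root_data_dir`
+keys are read from the `custom` section by `workflow_zstack/__init__.py`.
+
+```json
+{
+    "database.host": "localhost",
+    "database.user": "root",
+    "database.password": "simple",
+    "custom": {
+        "database.prefix": "neuro_",
+        "volume_root_data_dir": ["/path/to/your/example_data"]
+    }
+}
+```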
\ No newline at end of file
diff --git a/docker-compose.yaml b/docker-compose.yaml
new file mode 100644
index 0000000..f9765c9
--- /dev/null
+++ b/docker-compose.yaml
@@ -0,0 +1,15 @@
+# MYSQL_VER=8.0 docker compose up --build
+version: "2.4"
+services:
+  db:
+    restart: always
+    image: datajoint/mysql:${MYSQL_VER}
+    environment:
+      - MYSQL_ROOT_PASSWORD=${DJ_PASS}
+    ports:
+      - "3306:3306"
+    healthcheck:
+      test: [ "CMD", "mysqladmin", "ping", "-h", "localhost" ]
+      timeout: 15s
+      retries: 100
+      interval: 15s
\ No newline at end of file
diff --git a/notebooks/tutorial.ipynb b/notebooks/tutorial.ipynb
new file mode 100644
index 0000000..171f504
--- /dev/null
+++ b/notebooks/tutorial.ipynb
@@ -0,0 +1,759 @@
+{
+ "cells": [
+  {
+   "attachments": {},
+   "cell_type": "markdown",
+   "metadata": {
+    "tags": []
+   },
+   "source": [
+    "# Manage volumetric fluorescent microscopy experiments with DataJoint Elements\n",
+    "\n",
+    "This notebook will walk through processing volumetric two-photon calcium imaging\n",
+    "data with the DataJoint Workflow for volumetric image processing. The workflow\n",
+    "currently supports volumetric data collected\n",
+    "from `ScanImage`. \n",
+    "\n",
+    "**Please note that uploading data to BossDB via this pipeline requires the\n",
+    "following:**\n",
+    "+ An API token which can be obtained by creating an account at\n",
+    "[api.bossdb.io](https://api.bossdb.io). You will also need resource manager\n",
+    "permissions from the team at [BossDB](https://bossdb.org).\n",
+    "+ A local setup of the [intern](https://github.com/jhuapl-boss/intern) REST API\n",
+    "  using the instructions below:\n",
+    "  + Create a new folder `.intern` in your root directory.\n",
+    "  + Create a configuration file `intern.cfg` within the `.intern` folder. The\n",
+    "    path to this file should be `~/.intern/intern.cfg`. \n",
+    "  + The `intern.cfg` file should contain the following exactly as shown below:\n",
+    "    ```bash\n",
+    "    # ~/.intern/intern.cfg\n",
+    "    [Default]\n",
+    "    protocol = https\n",
+    "    host = api.bossdb.io\n",
+    "    token = \n",
+    "    ```\n",
+    "\n",
+    "\n",
+    "In this notebook, we will explain the following concepts as they relate to this workflow:\n",
+    "- What is an Element versus a Workflow?\n",
+    "- Plot the workflow with `dj.Diagram`\n",
+    "- Insert data into tables\n",
+    "- Query table contents\n",
+    "- Fetch table contents\n",
+    "- Run the workflow for your experiments\n",
+    "\n",
+    "For detailed documentation and tutorials on general DataJoint principles that support collaboration, automation, reproducibility, and visualizations:\n",
+    "\n",
+    "- [DataJoint Interactive Tutorials](https://github.com/datajoint/datajoint-tutorials) - Fundamentals including table tiers, query operations, fetch operations, automated computations with the `make` function, etc.\n",
+    "\n",
+    "- [DataJoint Core - Documentation](https://datajoint.com/docs/core/) - Relational data model principles\n",
+    "\n",
+    "- [DataJoint API for Python - Documentation](https://datajoint.com/docs/core/datajoint-python/)\n",
+    "\n",
+    "- [DataJoint Element for Volumetric Calcium Imaging - Documentation](https://datajoint.com/docs/elements/element-zstack/)\n",
+    "\n",
+    "Let's start by importing the packages necessary to run this workflow."
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import os\n", + "\n", + "if os.path.basename(os.getcwd()) == \"notebooks\":\n", + " os.chdir(\"..\")\n", + "\n", + "\n", + "import datajoint as dj\n", + "import datetime" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## The DataJoint Workflow for volumetric Calcium Imaging is assembled from 5 DataJoint Elements\n", + "\n", + "| Element | Source Code | Documentation | Description |\n", + "| -- | -- | -- | -- |\n", + "| Element Lab | [Link](https://github.com/datajoint/element-lab) | [Link](https://datajoint.com/docs/elements/element-lab) | Lab management related information, such as Lab, User, Project, Protocol, Source. |\n", + "| Element Animal | [Link](https://github.com/datajoint/element-animal) | [Link](https://datajoint.com/docs/elements/element-animal) | General animal metadata and surgery information. |\n", + "| Element Session | [Link](https://github.com/datajoint/element-session) | [Link](https://datajoint.com/docs/elements/element-session) | General information of experimental sessions. |\n", + "| Element Calcium Imaging | [Link](https://github.com/datajoint/element-calcium-imaging) | [Link](https://datajoint.com/docs/elements/element-calcium-imaging) | General information about the calcium imaging scan. |\n", + "| Element zstack | [Link](https://github.com/datajoint/element-zstack) | [Link](https://datajoint.com/docs/elements/element-zstack) | Volumetric data segmentation and export. |\n", + "\n", + "Each workflow is composed of multiple Elements. Each Element contains 1 or more modules, and each module declares its own schema in the database.\n", + "\n", + "The Elements are imported within the `workflow_zstack.pipeline` script.\n", + "\n", + "By importing the modules for the first time, the schemas and tables will be created in the database. Once created, importing modules will not create schemas and tables again, but the existing schemas/tables can be accessed.\n", + "\n", + "The schema diagram (shown below) is a good reference for understanding the order of the tables within the workflow.\n", + "\n", + "Let's activate the Elements." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from workflow_zstack.pipeline import (\n", + " lab,\n", + " subject,\n", + " session,\n", + " scan,\n", + " volume,\n", + " volume_matching,\n", + " bossdb,\n", + ")" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Diagram\n", + "\n", + "We can plot the diagram of tables within multiple schemas and their dependencies using `dj.Diagram()`. For details, see the [documentation](https://datajoint.com/docs/core/concepts/getting-started/diagrams/)." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "(\n", + " dj.Diagram(subject.Subject)\n", + " + dj.Diagram(session.Session)\n", + " + dj.Diagram(scan.Scan)\n", + " + dj.Diagram(volume)\n", + " + dj.Diagram(bossdb)\n", + ")" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "While the diagram above seems complex at first, it becomes more clear when it's approached as a hierarchy of tables that define the order in which the workflow expects to receive data in each of its tables.\n", + "\n", + "The tables higher up in the diagram such as `subject.Subject()` should be the first to receive data.\n", + "\n", + "Data is manually entered into the green, rectangular tables with the `insert1()` method.\n", + "\n", + "Tables connected by a solid line depend on entries from the table above it.\n", + "\n", + "There are 5 table tiers in DataJoint. Some of these tables appear in the diagram above.\n", + "\n", + "| Table tier | Color and shape | Description |\n", + "| -- | -- | -- |\n", + "| Manual table | Green box | Data entered from outside the pipeline, either by hand or with external helper scripts. |\n", + "| Lookup table | Gray box | Small tables containing general facts and settings of the data pipeline; not specific to any experiment or dataset. | \n", + "| Imported table | Blue oval | Data ingested automatically inside the pipeline but requiring access to data outside the pipeline. |\n", + "| Computed table | Red circle | Data computed automatically entirely inside the pipeline. |\n", + "| Part table | Plain text | Part tables share the same tier as their master table. |" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Insert entries into manual tables\n", + "\n", + "In this section, we will insert metadata about an animal subject, experiment session, and optogenetic stimulation parameters.\n", + "\n", + "Let's start with the first schema and table in the schema diagram (i.e. `subject.Subject` table).\n", + "\n", + "Each module (e.g. `subject`) contains a schema object that enables interaction with the schema in the database." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "subject.schema" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The table classes in the module correspond to a table in the database." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "subject.Subject()" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We can view the table dependencies and the attributes we need to insert by using the functions `.describe()` and `.heading`. The `describe()` function displays the table definition with foreign key references and the `heading` function displays the attributes of the table definition. These are particularly useful functions if you are new to DataJoint Elements and are unsure of the attributes required for each table." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "print(subject.Subject.describe())" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "subject.Subject.heading" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We will insert data into the `subject.Subject` table. " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "subject.Subject.insert1(\n", + " dict(\n", + " subject=\"subject1\",\n", + " sex=\"M\",\n", + " subject_birth_date=\"2023-01-01\",\n", + " subject_description=\"Cellpose segmentation of volumetric data.\",\n", + " )\n", + ")\n", + "subject.Subject()" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Let's continue inserting in the other manual tables. The `Session` table is next." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "print(session.Session.describe())" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "session.Session.heading" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The cells above show the dependencies and attributes for the `session.Session` table.\n", + "\n", + "Notice that `describe` shows the dependencies of the table on upstream tables (i.e. foreign key references). The `Session` table depends on the upstream `Subject` table. \n", + "\n", + "Whereas `heading` lists all the attributes of the `Session` table, regardless of\n", + "whether they are declared in an upstream table." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "session_key = dict(\n", + " subject=\"subject1\",\n", + " session_id=0,\n", + ")\n", + "session.Session.insert1(\n", + " dict(\n", + " session_key,\n", + " session_datetime=datetime.datetime.now(),\n", + " ),\n", + ")\n", + "session.Session()" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The `SessionDirectory` table locates the relevant data files in a directory path\n", + "relative to the root directory defined in your `dj.config[\"custom\"]`. More\n", + "information about `dj.config` is provided at the end of this tutorial and is\n", + "particularly useful for local deployments of this workflow." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "print(session.SessionDirectory.describe())" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "session.SessionDirectory.heading" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "session.SessionDirectory.insert1(\n", + " dict(session_key, session_dir=\"subject1/session1\"),\n", + " skip_duplicates=True,\n", + ")\n", + "session.SessionDirectory()" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Each volume requires an entry in the `Scan` table from\n", + "`element-calcium-imaging`. Here, we'll use `describe` and `heading` for the Scan\n", + "table and insert an entry for the current session." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "print(scan.Scan.describe())" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "scan.Scan.heading" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "scan.Scan.insert1(\n", + " dict(\n", + " session_key,\n", + " scan_id=0,\n", + " acq_software=\"ScanImage\",\n", + " ),\n", + " skip_duplicates=True,\n", + ")\n", + "scan_key = (scan.Scan & \"subject = 'subject1'\").fetch1(\"KEY\")" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## Populate\n", + "\n", + "### Automatically populate tables\n", + "\n", + "`volume.Volume` is the first table in the pipeline that can be populated automatically.\n", + "If a table contains a part table, this part table is also populated during the\n", + "`populate()` call. `populate()` takes several arguments including a session\n", + "key. This key restricts `populate()` to performing the operation on the session\n", + "of interest rather than all possible sessions which could be a time-intensive\n", + "process for databases with lots of entries.\n", + "\n", + "Let's view the `volume.Volume` and populate it using the `populate()` call." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "volume.Volume.heading" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "volume.Volume()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "volume.Volume.populate(scan_key, display_progress=True)" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Let's view the information was entered into this table:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "volume.Volume()" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "We're almost ready to perform volume segmentation with `cellpose`. An important step before\n", + "processing is managing the parameters which will be used in that step. To do so, we will\n", + "insert parameters required by cellpose into a DataJoint table\n", + "`SegmentationParamSet`. This table keeps track of all combinations of your image\n", + "processing parameters. You can choose which parameters are used during\n", + "processing in a later step.\n", + "\n", + "Let's view the attributes and insert data into `volume.SegmentationParamSet`." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "volume.SegmentationParamSet.heading" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "volume.SegmentationParamSet.insert_new_params(\n", + " segmentation_method=\"cellpose\",\n", + " paramset_idx=1,\n", + " params=dict(\n", + " diameter=None,\n", + " min_size=2,\n", + " do_3d=False,\n", + " anisotropy=0.5,\n", + " model_type=\"nuclei\",\n", + " channels=[[0, 0]],\n", + " z_axis=0,\n", + " skip_duplicates=True,\n", + " ),\n", + ")" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "DataJoint uses a `SegmentationTask` table to\n", + "manage which `Volume` and `SegmentationParamSet` should be used during processing. \n", + "\n", + "This table is important for defining several important aspects of\n", + "downstream processing. Let's view the attributes to get a better understanding. " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "print(volume.SegmentationTask.describe())" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "volume.SegmentationTask.heading" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The `SegmentationTask` table contains two important attributes: \n", + "+ `paramset_idx`\n", + "+ `task_mode`\n", + "\n", + "The `paramset_idx` attribute is tracks\n", + "your segmentation parameter sets. You can choose the parameter set on which\n", + "you want to run segmentation analysis based on this attribute. This\n", + "attribute tells the `Segmentation` table which set of parameters you are\n", + "processing in a given `populate()`.\n", + "\n", + "The `task_mode` attribute can be set to either `load` or `trigger`. When set to `trigger`, the\n", + "segmentation step will run cellpose on the raw data." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "volume.SegmentationTask.insert1(\n", + " dict(\n", + " scan_key,\n", + " paramset_idx=1,\n", + " task_mode=\"trigger\",\n", + " ),\n", + ")" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "For now, Element ZStack only supports triggering cellpose. Now, we can popluate\n", + "the `Segmentation` table. This step may take several hours, depending on your\n", + "computer's capabilities." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "volume.Segmentation.populate(scan_key, display_progress=True)" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Volumetric data uploaded to BossDB requires information about voxel size. The\n", + "DataJoint table `volume.VoxelSize` can be used to insert this information for a\n", + "given dataset." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "volume.VoxelSize.heading" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "volume.VoxelSize.insert1(dict(scan_key, width=0.001, height=0.001, depth=0.001))" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Let's define an `upload_key` to automatically upload the volume to BossDB via the\n", + "`bossdb` schema. The `upload_key` combines information about the current scan from\n", + "`scan.Scan` and the `paramset_idx` from `SegmentationParamSet`." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "upload_key = dict(scan_key, paramset_idx=1)" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The first table in this schema is `VolumeUploadTask`. Let's define the upload task by naming the collection, experiment,\n", + "and channel where the data should be uploaded." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "print(bossdb.VolumeUploadTask.describe())" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "bossdb.VolumeUploadTask.heading" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "collection = \"dataJointTestUpload\"\n", + "experiment = \"CalciumImaging\"\n", + "channel = \"volume-image\"\n", + "bossdb.VolumeUploadTask.insert1(\n", + " dict(\n", + " upload_key,\n", + " collection_name=collection,\n", + " experiment_name=experiment,\n", + " channel_name=channel,\n", + " )\n", + ")" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Finally, we can upload data. \n", + "\n", + "As a reminder, uploading data to BossDB via this pipeline requires an API\n", + "token which can be obtained by creating an account at\n", + "[api.bossdb.io](https://api.bossdb.io). You will also need resource manager\n", + "permissions from the team at [BossDB](https://bossdb.org)." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "bossdb.VolumeUpload.populate(upload_key)" + ] + }, + { + "attachments": {}, + "cell_type": "markdown", + "metadata": {}, + "source": [ + "To visualize the volumetric data, import the neuroglancer URL and paste it into\n", + "your browser." 
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "(bossdb.VolumeUpload.WebAddress & upload_key & \"upload_type='image+annotation'\").fetch1(\n", + " \"web_address\"\n", + ")" + ] + } + ], + "metadata": { + "jupytext": { + "formats": "ipynb,py:percent" + }, + "kernelspec": { + "display_name": "Python 3.9.13 ('ele')", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.10" + }, + "vscode": { + "interpreter": { + "hash": "d00c4ad21a7027bf1726d6ae3a9a6ef39c8838928eca5a3d5f51f3eb68720410" + } + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 0000000..8e45a4e --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,15 @@ +[tool.pytest.ini_options] +minversion = "6.0" +addopts = "--capture=tee-sys -p no:warnings --cov=element_zstack --cov-report term-missing" +# Verbosity: -v for pytest more verbose +# Warnings: -p no:warnings to disable +# Stepwise: --sw to restart pytest at last failure point +# Debug: --pdb enter debug mode on first failure +# Capturing output: -s for none, --capture=tee-sys for both stdout and stderr +# Coverage: --cov={package} - package for which we're measuring coverage +# Coverage report: --cov-report term-missing send report to stdout with line numbers of missed +testpaths = [ + "tests", +] +norecursedirs = ["docs", "*.egg-info", ".git"] +# PYTHONDONTWRITEBYTECODE=1 # Setting this env variable will speed up pytest diff --git a/requirements.txt b/requirements.txt new file mode 100644 index 0000000..e0c9d23 --- /dev/null +++ b/requirements.txt @@ -0,0 +1,8 @@ +datajoint>=0.13.0 +element-animal>=0.1.5 +element-lab>=0.2.0 +element-session>=0.1.2 +element-calcium-imaging>=0.5.5 +element-zstack>=0.1.0 +intern>=1.4.1 +ipykernel>=6.0.1 diff --git a/requirements_dev.txt b/requirements_dev.txt new file mode 100755 index 0000000..9955dec --- /dev/null +++ b/requirements_dev.txt @@ -0,0 +1,2 @@ +pytest +pytest-cov diff --git a/setup.py b/setup.py new file mode 100644 index 0000000..bfdbb1a --- /dev/null +++ b/setup.py @@ -0,0 +1,30 @@ +from os import path + +from setuptools import find_packages, setup + +pkg_name = "workflow_zstack" +here = path.abspath(path.dirname(__file__)) + +long_description = """ +# Workflow for volumetric data +""" + +with open(path.join(here, "requirements.txt")) as f: + requirements = f.read().splitlines() + +with open(path.join(here, pkg_name, "version.py")) as f: + exec(f.read()) + +setup( + name="workflow-zstack", + version=__version__, # noqa: F821 + description="DataJoint Workflow for Element ZStack", + long_description=long_description, + author="DataJoint", + author_email="info@datajoint.com", + license="MIT", + url="https://github.com/datajoint/workflow-zstack", + keywords="neuroscience volumetric bossdb datajoint", + packages=find_packages(exclude=["contrib", "docs", "tests*"]), + install_requires=requirements, +) diff --git a/tests/__init__.py b/tests/__init__.py new file mode 100644 index 0000000..6f83d15 --- /dev/null +++ b/tests/__init__.py @@ -0,0 +1,5 @@ +""" +See pyproject.toml for config options. 
+Run all: pytest tests/
+Run one: pytest tests/test_SCRIPT.py -k test_name
+"""
diff --git a/tests/conftest.py b/tests/conftest.py
new file mode 100644
index 0000000..2e15ba6
--- /dev/null
+++ b/tests/conftest.py
@@ -0,0 +1,180 @@
+import logging
+import os
+import pathlib
+from pathlib import Path
+
+import datajoint as dj
+import pytest
+from element_interface.utils import find_full_path
+
+from workflow_zstack.paths import get_volume_root_data_dir
+
+# ------------------- SOME CONSTANTS -------------------
+
+
+logger = logging.getLogger("datajoint")
+
+
+sessions_dirs = [
+    "subject1",
+]
+
+# ---------------------- FIXTURES ----------------------
+
+
+@pytest.fixture(scope="session")
+def test_data():
+    for p in sessions_dirs:
+        try:
+            find_full_path(get_volume_root_data_dir(), p).as_posix()
+        except FileNotFoundError as e:
+            print(e)
+
+
+@pytest.fixture(autouse=True, scope="session")
+def pipeline():
+    from workflow_zstack import pipeline
+
+    yield {
+        "subject": pipeline.subject,
+        "lab": pipeline.lab,
+        "session": pipeline.session,
+        "scan": pipeline.scan,
+        "volume": pipeline.volume,
+        "bossdb": pipeline.bossdb,
+    }
+
+
+@pytest.fixture(scope="session")
+def insert_upstream(pipeline):
+    import datetime
+
+    subject = pipeline["subject"]
+    session = pipeline["session"]
+    scan = pipeline["scan"]
+
+    subject.Subject.insert1(
+        dict(
+            subject="subject1",
+            sex="M",
+            subject_birth_date="2023-01-01",
+            subject_description="Cellpose segmentation of volumetric data."),
+        skip_duplicates=True,
+    )
+
+    session_key = dict(
+        subject="subject1",
+        session_id=0,
+    )
+    session.Session.insert1(
+        dict(
+            session_key,
+            session_datetime=datetime.datetime.now(),
+        ),
+        skip_duplicates=True,
+    )
+
+    session.SessionDirectory.insert1(
+        dict(session_key, session_dir="subject1/session1"),
+        skip_duplicates=True,
+    )
+    scan.Scan.insert1(
+        dict(
+            session_key,
+            scan_id=0,
+            acq_software="ScanImage",
+        ),
+        skip_duplicates=True,
+    )
+
+    yield
+
+
+@pytest.fixture(scope="session")
+def volume_volume(pipeline):
+    volume = pipeline["volume"]
+
+    volume.Volume.populate()
+
+    yield
+
+
+@pytest.fixture(scope="session")
+def volume_segmentation_task(pipeline):
+    volume = pipeline["volume"]
+    key = (volume.Volume & "subject='subject1'").fetch1("KEY")
+    volume.SegmentationParamSet.insert_new_params(
+        segmentation_method="cellpose",
+        paramset_idx=1,
+        params=dict(
+            diameter=8,
+            min_size=2,
+            do_3d=False,
+            anisotropy=0.5,
+            model_type="nuclei",
+            channels=[[0, 0]],
+            z_axis=0,
+            skip_duplicates=True,
+        ),
+    )
+    volume.SegmentationTask.insert1(
+        dict(
+            key,
+            paramset_idx=1,
+            task_mode="trigger",
+        ),
+        skip_duplicates=True,
+    )
+    yield
+
+
+@pytest.fixture(scope="session")
+def volume_segmentation(pipeline):
+    volume = pipeline["volume"]
+    key = (volume.Volume & "subject='subject1'").fetch1("KEY")
+    volume.Segmentation.populate(key)
+    yield
+
+
+@pytest.fixture(scope="session")
+def volume_voxel_size(pipeline):
+    volume = pipeline["volume"]
+    key = (volume.Volume & "subject='subject1'").fetch1("KEY")
+    volume.VoxelSize.insert1(
+        dict(
+            key,
+            width=0.001,
+            height=0.001,
+            depth=0.001,
+        )
+    )
+    yield
+
+@pytest.fixture(scope="session")
+def bossdb_volume_upload_task(pipeline):
+    bossdb = pipeline["bossdb"]
+    volume = pipeline["volume"]
+    key = (volume.Segmentation & "subject='subject1'").fetch1("KEY")
+    col_name = "dataJointTestUpload"
+    exp_name = "CaImagingFinal"
+    chn_name = "test1"
+
+    bossdb.VolumeUploadTask.insert1(
+        dict(
key, + collection_name=col_name, + experiment_name=exp_name, + channel_name=chn_name, + ), skip_duplicates=True + ) + yield + +@pytest.fixture(scope="session") +def bossdb_volume_upload(pipeline): + bossdb = pipeline["bossdb"] + bossdb.VolumeUpload.populate() + yield \ No newline at end of file diff --git a/tests/test_export.py b/tests/test_export.py new file mode 100644 index 0000000..096886e --- /dev/null +++ b/tests/test_export.py @@ -0,0 +1,95 @@ +def test_export(pipeline): + """Test export to BossDB. + + Please note that uploading data to BossDB via this pipeline requires an API + token which can be obtained by creating an account at + https://api.bossdb.io. You will also need resource manager permissions from the team at https://bossdb.org. + """ + import datetime + + subject = pipeline["subject"] + session = pipeline["session"] + scan = pipeline["scan"] + volume = pipeline["volume"] + bossdb = pipeline["bossdb"] + + subject.Subject.insert1( + dict( + subject="subject1", + sex="M", + subject_birth_date="2023-01-01", + subject_description="Cellpose segmentation of volumetric data."), + skip_duplicates=True, + ) + + session_key = dict( + subject="subject1", + session_id=0, + ) + session.Session.insert1( + dict( + session_key, + session_datetime=datetime.datetime.now(), + ), + skip_duplicates=True, + ) + + session.SessionDirectory.insert1( + dict(session_key, session_dir="subject1/session1"), + skip_duplicates=True, + ) + scan.Scan.insert1( + dict( + session_key, + scan_id=0, + acq_software="ScanImage", + ), + skip_duplicates=True, + ) + volume.Volume.populate() + key = (volume.Volume & "subject='subject1'").fetch1("KEY") + volume.SegmentationParamSet.insert_new_params( + segmentation_method="cellpose", + paramset_idx=2, + params=dict( + diameter=None, + min_size=2, + do_3d=False, + anisotropy=0.5, + model_type="nuclei", + channels=[[0, 0]], + z_axis=0, + skip_duplicates=True, + ), + ) + volume.SegmentationTask.insert1(dict( + key, + paramset_idx=2, + task_mode="trigger", + ), + skip_duplicates=True, + ) + segmentation_key = (volume.SegmentationTask & "subject='subject1'").fetch1("KEY") + volume.Segmentation.populate(segmentation_key) + volume.VoxelSize.insert1( + dict( + segmentation_key, + width=0.001, + height=0.001, + depth=0.001, + ) + ) + col_name = "dataJointTestUpload" + exp_name = "CaImagingFinal" + chn_name = "test1" + + bossdb.VolumeUploadTask.insert1( + dict( + segmentation_key, + collection_name=col_name, + experiment_name=exp_name, + channel_name=chn_name, + ), skip_duplicates=True + ) + + bossdb.VolumeUpload.populate(segmentation_key) \ No newline at end of file diff --git a/tests/test_pipeline_generation.py b/tests/test_pipeline_generation.py new file mode 100644 index 0000000..bedf5e1 --- /dev/null +++ b/tests/test_pipeline_generation.py @@ -0,0 +1,23 @@ +def test_generate_pipeline(pipeline): + subject = pipeline["subject"] + session = pipeline["session"] + scan = pipeline["scan"] + volume = pipeline["volume"] + bossdb = pipeline["bossdb"] + + # Test connection from Subject to Session + assert subject.Subject.full_table_name in session.Session.parents() + + # Test connection from Session to Scan and Scan to Volume + assert session.Session.full_table_name in scan.Scan.parents() + assert scan.Scan.full_table_name in volume.Volume.parents() + assert "mask_npix" in (volume.Segmentation.Mask.heading.secondary_attributes) + + assert all( + [ + bossdb.VolumeUploadTask.full_table_name in bossdb.VolumeUpload.parents(), + volume.Segmentation.full_table_name in 
bossdb.VolumeUploadTask.parents(),
+        ]
+    )
+
+    assert "web_address" in (bossdb.VolumeUpload.WebAddress.heading.secondary_attributes)
diff --git a/workflow_zstack/__init__.py b/workflow_zstack/__init__.py
new file mode 100644
index 0000000..8d01cc8
--- /dev/null
+++ b/workflow_zstack/__init__.py
@@ -0,0 +1,17 @@
+import os
+import datajoint as dj
+
+if "custom" not in dj.config:
+    dj.config["custom"] = {}
+
+# overwrite dj.config['custom'] values with environment variables if available
+
+dj.config["custom"]["database.prefix"] = os.getenv(
+    "DATABASE_PREFIX", dj.config["custom"].get("database.prefix", "")
+)
+
+dj.config["custom"]["volume_root_data_dir"] = os.getenv(
+    "VOLUME_ROOT_DATA_DIR", dj.config["custom"].get("volume_root_data_dir", "")
+)
+
+db_prefix = dj.config["custom"].get("database.prefix", "")
\ No newline at end of file
diff --git a/workflow_zstack/paths.py b/workflow_zstack/paths.py
new file mode 100644
index 0000000..9da17a0
--- /dev/null
+++ b/workflow_zstack/paths.py
@@ -0,0 +1,48 @@
+from collections.abc import Sequence
+from typing import List
+import pathlib
+import datajoint as dj
+from element_interface.utils import find_full_path
+from element_session import session_with_id as session
+
+
+def get_volume_root_data_dir() -> List[str]:
+    """Return root directory for volumetric data in dj.config
+
+    Returns:
+        path (any): List of path(s) if available or None
+    """
+    vol_root_dirs = dj.config.get("custom", {}).get("volume_root_data_dir", None)
+    if not vol_root_dirs:
+        return None
+    elif isinstance(vol_root_dirs, Sequence) and not isinstance(vol_root_dirs, str):
+        # a list/tuple of root directories was configured
+        return list(vol_root_dirs)
+    else:
+        # a single root directory was configured; always return a list
+        return [vol_root_dirs]
+
+
+def get_volume_tif_file(scan_key):
+    """Retrieve the ScanImage file associated with a given Scan.
+
+    Args:
+        scan_key (dict): Primary key from Scan.
+
+    Returns:
+        path (str): Absolute path of the scan file.
+
+    Raises:
+        FileNotFoundError: If the tiff file(s) are not found.
+    """
+    # Folder structure: root / subject / session / .tif (raw)
+    sess_dir = find_full_path(
+        get_volume_root_data_dir(),
+        pathlib.Path((session.SessionDirectory & scan_key).fetch1("session_dir")),
+    )
+
+    tiff_filepaths = [fp.as_posix() for fp in sess_dir.rglob("*.tif")]
+
+    if tiff_filepaths:
+        assert len(tiff_filepaths) == 1, "More than 1 `.tif` file in file path. Please ensure the session directory contains only 1 image file."
+        return tiff_filepaths[0]
+    else:
+        raise FileNotFoundError(f"No tiff file found in {sess_dir}")
diff --git a/workflow_zstack/pipeline.py b/workflow_zstack/pipeline.py
new file mode 100644
index 0000000..991828a
--- /dev/null
+++ b/workflow_zstack/pipeline.py
@@ -0,0 +1,46 @@
+import datajoint as dj
+from element_lab import lab
+from element_lab.lab import Lab, Location, Project, Protocol, Source, User
+from element_animal import subject, surgery
+from element_animal.subject import Subject
+from element_session import session_with_id as session
+from element_calcium_imaging import imaging, scan
+from element_zstack import volume, bossdb
+
+from . 
import db_prefix +from .paths import get_volume_root_data_dir, get_volume_tif_file +from .reference import Device + +__all__ = [ + "db_prefix", + "lab", + "scan", + "imaging", + "session", + "subject", + "surgery", + "volume", + "bossdb", + "Device", + "get_volume_root_data_dir", + "get_volume_tif_file", +] + +# ---------------------------------- Activate schemas ---------------------------------- + +lab.activate(db_prefix + "lab") +subject.activate(db_prefix + "subject", linking_module=__name__) +surgery.activate(db_prefix + "surgery", linking_module=__name__) + +Experimenter = lab.User +session.activate(db_prefix + "session", linking_module=__name__) + +Equipment = Device +Session = session.Session +SessionDirectory = session.SessionDirectory +imaging.activate(db_prefix + "imaging", db_prefix + "scan", linking_module=__name__) + +Mask = imaging.Segmentation.Mask +Scan = scan.Scan +volume.activate(db_prefix + "volume", linking_module=__name__) +bossdb.activate(db_prefix + "bossdb", linking_module=__name__) diff --git a/workflow_zstack/reference.py b/workflow_zstack/reference.py new file mode 100644 index 0000000..0dfad14 --- /dev/null +++ b/workflow_zstack/reference.py @@ -0,0 +1,26 @@ +import datajoint as dj + +from . import db_prefix + +schema = dj.Schema(db_prefix + "reference") + + +@schema +class Device(dj.Lookup): + """Table for managing lab equipment. + + Attributes: + device ( varchar(32) ): Device short name. + modality ( varchar(64) ): Modality for which this device is used. + description ( varchar(256) ): Optional. Description of device. + """ + + definition = """ + device : varchar(32) + --- + modality : varchar(64) + description=null : varchar(256) + """ + contents = [ + ["scanner1", "fluorescence microscope", ""], + ] diff --git a/workflow_zstack/version.py b/workflow_zstack/version.py new file mode 100644 index 0000000..ee6de92 --- /dev/null +++ b/workflow_zstack/version.py @@ -0,0 +1,2 @@ +"""Package metadata.""" +__version__ = "0.1.0"
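
As a closing configuration note: `workflow_zstack/__init__.py` above also reads the
environment variables `DATABASE_PREFIX` and `VOLUME_ROOT_DATA_DIR`, which take
precedence over the corresponding `dj_local_conf.json` entries. A minimal sketch
(both values below are placeholders) of configuring the pipeline this way:

```python
import os

# Set before importing the pipeline, since workflow_zstack/__init__.py reads
# these values at import time.
os.environ["DATABASE_PREFIX"] = "neuro_"
os.environ["VOLUME_ROOT_DATA_DIR"] = "/path/to/your/example_data"

# Importing the pipeline activates the schemas with the prefix chosen above.
from workflow_zstack.pipeline import volume

print(volume.schema.database)  # expected to print something like "neuro_volume"
```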