diff --git a/.github/actions/install-basic-deps/action.yml b/.github/actions/install-basic-deps/action.yml
deleted file mode 100644
index e2fee2dc..00000000
--- a/.github/actions/install-basic-deps/action.yml
+++ /dev/null
@@ -1,6 +0,0 @@
-name: install-basic-deps
-runs:
- using: "composite"
- steps:
- - run: sudo apt-get update && sudo apt-get install --no-install-recommends --yes libgomp1 libgl1-mesa-glx libglu1-mesa libegl1-mesa
- shell: bash
diff --git a/.github/workflows/cff.yml b/.github/workflows/cff.yml
deleted file mode 100644
index 519cffd2..00000000
--- a/.github/workflows/cff.yml
+++ /dev/null
@@ -1,22 +0,0 @@
-name: validate-citation-file
-
-on:
- push:
- branches: [ main ]
- pull_request:
- branches: [ main ]
-
- workflow_dispatch:
-
-jobs:
- validate-citation-cff:
- runs-on: ubuntu-latest
- env:
- GITHUB_PAT: ${{ secrets.GITHUB_TOKEN }}
-
- steps:
- - name: Checkout
- uses: actions/checkout@v3
-
- - name: Validate CITATION.cff
- uses: dieghernan/cff-validator@main
diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml
deleted file mode 100644
index 492bb3e7..00000000
--- a/.github/workflows/main.yml
+++ /dev/null
@@ -1,306 +0,0 @@
-name: run-exemplary-workflow
-
-on:
- push:
- branches: [ main ]
- pull_request:
- branches: [ main ]
-
- # Allows you to run this workflow manually from the Actions tab
- workflow_dispatch:
-
-jobs:
- run-pydoit:
- runs-on: ubuntu-latest
-
- steps:
- - name: checkout-repository
- uses: actions/checkout@v2
-
- - name: install-basic-deps
- uses: ./.github/actions/install-basic-deps
-
- - name: setup-conda-environment
- uses: conda-incubator/setup-miniconda@v2
- with:
- environment-file: exemplary_workflow/source/envs/default_env.yaml
- miniforge-version: latest
- activate-environment: exemplary_workflow
-
- - name: run-workflow
- shell: bash -l {0}
- run: |
- conda install --channel conda-forge doit=0.33.1
- cd $GITHUB_WORKSPACE/exemplary_workflow/pydoit
- doit
- - name: upload-paper-artifact
- uses: actions/upload-artifact@v2
- with:
- name: paper
- path: ./exemplary_workflow/pydoit/paper.pdf
- retention-days: 1
- if-no-files-found: error
-
- run-cwl:
- runs-on: ubuntu-latest
-
- steps:
- - name: checkout-repository
- uses: actions/checkout@v2
-
- - name: install-basic-deps
- uses: ./.github/actions/install-basic-deps
-
- - name: setup-conda-environment
- uses: conda-incubator/setup-miniconda@v2
- with:
- environment-file: exemplary_workflow/source/envs/default_env.yaml
- miniforge-version: latest
- activate-environment: exemplary_workflow
-
- - name: run-workflow
- shell: bash -l {0}
- run: |
- conda install --channel conda-forge cwltool
- cd $GITHUB_WORKSPACE/exemplary_workflow/cwl
- cwltool wf_run_exemplary_wf.cwl
- - name: upload-paper-artifact
- uses: actions/upload-artifact@v2
- with:
- name: paper
- path: ./exemplary_workflow/cwl/paper.pdf
- retention-days: 1
- if-no-files-found: error
-
- run-nextflow:
- runs-on: ubuntu-latest
-
- steps:
- - name: checkout-repository
- uses: actions/checkout@v2
-
- - name: install-basic-deps
- uses: ./.github/actions/install-basic-deps
-
- - name: setup-conda-environment
- uses: conda-incubator/setup-miniconda@v2
- with:
- miniforge-version: latest
- activate-environment: exemplary_workflow
-
- - name: run-workflow
- shell: bash -l {0}
- run: |
- conda install --channel bioconda nextflow=21.04.0
- cd $GITHUB_WORKSPACE/exemplary_workflow/nextflow
- nextflow run exemplarywf.nf
- - name: upload-paper-artifact
- uses: actions/upload-artifact@v2
- with:
- name: paper
- path: ./exemplary_workflow/nextflow/paper.pdf
- retention-days: 1
- if-no-files-found: error
-
-
- run-snakemake:
- runs-on: ubuntu-latest
-
- steps:
- - name: checkout-repository
- uses: actions/checkout@v2
-
- - name: install-basic-deps
- uses: ./.github/actions/install-basic-deps
-
- - name: setup-conda-environment
- uses: conda-incubator/setup-miniconda@v2
- with:
- miniforge-version: latest
- activate-environment: exemplary_workflow
- channels: conda-forge,bioconda
- channel-priority: strict
-
- - name: run-workflow
- shell: bash -l {0}
- run: |
- conda install snakemake
- cd $GITHUB_WORKSPACE/exemplary_workflow/snakemake
- snakemake --cores 1 --use-conda --conda-frontend conda ./paper.pdf
- - name: upload-paper-artifact
- uses: actions/upload-artifact@v2
- with:
- name: paper
- path: ./exemplary_workflow/snakemake/paper.pdf
- retention-days: 1
- if-no-files-found: error
-
- run-kadistudio:
- runs-on: ubuntu-latest
-
- steps:
- - name: checkout-repository
- uses: actions/checkout@v2
-
- - name: install-basic-deps
- uses: ./.github/actions/install-basic-deps
-
- - name: setup-conda-environment
- uses: conda-incubator/setup-miniconda@v2
- with:
- environment-file: exemplary_workflow/source/envs/default_env.yaml
- miniforge-version: latest
- activate-environment: exemplary_workflow
-
- - name: run-workflow
- shell: bash -l -i {0}
- run: |
- bash $GITHUB_WORKSPACE/exemplary_workflow/kadistudio/install_components.sh
- mkdir $GITHUB_WORKSPACE/run
- process_engine run $GITHUB_WORKSPACE/exemplary_workflow/kadistudio/exemplary_workflow.flow -p $GITHUB_WORKSPACE/run
-
- - name: upload-paper-artifact
- uses: actions/upload-artifact@v2
- with:
- name: paper
- path: ./exemplary_workflow/kadistudio/paper.pdf
- retention-days: 1
- if-no-files-found: error
-
- run-aiida:
- runs-on: ubuntu-latest
-
- services:
- postgres:
- image: postgres:10
- env:
- POSTGRES_DB: test_aiida
- POSTGRES_PASSWORD: ''
- POSTGRES_HOST_AUTH_METHOD: trust
- options: >-
- --health-cmd pg_isready
- --health-interval 10s
- --health-timeout 5s
- --health-retries 5
- ports:
- - 5432:5432
- rabbitmq:
- image: rabbitmq:3.8.14
- ports:
- - 5672:5672
- slurm:
- image: xenonmiddleware/slurm:17
- ports:
- - 5001:22
-
- steps:
- - name: checkout-repository
- uses: actions/checkout@v2
-
- - name: install-basic-deps
- uses: ./.github/actions/install-basic-deps
-
- - name: install-system-dependencies
- run: |
- sudo apt update
- sudo apt install postgresql graphviz
-
- - name: setup-conda-environment
- uses: conda-incubator/setup-miniconda@v2
- with:
- environment-file: exemplary_workflow/source/envs/default_env.yaml
- miniforge-version: latest
- activate-environment: exemplary_workflow
-
- - name: install-aiida-shell
- # the shell directive is necessary to properly activate the shell
- # see https://github.com/marketplace/actions/setup-miniconda
- shell: bash -l {0}
- run: |
- pip install "aiida-core>=2.0,<2.1" "aiida-shell==0.2.0"
-
- - name: setup-aiida-profile
- shell: bash -l {0}
- run: |
- verdi quicksetup --non-interactive --profile default --email aiida@localhost --first-name Giuseppe --last-name Verdi --institution Khedivial --db-backend psql_dos
-
- - name: run-workflow
- working-directory: exemplary_workflow/aiida
- shell: bash -l {0}
- run:
- ./exemplary_workflow.py
-
- - name: upload-paper-artifact
- uses: actions/upload-artifact@v2
- with:
- name: paper
- path: ./exemplary_workflow/aiida/paper.pdf
- retention-days: 1
- if-no-files-found: error
-
- run-gwl:
- runs-on: ubuntu-latest
-
- steps:
- - name: Install Guix
- id: install-guix
- uses: PromyLOPh/guix-install-action@v1
- with:
- channels: |-
- (list (channel
- (name 'guix)
- (url "https://git.savannah.gnu.org/git/guix.git")
- (branch "master")
- (commit
- "8e54584d4448d37ddf8ae995bb545a181ba2493c")))
-
- - name: Install Guix Workflow Language
- run: |
- guix install gwl
- echo "GUIX_EXTENSIONS_PATH=$HOME/.guix-profile/share/guix/extensions" >> $GITHUB_ENV
-
- - name: checkout-repository
- uses: actions/checkout@v2
-
- - name: run-workflow
- run: |
- cd exemplary_workflow/gwl
- guix workflow run workflow.w
-
- - name: upload-paper-artifact
- uses: actions/upload-artifact@v2
- with:
- name: paper
- path: exemplary_workflow/gwl/paper.pdf
-
- run-pyiron:
- runs-on: ubuntu-latest
-
- steps:
- - name: checkout-repository
- uses: actions/checkout@v2
-
- - name: install-basic-deps
- uses: ./.github/actions/install-basic-deps
-
- - name: setup-conda-environment
- uses: conda-incubator/setup-miniconda@v2
- with:
- miniforge-version: latest
- activate-environment: exemplary_workflow
- channels: conda-forge
- channel-priority: strict
-
- - name: run-workflow
- shell: bash -l {0}
- run: |
- conda install conda_subprocess=0.0.4 pyiron_base=0.9.7
- cd $GITHUB_WORKSPACE/exemplary_workflow/pyiron
- python workflow.py
- - name: upload-paper-artifact
- uses: actions/upload-artifact@v2
- with:
- name: paper
- path: ./exemplary_workflow/pyiron/paper.pdf
- retention-days: 1
- if-no-files-found: error
diff --git a/.github/workflows/pipeline.yml b/.github/workflows/pipeline.yml
new file mode 100644
index 00000000..3006ea06
--- /dev/null
+++ b/.github/workflows/pipeline.yml
@@ -0,0 +1,74 @@
+name: Pipeline
+
+on:
+ push:
+ branches: [ main ]
+ pull_request:
+
+jobs:
+ aiida:
+ runs-on: ubuntu-22.04
+ steps:
+ - uses: actions/checkout@v4
+ - name: Conda config
+ run: echo -e "channels:\n - conda-forge\n" > .condarc
+ - uses: conda-incubator/setup-miniconda@v3
+ with:
+ python-version: "3.12"
+ miniforge-version: latest
+ condarc-file: .condarc
+ environment-file: environment.yml
+ - name: Test
+ shell: bash -l {0}
+ run: |
+ sudo apt-get install -y $(cat apt.txt)
+ conda env create -n preprocessing -f source/envs/preprocessing.yaml -y
+ conda env create -n processing -f source/envs/processing.yaml -y
+ conda env create -n postprocessing -f source/envs/postprocessing.yaml -y
+ verdi presto --profile-name pwd
+ echo -e 'from aiida import load_profile\nload_profile()\n\nfrom python_workflow_definition.aiida import load_workflow_json\n\n\nif __name__ == "__main__":\n workgraph = load_workflow_json(file_name="workflow.json")\n workgraph.run()' > test_with_aiida.py
+ python test_with_aiida.py
+
+ jobflow:
+ runs-on: ubuntu-22.04
+ steps:
+ - uses: actions/checkout@v4
+ - name: Conda config
+ run: echo -e "channels:\n - conda-forge\n" > .condarc
+ - uses: conda-incubator/setup-miniconda@v3
+ with:
+ python-version: "3.12"
+ miniforge-version: latest
+ condarc-file: .condarc
+ environment-file: environment.yml
+ - name: Test
+ shell: bash -l {0}
+ run: |
+ sudo apt-get install -y $(cat apt.txt)
+ conda env create -n preprocessing -f source/envs/preprocessing.yaml -y
+ conda env create -n processing -f source/envs/processing.yaml -y
+ conda env create -n postprocessing -f source/envs/postprocessing.yaml -y
+ echo -e 'from jobflow.managers.local import run_locally\nfrom python_workflow_definition.jobflow import load_workflow_json\n\n\nif __name__ == "__main__":\n flow = load_workflow_json(file_name="workflow.json")\n print(run_locally(flow))' > test_with_jobflow.py
+ python test_with_jobflow.py
+
+ pyiron:
+ runs-on: ubuntu-22.04
+ steps:
+ - uses: actions/checkout@v4
+ - name: Conda config
+ run: echo -e "channels:\n - conda-forge\n" > .condarc
+ - uses: conda-incubator/setup-miniconda@v3
+ with:
+ python-version: "3.12"
+ miniforge-version: latest
+ condarc-file: .condarc
+ environment-file: environment.yml
+ - name: Test
+ shell: bash -l {0}
+ run: |
+ sudo apt-get install -y $(cat apt.txt)
+ conda env create -n preprocessing -f source/envs/preprocessing.yaml -y
+ conda env create -n processing -f source/envs/processing.yaml -y
+ conda env create -n postprocessing -f source/envs/postprocessing.yaml -y
+ echo -e 'from python_workflow_definition.pyiron_base import load_workflow_json\n\n\nif __name__ == "__main__":\n delayed_object_lst = load_workflow_json(file_name="workflow.json")\n print(delayed_object_lst[-1].pull())' > test_with_pyiron.py
+ python test_with_pyiron.py
diff --git a/apt.txt b/apt.txt
new file mode 100644
index 00000000..611b8e6c
--- /dev/null
+++ b/apt.txt
@@ -0,0 +1,2 @@
+libgl1-mesa-glx
+libegl1-mesa
diff --git a/conf.py b/conf.py
deleted file mode 100644
index afc3dc0b..00000000
--- a/conf.py
+++ /dev/null
@@ -1,77 +0,0 @@
-# Configuration file for the Sphinx documentation builder.
-#
-# This file only contains a selection of the most common options. For a full
-# list see the documentation:
-# https://www.sphinx-doc.org/en/master/usage/configuration.html
-
-# -- Path setup --------------------------------------------------------------
-
-# If extensions (or modules to document with autodoc) are in another directory,
-# add these directories to sys.path here. If the directory is relative to the
-# documentation root, use os.path.abspath to make it absolute, like shown here.
-#
-# import os
-# import sys
-# sys.path.insert(0, os.path.abspath('.'))
-import sphinx_rtd_theme
-
-
-# -- Project information -----------------------------------------------------
-
-project = 'NFDI4IngScientificWorkflowRequirements'
-copyright = '2021, Jörg F. Unger'
-author = 'Jörg F. Unger, Dennis Gläser, Philipp Diercks'
-
-
-# -- General configuration ---------------------------------------------------
-
-# Add any Sphinx extension module names here, as strings. They can be
-# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
-# ones.
-extensions = [
- 'sphinx.ext.mathjax',
- 'sphinx.ext.githubpages',
- 'sphinx_rtd_theme',
-]
-
-# Add any paths that contain templates here, relative to this directory.
-templates_path = ['_templates']
-
-# The suffix(es) of source filenames.
-# You can specify multiple suffixes as a list of strings:
-#
-source_suffix = {
- '.rst': 'restructuredtext',
-}
-
-# List of patterns, relative to source directory, that match files and
-# directories to ignore when looking for source files.
-# This pattern also affects html_static_path and html_extra_path.
-exclude_patterns = ['conda-env', '_build', 'Thumbs.db', '.DS_Store', 'exemplary_workflow']
-
-
-# The master toctree document.
-master_doc = 'index'
-
-# The language for content autogenerated by Sphinx. Refer to documentation
-# for a list of supported languages.
-#
-# This is also used if you do content translation via gettext catalogs.
-# Usually you set "language" from the command line for these cases.
-language = None
-
-# The name of the Pygments (syntax highlighting) style to use.
-pygments_style = None
-
-
-# -- Options for HTML output -------------------------------------------------
-
-# The theme to use for HTML and HTML Help pages. See the documentation for
-# a list of builtin themes.
-#
-html_theme = 'sphinx_rtd_theme'
-
-# Add any paths that contain custom static files (such as style sheets) here,
-# relative to this directory. They are copied after the builtin static files,
-# so a file named "default.css" will overwrite the builtin "default.css".
-# html_static_path = ['_static']
diff --git a/docs/exemplarywf.rst b/docs/exemplarywf.rst
deleted file mode 100644
index 455e05c0..00000000
--- a/docs/exemplarywf.rst
+++ /dev/null
@@ -1,78 +0,0 @@
-
-.. _exemplarywf:
-
-Exemplary workflow
-==================
-As a minimal working example representative of workflows in computational science, the
-Poisson equation is solved using the finite element method for a set of predefined boundary conditions :math:`u_{\mathrm{D}}`
-on a square domain :math:`\varOmega={(0, s)}^2, s\in\mathbb{R}^+`.
-
-.. math::
- -\Delta u &= f\,,\quad&&\mathrm{in}\;\varOmega\,,\\
- u &= u_{\mathrm{D}}\,,\quad&&\mathrm{on}\;\partial\varOmega\,.
-
-The domain size :math:`s` is defined as an input to the workflow, such that the workflow can
-be run for different computational domains.
-The final output of the workflow is a PDF document showing the solution :math:`u` over the
-line :math:`L=\{(x, y)\in\varOmega\,\vert\,x=y\}` and the number of degrees of freedom used in the finite element model.
-The (directed acyclic graph of the) workflow is shown below and consists of the following processes:
-
-.. |DAG| image:: ./../img/exemplary_wf_dag.png
- :class: align-right
- :width: 95%
- :alt: exemplary_wf_dag
-
-1. Partition of the computational domain using `Gmsh <http://gmsh.info/>`_, |DAG|
-
-.. code-block:: console
-
- gmsh -2 -setnumber domain_size 1.0 source/square.geo -o ./square.msh
-
-
-2. Conversion of the file format (into one readable by `FEniCS <https://fenicsproject.org/>`_) using `meshio <https://github.com/nschloe/meshio>`_,
-
-.. code-block:: console
-
- meshio convert ./square.msh ./square.xdmf
-
-
-3. Solution of the Poisson equation using `FEniCS <https://fenicsproject.org/>`_,
-
-.. code-block:: console
-
- python source/poisson.py --mesh ./square.xdmf --degree 2 --outputfile ./poisson.pvd
-
-
-4. Postprocessing using `ParaView <https://www.paraview.org/>`_,
-
-.. code-block:: console
-
- pvbatch source/postprocessing.py ./poisson.pvd ./plotoverline.csv
-
-
-5. Preparation of macro definitions,
-
-.. code-block:: console
-
- python source/prepare_paper_macros.py --macro-template-file source/macros.tex.template \
- --plot-data-path ./plotoverline.csv --domain-size 1.0 --num-dofs num_dofs \
- --output-macro-file ./macros.tex
-
-
-6. Generation of a PDF using `LaTeX <https://www.latex-project.org/>`_ via `Tectonic <https://tectonic-typesetting.github.io/en-US/>`_.
-
-.. code-block:: console
-
- cp source/paper.tex ./paper.tex
- tectonic ./paper.tex
-
-It is important to note that the `source files `_ are
-regarded as static inputs, i.e. changing them may break the workflow and thus
-only the variable domain size should be exposed as an actual input parameter.
-The number of degrees of freedom is written to stdout
-by the FEniCS script `source/poisson.py`
-and used here to showcase how one might deal with output of integer type (i.e. non-file output).
-If the tool does not support this, the number of degrees of freedom is written to a file.
-
-Details on the specific versions used for each software package can be found in the `conda environment specification file <https://github.com/BAMresearch/NFDI4IngScientificWorkflowRequirements/blob/main/exemplary_workflow/source/envs/default_env.yaml>`_.
-Exemplary implementations with various tools can be found `here `_.
diff --git a/docs/fairness.rst b/docs/fairness.rst
deleted file mode 100644
index cb58fa6a..00000000
--- a/docs/fairness.rst
+++ /dev/null
@@ -1,86 +0,0 @@
-.. _fairness:
-
-FAIRness of research workflows
-==============================
-
-.. image:: ./../img/fairness.png
- :width: 500
- :alt: TODO: caption
-
-As mentioned in the introduction, an overarching goal is to make research
-software `FAIR `_. In this section, we
-therefore want to discuss possible aspects of FAIRness associated with the code
-and data that are used in or produced by scientific inquiry. A scientific workflow
-may consist of a variety of different pieces of software as well as input data,
-which makes it nontrivial to publish it in a comprehensive, reusable and reproducible
-way.
-
-.. contents::
-
-.. _transparency:
-
-Increasing transparency
-------------------------
-
-By publishing the data and software used in scientific investigations, researchers
-give their peers the opportunity to retrace the steps that were necessary to arrive
-at the conclusions drawn in a publication. This significantly increases the
-transparency of their research and, besides this, it allows others to reuse the data
-and software in subsequent investigations.
-
-
-.. _reusable_data:
-
-Reusable data
--------------
-
-Integration of externally published research data in a research project may be
-cumbersome if the data is not well-documented or self-explanatory. To mitigate
-this, data should be published in standard formats that are widely used in the
-community to guarantee their seamless integration into other projects. Moreover,
-rich metadata should be provided, which explains the contents of the data in
-detail and in a machine-readable format.
-
-.. _reusable_software:
-
-Reusable software
------------------
-
-Source code used to process or produce data should be published such
-that other researchers can easily reconstruct how the data was generated or analyzed.
-Besides a license which states terms and conditions of use, it is again important
-to publish metadata alongside the code, which states the requirements of the software
-on the compute environment, that is, compiler or interpreter versions, third-party libraries,
-hardware requirements, etc.
-
-*Ideally, this code expresses only project-specific tasks, reusing established
-packages for generic functionality, whenever possible, instead of reimplementing
-it. This reduces the risk of bugs and makes it easier for other researchers
-to understand the code, who possibly are familiar with the syntax of the used
-packages. If generic, possibly reusable functionality has to be implemented, we encourage
-researchers to publish these parts separately in the form of a code repository
-and/or software package for others to use independently of the project-specific
-code. This, of course, requires competence in sustainable software development
-or the help of research software engineers.*
-
-.. _reproducible workflows:
-
-Reproducible research workflows
--------------------------------------
-
-A scientific workflow may involve many different processes that are executed in
-a particular order while exchanging data. With an increasing number of processes,
-retracing the workflow logic can become rather cumbersome. Using workflow tools
-or languages to describe the logic and the flow of data provides the means to
-communicate this in a more comprehensible way. Moreover, some tools provide
-the possibility to visualize the workflow in a directed acyclic graph (DAG),
-depicting inputs, outputs and metadata of the processes and how they are interconnected.
-In addition to the graphical visualization of the provenance graph, the workflow tool
-may also automatically track and record the full data provenance in a database.
-Besides this, other researchers may easily extend or change the workflow by adding or exchanging processes.
-
-In order for the workflow to be reproducible, metadata must be provided that states
-the requirements on the compute environment and all of its dependencies, such that
-users can prepare their system for the workflow execution. Ideally, workflow tools
-automatically track the involved dependencies and/or provide the means to encapsulate
-the environment (e.g. using container solutions) such that it can be distributed.
diff --git a/docs/requirements.rst b/docs/requirements.rst
deleted file mode 100644
index 474aad2b..00000000
--- a/docs/requirements.rst
+++ /dev/null
@@ -1,226 +0,0 @@
-.. _requirements:
-
-Requirements on workflow tools
-==============================
-
-In this section, we want to translate the aspects discussed in :ref:`fairness`
-into possible capabilities that workflow tools should provide in order to
-fulfill these aspects.
-
-.. contents::
-
-.. _requirements_execution:
-
-Execution
----------
-The workflow tool should automatically execute the scientific workflow, i.e.\ the processes that make up the workflow should be executed in the correct order to satisfy dependencies between them.
-To this end, a directed acyclic graph (DAG; dependency graph) should be created and stored for later tracing and visualisation of specific instances of a workflow execution (see also :ref:`requirements_provenance`).
-Moreover, the (possible) reuse of already computed results is an important feature with regard to the execution of the workflow, but also handled quite differently by the investigated tools.
-Therefore, this is listed as a separate requirement (see :ref:`requirements_uptodateness`).
-This requirement focuses on how (easily) the execution of the workflow can be integrated, either locally (on your computer) or on a remote machine (HPC cluster or cloud).
-Note that this relates to configuring remote machines, e.g. the resource manager on an HPC cluster or the cloud computing service, with the workflow tool rather than to the actual execution, since the latter requires a suitable compute environment to be instantiated, which is covered in :ref:`requirements_compute_environment`.
-Ideally, the workflow can be executed anywhere without changing the workflow definition script apart from small changes (one-liner) related to the workflow tool configuration file.
-
-Evaluation criteria:
-
-1. The workflow system supports the execution of the workflow on the local system.
-2. The workflow system supports the execution of the workflow on the local system via a batch system.
-3. The workflow system supports the execution of the workflow via a batch system on the local or a remote system.
-
-.. _requirements_monitor:
-
-Monitoring
-----------
-Depending on the application, the execution of scientific workflows can be very time-consuming. This can be caused by compute-intensive processes,
-such as numerical simulations, or by a large number of short processes that are executed many times. In both cases, it can be very helpful to
-be able to query the state of the execution, that is, which processes have been finished, which processes are currently being processed, and which
-are still pending. A trivial way of such monitoring would be, for instance, when the workflow is started in a terminal which is kept open to inspect
-the output written by the workflow system and the running processes. However, ideally, the workflow system allows for submission of the workflow in
-the form of a process running in the background, while still providing means to monitor the state of the execution.
-
-Evaluation criteria:
-
-1. The only way to monitor the workflow is to watch the console output
-2. The workflow system provides a way to query the execution status at any time
-
-.. _requirements_provenance:
-
-Data provenance
----------------
-The data provenance graph records, for a particular execution of the workflow, which data and processes participated in the generation of a particular
-piece of data. Thus, this is closely related to the workflow itself, which can be thought of as a template for how that data generation should take place.
-However, a concrete realization of the workflow must contain information on the exact input data and parameters that were used, possibly along with meta
-information on the person that executed the workflow, the compute resources used and the time it took to finish. Collection of all relevant information,
-its storage in machine-readable formats and subsequent publication alongside the data can be very useful for future researchers in order to understand
-how exactly the data was produced. Ideally, the workflow system has the means to automatically collect this information upon workflow execution.
-
-Evaluation criteria:
-
-1. The workflow system provides no means to export relevant information from a particular execution
-2. Upon workflow execution, the tool writes metadata files alongside the results, overwriting them upon re-execution
-3. Produced data is stored in a database, allowing produced data to be uniquely associated with particular workflow instantiations
-
-
-.. _requirements_compute_environment:
-
-Compute environment
--------------------
-In order to guarantee interoperability and reproducibility of scientific workflows, the workflows need to be executable by others.
-Here, the reinstantiation of the compute environment (installation of libraries or source code) poses the main challenge.
-Therefore, it is of great use that the workflow tool is able to automatically deploy the software stack (on a per-workflow or per-process basis) by means of a package manager (e.g. conda) or that running processes in containers (e.g. Docker, Singularity) is integrated into the tool.
-
-Evaluation criteria:
-
-1. The automatic instantiation of the compute environment is not intended.
-2. The workflow system allows the automatic instantiation of the compute environment on a per workflow basis.
-3. The workflow system allows the automatic instantiation of the compute environment on a per process basis.
-
-.. _requirements_uptodateness:
-
-Up-to-dateness
---------------
-There are different areas for the application of workflows. On the one hand,
-people might use a workflow to define a single piece of reproducible code
-that, when executed, always returns the same result. Based on that, they might
-start a large number of different jobs and use the workflow system to
-perform this task. Another area of application is the constant development
-within the workflow (e.g. exchanging processes, varying parameters or even
-modifying the source code of a process) until a satisfactory result is
-obtained. The two scenarios require a slightly different behavior of the
-workflow system. In the first scenario, all runs should be kept in the data
-provenance graph with a documentation of how each result instance has been
-obtained (e.g. by always documenting the codes, parameters, and processes).
-If identical runs (identical inputs and processes should result in the same
-output) are detected, a recomputation should be avoided and the original
-output should be linked in the data provenance graph. The benefit of this
-behavior certainly depends on the ratio between the computation time for a
-single process and the overhead of querying the database.
-
-However, when changing the processes (e.g. coding a new time integration
-scheme, a new constitutive model), the workflow system should rather behave
-like a build system (such as make) - only recomputing the steps that are
-changed or that depend on these changes. In particular for complex problems,
-this makes it possible to work with complex dependencies without manually triggering
-computations and results in only the relevant parts being recomputed
-automatically. An example is a paper with multiple figures, each of which is the result of
-complex simulations that in turn depend on a set of general modules that
-are developed in the paper. The "erroneous" runs are usually not interesting
-and should be overwritten.
-
-How this is handled varies between the tools. Some always recompute the
-complete workflow, marked in the matrix by an **R**\ ecompute; others allow
-creating a new entry in the data provenance graph and linking the previous
-result (without the need to recompute already existing results), marked in the
-matrix as **L**\ ink. Finally, make-like tools recreate only the parts
-that are not up to date, labeled as **U**\ pdate. Note that the latter
-usually reduces the overhead of storing multiple instances of the workflow, but
-at the same time also prevents - without additional effort (e.g. when
-executing in different folders) - computing multiple instances of the same
-workflow.
-
-
-.. _requirements_gui:
-
-Graphical user interface
-------------------------
-Independent of a particular execution of the workflow, the workflow system may provide facilities to visualize the graph of the workflow, indicating the
-mutual dependencies of the individual processes and the direction of the flow of data. One can think of this graph as the template for the data provenance
-graph. This visualization can help in conveying the logic behind a particular workflow, making it easier for other researchers to understand and possibly
-incorporate it into their own research. The latter requires that the workflow system is able to handle hierarchical workflows, i.e. it needs to support
-sub-workflows as processes inside another workflow. Beyond a mere visualization, a graphical user interface may allow for visually connecting different
-workflows into a new one by means of drag & drop. An example of this is `Rabix Composer <https://github.com/rabix/composer>`_, which allows for the composition of workflows
-written in CWL.
-
-Evaluation criteria:
-
-1. The workflow system provides no means to visualize the workflow
-2. The workflow system or third-party tools allow to visualize the workflow definition
-3. The workflow system or third-party tools provide a graphical user interface that enables users to graphically create workflows
-
-.. _requirements_hierarchical:
-
-Hierarchical composition of workflows
--------------------------------------
-A workflow consists of a mapping between a set of inputs (which could be empty) and
-a set of outputs, with a number of sequential processes performed in
-between. Connecting the output of one workflow to the input of another
-workflow results in a new, longer workflow. This is particularly relevant in
-situations where multiple people share a common set of procedures (e.g.
-common pre- and postprocessing routines). In this case, copying the
-preprocessing workflow into another one is certainly always possible, but
-does not allow modifications to be performed jointly or different versions
-to be maintained. If the workflow system supports a hierarchical embedding of one
-workflow into another one, the property is labeled as + (otherwise -). This
-also requires defining separate compute environments for each sub-workflow
-(e.g. docker/singularity or conda), because each sub-workflow might use
-different tools or even the same tools but with different versions (e.g.
-python2 vs. python3), so executing all sub-workflows in the same environment
-might not be possible.
-
-.. _requirements_interfaces:
-
-Process interfaces
-------------------
-Each process in a workflow has some input and output data.
-In a traditional file-based pipeline, the output of one process is the input to the next.
-However, it is often more convenient to pass non-file output (e.g. float or integer values) directly from one process to the other without the creation of intermediate files.
-In this case, it is desirable that the workflow tool is able to check for the validity of the data (e.g. the correct data type) to be processed.
-Furthermore, this clearly defines the interface for a process and which input values may be changed.
-This way, a third person is able to understand how to work with, adapt and extend the workflow/process.
-In contrast, in a file-based pipeline this is usually not the case, since a dependency in the form of a file does not give information about the type of data contained in that file.
-
-Evaluation criteria:
-
-1. The workflow system is purely file-based and does not define interface formats.
-2. The workflow system has a file and non-file based interface, where the non-file based inputs are well defined.
-3. The workflow system has a file and non-file based interface, where both the file and non-file based inputs are well defined.
-
-.. _requirements_platform:
-
-Platform for publishing and sharing workflows
----------------------------------------------
-The benefit of a workflow system is already significant when using it for
-individual research, such as developing one's own paper or reproducing a
-paper someone else has written, provided their data processing pipeline is fully
-reproducible, documented and published with the publication. However, the
-benefit can be increased even further if people are able to work jointly on
-(sub-)workflows, in particular when a hierarchical workflow system
-is used. Even though workflows can easily be shared together with the work
-(e.g. in a repository), it might be beneficial to provide a platform that
-allows publishing documented workflows with search and versioning
-functionality. This feature is not part of the requirement matrix used to compare
-the different tools, but we consider a documentation of these platforms (if
-they exist) in the subsequent section an important source of information and a good
-starting point for further research (and exchange).
-
-.. _requirements_evaluation:
-
-Evaluation
-----------
-
-.. https://www.unicode-search.net/unicode-namesearch.pl?term=CIRCLE
-.. ● BLACK CIRCLE
-.. ○ WHITE CIRCLE
-.. 🔴 large red circle to indicate important requirement for user story
-
-+----------------------------+-----------+------------+------------+---------------------+-----+-------------+--------------------+----------------+-------------------+
-| Tool | Execution | Monitoring | Provenance | Compute Environment | GUI | Composition | Process Interfaces | Up-to-dateness | Ease-of-first-use |
-+============================+===========+============+============+=====================+=====+=============+====================+================+===================+
-| AiiDA | ●●● | ●● | ●●● | ●○○ | ●●○ | ●●○ | ●●○ | L | ●○○ |
-+----------------------------+-----------+------------+------------+---------------------+-----+-------------+--------------------+----------------+-------------------+
-| CWL | ●●○ | ●● | ●●○ | ●●● | ●●● | ●●● | ●●● | R | ●●○ |
-+----------------------------+-----------+------------+------------+---------------------+-----+-------------+--------------------+----------------+-------------------+
-| Doit | ●○○ | ●○ | ●○○ | ●○○ | ●○○ | ●●○ | ●○○ | U | ●●● |
-+----------------------------+-----------+------------+------------+---------------------+-----+-------------+--------------------+----------------+-------------------+
-| Nextflow | ●●○ | ●○ | ●●○ | ●●● | ●●○ | ●●● | ●○○ | L | ●●● |
-+----------------------------+-----------+------------+------------+---------------------+-----+-------------+--------------------+----------------+-------------------+
-| Snakemake | ●●○ | ●○ | ●●○ | ●●● | ●●○ | ●●● | ●○○ | U | ●●● |
-+----------------------------+-----------+------------+------------+---------------------+-----+-------------+--------------------+----------------+-------------------+
-| User Story | Execution | Monitoring | Provenance | Compute Environment | GUI | Composition | Process Interfaces | Up-to-dateness | Ease-of-first-use |
-+----------------------------+-----------+------------+------------+---------------------+-----+-------------+--------------------+----------------+-------------------+
-| Paper | | | | 🔴 | | | | 🔴 | 🔴 |
-+----------------------------+-----------+------------+------------+---------------------+-----+-------------+--------------------+----------------+-------------------+
-| Joint research | | | 🔴 | 🔴 | | 🔴 | 🔴 | 🔴 | |
-+----------------------------+-----------+------------+------------+---------------------+-----+-------------+--------------------+----------------+-------------------+
-| Complex hier. computations | 🔴 | 🔴 | 🔴 | 🔴 | | 🔴 | | | |
-+----------------------------+-----------+------------+------------+---------------------+-----+-------------+--------------------+----------------+-------------------+
diff --git a/docs/tools.rst b/docs/tools.rst
deleted file mode 100644
index 407f658f..00000000
--- a/docs/tools.rst
+++ /dev/null
@@ -1,111 +0,0 @@
-.. _developers:
-
-List of tools
-=============
-This is a list of preselected tools based on possible requirements at the start of
-the project.
-
-.. contents::
-
-AiiDA/AiiDAlab
---------------
-* `AiiDA Website `_
-* `AiiDALab Website `_
-* `Main Paper `_ (AiiDA >= 1.0) with (so it seems) main authors
- Sebastiaan P. Huber and Spyros Zoupanos (no longer an active member of the AiiDA team). Correspondence and requests for materials should be addressed to S.P.H. or Giovanni Pizzi.
-* `AiiDA mailing list `_ for general questions about AiiDA.
-* `Github Issue Tracker `_ for bug reports.
-
-Argo
-----
-* `Argo website `_
-
-
-Common Workflow Language
-------------------------
-* `CWL website `_
-* `Discourse group `_ as the recommended place to ask questions about CWL.
-* `CWL paper `_
-* Besides the Discourse group, one could also get in contact via `Gitter `_, `Twitter `_ or the `Google mailing list `_
-
-
-Dlite
------
-* `Dlite website `_
-
-
-GNU Guix
---------
-* `GNU Guix website `_
-* `guix mailing list `_
-
-
-Kadi4Mat
---------
-* `Kadi4Mat website `_
-
-
-Kepler
-------
-* `Kepler website `_
-
-
-Nextflow
---------
-* `Nextflow website `_
-* `Nextflow docs `_
-
-
-Pegasus
--------
-* `Pegasus website `_
-* `Pegasus Contact `_ via Slack or mailing lists
-
-
-Pydoit
-------
-* `Pydoit website `_
-* created and maintained by Eduardo Schettino
-* `Discussion Group `_
-* Info from the discussion group: Unless you are looking for paid support, do **not** send private emails to the project maintainer.
-
-
-Pyiron
-------
-* `Pyiron website `_
-* Contact form on the `team website `_. Links to the email addresses of e.g. the
- lead developer or project lead can also be found there.
-
-
-Reana
------
-* `Reana website `_
-
-
-Snakemake
----------
-* `Snakemake website `_
-* `Credits `_ shows list of
- development team members and lead Johannes Köster
-* *rolling* `paper `_ which is updated regularly.
-
-
-Swift
------
-* `Swift website `_
-
-
-Taverna
--------
-* `Taverna website `_
-
-
-Workflow hub
-------------
-* `Workflow hub website `_
-
-
-Related lists
--------------
-* `Awesome materials informatics `_
-* `Awesome pipeline `_
diff --git a/docs/userstories.rst b/docs/userstories.rst
deleted file mode 100644
index 8b25b5e4..00000000
--- a/docs/userstories.rst
+++ /dev/null
@@ -1,72 +0,0 @@
-.. _userstories:
-
-User stories
-============
- "A user story is an informal, general explanation of a software feature written from the perspective of the end user.
- Its purpose is to articulate how a software feature will provide value to the customer."
-
- "As a [persona], I [want to], [so that]."
- (Source: `Atlassian `_)
-
-The concept of a user story as a component of agile software development is used here to describe a certain application scenario.
-We aim to deduce :ref:`requirements` from challenges posed in the respective user stories below.
-
-.. _user_story_1:
-
-Reproducible paper
-------------------
- *As a researcher, I want to share my paper, such that others are able to reproduce the results.*
-
-In this user story, the main objective is to guarantee reproducibility of the results described in a scientific publication.
-To do so, other scientists need to be able to comprehend and rerun each process involved in the research workflow (numerical analysis, postprocessing, etc.).
-
-There are several challenges in meeting the goal above.
-First of all, the code has to be published.
-This can happen in the form of a tarball (e.g. via `Zenodo <https://zenodo.org/>`_), or by employing version control on the code base and making the repository publicly accessible (e.g. via `GitHub <https://github.com/>`_).
-This makes it possible to refer to a particular version of the code used at the time of publication.
-As research software is also understood as research data, appropriate metadata regarding the code and dependencies on third-party libraries should be provided.
-The metadata should contain all information necessary for peers to reinstantiate the compute environment to make the workflow usable.
-However, since software becomes out of date rather quickly, this is only a minimum requirement, and it is preferable to build containers that package up all required pieces of software in a way that is portable, reproducible and ensures machine-independent execution.
-Finally, the fully automated implementation of the entire workflow is required to avoid any manual steps.
-Ideally, the whole paper can be reproduced by running a single command and the progress of the execution is monitored by the workflow tool.
-
-
-.. _user_story_2:
-
-Research group collaboration
-----------------------------
- *As part of a research group, I want to be able to interconnect and reuse components of several different workflows so that everyone may benefit from their colleagues' work.*
-
-Similar to the first user story, the output of the workflow could be a scientific paper. However, with this example, interdisciplinary workflows are addressed and the reusability of single components/modules is essential. Each process in the workflow may require different expertise, and hence modularity and a common framework are necessary requirements for efficient collaboration and the reuse of other tools. Moreover, joint software development and the development of the scientific workflow itself play an important role.
-
-Herein, the following challenges might occur.
-First, the workflow may consist of heterogeneous models of different complexity, such as large computations requiring HPC (potentially including many dependent substeps to be executed), preprocessing of experimental data, or joint analysis in a postprocessing step - all processes potentially running on different machines.
-These models then need to be embedded into a common framework.
-Without a common framework/interface the exchange of data between processes is challenging and requires manual adjustments.
-Second, during joint development of the workflow, it needs to be executable independently of the local machine.
-Therefore, the workflow tool must provide means to control (automatically install) the compute environment for specific processes of the workflow.
-This greatly enhances the reusability of a process and/or modules (chain of processes) of the workflow and guarantees the machine-independent execution.
-Another challenge is the high computational cost.
-Here, the seamless integration of HPC systems is of high value.
-Moreover, for complex workflows containing computationally expensive processes, caching of results becomes relevant.
-Upon changes in the workflow, only the parts which are not up to date are rerun.
-This saves computation time and may significantly speed up the development of the workflow.
-Finally, the hierarchical embedding of a versioned, published workflow as a sub-workflow in another workflow is of great benefit - improvements within this sub-workflow can thus be easily integrated.
-
-.. _user_story_3:
-
-
-Complex hierarchical computations
----------------------------------
- *As a materials scientist, I want to be able to automate and manage complex workflows so I can keep track of all associated data.*
-
-In cases where screening or parameter sweeps are required, involving thousands of simulations, running these manually one by one is not feasible.
-In addition, these workflows are often complex, i.e. the output of one process is the input to a subsequent one - potentially with many levels of dependencies.
-
-Moreover, besides the automation of running the calculations, the inputs and outputs need to be managed.
-Not only should the data and calculations be stored to achieve reproducibility, but also the causal relationships between them, i.e. the full data provenance.
-
-Given the large amount of data (inputs/outputs), manually keeping track of the full provenance becomes infeasible.
-Therefore, the workflow tool must automatically track and record inputs, outputs and metadata of all processes in a database and allow the relevant (meta-)data to be searched easily.
-Furthermore, fast queries of the database and automatic generation of the provenance graph are required features of the workflow tool.
-Due to the large computational effort, the seamless integration of HPC systems is vital for this use case as well.
diff --git a/dodo.py b/dodo.py
deleted file mode 100644
index e460ef47..00000000
--- a/dodo.py
+++ /dev/null
@@ -1,13 +0,0 @@
-import pathlib
-
-
-def task_website():
- deps = ["index.rst", "conf.py"]
- docs = pathlib.Path("docs")
- for f in docs.glob("*.rst"):
- deps.append(str(f))
- return {
- "file_dep": deps,
- "actions": ["sphinx-build . website"],
- "verbosity": 2,
- }
diff --git a/environment.yml b/environment.yml
new file mode 100644
index 00000000..749859dd
--- /dev/null
+++ b/environment.yml
@@ -0,0 +1,7 @@
+channels:
+- conda-forge
+dependencies:
+- python =3.12
+- conda_subprocess =0.0.6
+- pip:
+ - python-workflow-definition==0.0.1
diff --git a/exemplary_workflow/README.md b/exemplary_workflow/README.md
deleted file mode 100644
index bcb0fb0d..00000000
--- a/exemplary_workflow/README.md
+++ /dev/null
@@ -1,23 +0,0 @@
-# Exemplary workflow
-As a minimal working example representative of workflows in computational science,
-the Poisson equation is solved using the finite element method.
-The workflow consists of the following processes:
-
-1. Partition of the computational domain using [Gmsh](http://gmsh.info/),
-2. Conversion of the file format (into one readable by [FEniCS](https://fenicsproject.org/)) using [meshio](https://github.com/nschloe/meshio),
-3. Solution of the Poisson equation using [FEniCS](https://fenicsproject.org/),
-4. Postprocessing using [ParaView](https://www.paraview.org/),
-5. Preparation of macro definitions,
-6. Generation of a PDF using [LaTeX](https://www.latex-project.org/) via [Tectonic](https://tectonic-typesetting.github.io/en-US/).
-
-A more extensive description can be found in the [documentation](https://nfdi4ingscientificworkflowrequirements.readthedocs.io/en/latest/docs/exemplarywf.html).
-
-## Compute environment
-Details about how the compute environment is built (using [conda](https://docs.conda.io/en/latest/)) can be found in the respective sub-directory for each tool.
-
-## Headless operation
-The ParaView version used in the examples (see the [conda environment specification file](https://github.com/BAMresearch/NFDI4IngScientificWorkflowRequirements/blob/main/exemplary_workflow/source/envs/default_env.yaml)) is linked against EGL to also support offscreen rendering.
-Usually, these libraries exist on a desktop PC where an actual screen or monitor is available.
-However, for headless operation, i.e. when executing the exemplary workflow in a container, it is necessary to install the aforementioned libraries, since these are not installed automatically as a dependency by conda.
-As an example, we refer to the installation of the basic dependencies in our [GitHub action](https://github.com/BAMresearch/NFDI4IngScientificWorkflowRequirements/blob/main/.github/actions/install-basic-deps/action.yml).
-For more information about offscreen rendering with ParaView go to the [ParaView documentation](https://kitware.github.io/paraview-docs/latest/cxx/Offscreen.html).
diff --git a/exemplary_workflow/aiida/.gitignore b/exemplary_workflow/aiida/.gitignore
deleted file mode 100644
index 7c58f1ae..00000000
--- a/exemplary_workflow/aiida/.gitignore
+++ /dev/null
@@ -1,5 +0,0 @@
-outfile*
-*.pvd
-*.vtu
-*.msh
-*.pdf
diff --git a/exemplary_workflow/aiida/README.md b/exemplary_workflow/aiida/README.md
deleted file mode 100644
index abe6ffb2..00000000
--- a/exemplary_workflow/aiida/README.md
+++ /dev/null
@@ -1,85 +0,0 @@
-# AiiDA
-This directory contains an implementation of the exemplary workflow with [AiiDA](https://www.aiida.net/).
-
-## Implementation
-Since the implementation of workflows in AiiDA is quite different from the other file-based
-workflow managers (e.g. Snakemake or Nextflow), we briefly comment on the different options
-and design choices in AiiDA.
-
-### Calculation functions
-According to the [documentation](https://aiida.readthedocs.io/projects/aiida-core/en/latest/topics/calculations/concepts.html#calculation-functions):
-> The calcfunction in AiiDA is a function decorator that transforms a regular python function in a calculation process, which automatically stores the provenance of its output in the provenance graph when executed.
-
-Typically, `calcfunction`s are used for short-running processes to be run on the local machine, like preprocessing and postprocessing steps.
-One could think of a workaround, using the `subprocess` module inside a `calcfunction` to run the processes of the exemplary workflow.
-However, `calcfunction`s are not intended to be used to run external codes, and the use of `subprocess` is discouraged, since in this case the provenance cannot be properly captured by AiiDA.
-
-### Calculation jobs
-> ... not all computations are well suited to be implemented as a python function, but rather are implemented as a separate code, external to AiiDA. To interface an external code with the engine of AiiDA, the CalcJob process class was introduced
-
-The `CalcJob` is designed to run a `Code` on *any* computer through AiiDA.
-While this is very powerful, apart from installing the `Code` on the other computer, it is necessary to set up the `code` with AiiDA and [write a plugin](https://aiida.readthedocs.io/projects/aiida-core/en/latest/howto/plugin_codes.html) which instructs AiiDA how to run the external `Code`.
-For long-running processes (computationally expensive tasks) this is worthwhile, but for simple
-shell commands the effort is too high.
-
-### AiiDA shell plugin
-The [AiiDA shell plugin](https://github.com/sphuber/aiida-shell) was developed to make it easier to run simple shell commands with AiiDA.
-This way any command line tool (external code) installed on the *computer* can be run without the need to write a plugin.
-Moreover, the `ShellJob` inherits from `CalcJob` and thus it is possible to run commands on remote computers.
-Instructions on how to set up a remote computer can be found in this [how-to guide](https://aiida.readthedocs.io/projects/aiida-core/en/latest/howto/run_codes.html#how-to-set-up-a-computer).
-
-## Installation
-Please follow the instructions in the [documentation](https://aiida.readthedocs.io/projects/aiida-core/en/latest/)
-to make yourself familiar with the installation process.
-It is recommended to use the system-wide installation method, where you first install prerequisite
-services using a package manager (e.g. on Ubuntu)
-```sh
-sudo apt install \
- git python3-dev python3-pip \
- postgresql postgresql-server-dev-all postgresql-client rabbitmq-server
-```
-Next, we prepare a conda environment with all the software required to run the exemplary workflow.
-```sh
-conda env create --name aiida_env --file ../source/envs/default_env.yaml
-conda activate aiida_env
-```
-Make sure that the Python version is greater than 3.8, since this is required by the `aiida-shell` plugin.
-Next, we install the `aiida-shell` plugin, which will automatically install AiiDA as a dependency.
-Make sure that your conda environment is activated as above and run the following command.
-```sh
-pip install aiida-shell==0.2.0
-```
-Finally, run
-```sh
-verdi quicksetup
-```
-to setup a profile and see if everything was installed correctly by running
-```sh
-verdi status
-```
-
-## Running the exemplary workflow
-If you are using `conda`, activate your environment.
-```
-conda activate aiida_env
-```
-Make the workflow script executable (`chmod +x ./exemplary_workflow.py`) and run it with
-```
-./exemplary_workflow.py
-```
-By default all `ShellJob`s are run on the `localhost`.
-Some useful commands to inspect the status of the processes run and their results stored in the database are listed below.
-```
-verdi process list -a # lists all processes
-verdi process show <PK> # show info about a process
-verdi process report <PK> # log messages if something went wrong
-verdi node show <PK> # show info about a node
-verdi node graph generate <PK> # generate provenance graph for a node
-```
-The provenance graph can be created in `.png` format with the command
-```
-verdi node graph generate <PK> --output-format png
-```
-and is shown below.
-
-