diff --git a/CHANGELOG.md b/CHANGELOG.md
index 915c0178b35..0f7a08c43b1 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,3 +1,9 @@
+# openpipelines 0.12.4
+
+## BUG FIXES
+
+* `transform/log1p`: fix `--input_layer` argument not functioning (PR #678).
+
 # openpipelines 0.12.3
 
 ## BUG FIXES
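For context, a hypothetical invocation of the viash-built `log1p` component showing the argument this fix restores (the executable path and file names are placeholders; the layer names mirror the updated test below):

```python
# Hypothetical invocation of the built log1p component; paths and file names are placeholders.
# "--input_layer" selects the layer to log-transform (the argument fixed by this PR),
# "--output_layer" selects where the transformed values are written.
import subprocess

subprocess.run(
    [
        "target/docker/transform/log1p/log1p",
        "--input", "input.h5mu",
        "--input_layer", "normalized",
        "--output", "output.h5mu",
        "--output_layer", "log_normalized",
    ],
    check=True,
)
```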
diff --git a/src/transform/log1p/run_test.py b/src/transform/log1p/run_test.py
index 1de97d01bde..d8f416bf48f 100644
--- a/src/transform/log1p/run_test.py
+++ b/src/transform/log1p/run_test.py
@@ -1,16 +1,19 @@
 from os import path
 import mudata as mu
 import numpy as np
+import scanpy as sc
+import pandas as pd
 import sys
 import pytest
 import sys
+import uuid
 from operator import attrgetter
 
 ## VIASH START
 meta = {
     'functionality_name': 'lognorm',
     'resources_dir': 'resources_test/',
-    'config': '/home/di/code/openpipeline/src/transform/log1p/config.vsh.yaml',
+    'config': './src/transform/log1p/config.vsh.yaml',
     'executable': "../../target/docker/transform/log1p/log1p"
 }
 
@@ -18,12 +21,29 @@
 ## VIASH END
 
 @pytest.fixture
-def input_path():
-    return f"{meta['resources_dir']}/pbmc_1k_protein_v3/pbmc_1k_protein_v3_filtered_feature_bc_matrix.h5mu"
+def input_data():
+    return mu.read_h5mu(f"{meta['resources_dir']}/pbmc_1k_protein_v3/pbmc_1k_protein_v3_filtered_feature_bc_matrix.h5mu").copy()
+
+@pytest.fixture
+def random_h5mu_path(tmp_path):
+    def wrapper():
+        unique_filename = f"{str(uuid.uuid4())}.h5mu"
+        temp_file = tmp_path / unique_filename
+        return temp_file
+    return wrapper
 
 @pytest.mark.parametrize("output_layer", [None, "log_normalized"])
-def test_1logp(run_component, input_path, output_layer):
-    output = "output.h5mu"
+@pytest.mark.parametrize("input_layer", [None, "normalized"])
+def test_1logp(run_component, input_data, output_layer, input_layer, random_h5mu_path):
+    output = random_h5mu_path()
+    if input_layer:
+        mod = input_data.mod["rna"]
+        mod.layers[input_layer] = mod.X.copy()
+        # Overwrite the original layer to make sure
+        # it is not accidentally used as the input layer.
+        mod.X[:] = 0
+    input_path = random_h5mu_path()
+    input_data.write(input_path)
     run_args = [
         "--input", input_path,
         "--output", output,
@@ -31,6 +51,8 @@ def test_1logp(run_component, input_path, output_layer):
     ]
     if output_layer:
         run_args.extend(["--output_layer", output_layer])
+    if input_layer:
+        run_args.extend(["--input_layer", input_layer])
     run_component(run_args)
 
     get_output_layer = attrgetter("X") if not output_layer else lambda x: getattr(x, 'layers')[output_layer]
@@ -49,16 +71,31 @@
     assert rna_in.shape == rna_out.shape, "Should have same shape as before"
     assert prot_in.shape == prot_out.shape, "Should have same shape as before"
 
+    input_layer_data = rna_in.X if not input_layer else rna_in.layers[input_layer]
+    assert np.mean(input_layer_data) != np.mean(get_output_layer(rna_out)), "Expression should have changed"
 
-    assert np.mean(rna_in.X) != np.mean(get_output_layer(rna_out)), "Expression should have changed"
-
-    nz_row, nz_col = rna_in.X.nonzero()
-    row_corr = np.corrcoef(rna_in.X[nz_row[0],:].toarray().flatten(), get_output_layer(rna_out)[nz_row[0],:].toarray().flatten())[0,1]
-    col_corr = np.corrcoef(rna_in.X[:,nz_col[0]].toarray().flatten(), get_output_layer(rna_out)[:,nz_col[0]].toarray().flatten())[0,1]
+    nz_row, nz_col = input_layer_data.nonzero()
+    row_corr = np.corrcoef(input_layer_data[nz_row[0],:].toarray().flatten(),
+                           get_output_layer(rna_out)[nz_row[0],:].toarray().flatten())[0,1]
+    col_corr = np.corrcoef(input_layer_data[:,nz_col[0]].toarray().flatten(),
+                           get_output_layer(rna_out)[:,nz_col[0]].toarray().flatten())[0,1]
     assert row_corr > .1
     assert col_corr > .1
 
     assert 'log1p' in rna_out.uns
 
+    # Make sure that the original input layer has not been overwritten
+    layers_to_test = [None] + list(rna_in.layers.keys())
+    for layer in layers_to_test:
+        if layer != output_layer:
+            in_data = sc.get.var_df(rna_in,
+                                    keys=rna_in.obs_names.to_list(),
+                                    layer=layer)
+            out_data = sc.get.var_df(rna_out,
+                                     keys=rna_in.obs_names.to_list(),
+                                     layer=layer)
+            pd.testing.assert_frame_equal(in_data, out_data)
+
+
 if __name__ == '__main__':
     sys.exit(pytest.main([__file__]))
\ No newline at end of file
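The script change below sidesteps scanpy's in-place behaviour by running `log1p` on a throwaway copy of the selected layer and writing the result back afterwards. A condensed sketch of that approach (assuming an .h5mu file with a "normalized" layer in its "rna" modality; file names are placeholders):

```python
# Minimal sketch of the copy-then-transform approach implemented in script.py below.
import anndata as ad
import mudata as mu
import scanpy as sc

mdata = mu.read_h5mu("input.h5mu")  # placeholder path
rna = mdata.mod["rna"]

# Copy the selected input layer into a standalone AnnData so scanpy's
# in-place log1p cannot touch the original matrix.
source = rna.layers["normalized"]   # the component falls back to .X when no --input_layer is given
tmp = ad.AnnData(X=source.copy())
sc.pp.log1p(tmp, base=None, copy=False)

# Write the transformed matrix to the requested output slot and keep the
# metadata scanpy stores in .uns.
rna.layers["log_normalized"] = tmp.X
rna.uns["log1p"] = tmp.uns["log1p"].copy()
mdata.write_h5mu("output.h5mu")
```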
diff --git a/src/transform/log1p/script.py b/src/transform/log1p/script.py
index 44e53caa159..69c10ae400f 100644
--- a/src/transform/log1p/script.py
+++ b/src/transform/log1p/script.py
@@ -1,5 +1,6 @@
 import scanpy as sc
 import mudata as mu
+import anndata as ad
 import sys
 
 ## VIASH START
@@ -39,12 +40,24 @@ def setup_logger():
 mod = par["modality"]
 logger.info("Performing log transformation on modality %s", mod)
 data = mdata.mod[mod]
-new_layer = sc.pp.log1p(data,
-                        base=par["base"],
-                        copy=True if par['output_layer'] else False)
-if new_layer:
-    data.layers[par['output_layer']] = new_layer.X
-    data.uns['log1p'] = new_layer.uns['log1p']
+
+# Make our own copy holding only the input data;
+# this avoids excessive memory usage and accidental overwrites.
+input_layer = data.layers[par["input_layer"]] \
+    if par["input_layer"] else data.X
+data_for_scanpy = ad.AnnData(X=input_layer.copy())
+sc.pp.log1p(data_for_scanpy,
+            base=par["base"],
+            layer=None,  # use X
+            copy=False)  # allow overwrites in the copy that was made
+
+# Scanpy overwrites the copy's .X in place, so fetch the transformed
+# data from the copy and use it to populate the output slot.
+if par["output_layer"]:
+    data.layers[par["output_layer"]] = data_for_scanpy.X
+else:
+    data.X = data_for_scanpy.X
+data.uns['log1p'] = data_for_scanpy.uns['log1p'].copy()
 
 logger.info("Writing to file %s", par["output"])
 mdata.write_h5mu(filename=par["output"], compression=par["output_compression"])
diff --git a/target/docker/annotate/popv/.config.vsh.yaml b/target/docker/annotate/popv/.config.vsh.yaml
index d595e6e7d5c..15eff3b4fd6 100644
--- a/target/docker/annotate/popv/.config.vsh.yaml
+++ b/target/docker/annotate/popv/.config.vsh.yaml
@@ -1,7 +1,7 @@
 functionality:
   name: "popv"
   namespace: "annotate"
-  version: "0.12.3"
+  version: "0.12.4"
   authors:
   - name: "Matthias Beyens"
     roles:
@@ -341,6 +341,6 @@ info:
   output: "/home/runner/work/openpipeline/openpipeline/target/docker/annotate/popv"
   executable: "/home/runner/work/openpipeline/openpipeline/target/docker/annotate/popv/popv"
   viash_version: "0.7.5"
-  git_commit: "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f"
+  git_commit: "a075b9f384e200b357c4c85801062a980ddb3383"
   git_remote: "https://github.com/openpipelines-bio/openpipeline"
-  git_tag: "0.12.2-3-g827d483cf7"
+  git_tag: "0.12.3-3-ga075b9f384"
diff --git a/target/docker/annotate/popv/popv b/target/docker/annotate/popv/popv
index 5d1d6c02249..8c038fd27da 100755
--- a/target/docker/annotate/popv/popv
+++ b/target/docker/annotate/popv/popv
@@ -1,6 +1,6 @@
 #!/usr/bin/env bash
 
-# popv 0.12.3
+# popv 0.12.4
 #
 # This wrapper script is auto-generated by viash 0.7.5 and is thus a derivative
 # work thereof. This software comes with ABSOLUTELY NO WARRANTY from Data
@@ -159,7 +159,7 @@ VIASH_META_TEMP_DIR="$VIASH_TEMP"
 
 # ViashHelp: Display helpful explanation about this executable
 function ViashHelp {
-  echo "popv 0.12.3"
+  echo "popv 0.12.4"
   echo ""
   echo "Performs popular major vote cell typing on single cell sequence data using"
   echo "multiple algorithms. Note that this is a one-shot version of PopV."
@@ -488,10 +488,10 @@ RUN cd /opt && git clone --depth 1 https://github.com/YosefLab/PopV.git && \ LABEL org.opencontainers.image.authors="Matthias Beyens, Robrecht Cannoodt" LABEL org.opencontainers.image.description="Companion container for running component annotate popv" -LABEL org.opencontainers.image.created="2024-01-25T10:13:59Z" +LABEL org.opencontainers.image.created="2024-01-31T09:08:36Z" LABEL org.opencontainers.image.source="https://github.com/openpipelines-bio/openpipeline" -LABEL org.opencontainers.image.revision="827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" -LABEL org.opencontainers.image.version="0.12.3" +LABEL org.opencontainers.image.revision="a075b9f384e200b357c4c85801062a980ddb3383" +LABEL org.opencontainers.image.version="0.12.4" VIASHDOCKER } @@ -642,7 +642,7 @@ while [[ $# -gt 0 ]]; do shift 1 ;; --version) - echo "popv 0.12.3" + echo "popv 0.12.4" exit ;; --input) diff --git a/target/docker/cluster/leiden/.config.vsh.yaml b/target/docker/cluster/leiden/.config.vsh.yaml index b108de8f22e..220615dbe8b 100644 --- a/target/docker/cluster/leiden/.config.vsh.yaml +++ b/target/docker/cluster/leiden/.config.vsh.yaml @@ -1,7 +1,7 @@ functionality: name: "leiden" namespace: "cluster" - version: "0.12.3" + version: "0.12.4" authors: - name: "Dries De Maeyer" roles: @@ -214,6 +214,6 @@ info: output: "/home/runner/work/openpipeline/openpipeline/target/docker/cluster/leiden" executable: "/home/runner/work/openpipeline/openpipeline/target/docker/cluster/leiden/leiden" viash_version: "0.7.5" - git_commit: "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" + git_commit: "a075b9f384e200b357c4c85801062a980ddb3383" git_remote: "https://github.com/openpipelines-bio/openpipeline" - git_tag: "0.12.2-3-g827d483cf7" + git_tag: "0.12.3-3-ga075b9f384" diff --git a/target/docker/cluster/leiden/leiden b/target/docker/cluster/leiden/leiden index d8b5d51ec91..e3767f80c35 100755 --- a/target/docker/cluster/leiden/leiden +++ b/target/docker/cluster/leiden/leiden @@ -1,6 +1,6 @@ #!/usr/bin/env bash -# leiden 0.12.3 +# leiden 0.12.4 # # This wrapper script is auto-generated by viash 0.7.5 and is thus a derivative # work thereof. This software comes with ABSOLUTELY NO WARRANTY from Data @@ -158,7 +158,7 @@ VIASH_META_TEMP_DIR="$VIASH_TEMP" # ViashHelp: Display helpful explanation about this executable function ViashHelp { - echo "leiden 0.12.3" + echo "leiden 0.12.4" echo "" echo "Cluster cells using the Leiden algorithm [Traag18] implemented in the Scanpy" echo "framework [Wolf18]." 
@@ -445,10 +445,10 @@ RUN pip install --upgrade pip && \ LABEL org.opencontainers.image.authors="Dries De Maeyer" LABEL org.opencontainers.image.description="Companion container for running component cluster leiden" -LABEL org.opencontainers.image.created="2024-01-25T10:13:59Z" +LABEL org.opencontainers.image.created="2024-01-31T09:08:35Z" LABEL org.opencontainers.image.source="https://github.com/openpipelines-bio/openpipeline" -LABEL org.opencontainers.image.revision="827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" -LABEL org.opencontainers.image.version="0.12.3" +LABEL org.opencontainers.image.revision="a075b9f384e200b357c4c85801062a980ddb3383" +LABEL org.opencontainers.image.version="0.12.4" VIASHDOCKER } @@ -599,7 +599,7 @@ while [[ $# -gt 0 ]]; do shift 1 ;; --version) - echo "leiden 0.12.3" + echo "leiden 0.12.4" exit ;; --input) diff --git a/target/docker/compression/compress_h5mu/.config.vsh.yaml b/target/docker/compression/compress_h5mu/.config.vsh.yaml index 82c3f368ea4..b08d577be0d 100644 --- a/target/docker/compression/compress_h5mu/.config.vsh.yaml +++ b/target/docker/compression/compress_h5mu/.config.vsh.yaml @@ -1,7 +1,7 @@ functionality: name: "compress_h5mu" namespace: "compression" - version: "0.12.3" + version: "0.12.4" authors: - name: "Dries Schaumont" roles: @@ -162,6 +162,6 @@ info: output: "/home/runner/work/openpipeline/openpipeline/target/docker/compression/compress_h5mu" executable: "/home/runner/work/openpipeline/openpipeline/target/docker/compression/compress_h5mu/compress_h5mu" viash_version: "0.7.5" - git_commit: "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" + git_commit: "a075b9f384e200b357c4c85801062a980ddb3383" git_remote: "https://github.com/openpipelines-bio/openpipeline" - git_tag: "0.12.2-3-g827d483cf7" + git_tag: "0.12.3-3-ga075b9f384" diff --git a/target/docker/compression/compress_h5mu/compress_h5mu b/target/docker/compression/compress_h5mu/compress_h5mu index e4f20ce9654..82235ebb9cd 100755 --- a/target/docker/compression/compress_h5mu/compress_h5mu +++ b/target/docker/compression/compress_h5mu/compress_h5mu @@ -1,6 +1,6 @@ #!/usr/bin/env bash -# compress_h5mu 0.12.3 +# compress_h5mu 0.12.4 # # This wrapper script is auto-generated by viash 0.7.5 and is thus a derivative # work thereof. This software comes with ABSOLUTELY NO WARRANTY from Data @@ -158,7 +158,7 @@ VIASH_META_TEMP_DIR="$VIASH_TEMP" # ViashHelp: Display helpful explanation about this executable function ViashHelp { - echo "compress_h5mu 0.12.3" + echo "compress_h5mu 0.12.4" echo "" echo "Compress a MuData file." 
echo "" @@ -408,10 +408,10 @@ RUN pip install --upgrade pip && \ LABEL org.opencontainers.image.authors="Dries Schaumont" LABEL org.opencontainers.image.description="Companion container for running component compression compress_h5mu" -LABEL org.opencontainers.image.created="2024-01-25T10:13:59Z" +LABEL org.opencontainers.image.created="2024-01-31T09:08:33Z" LABEL org.opencontainers.image.source="https://github.com/openpipelines-bio/openpipeline" -LABEL org.opencontainers.image.revision="827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" -LABEL org.opencontainers.image.version="0.12.3" +LABEL org.opencontainers.image.revision="a075b9f384e200b357c4c85801062a980ddb3383" +LABEL org.opencontainers.image.version="0.12.4" VIASHDOCKER } @@ -562,7 +562,7 @@ while [[ $# -gt 0 ]]; do shift 1 ;; --version) - echo "compress_h5mu 0.12.3" + echo "compress_h5mu 0.12.4" exit ;; --input) diff --git a/target/docker/compression/tar_extract/.config.vsh.yaml b/target/docker/compression/tar_extract/.config.vsh.yaml index e6ea892a4ca..c8c59aad973 100644 --- a/target/docker/compression/tar_extract/.config.vsh.yaml +++ b/target/docker/compression/tar_extract/.config.vsh.yaml @@ -1,7 +1,7 @@ functionality: name: "tar_extract" namespace: "compression" - version: "0.12.3" + version: "0.12.4" arguments: - type: "file" name: "--input" @@ -101,6 +101,6 @@ info: output: "/home/runner/work/openpipeline/openpipeline/target/docker/compression/tar_extract" executable: "/home/runner/work/openpipeline/openpipeline/target/docker/compression/tar_extract/tar_extract" viash_version: "0.7.5" - git_commit: "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" + git_commit: "a075b9f384e200b357c4c85801062a980ddb3383" git_remote: "https://github.com/openpipelines-bio/openpipeline" - git_tag: "0.12.2-3-g827d483cf7" + git_tag: "0.12.3-3-ga075b9f384" diff --git a/target/docker/compression/tar_extract/tar_extract b/target/docker/compression/tar_extract/tar_extract index 6ae8af87aee..1cdb1af3d8f 100755 --- a/target/docker/compression/tar_extract/tar_extract +++ b/target/docker/compression/tar_extract/tar_extract @@ -1,6 +1,6 @@ #!/usr/bin/env bash -# tar_extract 0.12.3 +# tar_extract 0.12.4 # # This wrapper script is auto-generated by viash 0.7.5 and is thus a derivative # work thereof. 
This software comes with ABSOLUTELY NO WARRANTY from Data @@ -155,7 +155,7 @@ VIASH_META_TEMP_DIR="$VIASH_TEMP" # ViashHelp: Display helpful explanation about this executable function ViashHelp { - echo "tar_extract 0.12.3" + echo "tar_extract 0.12.4" echo "" echo "Extract files from a tar archive" echo "" @@ -406,10 +406,10 @@ ENTRYPOINT [] RUN : LABEL org.opencontainers.image.description="Companion container for running component compression tar_extract" -LABEL org.opencontainers.image.created="2024-01-25T10:13:59Z" +LABEL org.opencontainers.image.created="2024-01-31T09:08:33Z" LABEL org.opencontainers.image.source="https://github.com/openpipelines-bio/openpipeline" -LABEL org.opencontainers.image.revision="827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" -LABEL org.opencontainers.image.version="0.12.3" +LABEL org.opencontainers.image.revision="a075b9f384e200b357c4c85801062a980ddb3383" +LABEL org.opencontainers.image.version="0.12.4" VIASHDOCKER } @@ -560,7 +560,7 @@ while [[ $# -gt 0 ]]; do shift 1 ;; --version) - echo "tar_extract 0.12.3" + echo "tar_extract 0.12.4" exit ;; --input) diff --git a/target/docker/convert/from_10xh5_to_h5mu/.config.vsh.yaml b/target/docker/convert/from_10xh5_to_h5mu/.config.vsh.yaml index 253211db1ac..dd2b34f4f53 100644 --- a/target/docker/convert/from_10xh5_to_h5mu/.config.vsh.yaml +++ b/target/docker/convert/from_10xh5_to_h5mu/.config.vsh.yaml @@ -1,7 +1,7 @@ functionality: name: "from_10xh5_to_h5mu" namespace: "convert" - version: "0.12.3" + version: "0.12.4" authors: - name: "Robrecht Cannoodt" roles: @@ -267,6 +267,6 @@ info: output: "/home/runner/work/openpipeline/openpipeline/target/docker/convert/from_10xh5_to_h5mu" executable: "/home/runner/work/openpipeline/openpipeline/target/docker/convert/from_10xh5_to_h5mu/from_10xh5_to_h5mu" viash_version: "0.7.5" - git_commit: "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" + git_commit: "a075b9f384e200b357c4c85801062a980ddb3383" git_remote: "https://github.com/openpipelines-bio/openpipeline" - git_tag: "0.12.2-3-g827d483cf7" + git_tag: "0.12.3-3-ga075b9f384" diff --git a/target/docker/convert/from_10xh5_to_h5mu/from_10xh5_to_h5mu b/target/docker/convert/from_10xh5_to_h5mu/from_10xh5_to_h5mu index bd646a2b95b..550e88b3314 100755 --- a/target/docker/convert/from_10xh5_to_h5mu/from_10xh5_to_h5mu +++ b/target/docker/convert/from_10xh5_to_h5mu/from_10xh5_to_h5mu @@ -1,6 +1,6 @@ #!/usr/bin/env bash -# from_10xh5_to_h5mu 0.12.3 +# from_10xh5_to_h5mu 0.12.4 # # This wrapper script is auto-generated by viash 0.7.5 and is thus a derivative # work thereof. This software comes with ABSOLUTELY NO WARRANTY from Data @@ -158,7 +158,7 @@ VIASH_META_TEMP_DIR="$VIASH_TEMP" # ViashHelp: Display helpful explanation about this executable function ViashHelp { - echo "from_10xh5_to_h5mu 0.12.3" + echo "from_10xh5_to_h5mu 0.12.4" echo "" echo "Converts a 10x h5 into an h5mu file." 
echo "" @@ -430,10 +430,10 @@ RUN pip install --upgrade pip && \ LABEL org.opencontainers.image.authors="Robrecht Cannoodt" LABEL org.opencontainers.image.description="Companion container for running component convert from_10xh5_to_h5mu" -LABEL org.opencontainers.image.created="2024-01-25T10:13:56Z" +LABEL org.opencontainers.image.created="2024-01-31T09:08:34Z" LABEL org.opencontainers.image.source="https://github.com/openpipelines-bio/openpipeline" -LABEL org.opencontainers.image.revision="827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" -LABEL org.opencontainers.image.version="0.12.3" +LABEL org.opencontainers.image.revision="a075b9f384e200b357c4c85801062a980ddb3383" +LABEL org.opencontainers.image.version="0.12.4" VIASHDOCKER } @@ -584,7 +584,7 @@ while [[ $# -gt 0 ]]; do shift 1 ;; --version) - echo "from_10xh5_to_h5mu 0.12.3" + echo "from_10xh5_to_h5mu 0.12.4" exit ;; --input) diff --git a/target/docker/convert/from_10xmtx_to_h5mu/.config.vsh.yaml b/target/docker/convert/from_10xmtx_to_h5mu/.config.vsh.yaml index 9828a537c5f..385c749938f 100644 --- a/target/docker/convert/from_10xmtx_to_h5mu/.config.vsh.yaml +++ b/target/docker/convert/from_10xmtx_to_h5mu/.config.vsh.yaml @@ -1,7 +1,7 @@ functionality: name: "from_10xmtx_to_h5mu" namespace: "convert" - version: "0.12.3" + version: "0.12.4" authors: - name: "Robrecht Cannoodt" roles: @@ -161,6 +161,6 @@ info: output: "/home/runner/work/openpipeline/openpipeline/target/docker/convert/from_10xmtx_to_h5mu" executable: "/home/runner/work/openpipeline/openpipeline/target/docker/convert/from_10xmtx_to_h5mu/from_10xmtx_to_h5mu" viash_version: "0.7.5" - git_commit: "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" + git_commit: "a075b9f384e200b357c4c85801062a980ddb3383" git_remote: "https://github.com/openpipelines-bio/openpipeline" - git_tag: "0.12.2-3-g827d483cf7" + git_tag: "0.12.3-3-ga075b9f384" diff --git a/target/docker/convert/from_10xmtx_to_h5mu/from_10xmtx_to_h5mu b/target/docker/convert/from_10xmtx_to_h5mu/from_10xmtx_to_h5mu index 12d25de3753..9fe75cf4e9a 100755 --- a/target/docker/convert/from_10xmtx_to_h5mu/from_10xmtx_to_h5mu +++ b/target/docker/convert/from_10xmtx_to_h5mu/from_10xmtx_to_h5mu @@ -1,6 +1,6 @@ #!/usr/bin/env bash -# from_10xmtx_to_h5mu 0.12.3 +# from_10xmtx_to_h5mu 0.12.4 # # This wrapper script is auto-generated by viash 0.7.5 and is thus a derivative # work thereof. This software comes with ABSOLUTELY NO WARRANTY from Data @@ -158,7 +158,7 @@ VIASH_META_TEMP_DIR="$VIASH_TEMP" # ViashHelp: Display helpful explanation about this executable function ViashHelp { - echo "from_10xmtx_to_h5mu 0.12.3" + echo "from_10xmtx_to_h5mu 0.12.4" echo "" echo "Converts a 10x mtx into an h5mu file." 
echo "" @@ -408,10 +408,10 @@ RUN pip install --upgrade pip && \ LABEL org.opencontainers.image.authors="Robrecht Cannoodt" LABEL org.opencontainers.image.description="Companion container for running component convert from_10xmtx_to_h5mu" -LABEL org.opencontainers.image.created="2024-01-25T10:13:55Z" +LABEL org.opencontainers.image.created="2024-01-31T09:08:31Z" LABEL org.opencontainers.image.source="https://github.com/openpipelines-bio/openpipeline" -LABEL org.opencontainers.image.revision="827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" -LABEL org.opencontainers.image.version="0.12.3" +LABEL org.opencontainers.image.revision="a075b9f384e200b357c4c85801062a980ddb3383" +LABEL org.opencontainers.image.version="0.12.4" VIASHDOCKER } @@ -562,7 +562,7 @@ while [[ $# -gt 0 ]]; do shift 1 ;; --version) - echo "from_10xmtx_to_h5mu 0.12.3" + echo "from_10xmtx_to_h5mu 0.12.4" exit ;; --input) diff --git a/target/docker/convert/from_bd_to_10x_molecular_barcode_tags/.config.vsh.yaml b/target/docker/convert/from_bd_to_10x_molecular_barcode_tags/.config.vsh.yaml index 7e86c69dedb..023da9c9d56 100644 --- a/target/docker/convert/from_bd_to_10x_molecular_barcode_tags/.config.vsh.yaml +++ b/target/docker/convert/from_bd_to_10x_molecular_barcode_tags/.config.vsh.yaml @@ -1,7 +1,7 @@ functionality: name: "from_bd_to_10x_molecular_barcode_tags" namespace: "convert" - version: "0.12.3" + version: "0.12.4" authors: - name: "Dries Schaumont" roles: @@ -154,6 +154,6 @@ info: output: "/home/runner/work/openpipeline/openpipeline/target/docker/convert/from_bd_to_10x_molecular_barcode_tags" executable: "/home/runner/work/openpipeline/openpipeline/target/docker/convert/from_bd_to_10x_molecular_barcode_tags/from_bd_to_10x_molecular_barcode_tags" viash_version: "0.7.5" - git_commit: "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" + git_commit: "a075b9f384e200b357c4c85801062a980ddb3383" git_remote: "https://github.com/openpipelines-bio/openpipeline" - git_tag: "0.12.2-3-g827d483cf7" + git_tag: "0.12.3-3-ga075b9f384" diff --git a/target/docker/convert/from_bd_to_10x_molecular_barcode_tags/from_bd_to_10x_molecular_barcode_tags b/target/docker/convert/from_bd_to_10x_molecular_barcode_tags/from_bd_to_10x_molecular_barcode_tags index 00300515cc3..908fb18b338 100755 --- a/target/docker/convert/from_bd_to_10x_molecular_barcode_tags/from_bd_to_10x_molecular_barcode_tags +++ b/target/docker/convert/from_bd_to_10x_molecular_barcode_tags/from_bd_to_10x_molecular_barcode_tags @@ -1,6 +1,6 @@ #!/usr/bin/env bash -# from_bd_to_10x_molecular_barcode_tags 0.12.3 +# from_bd_to_10x_molecular_barcode_tags 0.12.4 # # This wrapper script is auto-generated by viash 0.7.5 and is thus a derivative # work thereof. This software comes with ABSOLUTELY NO WARRANTY from Data @@ -158,7 +158,7 @@ VIASH_META_TEMP_DIR="$VIASH_TEMP" # ViashHelp: Display helpful explanation about this executable function ViashHelp { - echo "from_bd_to_10x_molecular_barcode_tags 0.12.3" + echo "from_bd_to_10x_molecular_barcode_tags 0.12.4" echo "" echo "Convert the molecular barcode sequence SAM tag from BD format (MA) to 10X format" echo "(UB)." 
@@ -409,10 +409,10 @@ RUN apt-get update && \ LABEL org.opencontainers.image.authors="Dries Schaumont" LABEL org.opencontainers.image.description="Companion container for running component convert from_bd_to_10x_molecular_barcode_tags" -LABEL org.opencontainers.image.created="2024-01-25T10:13:55Z" +LABEL org.opencontainers.image.created="2024-01-31T09:08:31Z" LABEL org.opencontainers.image.source="https://github.com/openpipelines-bio/openpipeline" -LABEL org.opencontainers.image.revision="827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" -LABEL org.opencontainers.image.version="0.12.3" +LABEL org.opencontainers.image.revision="a075b9f384e200b357c4c85801062a980ddb3383" +LABEL org.opencontainers.image.version="0.12.4" VIASHDOCKER } @@ -563,7 +563,7 @@ while [[ $# -gt 0 ]]; do shift 1 ;; --version) - echo "from_bd_to_10x_molecular_barcode_tags 0.12.3" + echo "from_bd_to_10x_molecular_barcode_tags 0.12.4" exit ;; --input) diff --git a/target/docker/convert/from_bdrhap_to_h5mu/.config.vsh.yaml b/target/docker/convert/from_bdrhap_to_h5mu/.config.vsh.yaml index 7ecfaba2de3..27665fa8042 100644 --- a/target/docker/convert/from_bdrhap_to_h5mu/.config.vsh.yaml +++ b/target/docker/convert/from_bdrhap_to_h5mu/.config.vsh.yaml @@ -1,7 +1,7 @@ functionality: name: "from_bdrhap_to_h5mu" namespace: "convert" - version: "0.12.3" + version: "0.12.4" authors: - name: "Robrecht Cannoodt" roles: @@ -176,6 +176,6 @@ info: output: "/home/runner/work/openpipeline/openpipeline/target/docker/convert/from_bdrhap_to_h5mu" executable: "/home/runner/work/openpipeline/openpipeline/target/docker/convert/from_bdrhap_to_h5mu/from_bdrhap_to_h5mu" viash_version: "0.7.5" - git_commit: "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" + git_commit: "a075b9f384e200b357c4c85801062a980ddb3383" git_remote: "https://github.com/openpipelines-bio/openpipeline" - git_tag: "0.12.2-3-g827d483cf7" + git_tag: "0.12.3-3-ga075b9f384" diff --git a/target/docker/convert/from_bdrhap_to_h5mu/from_bdrhap_to_h5mu b/target/docker/convert/from_bdrhap_to_h5mu/from_bdrhap_to_h5mu index 5914e1e8f9a..f2dad083d85 100755 --- a/target/docker/convert/from_bdrhap_to_h5mu/from_bdrhap_to_h5mu +++ b/target/docker/convert/from_bdrhap_to_h5mu/from_bdrhap_to_h5mu @@ -1,6 +1,6 @@ #!/usr/bin/env bash -# from_bdrhap_to_h5mu 0.12.3 +# from_bdrhap_to_h5mu 0.12.4 # # This wrapper script is auto-generated by viash 0.7.5 and is thus a derivative # work thereof. This software comes with ABSOLUTELY NO WARRANTY from Data @@ -158,7 +158,7 @@ VIASH_META_TEMP_DIR="$VIASH_TEMP" # ViashHelp: Display helpful explanation about this executable function ViashHelp { - echo "from_bdrhap_to_h5mu 0.12.3" + echo "from_bdrhap_to_h5mu 0.12.4" echo "" echo "Convert the output of a BD Rhapsody WTA pipeline to a MuData h5 file." 
echo "" @@ -414,10 +414,10 @@ RUN Rscript -e 'if (!requireNamespace("remotes", quietly = TRUE)) install.packag LABEL org.opencontainers.image.authors="Robrecht Cannoodt" LABEL org.opencontainers.image.description="Companion container for running component convert from_bdrhap_to_h5mu" -LABEL org.opencontainers.image.created="2024-01-25T10:13:56Z" +LABEL org.opencontainers.image.created="2024-01-31T09:08:32Z" LABEL org.opencontainers.image.source="https://github.com/openpipelines-bio/openpipeline" -LABEL org.opencontainers.image.revision="827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" -LABEL org.opencontainers.image.version="0.12.3" +LABEL org.opencontainers.image.revision="a075b9f384e200b357c4c85801062a980ddb3383" +LABEL org.opencontainers.image.version="0.12.4" VIASHDOCKER } @@ -568,7 +568,7 @@ while [[ $# -gt 0 ]]; do shift 1 ;; --version) - echo "from_bdrhap_to_h5mu 0.12.3" + echo "from_bdrhap_to_h5mu 0.12.4" exit ;; --id) diff --git a/target/docker/convert/from_cellranger_multi_to_h5mu/.config.vsh.yaml b/target/docker/convert/from_cellranger_multi_to_h5mu/.config.vsh.yaml index 2c96fe3a4b8..64a11fc3936 100644 --- a/target/docker/convert/from_cellranger_multi_to_h5mu/.config.vsh.yaml +++ b/target/docker/convert/from_cellranger_multi_to_h5mu/.config.vsh.yaml @@ -1,7 +1,7 @@ functionality: name: "from_cellranger_multi_to_h5mu" namespace: "convert" - version: "0.12.3" + version: "0.12.4" authors: - name: "Dries Schaumont" roles: @@ -185,6 +185,6 @@ info: output: "/home/runner/work/openpipeline/openpipeline/target/docker/convert/from_cellranger_multi_to_h5mu" executable: "/home/runner/work/openpipeline/openpipeline/target/docker/convert/from_cellranger_multi_to_h5mu/from_cellranger_multi_to_h5mu" viash_version: "0.7.5" - git_commit: "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" + git_commit: "a075b9f384e200b357c4c85801062a980ddb3383" git_remote: "https://github.com/openpipelines-bio/openpipeline" - git_tag: "0.12.2-3-g827d483cf7" + git_tag: "0.12.3-3-ga075b9f384" diff --git a/target/docker/convert/from_cellranger_multi_to_h5mu/from_cellranger_multi_to_h5mu b/target/docker/convert/from_cellranger_multi_to_h5mu/from_cellranger_multi_to_h5mu index 4e46b6c0232..40b520ff768 100755 --- a/target/docker/convert/from_cellranger_multi_to_h5mu/from_cellranger_multi_to_h5mu +++ b/target/docker/convert/from_cellranger_multi_to_h5mu/from_cellranger_multi_to_h5mu @@ -1,6 +1,6 @@ #!/usr/bin/env bash -# from_cellranger_multi_to_h5mu 0.12.3 +# from_cellranger_multi_to_h5mu 0.12.4 # # This wrapper script is auto-generated by viash 0.7.5 and is thus a derivative # work thereof. This software comes with ABSOLUTELY NO WARRANTY from Data @@ -158,7 +158,7 @@ VIASH_META_TEMP_DIR="$VIASH_TEMP" # ViashHelp: Display helpful explanation about this executable function ViashHelp { - echo "from_cellranger_multi_to_h5mu 0.12.3" + echo "from_cellranger_multi_to_h5mu 0.12.4" echo "" echo "Converts the output from cellranger multi to a single .h5mu file." 
echo "By default, will map the following library type names to modality names:" @@ -427,10 +427,10 @@ RUN pip install --upgrade pip && \ LABEL org.opencontainers.image.authors="Dries Schaumont" LABEL org.opencontainers.image.description="Companion container for running component convert from_cellranger_multi_to_h5mu" -LABEL org.opencontainers.image.created="2024-01-25T10:13:56Z" +LABEL org.opencontainers.image.created="2024-01-31T09:08:33Z" LABEL org.opencontainers.image.source="https://github.com/openpipelines-bio/openpipeline" -LABEL org.opencontainers.image.revision="827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" -LABEL org.opencontainers.image.version="0.12.3" +LABEL org.opencontainers.image.revision="a075b9f384e200b357c4c85801062a980ddb3383" +LABEL org.opencontainers.image.version="0.12.4" VIASHDOCKER } @@ -581,7 +581,7 @@ while [[ $# -gt 0 ]]; do shift 1 ;; --version) - echo "from_cellranger_multi_to_h5mu 0.12.3" + echo "from_cellranger_multi_to_h5mu 0.12.4" exit ;; --input) diff --git a/target/docker/convert/from_h5ad_to_h5mu/.config.vsh.yaml b/target/docker/convert/from_h5ad_to_h5mu/.config.vsh.yaml index 1572fd33043..4476227852a 100644 --- a/target/docker/convert/from_h5ad_to_h5mu/.config.vsh.yaml +++ b/target/docker/convert/from_h5ad_to_h5mu/.config.vsh.yaml @@ -1,7 +1,7 @@ functionality: name: "from_h5ad_to_h5mu" namespace: "convert" - version: "0.12.3" + version: "0.12.4" authors: - name: "Dries De Maeyer" roles: @@ -172,6 +172,6 @@ info: output: "/home/runner/work/openpipeline/openpipeline/target/docker/convert/from_h5ad_to_h5mu" executable: "/home/runner/work/openpipeline/openpipeline/target/docker/convert/from_h5ad_to_h5mu/from_h5ad_to_h5mu" viash_version: "0.7.5" - git_commit: "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" + git_commit: "a075b9f384e200b357c4c85801062a980ddb3383" git_remote: "https://github.com/openpipelines-bio/openpipeline" - git_tag: "0.12.2-3-g827d483cf7" + git_tag: "0.12.3-3-ga075b9f384" diff --git a/target/docker/convert/from_h5ad_to_h5mu/from_h5ad_to_h5mu b/target/docker/convert/from_h5ad_to_h5mu/from_h5ad_to_h5mu index cd92d16c44c..a0ebe55ebfb 100755 --- a/target/docker/convert/from_h5ad_to_h5mu/from_h5ad_to_h5mu +++ b/target/docker/convert/from_h5ad_to_h5mu/from_h5ad_to_h5mu @@ -1,6 +1,6 @@ #!/usr/bin/env bash -# from_h5ad_to_h5mu 0.12.3 +# from_h5ad_to_h5mu 0.12.4 # # This wrapper script is auto-generated by viash 0.7.5 and is thus a derivative # work thereof. 
This software comes with ABSOLUTELY NO WARRANTY from Data @@ -158,7 +158,7 @@ VIASH_META_TEMP_DIR="$VIASH_TEMP" # ViashHelp: Display helpful explanation about this executable function ViashHelp { - echo "from_h5ad_to_h5mu 0.12.3" + echo "from_h5ad_to_h5mu 0.12.4" echo "" echo "Converts a single layer h5ad file into a single MuData object" echo "" @@ -412,10 +412,10 @@ RUN pip install --upgrade pip && \ LABEL org.opencontainers.image.authors="Dries De Maeyer" LABEL org.opencontainers.image.description="Companion container for running component convert from_h5ad_to_h5mu" -LABEL org.opencontainers.image.created="2024-01-25T10:13:55Z" +LABEL org.opencontainers.image.created="2024-01-31T09:08:32Z" LABEL org.opencontainers.image.source="https://github.com/openpipelines-bio/openpipeline" -LABEL org.opencontainers.image.revision="827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" -LABEL org.opencontainers.image.version="0.12.3" +LABEL org.opencontainers.image.revision="a075b9f384e200b357c4c85801062a980ddb3383" +LABEL org.opencontainers.image.version="0.12.4" VIASHDOCKER } @@ -566,7 +566,7 @@ while [[ $# -gt 0 ]]; do shift 1 ;; --version) - echo "from_h5ad_to_h5mu 0.12.3" + echo "from_h5ad_to_h5mu 0.12.4" exit ;; --input) diff --git a/target/docker/convert/from_h5mu_to_h5ad/.config.vsh.yaml b/target/docker/convert/from_h5mu_to_h5ad/.config.vsh.yaml index e754fa369be..9acb567c6cf 100644 --- a/target/docker/convert/from_h5mu_to_h5ad/.config.vsh.yaml +++ b/target/docker/convert/from_h5mu_to_h5ad/.config.vsh.yaml @@ -1,7 +1,7 @@ functionality: name: "from_h5mu_to_h5ad" namespace: "convert" - version: "0.12.3" + version: "0.12.4" authors: - name: "Robrecht Cannoodt" roles: @@ -177,6 +177,6 @@ info: output: "/home/runner/work/openpipeline/openpipeline/target/docker/convert/from_h5mu_to_h5ad" executable: "/home/runner/work/openpipeline/openpipeline/target/docker/convert/from_h5mu_to_h5ad/from_h5mu_to_h5ad" viash_version: "0.7.5" - git_commit: "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" + git_commit: "a075b9f384e200b357c4c85801062a980ddb3383" git_remote: "https://github.com/openpipelines-bio/openpipeline" - git_tag: "0.12.2-3-g827d483cf7" + git_tag: "0.12.3-3-ga075b9f384" diff --git a/target/docker/convert/from_h5mu_to_h5ad/from_h5mu_to_h5ad b/target/docker/convert/from_h5mu_to_h5ad/from_h5mu_to_h5ad index 2ac562b3688..4fc2410420d 100755 --- a/target/docker/convert/from_h5mu_to_h5ad/from_h5mu_to_h5ad +++ b/target/docker/convert/from_h5mu_to_h5ad/from_h5mu_to_h5ad @@ -1,6 +1,6 @@ #!/usr/bin/env bash -# from_h5mu_to_h5ad 0.12.3 +# from_h5mu_to_h5ad 0.12.4 # # This wrapper script is auto-generated by viash 0.7.5 and is thus a derivative # work thereof. This software comes with ABSOLUTELY NO WARRANTY from Data @@ -158,7 +158,7 @@ VIASH_META_TEMP_DIR="$VIASH_TEMP" # ViashHelp: Display helpful explanation about this executable function ViashHelp { - echo "from_h5mu_to_h5ad 0.12.3" + echo "from_h5mu_to_h5ad 0.12.4" echo "" echo "Converts a h5mu file into a h5ad file." 
echo "" @@ -413,10 +413,10 @@ RUN pip install --upgrade pip && \ LABEL org.opencontainers.image.authors="Robrecht Cannoodt" LABEL org.opencontainers.image.description="Companion container for running component convert from_h5mu_to_h5ad" -LABEL org.opencontainers.image.created="2024-01-25T10:13:54Z" +LABEL org.opencontainers.image.created="2024-01-31T09:08:32Z" LABEL org.opencontainers.image.source="https://github.com/openpipelines-bio/openpipeline" -LABEL org.opencontainers.image.revision="827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" -LABEL org.opencontainers.image.version="0.12.3" +LABEL org.opencontainers.image.revision="a075b9f384e200b357c4c85801062a980ddb3383" +LABEL org.opencontainers.image.version="0.12.4" VIASHDOCKER } @@ -567,7 +567,7 @@ while [[ $# -gt 0 ]]; do shift 1 ;; --version) - echo "from_h5mu_to_h5ad 0.12.3" + echo "from_h5mu_to_h5ad 0.12.4" exit ;; --input) diff --git a/target/docker/convert/velocyto_to_h5mu/.config.vsh.yaml b/target/docker/convert/velocyto_to_h5mu/.config.vsh.yaml index b84e91db02f..8e58dbe4439 100644 --- a/target/docker/convert/velocyto_to_h5mu/.config.vsh.yaml +++ b/target/docker/convert/velocyto_to_h5mu/.config.vsh.yaml @@ -1,7 +1,7 @@ functionality: name: "velocyto_to_h5mu" namespace: "convert" - version: "0.12.3" + version: "0.12.4" authors: - name: "Dries Schaumont" roles: @@ -250,6 +250,6 @@ info: output: "/home/runner/work/openpipeline/openpipeline/target/docker/convert/velocyto_to_h5mu" executable: "/home/runner/work/openpipeline/openpipeline/target/docker/convert/velocyto_to_h5mu/velocyto_to_h5mu" viash_version: "0.7.5" - git_commit: "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" + git_commit: "a075b9f384e200b357c4c85801062a980ddb3383" git_remote: "https://github.com/openpipelines-bio/openpipeline" - git_tag: "0.12.2-3-g827d483cf7" + git_tag: "0.12.3-3-ga075b9f384" diff --git a/target/docker/convert/velocyto_to_h5mu/velocyto_to_h5mu b/target/docker/convert/velocyto_to_h5mu/velocyto_to_h5mu index f379266f1ae..9aa87a4d355 100755 --- a/target/docker/convert/velocyto_to_h5mu/velocyto_to_h5mu +++ b/target/docker/convert/velocyto_to_h5mu/velocyto_to_h5mu @@ -1,6 +1,6 @@ #!/usr/bin/env bash -# velocyto_to_h5mu 0.12.3 +# velocyto_to_h5mu 0.12.4 # # This wrapper script is auto-generated by viash 0.7.5 and is thus a derivative # work thereof. This software comes with ABSOLUTELY NO WARRANTY from Data @@ -160,7 +160,7 @@ VIASH_META_TEMP_DIR="$VIASH_TEMP" # ViashHelp: Display helpful explanation about this executable function ViashHelp { - echo "velocyto_to_h5mu 0.12.3" + echo "velocyto_to_h5mu 0.12.4" echo "" echo "Convert a velocyto loom file to a h5mu file." 
echo "" @@ -440,10 +440,10 @@ RUN pip install --upgrade pip && \ LABEL org.opencontainers.image.authors="Dries Schaumont, Robrecht Cannoodt, Angela Oliveira Pisco" LABEL org.opencontainers.image.description="Companion container for running component convert velocyto_to_h5mu" -LABEL org.opencontainers.image.created="2024-01-25T10:13:58Z" +LABEL org.opencontainers.image.created="2024-01-31T09:08:34Z" LABEL org.opencontainers.image.source="https://github.com/openpipelines-bio/openpipeline" -LABEL org.opencontainers.image.revision="827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" -LABEL org.opencontainers.image.version="0.12.3" +LABEL org.opencontainers.image.revision="a075b9f384e200b357c4c85801062a980ddb3383" +LABEL org.opencontainers.image.version="0.12.4" VIASHDOCKER } @@ -594,7 +594,7 @@ while [[ $# -gt 0 ]]; do shift 1 ;; --version) - echo "velocyto_to_h5mu 0.12.3" + echo "velocyto_to_h5mu 0.12.4" exit ;; --input_loom) diff --git a/target/docker/correction/cellbender_remove_background/.config.vsh.yaml b/target/docker/correction/cellbender_remove_background/.config.vsh.yaml index 27e08715776..da33a50ad22 100644 --- a/target/docker/correction/cellbender_remove_background/.config.vsh.yaml +++ b/target/docker/correction/cellbender_remove_background/.config.vsh.yaml @@ -1,7 +1,7 @@ functionality: name: "cellbender_remove_background" namespace: "correction" - version: "0.12.3" + version: "0.12.4" argument_groups: - name: "Inputs" arguments: @@ -632,6 +632,6 @@ info: output: "/home/runner/work/openpipeline/openpipeline/target/docker/correction/cellbender_remove_background" executable: "/home/runner/work/openpipeline/openpipeline/target/docker/correction/cellbender_remove_background/cellbender_remove_background" viash_version: "0.7.5" - git_commit: "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" + git_commit: "a075b9f384e200b357c4c85801062a980ddb3383" git_remote: "https://github.com/openpipelines-bio/openpipeline" - git_tag: "0.12.2-3-g827d483cf7" + git_tag: "0.12.3-3-ga075b9f384" diff --git a/target/docker/correction/cellbender_remove_background/cellbender_remove_background b/target/docker/correction/cellbender_remove_background/cellbender_remove_background index 28bee30b0b8..43e24f1f8aa 100755 --- a/target/docker/correction/cellbender_remove_background/cellbender_remove_background +++ b/target/docker/correction/cellbender_remove_background/cellbender_remove_background @@ -1,6 +1,6 @@ #!/usr/bin/env bash -# cellbender_remove_background 0.12.3 +# cellbender_remove_background 0.12.4 # # This wrapper script is auto-generated by viash 0.7.5 and is thus a derivative # work thereof. This software comes with ABSOLUTELY NO WARRANTY from Data @@ -155,7 +155,7 @@ VIASH_META_TEMP_DIR="$VIASH_TEMP" # ViashHelp: Display helpful explanation about this executable function ViashHelp { - echo "cellbender_remove_background 0.12.3" + echo "cellbender_remove_background 0.12.4" echo "" echo "Eliminating technical artifacts from high-throughput single-cell RNA sequencing" echo "data." 
@@ -671,10 +671,10 @@ RUN pip install --upgrade pip && \ pip install --upgrade --no-cache-dir "mudata~=0.2.1" "cellbender~=0.3.0" LABEL org.opencontainers.image.description="Companion container for running component correction cellbender_remove_background" -LABEL org.opencontainers.image.created="2024-01-25T10:13:58Z" +LABEL org.opencontainers.image.created="2024-01-31T09:08:36Z" LABEL org.opencontainers.image.source="https://github.com/openpipelines-bio/openpipeline" -LABEL org.opencontainers.image.revision="827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" -LABEL org.opencontainers.image.version="0.12.3" +LABEL org.opencontainers.image.revision="a075b9f384e200b357c4c85801062a980ddb3383" +LABEL org.opencontainers.image.version="0.12.4" VIASHDOCKER } @@ -825,7 +825,7 @@ while [[ $# -gt 0 ]]; do shift 1 ;; --version) - echo "cellbender_remove_background 0.12.3" + echo "cellbender_remove_background 0.12.4" exit ;; --input) diff --git a/target/docker/correction/cellbender_remove_background_v0_2/.config.vsh.yaml b/target/docker/correction/cellbender_remove_background_v0_2/.config.vsh.yaml index f797ad4313d..6184a725817 100644 --- a/target/docker/correction/cellbender_remove_background_v0_2/.config.vsh.yaml +++ b/target/docker/correction/cellbender_remove_background_v0_2/.config.vsh.yaml @@ -1,7 +1,7 @@ functionality: name: "cellbender_remove_background_v0_2" namespace: "correction" - version: "0.12.3" + version: "0.12.4" argument_groups: - name: "Inputs" arguments: @@ -401,6 +401,6 @@ info: output: "/home/runner/work/openpipeline/openpipeline/target/docker/correction/cellbender_remove_background_v0_2" executable: "/home/runner/work/openpipeline/openpipeline/target/docker/correction/cellbender_remove_background_v0_2/cellbender_remove_background_v0_2" viash_version: "0.7.5" - git_commit: "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" + git_commit: "a075b9f384e200b357c4c85801062a980ddb3383" git_remote: "https://github.com/openpipelines-bio/openpipeline" - git_tag: "0.12.2-3-g827d483cf7" + git_tag: "0.12.3-3-ga075b9f384" diff --git a/target/docker/correction/cellbender_remove_background_v0_2/cellbender_remove_background_v0_2 b/target/docker/correction/cellbender_remove_background_v0_2/cellbender_remove_background_v0_2 index 3d153df830e..be9ee819e84 100755 --- a/target/docker/correction/cellbender_remove_background_v0_2/cellbender_remove_background_v0_2 +++ b/target/docker/correction/cellbender_remove_background_v0_2/cellbender_remove_background_v0_2 @@ -1,6 +1,6 @@ #!/usr/bin/env bash -# cellbender_remove_background_v0_2 0.12.3 +# cellbender_remove_background_v0_2 0.12.4 # # This wrapper script is auto-generated by viash 0.7.5 and is thus a derivative # work thereof. This software comes with ABSOLUTELY NO WARRANTY from Data @@ -155,7 +155,7 @@ VIASH_META_TEMP_DIR="$VIASH_TEMP" # ViashHelp: Display helpful explanation about this executable function ViashHelp { - echo "cellbender_remove_background_v0_2 0.12.3" + echo "cellbender_remove_background_v0_2 0.12.4" echo "" echo "Eliminating technical artifacts from high-throughput single-cell RNA sequencing" echo "data." 
@@ -537,10 +537,10 @@ RUN pip install --upgrade pip && \ pip install --upgrade --no-cache-dir "mudata~=0.2.3" "anndata~=0.9.1" "muon==0.1.5" "tables==3.8.0" "cellbender==0.2.1" LABEL org.opencontainers.image.description="Companion container for running component correction cellbender_remove_background_v0_2" -LABEL org.opencontainers.image.created="2024-01-25T10:13:58Z" +LABEL org.opencontainers.image.created="2024-01-31T09:08:36Z" LABEL org.opencontainers.image.source="https://github.com/openpipelines-bio/openpipeline" -LABEL org.opencontainers.image.revision="827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" -LABEL org.opencontainers.image.version="0.12.3" +LABEL org.opencontainers.image.revision="a075b9f384e200b357c4c85801062a980ddb3383" +LABEL org.opencontainers.image.version="0.12.4" VIASHDOCKER } @@ -691,7 +691,7 @@ while [[ $# -gt 0 ]]; do shift 1 ;; --version) - echo "cellbender_remove_background_v0_2 0.12.3" + echo "cellbender_remove_background_v0_2 0.12.4" exit ;; --input) diff --git a/target/docker/dataflow/concat/.config.vsh.yaml b/target/docker/dataflow/concat/.config.vsh.yaml index 3f0e4975e9e..a8813a29f15 100644 --- a/target/docker/dataflow/concat/.config.vsh.yaml +++ b/target/docker/dataflow/concat/.config.vsh.yaml @@ -1,7 +1,7 @@ functionality: name: "concat" namespace: "dataflow" - version: "0.12.3" + version: "0.12.4" authors: - name: "Dries Schaumont" roles: @@ -217,6 +217,6 @@ info: output: "/home/runner/work/openpipeline/openpipeline/target/docker/dataflow/concat" executable: "/home/runner/work/openpipeline/openpipeline/target/docker/dataflow/concat/concat" viash_version: "0.7.5" - git_commit: "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" + git_commit: "a075b9f384e200b357c4c85801062a980ddb3383" git_remote: "https://github.com/openpipelines-bio/openpipeline" - git_tag: "0.12.2-3-g827d483cf7" + git_tag: "0.12.3-3-ga075b9f384" diff --git a/target/docker/dataflow/concat/concat b/target/docker/dataflow/concat/concat index d274156f9f5..bfa064a4f9b 100755 --- a/target/docker/dataflow/concat/concat +++ b/target/docker/dataflow/concat/concat @@ -1,6 +1,6 @@ #!/usr/bin/env bash -# concat 0.12.3 +# concat 0.12.4 # # This wrapper script is auto-generated by viash 0.7.5 and is thus a derivative # work thereof. This software comes with ABSOLUTELY NO WARRANTY from Data @@ -158,7 +158,7 @@ VIASH_META_TEMP_DIR="$VIASH_TEMP" # ViashHelp: Display helpful explanation about this executable function ViashHelp { - echo "concat 0.12.3" + echo "concat 0.12.4" echo "" echo "Concatenates several uni-modal samples in .h5mu files into a single file." 
echo "" @@ -437,10 +437,10 @@ RUN pip install --upgrade pip && \ LABEL org.opencontainers.image.authors="Dries Schaumont" LABEL org.opencontainers.image.description="Companion container for running component dataflow concat" -LABEL org.opencontainers.image.created="2024-01-25T10:13:58Z" +LABEL org.opencontainers.image.created="2024-01-31T09:08:34Z" LABEL org.opencontainers.image.source="https://github.com/openpipelines-bio/openpipeline" -LABEL org.opencontainers.image.revision="827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" -LABEL org.opencontainers.image.version="0.12.3" +LABEL org.opencontainers.image.revision="a075b9f384e200b357c4c85801062a980ddb3383" +LABEL org.opencontainers.image.version="0.12.4" VIASHDOCKER } @@ -591,7 +591,7 @@ while [[ $# -gt 0 ]]; do shift 1 ;; --version) - echo "concat 0.12.3" + echo "concat 0.12.4" exit ;; --input) diff --git a/target/docker/dataflow/merge/.config.vsh.yaml b/target/docker/dataflow/merge/.config.vsh.yaml index 65b92df4ab3..7b965881161 100644 --- a/target/docker/dataflow/merge/.config.vsh.yaml +++ b/target/docker/dataflow/merge/.config.vsh.yaml @@ -1,7 +1,7 @@ functionality: name: "merge" namespace: "dataflow" - version: "0.12.3" + version: "0.12.4" authors: - name: "Dries Schaumont" roles: @@ -170,6 +170,6 @@ info: output: "/home/runner/work/openpipeline/openpipeline/target/docker/dataflow/merge" executable: "/home/runner/work/openpipeline/openpipeline/target/docker/dataflow/merge/merge" viash_version: "0.7.5" - git_commit: "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" + git_commit: "a075b9f384e200b357c4c85801062a980ddb3383" git_remote: "https://github.com/openpipelines-bio/openpipeline" - git_tag: "0.12.2-3-g827d483cf7" + git_tag: "0.12.3-3-ga075b9f384" diff --git a/target/docker/dataflow/merge/merge b/target/docker/dataflow/merge/merge index 680184215dc..19c4b37af72 100755 --- a/target/docker/dataflow/merge/merge +++ b/target/docker/dataflow/merge/merge @@ -1,6 +1,6 @@ #!/usr/bin/env bash -# merge 0.12.3 +# merge 0.12.4 # # This wrapper script is auto-generated by viash 0.7.5 and is thus a derivative # work thereof. This software comes with ABSOLUTELY NO WARRANTY from Data @@ -158,7 +158,7 @@ VIASH_META_TEMP_DIR="$VIASH_TEMP" # ViashHelp: Display helpful explanation about this executable function ViashHelp { - echo "merge 0.12.3" + echo "merge 0.12.4" echo "" echo "Combine one or more single-modality .h5mu files together into one .h5mu file." 
echo "" @@ -409,10 +409,10 @@ RUN pip install --upgrade pip && \ LABEL org.opencontainers.image.authors="Dries Schaumont" LABEL org.opencontainers.image.description="Companion container for running component dataflow merge" -LABEL org.opencontainers.image.created="2024-01-25T10:13:57Z" +LABEL org.opencontainers.image.created="2024-01-31T09:08:35Z" LABEL org.opencontainers.image.source="https://github.com/openpipelines-bio/openpipeline" -LABEL org.opencontainers.image.revision="827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" -LABEL org.opencontainers.image.version="0.12.3" +LABEL org.opencontainers.image.revision="a075b9f384e200b357c4c85801062a980ddb3383" +LABEL org.opencontainers.image.version="0.12.4" VIASHDOCKER } @@ -563,7 +563,7 @@ while [[ $# -gt 0 ]]; do shift 1 ;; --version) - echo "merge 0.12.3" + echo "merge 0.12.4" exit ;; --input) diff --git a/target/docker/dataflow/split_modalities/.config.vsh.yaml b/target/docker/dataflow/split_modalities/.config.vsh.yaml index a32bc5f622b..0a83aef3c14 100644 --- a/target/docker/dataflow/split_modalities/.config.vsh.yaml +++ b/target/docker/dataflow/split_modalities/.config.vsh.yaml @@ -1,7 +1,7 @@ functionality: name: "split_modalities" namespace: "dataflow" - version: "0.12.3" + version: "0.12.4" authors: - name: "Dries Schaumont" roles: @@ -209,6 +209,6 @@ info: output: "/home/runner/work/openpipeline/openpipeline/target/docker/dataflow/split_modalities" executable: "/home/runner/work/openpipeline/openpipeline/target/docker/dataflow/split_modalities/split_modalities" viash_version: "0.7.5" - git_commit: "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" + git_commit: "a075b9f384e200b357c4c85801062a980ddb3383" git_remote: "https://github.com/openpipelines-bio/openpipeline" - git_tag: "0.12.2-3-g827d483cf7" + git_tag: "0.12.3-3-ga075b9f384" diff --git a/target/docker/dataflow/split_modalities/split_modalities b/target/docker/dataflow/split_modalities/split_modalities index 3fa63d3502b..9d998fc08d3 100755 --- a/target/docker/dataflow/split_modalities/split_modalities +++ b/target/docker/dataflow/split_modalities/split_modalities @@ -1,6 +1,6 @@ #!/usr/bin/env bash -# split_modalities 0.12.3 +# split_modalities 0.12.4 # # This wrapper script is auto-generated by viash 0.7.5 and is thus a derivative # work thereof. This software comes with ABSOLUTELY NO WARRANTY from Data @@ -159,7 +159,7 @@ VIASH_META_TEMP_DIR="$VIASH_TEMP" # ViashHelp: Display helpful explanation about this executable function ViashHelp { - echo "split_modalities 0.12.3" + echo "split_modalities 0.12.4" echo "" echo "Split the modalities from a single .h5mu multimodal sample into seperate .h5mu" echo "files." 
@@ -421,10 +421,10 @@ RUN pip install --upgrade pip && \ LABEL org.opencontainers.image.authors="Dries Schaumont, Robrecht Cannoodt" LABEL org.opencontainers.image.description="Companion container for running component dataflow split_modalities" -LABEL org.opencontainers.image.created="2024-01-25T10:13:57Z" +LABEL org.opencontainers.image.created="2024-01-31T09:08:35Z" LABEL org.opencontainers.image.source="https://github.com/openpipelines-bio/openpipeline" -LABEL org.opencontainers.image.revision="827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" -LABEL org.opencontainers.image.version="0.12.3" +LABEL org.opencontainers.image.revision="a075b9f384e200b357c4c85801062a980ddb3383" +LABEL org.opencontainers.image.version="0.12.4" VIASHDOCKER } @@ -575,7 +575,7 @@ while [[ $# -gt 0 ]]; do shift 1 ;; --version) - echo "split_modalities 0.12.3" + echo "split_modalities 0.12.4" exit ;; --input) diff --git a/target/docker/demux/bcl2fastq/.config.vsh.yaml b/target/docker/demux/bcl2fastq/.config.vsh.yaml index 9d0adfe6947..1ac741f2011 100644 --- a/target/docker/demux/bcl2fastq/.config.vsh.yaml +++ b/target/docker/demux/bcl2fastq/.config.vsh.yaml @@ -1,7 +1,7 @@ functionality: name: "bcl2fastq" namespace: "demux" - version: "0.12.3" + version: "0.12.4" authors: - name: "Toni Verbeiren" roles: @@ -164,6 +164,6 @@ info: output: "/home/runner/work/openpipeline/openpipeline/target/docker/demux/bcl2fastq" executable: "/home/runner/work/openpipeline/openpipeline/target/docker/demux/bcl2fastq/bcl2fastq" viash_version: "0.7.5" - git_commit: "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" + git_commit: "a075b9f384e200b357c4c85801062a980ddb3383" git_remote: "https://github.com/openpipelines-bio/openpipeline" - git_tag: "0.12.2-3-g827d483cf7" + git_tag: "0.12.3-3-ga075b9f384" diff --git a/target/docker/demux/bcl2fastq/bcl2fastq b/target/docker/demux/bcl2fastq/bcl2fastq index 81fdbb33bc7..9adacf6733f 100755 --- a/target/docker/demux/bcl2fastq/bcl2fastq +++ b/target/docker/demux/bcl2fastq/bcl2fastq @@ -1,6 +1,6 @@ #!/usr/bin/env bash -# bcl2fastq 0.12.3 +# bcl2fastq 0.12.4 # # This wrapper script is auto-generated by viash 0.7.5 and is thus a derivative # work thereof. This software comes with ABSOLUTELY NO WARRANTY from Data @@ -158,7 +158,7 @@ VIASH_META_TEMP_DIR="$VIASH_TEMP" # ViashHelp: Display helpful explanation about this executable function ViashHelp { - echo "bcl2fastq 0.12.3" + echo "bcl2fastq 0.12.4" echo "" echo "Convert bcl files to fastq files using bcl2fastq." 
echo "" @@ -410,10 +410,10 @@ ENTRYPOINT [] RUN : LABEL org.opencontainers.image.authors="Toni Verbeiren" LABEL org.opencontainers.image.description="Companion container for running component demux bcl2fastq" -LABEL org.opencontainers.image.created="2024-01-25T10:14:00Z" +LABEL org.opencontainers.image.created="2024-01-31T09:08:33Z" LABEL org.opencontainers.image.source="https://github.com/openpipelines-bio/openpipeline" -LABEL org.opencontainers.image.revision="827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" -LABEL org.opencontainers.image.version="0.12.3" +LABEL org.opencontainers.image.revision="a075b9f384e200b357c4c85801062a980ddb3383" +LABEL org.opencontainers.image.version="0.12.4" VIASHDOCKER } @@ -564,7 +564,7 @@ while [[ $# -gt 0 ]]; do shift 1 ;; --version) - echo "bcl2fastq 0.12.3" + echo "bcl2fastq 0.12.4" exit ;; --input) diff --git a/target/docker/demux/bcl_convert/.config.vsh.yaml b/target/docker/demux/bcl_convert/.config.vsh.yaml index c682f50ee63..334e9c2b201 100644 --- a/target/docker/demux/bcl_convert/.config.vsh.yaml +++ b/target/docker/demux/bcl_convert/.config.vsh.yaml @@ -1,7 +1,7 @@ functionality: name: "bcl_convert" namespace: "demux" - version: "0.12.3" + version: "0.12.4" authors: - name: "Toni Verbeiren" roles: @@ -184,6 +184,6 @@ info: output: "/home/runner/work/openpipeline/openpipeline/target/docker/demux/bcl_convert" executable: "/home/runner/work/openpipeline/openpipeline/target/docker/demux/bcl_convert/bcl_convert" viash_version: "0.7.5" - git_commit: "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" + git_commit: "a075b9f384e200b357c4c85801062a980ddb3383" git_remote: "https://github.com/openpipelines-bio/openpipeline" - git_tag: "0.12.2-3-g827d483cf7" + git_tag: "0.12.3-3-ga075b9f384" diff --git a/target/docker/demux/bcl_convert/bcl_convert b/target/docker/demux/bcl_convert/bcl_convert index 3217d86dd53..4f669bf93fa 100755 --- a/target/docker/demux/bcl_convert/bcl_convert +++ b/target/docker/demux/bcl_convert/bcl_convert @@ -1,6 +1,6 @@ #!/usr/bin/env bash -# bcl_convert 0.12.3 +# bcl_convert 0.12.4 # # This wrapper script is auto-generated by viash 0.7.5 and is thus a derivative # work thereof. This software comes with ABSOLUTELY NO WARRANTY from Data @@ -159,7 +159,7 @@ VIASH_META_TEMP_DIR="$VIASH_TEMP" # ViashHelp: Display helpful explanation about this executable function ViashHelp { - echo "bcl_convert 0.12.3" + echo "bcl_convert 0.12.4" echo "" echo "Convert bcl files to fastq files using bcl-convert." 
echo "Information about upgrading from bcl2fastq via" @@ -417,10 +417,10 @@ ENTRYPOINT [] RUN : LABEL org.opencontainers.image.authors="Toni Verbeiren, Marijke Van Moerbeke" LABEL org.opencontainers.image.description="Companion container for running component demux bcl_convert" -LABEL org.opencontainers.image.created="2024-01-25T10:13:55Z" +LABEL org.opencontainers.image.created="2024-01-31T09:08:33Z" LABEL org.opencontainers.image.source="https://github.com/openpipelines-bio/openpipeline" -LABEL org.opencontainers.image.revision="827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" -LABEL org.opencontainers.image.version="0.12.3" +LABEL org.opencontainers.image.revision="a075b9f384e200b357c4c85801062a980ddb3383" +LABEL org.opencontainers.image.version="0.12.4" VIASHDOCKER } @@ -571,7 +571,7 @@ while [[ $# -gt 0 ]]; do shift 1 ;; --version) - echo "bcl_convert 0.12.3" + echo "bcl_convert 0.12.4" exit ;; --input) diff --git a/target/docker/demux/cellranger_mkfastq/.config.vsh.yaml b/target/docker/demux/cellranger_mkfastq/.config.vsh.yaml index 193a6e627b8..5da62b8341f 100644 --- a/target/docker/demux/cellranger_mkfastq/.config.vsh.yaml +++ b/target/docker/demux/cellranger_mkfastq/.config.vsh.yaml @@ -1,7 +1,7 @@ functionality: name: "cellranger_mkfastq" namespace: "demux" - version: "0.12.3" + version: "0.12.4" authors: - name: "Angela Oliveira Pisco" roles: @@ -202,6 +202,6 @@ info: output: "/home/runner/work/openpipeline/openpipeline/target/docker/demux/cellranger_mkfastq" executable: "/home/runner/work/openpipeline/openpipeline/target/docker/demux/cellranger_mkfastq/cellranger_mkfastq" viash_version: "0.7.5" - git_commit: "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" + git_commit: "a075b9f384e200b357c4c85801062a980ddb3383" git_remote: "https://github.com/openpipelines-bio/openpipeline" - git_tag: "0.12.2-3-g827d483cf7" + git_tag: "0.12.3-3-ga075b9f384" diff --git a/target/docker/demux/cellranger_mkfastq/cellranger_mkfastq b/target/docker/demux/cellranger_mkfastq/cellranger_mkfastq index 785ee6aab85..f94c854979b 100755 --- a/target/docker/demux/cellranger_mkfastq/cellranger_mkfastq +++ b/target/docker/demux/cellranger_mkfastq/cellranger_mkfastq @@ -1,6 +1,6 @@ #!/usr/bin/env bash -# cellranger_mkfastq 0.12.3 +# cellranger_mkfastq 0.12.4 # # This wrapper script is auto-generated by viash 0.7.5 and is thus a derivative # work thereof. 
@@ -160,7 +160,7 @@ VIASH_META_TEMP_DIR="$VIASH_TEMP"
 
 # ViashHelp: Display helpful explanation about this executable
 function ViashHelp {
-  echo "cellranger_mkfastq 0.12.3"
+  echo "cellranger_mkfastq 0.12.4"
   echo ""
   echo "Demultiplex raw sequencing data"
   echo ""
@@ -416,10 +416,10 @@ ENTRYPOINT []
 RUN apt-get update && apt-get upgrade -y
 LABEL org.opencontainers.image.authors="Angela Oliveira Pisco, Samuel D'Souza, Robrecht Cannoodt"
 LABEL org.opencontainers.image.description="Companion container for running component demux cellranger_mkfastq"
-LABEL org.opencontainers.image.created="2024-01-25T10:13:55Z"
+LABEL org.opencontainers.image.created="2024-01-31T09:08:32Z"
 LABEL org.opencontainers.image.source="https://github.com/openpipelines-bio/openpipeline"
-LABEL org.opencontainers.image.revision="827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f"
-LABEL org.opencontainers.image.version="0.12.3"
+LABEL org.opencontainers.image.revision="a075b9f384e200b357c4c85801062a980ddb3383"
+LABEL org.opencontainers.image.version="0.12.4"
 VIASHDOCKER
 }
 
@@ -570,7 +570,7 @@ while [[ $# -gt 0 ]]; do
             shift 1
             ;;
         --version)
-            echo "cellranger_mkfastq 0.12.3"
+            echo "cellranger_mkfastq 0.12.4"
             exit
             ;;
         --input)
diff --git a/target/docker/dimred/pca/.config.vsh.yaml b/target/docker/dimred/pca/.config.vsh.yaml
index 09d7e977c32..41baca73075 100644
--- a/target/docker/dimred/pca/.config.vsh.yaml
+++ b/target/docker/dimred/pca/.config.vsh.yaml
@@ -1,7 +1,7 @@
 functionality:
   name: "pca"
   namespace: "dimred"
-  version: "0.12.3"
+  version: "0.12.4"
   authors:
   - name: "Dries De Maeyer"
     roles:
@@ -248,6 +248,6 @@ info:
   output: "/home/runner/work/openpipeline/openpipeline/target/docker/dimred/pca"
   executable: "/home/runner/work/openpipeline/openpipeline/target/docker/dimred/pca/pca"
   viash_version: "0.7.5"
-  git_commit: "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f"
+  git_commit: "a075b9f384e200b357c4c85801062a980ddb3383"
   git_remote: "https://github.com/openpipelines-bio/openpipeline"
-  git_tag: "0.12.2-3-g827d483cf7"
+  git_tag: "0.12.3-3-ga075b9f384"
diff --git a/target/docker/dimred/pca/pca b/target/docker/dimred/pca/pca
index 028c6e95b8f..f5ed0c878f0 100755
--- a/target/docker/dimred/pca/pca
+++ b/target/docker/dimred/pca/pca
@@ -1,6 +1,6 @@
 #!/usr/bin/env bash
-# pca 0.12.3
+# pca 0.12.4
 #
 # This wrapper script is auto-generated by viash 0.7.5 and is thus a derivative
 # work thereof. This software comes with ABSOLUTELY NO WARRANTY from Data
@@ -158,7 +158,7 @@ VIASH_META_TEMP_DIR="$VIASH_TEMP"
 
 # ViashHelp: Display helpful explanation about this executable
 function ViashHelp {
-  echo "pca 0.12.3"
+  echo "pca 0.12.4"
   echo ""
   echo "Computes PCA coordinates, loadings and variance decomposition. Uses the"
   echo "implementation of scikit-learn [Pedregosa11]."
@@ -450,10 +450,10 @@ RUN pip install --upgrade pip && \
 
 LABEL org.opencontainers.image.authors="Dries De Maeyer"
 LABEL org.opencontainers.image.description="Companion container for running component dimred pca"
-LABEL org.opencontainers.image.created="2024-01-25T10:13:57Z"
+LABEL org.opencontainers.image.created="2024-01-31T09:08:31Z"
 LABEL org.opencontainers.image.source="https://github.com/openpipelines-bio/openpipeline"
-LABEL org.opencontainers.image.revision="827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f"
-LABEL org.opencontainers.image.version="0.12.3"
+LABEL org.opencontainers.image.revision="a075b9f384e200b357c4c85801062a980ddb3383"
+LABEL org.opencontainers.image.version="0.12.4"
 VIASHDOCKER
 }
 
@@ -604,7 +604,7 @@ while [[ $# -gt 0 ]]; do
             shift 1
             ;;
         --version)
-            echo "pca 0.12.3"
+            echo "pca 0.12.4"
             exit
             ;;
         --input)
diff --git a/target/docker/dimred/umap/.config.vsh.yaml b/target/docker/dimred/umap/.config.vsh.yaml
index c67e3e8cc0b..beeddcaa40b 100644
--- a/target/docker/dimred/umap/.config.vsh.yaml
+++ b/target/docker/dimred/umap/.config.vsh.yaml
@@ -1,7 +1,7 @@
 functionality:
   name: "umap"
   namespace: "dimred"
-  version: "0.12.3"
+  version: "0.12.4"
   authors:
   - name: "Dries De Maeyer"
     roles:
@@ -307,6 +307,6 @@ info:
   output: "/home/runner/work/openpipeline/openpipeline/target/docker/dimred/umap"
   executable: "/home/runner/work/openpipeline/openpipeline/target/docker/dimred/umap/umap"
   viash_version: "0.7.5"
-  git_commit: "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f"
+  git_commit: "a075b9f384e200b357c4c85801062a980ddb3383"
   git_remote: "https://github.com/openpipelines-bio/openpipeline"
-  git_tag: "0.12.2-3-g827d483cf7"
+  git_tag: "0.12.3-3-ga075b9f384"
diff --git a/target/docker/dimred/umap/umap b/target/docker/dimred/umap/umap
index 7b88be04993..629f8ddf870 100755
--- a/target/docker/dimred/umap/umap
+++ b/target/docker/dimred/umap/umap
@@ -1,6 +1,6 @@
 #!/usr/bin/env bash
-# umap 0.12.3
+# umap 0.12.4
 #
 # This wrapper script is auto-generated by viash 0.7.5 and is thus a derivative
 # work thereof. This software comes with ABSOLUTELY NO WARRANTY from Data
@@ -158,7 +158,7 @@ VIASH_META_TEMP_DIR="$VIASH_TEMP"
 
 # ViashHelp: Display helpful explanation about this executable
 function ViashHelp {
-  echo "umap 0.12.3"
+  echo "umap 0.12.4"
   echo ""
   echo "UMAP (Uniform Manifold Approximation and Projection) is a manifold learning"
   echo "technique suitable for visualizing high-dimensional data. Besides tending to be"
Besides tending to be" @@ -487,10 +487,10 @@ RUN pip install --upgrade pip && \ LABEL org.opencontainers.image.authors="Dries De Maeyer" LABEL org.opencontainers.image.description="Companion container for running component dimred umap" -LABEL org.opencontainers.image.created="2024-01-25T10:13:57Z" +LABEL org.opencontainers.image.created="2024-01-31T09:08:31Z" LABEL org.opencontainers.image.source="https://github.com/openpipelines-bio/openpipeline" -LABEL org.opencontainers.image.revision="827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" -LABEL org.opencontainers.image.version="0.12.3" +LABEL org.opencontainers.image.revision="a075b9f384e200b357c4c85801062a980ddb3383" +LABEL org.opencontainers.image.version="0.12.4" VIASHDOCKER } @@ -641,7 +641,7 @@ while [[ $# -gt 0 ]]; do shift 1 ;; --version) - echo "umap 0.12.3" + echo "umap 0.12.4" exit ;; --input) diff --git a/target/docker/download/download_file/.config.vsh.yaml b/target/docker/download/download_file/.config.vsh.yaml index 86a0c8b5142..65dd0f3deb9 100644 --- a/target/docker/download/download_file/.config.vsh.yaml +++ b/target/docker/download/download_file/.config.vsh.yaml @@ -1,7 +1,7 @@ functionality: name: "download_file" namespace: "download" - version: "0.12.3" + version: "0.12.4" authors: - name: "Robrecht Cannoodt" roles: @@ -133,6 +133,6 @@ info: output: "/home/runner/work/openpipeline/openpipeline/target/docker/download/download_file" executable: "/home/runner/work/openpipeline/openpipeline/target/docker/download/download_file/download_file" viash_version: "0.7.5" - git_commit: "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" + git_commit: "a075b9f384e200b357c4c85801062a980ddb3383" git_remote: "https://github.com/openpipelines-bio/openpipeline" - git_tag: "0.12.2-3-g827d483cf7" + git_tag: "0.12.3-3-ga075b9f384" diff --git a/target/docker/download/download_file/download_file b/target/docker/download/download_file/download_file index c95c6155173..3c5c12f2954 100755 --- a/target/docker/download/download_file/download_file +++ b/target/docker/download/download_file/download_file @@ -1,6 +1,6 @@ #!/usr/bin/env bash -# download_file 0.12.3 +# download_file 0.12.4 # # This wrapper script is auto-generated by viash 0.7.5 and is thus a derivative # work thereof. This software comes with ABSOLUTELY NO WARRANTY from Data @@ -158,7 +158,7 @@ VIASH_META_TEMP_DIR="$VIASH_TEMP" # ViashHelp: Display helpful explanation about this executable function ViashHelp { - echo "download_file 0.12.3" + echo "download_file 0.12.4" echo "" echo "Download a file." 
echo "" @@ -409,10 +409,10 @@ ENTRYPOINT [] RUN : LABEL org.opencontainers.image.authors="Robrecht Cannoodt" LABEL org.opencontainers.image.description="Companion container for running component download download_file" -LABEL org.opencontainers.image.created="2024-01-25T10:13:56Z" +LABEL org.opencontainers.image.created="2024-01-31T09:08:35Z" LABEL org.opencontainers.image.source="https://github.com/openpipelines-bio/openpipeline" -LABEL org.opencontainers.image.revision="827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" -LABEL org.opencontainers.image.version="0.12.3" +LABEL org.opencontainers.image.revision="a075b9f384e200b357c4c85801062a980ddb3383" +LABEL org.opencontainers.image.version="0.12.4" VIASHDOCKER } @@ -563,7 +563,7 @@ while [[ $# -gt 0 ]]; do shift 1 ;; --version) - echo "download_file 0.12.3" + echo "download_file 0.12.4" exit ;; --input) diff --git a/target/docker/download/sync_test_resources/.config.vsh.yaml b/target/docker/download/sync_test_resources/.config.vsh.yaml index c6815328d22..97a117275d2 100644 --- a/target/docker/download/sync_test_resources/.config.vsh.yaml +++ b/target/docker/download/sync_test_resources/.config.vsh.yaml @@ -1,7 +1,7 @@ functionality: name: "sync_test_resources" namespace: "download" - version: "0.12.3" + version: "0.12.4" authors: - name: "Robrecht Cannoodt" roles: @@ -165,6 +165,6 @@ info: output: "/home/runner/work/openpipeline/openpipeline/target/docker/download/sync_test_resources" executable: "/home/runner/work/openpipeline/openpipeline/target/docker/download/sync_test_resources/sync_test_resources" viash_version: "0.7.5" - git_commit: "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" + git_commit: "a075b9f384e200b357c4c85801062a980ddb3383" git_remote: "https://github.com/openpipelines-bio/openpipeline" - git_tag: "0.12.2-3-g827d483cf7" + git_tag: "0.12.3-3-ga075b9f384" diff --git a/target/docker/download/sync_test_resources/sync_test_resources b/target/docker/download/sync_test_resources/sync_test_resources index 16ef6e1ce1c..8ebba8fac99 100755 --- a/target/docker/download/sync_test_resources/sync_test_resources +++ b/target/docker/download/sync_test_resources/sync_test_resources @@ -1,6 +1,6 @@ #!/usr/bin/env bash -# sync_test_resources 0.12.3 +# sync_test_resources 0.12.4 # # This wrapper script is auto-generated by viash 0.7.5 and is thus a derivative # work thereof. 
This software comes with ABSOLUTELY NO WARRANTY from Data @@ -158,7 +158,7 @@ VIASH_META_TEMP_DIR="$VIASH_TEMP" # ViashHelp: Display helpful explanation about this executable function ViashHelp { - echo "sync_test_resources 0.12.3" + echo "sync_test_resources 0.12.4" echo "" echo "Synchronise the test resources from s3://openpipelines-data to resources_test" echo "" @@ -423,10 +423,10 @@ RUN yum install -y procps && \ LABEL org.opencontainers.image.authors="Robrecht Cannoodt" LABEL org.opencontainers.image.description="Companion container for running component download sync_test_resources" -LABEL org.opencontainers.image.created="2024-01-25T10:13:55Z" +LABEL org.opencontainers.image.created="2024-01-31T09:08:35Z" LABEL org.opencontainers.image.source="https://github.com/openpipelines-bio/openpipeline" -LABEL org.opencontainers.image.revision="827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" -LABEL org.opencontainers.image.version="0.12.3" +LABEL org.opencontainers.image.revision="a075b9f384e200b357c4c85801062a980ddb3383" +LABEL org.opencontainers.image.version="0.12.4" VIASHDOCKER } @@ -577,7 +577,7 @@ while [[ $# -gt 0 ]]; do shift 1 ;; --version) - echo "sync_test_resources 0.12.3" + echo "sync_test_resources 0.12.4" exit ;; --input) diff --git a/target/docker/files/make_params/.config.vsh.yaml b/target/docker/files/make_params/.config.vsh.yaml index 8b3297655b7..809cf66177e 100644 --- a/target/docker/files/make_params/.config.vsh.yaml +++ b/target/docker/files/make_params/.config.vsh.yaml @@ -1,7 +1,7 @@ functionality: name: "make_params" namespace: "files" - version: "0.12.3" + version: "0.12.4" authors: - name: "Angela Oliveira Pisco" roles: @@ -215,6 +215,6 @@ info: output: "/home/runner/work/openpipeline/openpipeline/target/docker/files/make_params" executable: "/home/runner/work/openpipeline/openpipeline/target/docker/files/make_params/make_params" viash_version: "0.7.5" - git_commit: "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" + git_commit: "a075b9f384e200b357c4c85801062a980ddb3383" git_remote: "https://github.com/openpipelines-bio/openpipeline" - git_tag: "0.12.2-3-g827d483cf7" + git_tag: "0.12.3-3-ga075b9f384" diff --git a/target/docker/files/make_params/make_params b/target/docker/files/make_params/make_params index c59e9ee4a20..60503d61e0b 100755 --- a/target/docker/files/make_params/make_params +++ b/target/docker/files/make_params/make_params @@ -1,6 +1,6 @@ #!/usr/bin/env bash -# make_params 0.12.3 +# make_params 0.12.4 # # This wrapper script is auto-generated by viash 0.7.5 and is thus a derivative # work thereof. This software comes with ABSOLUTELY NO WARRANTY from Data @@ -159,7 +159,7 @@ VIASH_META_TEMP_DIR="$VIASH_TEMP" # ViashHelp: Display helpful explanation about this executable function ViashHelp { - echo "make_params 0.12.3" + echo "make_params 0.12.4" echo "" echo "Looks for files in a directory and turn it in a params file." 
echo "" @@ -429,10 +429,10 @@ ENTRYPOINT [] RUN : LABEL org.opencontainers.image.authors="Angela Oliveira Pisco, Robrecht Cannoodt" LABEL org.opencontainers.image.description="Companion container for running component files make_params" -LABEL org.opencontainers.image.created="2024-01-25T10:13:57Z" +LABEL org.opencontainers.image.created="2024-01-31T09:08:34Z" LABEL org.opencontainers.image.source="https://github.com/openpipelines-bio/openpipeline" -LABEL org.opencontainers.image.revision="827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" -LABEL org.opencontainers.image.version="0.12.3" +LABEL org.opencontainers.image.revision="a075b9f384e200b357c4c85801062a980ddb3383" +LABEL org.opencontainers.image.version="0.12.4" VIASHDOCKER } @@ -583,7 +583,7 @@ while [[ $# -gt 0 ]]; do shift 1 ;; --version) - echo "make_params 0.12.3" + echo "make_params 0.12.4" exit ;; --base_dir) diff --git a/target/docker/filter/delimit_fraction/.config.vsh.yaml b/target/docker/filter/delimit_fraction/.config.vsh.yaml index d16f34cc3ee..3c64b182903 100644 --- a/target/docker/filter/delimit_fraction/.config.vsh.yaml +++ b/target/docker/filter/delimit_fraction/.config.vsh.yaml @@ -1,7 +1,7 @@ functionality: name: "delimit_fraction" namespace: "filter" - version: "0.12.3" + version: "0.12.4" authors: - name: "Dries Schaumont" roles: @@ -236,6 +236,6 @@ info: output: "/home/runner/work/openpipeline/openpipeline/target/docker/filter/delimit_fraction" executable: "/home/runner/work/openpipeline/openpipeline/target/docker/filter/delimit_fraction/delimit_fraction" viash_version: "0.7.5" - git_commit: "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" + git_commit: "a075b9f384e200b357c4c85801062a980ddb3383" git_remote: "https://github.com/openpipelines-bio/openpipeline" - git_tag: "0.12.2-3-g827d483cf7" + git_tag: "0.12.3-3-ga075b9f384" diff --git a/target/docker/filter/delimit_fraction/delimit_fraction b/target/docker/filter/delimit_fraction/delimit_fraction index 4c18c9a5133..6c1fde1cd2e 100755 --- a/target/docker/filter/delimit_fraction/delimit_fraction +++ b/target/docker/filter/delimit_fraction/delimit_fraction @@ -1,6 +1,6 @@ #!/usr/bin/env bash -# delimit_fraction 0.12.3 +# delimit_fraction 0.12.4 # # This wrapper script is auto-generated by viash 0.7.5 and is thus a derivative # work thereof. This software comes with ABSOLUTELY NO WARRANTY from Data @@ -158,7 +158,7 @@ VIASH_META_TEMP_DIR="$VIASH_TEMP" # ViashHelp: Display helpful explanation about this executable function ViashHelp { - echo "delimit_fraction 0.12.3" + echo "delimit_fraction 0.12.4" echo "" echo "Turns a column containing values between 0 and 1 into a boolean column based on" echo "thresholds." 
@@ -445,10 +445,10 @@ RUN pip install --upgrade pip && \ LABEL org.opencontainers.image.authors="Dries Schaumont" LABEL org.opencontainers.image.description="Companion container for running component filter delimit_fraction" -LABEL org.opencontainers.image.created="2024-01-25T10:13:56Z" +LABEL org.opencontainers.image.created="2024-01-31T09:08:35Z" LABEL org.opencontainers.image.source="https://github.com/openpipelines-bio/openpipeline" -LABEL org.opencontainers.image.revision="827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" -LABEL org.opencontainers.image.version="0.12.3" +LABEL org.opencontainers.image.revision="a075b9f384e200b357c4c85801062a980ddb3383" +LABEL org.opencontainers.image.version="0.12.4" VIASHDOCKER } @@ -599,7 +599,7 @@ while [[ $# -gt 0 ]]; do shift 1 ;; --version) - echo "delimit_fraction 0.12.3" + echo "delimit_fraction 0.12.4" exit ;; --input) diff --git a/target/docker/filter/do_filter/.config.vsh.yaml b/target/docker/filter/do_filter/.config.vsh.yaml index 3b73a95236b..32536f480c6 100644 --- a/target/docker/filter/do_filter/.config.vsh.yaml +++ b/target/docker/filter/do_filter/.config.vsh.yaml @@ -1,7 +1,7 @@ functionality: name: "do_filter" namespace: "filter" - version: "0.12.3" + version: "0.12.4" authors: - name: "Robrecht Cannoodt" roles: @@ -197,6 +197,6 @@ info: output: "/home/runner/work/openpipeline/openpipeline/target/docker/filter/do_filter" executable: "/home/runner/work/openpipeline/openpipeline/target/docker/filter/do_filter/do_filter" viash_version: "0.7.5" - git_commit: "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" + git_commit: "a075b9f384e200b357c4c85801062a980ddb3383" git_remote: "https://github.com/openpipelines-bio/openpipeline" - git_tag: "0.12.2-3-g827d483cf7" + git_tag: "0.12.3-3-ga075b9f384" diff --git a/target/docker/filter/do_filter/do_filter b/target/docker/filter/do_filter/do_filter index bd18ba5afe6..02b8b2f2eb3 100755 --- a/target/docker/filter/do_filter/do_filter +++ b/target/docker/filter/do_filter/do_filter @@ -1,6 +1,6 @@ #!/usr/bin/env bash -# do_filter 0.12.3 +# do_filter 0.12.4 # # This wrapper script is auto-generated by viash 0.7.5 and is thus a derivative # work thereof. This software comes with ABSOLUTELY NO WARRANTY from Data @@ -158,7 +158,7 @@ VIASH_META_TEMP_DIR="$VIASH_TEMP" # ViashHelp: Display helpful explanation about this executable function ViashHelp { - echo "do_filter 0.12.3" + echo "do_filter 0.12.4" echo "" echo "Remove observations and variables based on specified .obs and .var columns." 
echo "" @@ -423,10 +423,10 @@ RUN pip install --upgrade pip && \ LABEL org.opencontainers.image.authors="Robrecht Cannoodt" LABEL org.opencontainers.image.description="Companion container for running component filter do_filter" -LABEL org.opencontainers.image.created="2024-01-25T10:13:57Z" +LABEL org.opencontainers.image.created="2024-01-31T09:08:34Z" LABEL org.opencontainers.image.source="https://github.com/openpipelines-bio/openpipeline" -LABEL org.opencontainers.image.revision="827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" -LABEL org.opencontainers.image.version="0.12.3" +LABEL org.opencontainers.image.revision="a075b9f384e200b357c4c85801062a980ddb3383" +LABEL org.opencontainers.image.version="0.12.4" VIASHDOCKER } @@ -577,7 +577,7 @@ while [[ $# -gt 0 ]]; do shift 1 ;; --version) - echo "do_filter 0.12.3" + echo "do_filter 0.12.4" exit ;; --input) diff --git a/target/docker/filter/filter_with_counts/.config.vsh.yaml b/target/docker/filter/filter_with_counts/.config.vsh.yaml index 199eeea87cc..0f926af6628 100644 --- a/target/docker/filter/filter_with_counts/.config.vsh.yaml +++ b/target/docker/filter/filter_with_counts/.config.vsh.yaml @@ -1,7 +1,7 @@ functionality: name: "filter_with_counts" namespace: "filter" - version: "0.12.3" + version: "0.12.4" authors: - name: "Dries De Maeyer" roles: @@ -290,6 +290,6 @@ info: output: "/home/runner/work/openpipeline/openpipeline/target/docker/filter/filter_with_counts" executable: "/home/runner/work/openpipeline/openpipeline/target/docker/filter/filter_with_counts/filter_with_counts" viash_version: "0.7.5" - git_commit: "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" + git_commit: "a075b9f384e200b357c4c85801062a980ddb3383" git_remote: "https://github.com/openpipelines-bio/openpipeline" - git_tag: "0.12.2-3-g827d483cf7" + git_tag: "0.12.3-3-ga075b9f384" diff --git a/target/docker/filter/filter_with_counts/filter_with_counts b/target/docker/filter/filter_with_counts/filter_with_counts index 68ef1ee30bd..b51fa813a52 100755 --- a/target/docker/filter/filter_with_counts/filter_with_counts +++ b/target/docker/filter/filter_with_counts/filter_with_counts @@ -1,6 +1,6 @@ #!/usr/bin/env bash -# filter_with_counts 0.12.3 +# filter_with_counts 0.12.4 # # This wrapper script is auto-generated by viash 0.7.5 and is thus a derivative # work thereof. This software comes with ABSOLUTELY NO WARRANTY from Data @@ -159,7 +159,7 @@ VIASH_META_TEMP_DIR="$VIASH_TEMP" # ViashHelp: Display helpful explanation about this executable function ViashHelp { - echo "filter_with_counts 0.12.3" + echo "filter_with_counts 0.12.4" echo "" echo "Filter scRNA-seq data based on the primary QC metrics." 
echo "This is based on both the UMI counts, the gene counts" @@ -463,10 +463,10 @@ RUN pip install --upgrade pip && \ LABEL org.opencontainers.image.authors="Dries De Maeyer, Robrecht Cannoodt" LABEL org.opencontainers.image.description="Companion container for running component filter filter_with_counts" -LABEL org.opencontainers.image.created="2024-01-25T10:13:57Z" +LABEL org.opencontainers.image.created="2024-01-31T09:08:36Z" LABEL org.opencontainers.image.source="https://github.com/openpipelines-bio/openpipeline" -LABEL org.opencontainers.image.revision="827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" -LABEL org.opencontainers.image.version="0.12.3" +LABEL org.opencontainers.image.revision="a075b9f384e200b357c4c85801062a980ddb3383" +LABEL org.opencontainers.image.version="0.12.4" VIASHDOCKER } @@ -617,7 +617,7 @@ while [[ $# -gt 0 ]]; do shift 1 ;; --version) - echo "filter_with_counts 0.12.3" + echo "filter_with_counts 0.12.4" exit ;; --input) diff --git a/target/docker/filter/filter_with_hvg/.config.vsh.yaml b/target/docker/filter/filter_with_hvg/.config.vsh.yaml index 4a0d796bf0c..f04dc6f5339 100644 --- a/target/docker/filter/filter_with_hvg/.config.vsh.yaml +++ b/target/docker/filter/filter_with_hvg/.config.vsh.yaml @@ -1,7 +1,7 @@ functionality: name: "filter_with_hvg" namespace: "filter" - version: "0.12.3" + version: "0.12.4" authors: - name: "Dries De Maeyer" roles: @@ -347,6 +347,6 @@ info: output: "/home/runner/work/openpipeline/openpipeline/target/docker/filter/filter_with_hvg" executable: "/home/runner/work/openpipeline/openpipeline/target/docker/filter/filter_with_hvg/filter_with_hvg" viash_version: "0.7.5" - git_commit: "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" + git_commit: "a075b9f384e200b357c4c85801062a980ddb3383" git_remote: "https://github.com/openpipelines-bio/openpipeline" - git_tag: "0.12.2-3-g827d483cf7" + git_tag: "0.12.3-3-ga075b9f384" diff --git a/target/docker/filter/filter_with_hvg/filter_with_hvg b/target/docker/filter/filter_with_hvg/filter_with_hvg index b0031c94d3a..04341153a0f 100755 --- a/target/docker/filter/filter_with_hvg/filter_with_hvg +++ b/target/docker/filter/filter_with_hvg/filter_with_hvg @@ -1,6 +1,6 @@ #!/usr/bin/env bash -# filter_with_hvg 0.12.3 +# filter_with_hvg 0.12.4 # # This wrapper script is auto-generated by viash 0.7.5 and is thus a derivative # work thereof. This software comes with ABSOLUTELY NO WARRANTY from Data @@ -159,7 +159,7 @@ VIASH_META_TEMP_DIR="$VIASH_TEMP" # ViashHelp: Display helpful explanation about this executable function ViashHelp { - echo "filter_with_hvg 0.12.3" + echo "filter_with_hvg 0.12.4" echo "" echo "Annotate highly variable genes [Satija15] [Zheng17] [Stuart19]." 
echo "" @@ -509,10 +509,10 @@ RUN pip install --upgrade pip && \ LABEL org.opencontainers.image.authors="Dries De Maeyer, Robrecht Cannoodt" LABEL org.opencontainers.image.description="Companion container for running component filter filter_with_hvg" -LABEL org.opencontainers.image.created="2024-01-25T10:13:58Z" +LABEL org.opencontainers.image.created="2024-01-31T09:08:36Z" LABEL org.opencontainers.image.source="https://github.com/openpipelines-bio/openpipeline" -LABEL org.opencontainers.image.revision="827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" -LABEL org.opencontainers.image.version="0.12.3" +LABEL org.opencontainers.image.revision="a075b9f384e200b357c4c85801062a980ddb3383" +LABEL org.opencontainers.image.version="0.12.4" VIASHDOCKER } @@ -663,7 +663,7 @@ while [[ $# -gt 0 ]]; do shift 1 ;; --version) - echo "filter_with_hvg 0.12.3" + echo "filter_with_hvg 0.12.4" exit ;; --input) diff --git a/target/docker/filter/filter_with_scrublet/.config.vsh.yaml b/target/docker/filter/filter_with_scrublet/.config.vsh.yaml index 3412282dd29..6e96116696f 100644 --- a/target/docker/filter/filter_with_scrublet/.config.vsh.yaml +++ b/target/docker/filter/filter_with_scrublet/.config.vsh.yaml @@ -1,7 +1,7 @@ functionality: name: "filter_with_scrublet" namespace: "filter" - version: "0.12.3" + version: "0.12.4" authors: - name: "Dries De Maeyer" roles: @@ -299,6 +299,6 @@ info: output: "/home/runner/work/openpipeline/openpipeline/target/docker/filter/filter_with_scrublet" executable: "/home/runner/work/openpipeline/openpipeline/target/docker/filter/filter_with_scrublet/filter_with_scrublet" viash_version: "0.7.5" - git_commit: "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" + git_commit: "a075b9f384e200b357c4c85801062a980ddb3383" git_remote: "https://github.com/openpipelines-bio/openpipeline" - git_tag: "0.12.2-3-g827d483cf7" + git_tag: "0.12.3-3-ga075b9f384" diff --git a/target/docker/filter/filter_with_scrublet/filter_with_scrublet b/target/docker/filter/filter_with_scrublet/filter_with_scrublet index dfcfe09f054..bc86e9f391a 100755 --- a/target/docker/filter/filter_with_scrublet/filter_with_scrublet +++ b/target/docker/filter/filter_with_scrublet/filter_with_scrublet @@ -1,6 +1,6 @@ #!/usr/bin/env bash -# filter_with_scrublet 0.12.3 +# filter_with_scrublet 0.12.4 # # This wrapper script is auto-generated by viash 0.7.5 and is thus a derivative # work thereof. This software comes with ABSOLUTELY NO WARRANTY from Data @@ -159,7 +159,7 @@ VIASH_META_TEMP_DIR="$VIASH_TEMP" # ViashHelp: Display helpful explanation about this executable function ViashHelp { - echo "filter_with_scrublet 0.12.3" + echo "filter_with_scrublet 0.12.4" echo "" echo "Doublet detection using the Scrublet method (Wolock, Lopez and Klein, 2019)." 
echo "The method tests for potential doublets by using the expression profiles of" @@ -484,10 +484,10 @@ RUN pip install --upgrade pip && \ LABEL org.opencontainers.image.authors="Dries De Maeyer, Robrecht Cannoodt" LABEL org.opencontainers.image.description="Companion container for running component filter filter_with_scrublet" -LABEL org.opencontainers.image.created="2024-01-25T10:13:58Z" +LABEL org.opencontainers.image.created="2024-01-31T09:08:36Z" LABEL org.opencontainers.image.source="https://github.com/openpipelines-bio/openpipeline" -LABEL org.opencontainers.image.revision="827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" -LABEL org.opencontainers.image.version="0.12.3" +LABEL org.opencontainers.image.revision="a075b9f384e200b357c4c85801062a980ddb3383" +LABEL org.opencontainers.image.version="0.12.4" VIASHDOCKER } @@ -638,7 +638,7 @@ while [[ $# -gt 0 ]]; do shift 1 ;; --version) - echo "filter_with_scrublet 0.12.3" + echo "filter_with_scrublet 0.12.4" exit ;; --input) diff --git a/target/docker/filter/remove_modality/.config.vsh.yaml b/target/docker/filter/remove_modality/.config.vsh.yaml index d09eeffbd9c..4be0398ed3d 100644 --- a/target/docker/filter/remove_modality/.config.vsh.yaml +++ b/target/docker/filter/remove_modality/.config.vsh.yaml @@ -1,7 +1,7 @@ functionality: name: "remove_modality" namespace: "filter" - version: "0.12.3" + version: "0.12.4" authors: - name: "Dries Schaumont" roles: @@ -166,6 +166,6 @@ info: output: "/home/runner/work/openpipeline/openpipeline/target/docker/filter/remove_modality" executable: "/home/runner/work/openpipeline/openpipeline/target/docker/filter/remove_modality/remove_modality" viash_version: "0.7.5" - git_commit: "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" + git_commit: "a075b9f384e200b357c4c85801062a980ddb3383" git_remote: "https://github.com/openpipelines-bio/openpipeline" - git_tag: "0.12.2-3-g827d483cf7" + git_tag: "0.12.3-3-ga075b9f384" diff --git a/target/docker/filter/remove_modality/remove_modality b/target/docker/filter/remove_modality/remove_modality index 47c305cbe2c..a81ff47b106 100755 --- a/target/docker/filter/remove_modality/remove_modality +++ b/target/docker/filter/remove_modality/remove_modality @@ -1,6 +1,6 @@ #!/usr/bin/env bash -# remove_modality 0.12.3 +# remove_modality 0.12.4 # # This wrapper script is auto-generated by viash 0.7.5 and is thus a derivative # work thereof. 
This software comes with ABSOLUTELY NO WARRANTY from Data @@ -158,7 +158,7 @@ VIASH_META_TEMP_DIR="$VIASH_TEMP" # ViashHelp: Display helpful explanation about this executable function ViashHelp { - echo "remove_modality 0.12.3" + echo "remove_modality 0.12.4" echo "" echo "Remove a modality from a .h5mu file" echo "" @@ -412,10 +412,10 @@ RUN pip install --upgrade pip && \ LABEL org.opencontainers.image.authors="Dries Schaumont" LABEL org.opencontainers.image.description="Companion container for running component filter remove_modality" -LABEL org.opencontainers.image.created="2024-01-25T10:13:57Z" +LABEL org.opencontainers.image.created="2024-01-31T09:08:35Z" LABEL org.opencontainers.image.source="https://github.com/openpipelines-bio/openpipeline" -LABEL org.opencontainers.image.revision="827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" -LABEL org.opencontainers.image.version="0.12.3" +LABEL org.opencontainers.image.revision="a075b9f384e200b357c4c85801062a980ddb3383" +LABEL org.opencontainers.image.version="0.12.4" VIASHDOCKER } @@ -566,7 +566,7 @@ while [[ $# -gt 0 ]]; do shift 1 ;; --version) - echo "remove_modality 0.12.3" + echo "remove_modality 0.12.4" exit ;; --input) diff --git a/target/docker/filter/subset_h5mu/.config.vsh.yaml b/target/docker/filter/subset_h5mu/.config.vsh.yaml index bfd2021f189..30470cb0a1c 100644 --- a/target/docker/filter/subset_h5mu/.config.vsh.yaml +++ b/target/docker/filter/subset_h5mu/.config.vsh.yaml @@ -1,7 +1,7 @@ functionality: name: "subset_h5mu" namespace: "filter" - version: "0.12.3" + version: "0.12.4" authors: - name: "Dries Schaumont" roles: @@ -182,6 +182,6 @@ info: output: "/home/runner/work/openpipeline/openpipeline/target/docker/filter/subset_h5mu" executable: "/home/runner/work/openpipeline/openpipeline/target/docker/filter/subset_h5mu/subset_h5mu" viash_version: "0.7.5" - git_commit: "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" + git_commit: "a075b9f384e200b357c4c85801062a980ddb3383" git_remote: "https://github.com/openpipelines-bio/openpipeline" - git_tag: "0.12.2-3-g827d483cf7" + git_tag: "0.12.3-3-ga075b9f384" diff --git a/target/docker/filter/subset_h5mu/subset_h5mu b/target/docker/filter/subset_h5mu/subset_h5mu index b43a9d3a3c1..5fd3e710500 100755 --- a/target/docker/filter/subset_h5mu/subset_h5mu +++ b/target/docker/filter/subset_h5mu/subset_h5mu @@ -1,6 +1,6 @@ #!/usr/bin/env bash -# subset_h5mu 0.12.3 +# subset_h5mu 0.12.4 # # This wrapper script is auto-generated by viash 0.7.5 and is thus a derivative # work thereof. 
This software comes with ABSOLUTELY NO WARRANTY from Data @@ -158,7 +158,7 @@ VIASH_META_TEMP_DIR="$VIASH_TEMP" # ViashHelp: Display helpful explanation about this executable function ViashHelp { - echo "subset_h5mu 0.12.3" + echo "subset_h5mu 0.12.4" echo "" echo "Create a subset of a mudata file by selecting the first number of observations" echo "" @@ -418,10 +418,10 @@ RUN pip install --upgrade pip && \ LABEL org.opencontainers.image.authors="Dries Schaumont" LABEL org.opencontainers.image.description="Companion container for running component filter subset_h5mu" -LABEL org.opencontainers.image.created="2024-01-25T10:13:57Z" +LABEL org.opencontainers.image.created="2024-01-31T09:08:36Z" LABEL org.opencontainers.image.source="https://github.com/openpipelines-bio/openpipeline" -LABEL org.opencontainers.image.revision="827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" -LABEL org.opencontainers.image.version="0.12.3" +LABEL org.opencontainers.image.revision="a075b9f384e200b357c4c85801062a980ddb3383" +LABEL org.opencontainers.image.version="0.12.4" VIASHDOCKER } @@ -572,7 +572,7 @@ while [[ $# -gt 0 ]]; do shift 1 ;; --version) - echo "subset_h5mu 0.12.3" + echo "subset_h5mu 0.12.4" exit ;; --input) diff --git a/target/docker/integrate/harmonypy/.config.vsh.yaml b/target/docker/integrate/harmonypy/.config.vsh.yaml index ccec2aa8068..c697eda8b02 100644 --- a/target/docker/integrate/harmonypy/.config.vsh.yaml +++ b/target/docker/integrate/harmonypy/.config.vsh.yaml @@ -1,7 +1,7 @@ functionality: name: "harmonypy" namespace: "integrate" - version: "0.12.3" + version: "0.12.4" authors: - name: "Dries Schaumont" roles: @@ -235,6 +235,6 @@ info: output: "/home/runner/work/openpipeline/openpipeline/target/docker/integrate/harmonypy" executable: "/home/runner/work/openpipeline/openpipeline/target/docker/integrate/harmonypy/harmonypy" viash_version: "0.7.5" - git_commit: "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" + git_commit: "a075b9f384e200b357c4c85801062a980ddb3383" git_remote: "https://github.com/openpipelines-bio/openpipeline" - git_tag: "0.12.2-3-g827d483cf7" + git_tag: "0.12.3-3-ga075b9f384" diff --git a/target/docker/integrate/harmonypy/harmonypy b/target/docker/integrate/harmonypy/harmonypy index f5f252f0a9e..04916d61871 100755 --- a/target/docker/integrate/harmonypy/harmonypy +++ b/target/docker/integrate/harmonypy/harmonypy @@ -1,6 +1,6 @@ #!/usr/bin/env bash -# harmonypy 0.12.3 +# harmonypy 0.12.4 # # This wrapper script is auto-generated by viash 0.7.5 and is thus a derivative # work thereof. This software comes with ABSOLUTELY NO WARRANTY from Data @@ -159,7 +159,7 @@ VIASH_META_TEMP_DIR="$VIASH_TEMP" # ViashHelp: Display helpful explanation about this executable function ViashHelp { - echo "harmonypy 0.12.3" + echo "harmonypy 0.12.4" echo "" echo "Performs Harmony integration based as described in" echo "https://github.com/immunogenomics/harmony. 
Based on an implementation in python" @@ -436,10 +436,10 @@ RUN pip install --upgrade pip && \ LABEL org.opencontainers.image.authors="Dries Schaumont, Robrecht Cannoodt" LABEL org.opencontainers.image.description="Companion container for running component integrate harmonypy" -LABEL org.opencontainers.image.created="2024-01-25T10:13:59Z" +LABEL org.opencontainers.image.created="2024-01-31T09:08:33Z" LABEL org.opencontainers.image.source="https://github.com/openpipelines-bio/openpipeline" -LABEL org.opencontainers.image.revision="827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" -LABEL org.opencontainers.image.version="0.12.3" +LABEL org.opencontainers.image.revision="a075b9f384e200b357c4c85801062a980ddb3383" +LABEL org.opencontainers.image.version="0.12.4" VIASHDOCKER } @@ -590,7 +590,7 @@ while [[ $# -gt 0 ]]; do shift 1 ;; --version) - echo "harmonypy 0.12.3" + echo "harmonypy 0.12.4" exit ;; --input) diff --git a/target/docker/integrate/scanorama/.config.vsh.yaml b/target/docker/integrate/scanorama/.config.vsh.yaml index 76c83623e16..580c9fb886b 100644 --- a/target/docker/integrate/scanorama/.config.vsh.yaml +++ b/target/docker/integrate/scanorama/.config.vsh.yaml @@ -1,7 +1,7 @@ functionality: name: "scanorama" namespace: "integrate" - version: "0.12.3" + version: "0.12.4" authors: - name: "Dries De Maeyer" roles: @@ -278,6 +278,6 @@ info: output: "/home/runner/work/openpipeline/openpipeline/target/docker/integrate/scanorama" executable: "/home/runner/work/openpipeline/openpipeline/target/docker/integrate/scanorama/scanorama" viash_version: "0.7.5" - git_commit: "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" + git_commit: "a075b9f384e200b357c4c85801062a980ddb3383" git_remote: "https://github.com/openpipelines-bio/openpipeline" - git_tag: "0.12.2-3-g827d483cf7" + git_tag: "0.12.3-3-ga075b9f384" diff --git a/target/docker/integrate/scanorama/scanorama b/target/docker/integrate/scanorama/scanorama index 1d9ebd61fad..6d35e1ce725 100755 --- a/target/docker/integrate/scanorama/scanorama +++ b/target/docker/integrate/scanorama/scanorama @@ -1,6 +1,6 @@ #!/usr/bin/env bash -# scanorama 0.12.3 +# scanorama 0.12.4 # # This wrapper script is auto-generated by viash 0.7.5 and is thus a derivative # work thereof. This software comes with ABSOLUTELY NO WARRANTY from Data @@ -159,7 +159,7 @@ VIASH_META_TEMP_DIR="$VIASH_TEMP" # ViashHelp: Display helpful explanation about this executable function ViashHelp { - echo "scanorama 0.12.3" + echo "scanorama 0.12.4" echo "" echo "Use Scanorama to integrate different experiments." 
echo "" @@ -457,10 +457,10 @@ RUN pip install --upgrade pip && \ LABEL org.opencontainers.image.authors="Dries De Maeyer, Dries Schaumont" LABEL org.opencontainers.image.description="Companion container for running component integrate scanorama" -LABEL org.opencontainers.image.created="2024-01-25T10:13:58Z" +LABEL org.opencontainers.image.created="2024-01-31T09:08:33Z" LABEL org.opencontainers.image.source="https://github.com/openpipelines-bio/openpipeline" -LABEL org.opencontainers.image.revision="827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" -LABEL org.opencontainers.image.version="0.12.3" +LABEL org.opencontainers.image.revision="a075b9f384e200b357c4c85801062a980ddb3383" +LABEL org.opencontainers.image.version="0.12.4" VIASHDOCKER } @@ -611,7 +611,7 @@ while [[ $# -gt 0 ]]; do shift 1 ;; --version) - echo "scanorama 0.12.3" + echo "scanorama 0.12.4" exit ;; --input) diff --git a/target/docker/integrate/scarches/.config.vsh.yaml b/target/docker/integrate/scarches/.config.vsh.yaml index e1b64d597ce..5db3a55381d 100644 --- a/target/docker/integrate/scarches/.config.vsh.yaml +++ b/target/docker/integrate/scarches/.config.vsh.yaml @@ -1,7 +1,7 @@ functionality: name: "scarches" namespace: "integrate" - version: "0.12.3" + version: "0.12.4" authors: - name: "Vladimir Shitov" info: @@ -326,6 +326,6 @@ info: output: "/home/runner/work/openpipeline/openpipeline/target/docker/integrate/scarches" executable: "/home/runner/work/openpipeline/openpipeline/target/docker/integrate/scarches/scarches" viash_version: "0.7.5" - git_commit: "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" + git_commit: "a075b9f384e200b357c4c85801062a980ddb3383" git_remote: "https://github.com/openpipelines-bio/openpipeline" - git_tag: "0.12.2-3-g827d483cf7" + git_tag: "0.12.3-3-ga075b9f384" diff --git a/target/docker/integrate/scarches/scarches b/target/docker/integrate/scarches/scarches index 0dfe18f77c7..bd92aa388ac 100755 --- a/target/docker/integrate/scarches/scarches +++ b/target/docker/integrate/scarches/scarches @@ -1,6 +1,6 @@ #!/usr/bin/env bash -# scarches 0.12.3 +# scarches 0.12.4 # # This wrapper script is auto-generated by viash 0.7.5 and is thus a derivative # work thereof. 
This software comes with ABSOLUTELY NO WARRANTY from Data @@ -158,7 +158,7 @@ VIASH_META_TEMP_DIR="$VIASH_TEMP" # ViashHelp: Display helpful explanation about this executable function ViashHelp { - echo "scarches 0.12.3" + echo "scarches 0.12.4" echo "" echo "Performs reference mapping with scArches" echo "" @@ -481,10 +481,10 @@ RUN pip install --upgrade pip && \ LABEL org.opencontainers.image.authors="Vladimir Shitov" LABEL org.opencontainers.image.description="Companion container for running component integrate scarches" -LABEL org.opencontainers.image.created="2024-01-25T10:13:57Z" +LABEL org.opencontainers.image.created="2024-01-31T09:08:31Z" LABEL org.opencontainers.image.source="https://github.com/openpipelines-bio/openpipeline" -LABEL org.opencontainers.image.revision="827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" -LABEL org.opencontainers.image.version="0.12.3" +LABEL org.opencontainers.image.revision="a075b9f384e200b357c4c85801062a980ddb3383" +LABEL org.opencontainers.image.version="0.12.4" VIASHDOCKER } @@ -635,7 +635,7 @@ while [[ $# -gt 0 ]]; do shift 1 ;; --version) - echo "scarches 0.12.3" + echo "scarches 0.12.4" exit ;; --input) diff --git a/target/docker/integrate/scvi/.config.vsh.yaml b/target/docker/integrate/scvi/.config.vsh.yaml index d52c24f5d8b..511d72e19bd 100644 --- a/target/docker/integrate/scvi/.config.vsh.yaml +++ b/target/docker/integrate/scvi/.config.vsh.yaml @@ -1,7 +1,7 @@ functionality: name: "scvi" namespace: "integrate" - version: "0.12.3" + version: "0.12.4" authors: - name: "Malte D. Luecken" roles: @@ -586,6 +586,6 @@ info: output: "/home/runner/work/openpipeline/openpipeline/target/docker/integrate/scvi" executable: "/home/runner/work/openpipeline/openpipeline/target/docker/integrate/scvi/scvi" viash_version: "0.7.5" - git_commit: "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" + git_commit: "a075b9f384e200b357c4c85801062a980ddb3383" git_remote: "https://github.com/openpipelines-bio/openpipeline" - git_tag: "0.12.2-3-g827d483cf7" + git_tag: "0.12.3-3-ga075b9f384" diff --git a/target/docker/integrate/scvi/scvi b/target/docker/integrate/scvi/scvi index eda834a8674..2d1089faf51 100755 --- a/target/docker/integrate/scvi/scvi +++ b/target/docker/integrate/scvi/scvi @@ -1,6 +1,6 @@ #!/usr/bin/env bash -# scvi 0.12.3 +# scvi 0.12.4 # # This wrapper script is auto-generated by viash 0.7.5 and is thus a derivative # work thereof. This software comes with ABSOLUTELY NO WARRANTY from Data @@ -160,7 +160,7 @@ VIASH_META_TEMP_DIR="$VIASH_TEMP" # ViashHelp: Display helpful explanation about this executable function ViashHelp { - echo "scvi 0.12.3" + echo "scvi 0.12.4" echo "" echo "Performs scvi integration as done in the human lung cell atlas" echo "https://github.com/LungCellAtlas/HLCA" @@ -614,10 +614,10 @@ RUN pip install --upgrade pip && \ LABEL org.opencontainers.image.authors="Malte D. 
Luecken, Dries Schaumont, Matthias Beyens" LABEL org.opencontainers.image.description="Companion container for running component integrate scvi" -LABEL org.opencontainers.image.created="2024-01-25T10:13:58Z" +LABEL org.opencontainers.image.created="2024-01-31T09:08:33Z" LABEL org.opencontainers.image.source="https://github.com/openpipelines-bio/openpipeline" -LABEL org.opencontainers.image.revision="827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" -LABEL org.opencontainers.image.version="0.12.3" +LABEL org.opencontainers.image.revision="a075b9f384e200b357c4c85801062a980ddb3383" +LABEL org.opencontainers.image.version="0.12.4" VIASHDOCKER } @@ -768,7 +768,7 @@ while [[ $# -gt 0 ]]; do shift 1 ;; --version) - echo "scvi 0.12.3" + echo "scvi 0.12.4" exit ;; --input) diff --git a/target/docker/integrate/totalvi/.config.vsh.yaml b/target/docker/integrate/totalvi/.config.vsh.yaml index deebb486875..40477f1eb6f 100644 --- a/target/docker/integrate/totalvi/.config.vsh.yaml +++ b/target/docker/integrate/totalvi/.config.vsh.yaml @@ -1,7 +1,7 @@ functionality: name: "totalvi" namespace: "integrate" - version: "0.12.3" + version: "0.12.4" authors: - name: "Vladimir Shitov" info: @@ -343,6 +343,6 @@ info: output: "/home/runner/work/openpipeline/openpipeline/target/docker/integrate/totalvi" executable: "/home/runner/work/openpipeline/openpipeline/target/docker/integrate/totalvi/totalvi" viash_version: "0.7.5" - git_commit: "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" + git_commit: "a075b9f384e200b357c4c85801062a980ddb3383" git_remote: "https://github.com/openpipelines-bio/openpipeline" - git_tag: "0.12.2-3-g827d483cf7" + git_tag: "0.12.3-3-ga075b9f384" diff --git a/target/docker/integrate/totalvi/totalvi b/target/docker/integrate/totalvi/totalvi index 1fe55f29974..09760596b9a 100755 --- a/target/docker/integrate/totalvi/totalvi +++ b/target/docker/integrate/totalvi/totalvi @@ -1,6 +1,6 @@ #!/usr/bin/env bash -# totalvi 0.12.3 +# totalvi 0.12.4 # # This wrapper script is auto-generated by viash 0.7.5 and is thus a derivative # work thereof. 
This software comes with ABSOLUTELY NO WARRANTY from Data @@ -158,7 +158,7 @@ VIASH_META_TEMP_DIR="$VIASH_TEMP" # ViashHelp: Display helpful explanation about this executable function ViashHelp { - echo "totalvi 0.12.3" + echo "totalvi 0.12.4" echo "" echo "Performs mapping to the reference by totalvi model:" echo "https://docs.scvi-tools.org/en/stable/tutorials/notebooks/scarches_scvi_tools.html#Reference-mapping-with-TOTALVI" @@ -485,10 +485,10 @@ RUN pip install --upgrade pip && \ LABEL org.opencontainers.image.authors="Vladimir Shitov" LABEL org.opencontainers.image.description="Companion container for running component integrate totalvi" -LABEL org.opencontainers.image.created="2024-01-25T10:13:58Z" +LABEL org.opencontainers.image.created="2024-01-31T09:08:34Z" LABEL org.opencontainers.image.source="https://github.com/openpipelines-bio/openpipeline" -LABEL org.opencontainers.image.revision="827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" -LABEL org.opencontainers.image.version="0.12.3" +LABEL org.opencontainers.image.revision="a075b9f384e200b357c4c85801062a980ddb3383" +LABEL org.opencontainers.image.version="0.12.4" VIASHDOCKER } @@ -639,7 +639,7 @@ while [[ $# -gt 0 ]]; do shift 1 ;; --version) - echo "totalvi 0.12.3" + echo "totalvi 0.12.4" exit ;; --input) diff --git a/target/docker/interactive/run_cellxgene/.config.vsh.yaml b/target/docker/interactive/run_cellxgene/.config.vsh.yaml index c0c1dca36fd..60c08874498 100644 --- a/target/docker/interactive/run_cellxgene/.config.vsh.yaml +++ b/target/docker/interactive/run_cellxgene/.config.vsh.yaml @@ -1,7 +1,7 @@ functionality: name: "run_cellxgene" namespace: "interactive" - version: "0.12.3" + version: "0.12.4" arguments: - type: "file" name: "--input" @@ -78,6 +78,6 @@ info: output: "/home/runner/work/openpipeline/openpipeline/target/docker/interactive/run_cellxgene" executable: "/home/runner/work/openpipeline/openpipeline/target/docker/interactive/run_cellxgene/run_cellxgene" viash_version: "0.7.5" - git_commit: "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" + git_commit: "a075b9f384e200b357c4c85801062a980ddb3383" git_remote: "https://github.com/openpipelines-bio/openpipeline" - git_tag: "0.12.2-3-g827d483cf7" + git_tag: "0.12.3-3-ga075b9f384" diff --git a/target/docker/interactive/run_cellxgene/run_cellxgene b/target/docker/interactive/run_cellxgene/run_cellxgene index 47e66cbc109..85f9bbd4977 100755 --- a/target/docker/interactive/run_cellxgene/run_cellxgene +++ b/target/docker/interactive/run_cellxgene/run_cellxgene @@ -1,6 +1,6 @@ #!/usr/bin/env bash -# run_cellxgene 0.12.3 +# run_cellxgene 0.12.4 # # This wrapper script is auto-generated by viash 0.7.5 and is thus a derivative # work thereof. 
This software comes with ABSOLUTELY NO WARRANTY from Data @@ -155,7 +155,7 @@ VIASH_META_TEMP_DIR="$VIASH_TEMP" # ViashHelp: Display helpful explanation about this executable function ViashHelp { - echo "run_cellxgene 0.12.3" + echo "run_cellxgene 0.12.4" echo "" echo "" echo "" @@ -400,10 +400,10 @@ RUN pip install --upgrade pip && \ pip install --upgrade --no-cache-dir "cellxgene" LABEL org.opencontainers.image.description="Companion container for running component interactive run_cellxgene" -LABEL org.opencontainers.image.created="2024-01-25T10:13:56Z" +LABEL org.opencontainers.image.created="2024-01-31T09:08:32Z" LABEL org.opencontainers.image.source="https://github.com/openpipelines-bio/openpipeline" -LABEL org.opencontainers.image.revision="827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" -LABEL org.opencontainers.image.version="0.12.3" +LABEL org.opencontainers.image.revision="a075b9f384e200b357c4c85801062a980ddb3383" +LABEL org.opencontainers.image.version="0.12.4" VIASHDOCKER } @@ -554,7 +554,7 @@ while [[ $# -gt 0 ]]; do shift 1 ;; --version) - echo "run_cellxgene 0.12.3" + echo "run_cellxgene 0.12.4" exit ;; --input) diff --git a/target/docker/interactive/run_cirrocumulus/.config.vsh.yaml b/target/docker/interactive/run_cirrocumulus/.config.vsh.yaml index 3e76e98ee2c..f36ed6d29c3 100644 --- a/target/docker/interactive/run_cirrocumulus/.config.vsh.yaml +++ b/target/docker/interactive/run_cirrocumulus/.config.vsh.yaml @@ -1,7 +1,7 @@ functionality: name: "run_cirrocumulus" namespace: "interactive" - version: "0.12.3" + version: "0.12.4" arguments: - type: "file" name: "--input" @@ -80,6 +80,6 @@ info: output: "/home/runner/work/openpipeline/openpipeline/target/docker/interactive/run_cirrocumulus" executable: "/home/runner/work/openpipeline/openpipeline/target/docker/interactive/run_cirrocumulus/run_cirrocumulus" viash_version: "0.7.5" - git_commit: "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" + git_commit: "a075b9f384e200b357c4c85801062a980ddb3383" git_remote: "https://github.com/openpipelines-bio/openpipeline" - git_tag: "0.12.2-3-g827d483cf7" + git_tag: "0.12.3-3-ga075b9f384" diff --git a/target/docker/interactive/run_cirrocumulus/run_cirrocumulus b/target/docker/interactive/run_cirrocumulus/run_cirrocumulus index 65a3c5ad76d..04ed627d274 100755 --- a/target/docker/interactive/run_cirrocumulus/run_cirrocumulus +++ b/target/docker/interactive/run_cirrocumulus/run_cirrocumulus @@ -1,6 +1,6 @@ #!/usr/bin/env bash -# run_cirrocumulus 0.12.3 +# run_cirrocumulus 0.12.4 # # This wrapper script is auto-generated by viash 0.7.5 and is thus a derivative # work thereof. 
This software comes with ABSOLUTELY NO WARRANTY from Data @@ -155,7 +155,7 @@ VIASH_META_TEMP_DIR="$VIASH_TEMP" # ViashHelp: Display helpful explanation about this executable function ViashHelp { - echo "run_cirrocumulus 0.12.3" + echo "run_cirrocumulus 0.12.4" echo "" echo "" echo "" @@ -400,10 +400,10 @@ RUN pip install --upgrade pip && \ pip install --upgrade --no-cache-dir "requests" "aiohttp" "cirrocumulus" LABEL org.opencontainers.image.description="Companion container for running component interactive run_cirrocumulus" -LABEL org.opencontainers.image.created="2024-01-25T10:13:56Z" +LABEL org.opencontainers.image.created="2024-01-31T09:08:32Z" LABEL org.opencontainers.image.source="https://github.com/openpipelines-bio/openpipeline" -LABEL org.opencontainers.image.revision="827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" -LABEL org.opencontainers.image.version="0.12.3" +LABEL org.opencontainers.image.revision="a075b9f384e200b357c4c85801062a980ddb3383" +LABEL org.opencontainers.image.version="0.12.4" VIASHDOCKER } @@ -554,7 +554,7 @@ while [[ $# -gt 0 ]]; do shift 1 ;; --version) - echo "run_cirrocumulus 0.12.3" + echo "run_cirrocumulus 0.12.4" exit ;; --input) diff --git a/target/docker/interpret/lianapy/.config.vsh.yaml b/target/docker/interpret/lianapy/.config.vsh.yaml index 1d24676cd7a..56962ccb2fc 100644 --- a/target/docker/interpret/lianapy/.config.vsh.yaml +++ b/target/docker/interpret/lianapy/.config.vsh.yaml @@ -1,7 +1,7 @@ functionality: name: "lianapy" namespace: "interpret" - version: "0.12.3" + version: "0.12.4" authors: - name: "Mauro Saporita" roles: @@ -308,6 +308,6 @@ info: output: "/home/runner/work/openpipeline/openpipeline/target/docker/interpret/lianapy" executable: "/home/runner/work/openpipeline/openpipeline/target/docker/interpret/lianapy/lianapy" viash_version: "0.7.5" - git_commit: "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" + git_commit: "a075b9f384e200b357c4c85801062a980ddb3383" git_remote: "https://github.com/openpipelines-bio/openpipeline" - git_tag: "0.12.2-3-g827d483cf7" + git_tag: "0.12.3-3-ga075b9f384" diff --git a/target/docker/interpret/lianapy/lianapy b/target/docker/interpret/lianapy/lianapy index 0cd6aaf5700..aa23191428d 100755 --- a/target/docker/interpret/lianapy/lianapy +++ b/target/docker/interpret/lianapy/lianapy @@ -1,6 +1,6 @@ #!/usr/bin/env bash -# lianapy 0.12.3 +# lianapy 0.12.4 # # This wrapper script is auto-generated by viash 0.7.5 and is thus a derivative # work thereof. 
This software comes with ABSOLUTELY NO WARRANTY from Data @@ -159,7 +159,7 @@ VIASH_META_TEMP_DIR="$VIASH_TEMP" # ViashHelp: Display helpful explanation about this executable function ViashHelp { - echo "lianapy 0.12.3" + echo "lianapy 0.12.4" echo "" echo "Performs LIANA integration based as described in" echo "https://github.com/saezlab/liana-py" @@ -470,10 +470,10 @@ RUN pip install --upgrade pip && \ LABEL org.opencontainers.image.authors="Mauro Saporita, Povilas Gibas" LABEL org.opencontainers.image.description="Companion container for running component interpret lianapy" -LABEL org.opencontainers.image.created="2024-01-25T10:13:55Z" +LABEL org.opencontainers.image.created="2024-01-31T09:08:33Z" LABEL org.opencontainers.image.source="https://github.com/openpipelines-bio/openpipeline" -LABEL org.opencontainers.image.revision="827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" -LABEL org.opencontainers.image.version="0.12.3" +LABEL org.opencontainers.image.revision="a075b9f384e200b357c4c85801062a980ddb3383" +LABEL org.opencontainers.image.version="0.12.4" VIASHDOCKER } @@ -624,7 +624,7 @@ while [[ $# -gt 0 ]]; do shift 1 ;; --version) - echo "lianapy 0.12.3" + echo "lianapy 0.12.4" exit ;; --input) diff --git a/target/docker/labels_transfer/knn/.config.vsh.yaml b/target/docker/labels_transfer/knn/.config.vsh.yaml index b45adbd8b4e..2e9921112e2 100644 --- a/target/docker/labels_transfer/knn/.config.vsh.yaml +++ b/target/docker/labels_transfer/knn/.config.vsh.yaml @@ -1,7 +1,7 @@ functionality: name: "knn" namespace: "labels_transfer" - version: "0.12.3" + version: "0.12.4" authors: - name: "Vladimir Shitov" roles: @@ -374,6 +374,6 @@ info: output: "/home/runner/work/openpipeline/openpipeline/target/docker/labels_transfer/knn" executable: "/home/runner/work/openpipeline/openpipeline/target/docker/labels_transfer/knn/knn" viash_version: "0.7.5" - git_commit: "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" + git_commit: "a075b9f384e200b357c4c85801062a980ddb3383" git_remote: "https://github.com/openpipelines-bio/openpipeline" - git_tag: "0.12.2-3-g827d483cf7" + git_tag: "0.12.3-3-ga075b9f384" diff --git a/target/docker/labels_transfer/knn/knn b/target/docker/labels_transfer/knn/knn index 32eb89ffe31..46373358882 100755 --- a/target/docker/labels_transfer/knn/knn +++ b/target/docker/labels_transfer/knn/knn @@ -1,6 +1,6 @@ #!/usr/bin/env bash -# knn 0.12.3 +# knn 0.12.4 # # This wrapper script is auto-generated by viash 0.7.5 and is thus a derivative # work thereof. 
This software comes with ABSOLUTELY NO WARRANTY from Data @@ -158,7 +158,7 @@ VIASH_META_TEMP_DIR="$VIASH_TEMP" # ViashHelp: Display helpful explanation about this executable function ViashHelp { - echo "knn 0.12.3" + echo "knn 0.12.4" echo "" echo "Performs label transfer from reference to query using KNN classifier" echo "" @@ -468,10 +468,10 @@ RUN pip install --upgrade pip && \ LABEL org.opencontainers.image.authors="Vladimir Shitov" LABEL org.opencontainers.image.description="Companion container for running component labels_transfer knn" -LABEL org.opencontainers.image.created="2024-01-25T10:13:56Z" +LABEL org.opencontainers.image.created="2024-01-31T09:08:35Z" LABEL org.opencontainers.image.source="https://github.com/openpipelines-bio/openpipeline" -LABEL org.opencontainers.image.revision="827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" -LABEL org.opencontainers.image.version="0.12.3" +LABEL org.opencontainers.image.revision="a075b9f384e200b357c4c85801062a980ddb3383" +LABEL org.opencontainers.image.version="0.12.4" VIASHDOCKER } @@ -622,7 +622,7 @@ while [[ $# -gt 0 ]]; do shift 1 ;; --version) - echo "knn 0.12.3" + echo "knn 0.12.4" exit ;; --input) diff --git a/target/docker/labels_transfer/xgboost/.config.vsh.yaml b/target/docker/labels_transfer/xgboost/.config.vsh.yaml index 1ff97a896f4..aa05bdf00d4 100644 --- a/target/docker/labels_transfer/xgboost/.config.vsh.yaml +++ b/target/docker/labels_transfer/xgboost/.config.vsh.yaml @@ -1,7 +1,7 @@ functionality: name: "xgboost" namespace: "labels_transfer" - version: "0.12.3" + version: "0.12.4" authors: - name: "Vladimir Shitov" roles: @@ -589,6 +589,6 @@ info: output: "/home/runner/work/openpipeline/openpipeline/target/docker/labels_transfer/xgboost" executable: "/home/runner/work/openpipeline/openpipeline/target/docker/labels_transfer/xgboost/xgboost" viash_version: "0.7.5" - git_commit: "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" + git_commit: "a075b9f384e200b357c4c85801062a980ddb3383" git_remote: "https://github.com/openpipelines-bio/openpipeline" - git_tag: "0.12.2-3-g827d483cf7" + git_tag: "0.12.3-3-ga075b9f384" diff --git a/target/docker/labels_transfer/xgboost/xgboost b/target/docker/labels_transfer/xgboost/xgboost index acbe5e5cfc8..cf2ad39a785 100755 --- a/target/docker/labels_transfer/xgboost/xgboost +++ b/target/docker/labels_transfer/xgboost/xgboost @@ -1,6 +1,6 @@ #!/usr/bin/env bash -# xgboost 0.12.3 +# xgboost 0.12.4 # # This wrapper script is auto-generated by viash 0.7.5 and is thus a derivative # work thereof. 
This software comes with ABSOLUTELY NO WARRANTY from Data @@ -158,7 +158,7 @@ VIASH_META_TEMP_DIR="$VIASH_TEMP" # ViashHelp: Display helpful explanation about this executable function ViashHelp { - echo "xgboost 0.12.3" + echo "xgboost 0.12.4" echo "" echo "Performs label transfer from reference to query using XGBoost classifier" echo "" @@ -595,10 +595,10 @@ RUN pip install --upgrade pip && \ LABEL org.opencontainers.image.authors="Vladimir Shitov" LABEL org.opencontainers.image.description="Companion container for running component labels_transfer xgboost" -LABEL org.opencontainers.image.created="2024-01-25T10:13:56Z" +LABEL org.opencontainers.image.created="2024-01-31T09:08:34Z" LABEL org.opencontainers.image.source="https://github.com/openpipelines-bio/openpipeline" -LABEL org.opencontainers.image.revision="827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" -LABEL org.opencontainers.image.version="0.12.3" +LABEL org.opencontainers.image.revision="a075b9f384e200b357c4c85801062a980ddb3383" +LABEL org.opencontainers.image.version="0.12.4" VIASHDOCKER } @@ -749,7 +749,7 @@ while [[ $# -gt 0 ]]; do shift 1 ;; --version) - echo "xgboost 0.12.3" + echo "xgboost 0.12.4" exit ;; --input) diff --git a/target/docker/mapping/bd_rhapsody/.config.vsh.yaml b/target/docker/mapping/bd_rhapsody/.config.vsh.yaml index 84f54badcb1..6ad28624ee3 100644 --- a/target/docker/mapping/bd_rhapsody/.config.vsh.yaml +++ b/target/docker/mapping/bd_rhapsody/.config.vsh.yaml @@ -1,7 +1,7 @@ functionality: name: "bd_rhapsody" namespace: "mapping" - version: "0.12.3" + version: "0.12.4" authors: - name: "Robrecht Cannoodt" roles: @@ -412,6 +412,6 @@ info: output: "/home/runner/work/openpipeline/openpipeline/target/docker/mapping/bd_rhapsody" executable: "/home/runner/work/openpipeline/openpipeline/target/docker/mapping/bd_rhapsody/bd_rhapsody" viash_version: "0.7.5" - git_commit: "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" + git_commit: "a075b9f384e200b357c4c85801062a980ddb3383" git_remote: "https://github.com/openpipelines-bio/openpipeline" - git_tag: "0.12.2-3-g827d483cf7" + git_tag: "0.12.3-3-ga075b9f384" diff --git a/target/docker/mapping/bd_rhapsody/bd_rhapsody b/target/docker/mapping/bd_rhapsody/bd_rhapsody index 6bb7983fb3e..60def456329 100755 --- a/target/docker/mapping/bd_rhapsody/bd_rhapsody +++ b/target/docker/mapping/bd_rhapsody/bd_rhapsody @@ -1,6 +1,6 @@ #!/usr/bin/env bash -# bd_rhapsody 0.12.3 +# bd_rhapsody 0.12.4 # # This wrapper script is auto-generated by viash 0.7.5 and is thus a derivative # work thereof. This software comes with ABSOLUTELY NO WARRANTY from Data @@ -158,7 +158,7 @@ VIASH_META_TEMP_DIR="$VIASH_TEMP" # ViashHelp: Display helpful explanation about this executable function ViashHelp { - echo "bd_rhapsody 0.12.3" + echo "bd_rhapsody 0.12.4" echo "" echo "A wrapper for the BD Rhapsody Analysis CWL v1.10.1 pipeline." 
echo "" @@ -532,10 +532,10 @@ RUN pip install --upgrade pip && \ LABEL org.opencontainers.image.authors="Robrecht Cannoodt" LABEL org.opencontainers.image.description="Companion container for running component mapping bd_rhapsody" -LABEL org.opencontainers.image.created="2024-01-25T10:13:57Z" +LABEL org.opencontainers.image.created="2024-01-31T09:08:33Z" LABEL org.opencontainers.image.source="https://github.com/openpipelines-bio/openpipeline" -LABEL org.opencontainers.image.revision="827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" -LABEL org.opencontainers.image.version="0.12.3" +LABEL org.opencontainers.image.revision="a075b9f384e200b357c4c85801062a980ddb3383" +LABEL org.opencontainers.image.version="0.12.4" VIASHDOCKER } @@ -686,7 +686,7 @@ while [[ $# -gt 0 ]]; do shift 1 ;; --version) - echo "bd_rhapsody 0.12.3" + echo "bd_rhapsody 0.12.4" exit ;; --mode) diff --git a/target/docker/mapping/cellranger_count/.config.vsh.yaml b/target/docker/mapping/cellranger_count/.config.vsh.yaml index ade2b61c89d..389d033b2a2 100644 --- a/target/docker/mapping/cellranger_count/.config.vsh.yaml +++ b/target/docker/mapping/cellranger_count/.config.vsh.yaml @@ -1,7 +1,7 @@ functionality: name: "cellranger_count" namespace: "mapping" - version: "0.12.3" + version: "0.12.4" authors: - name: "Angela Oliveira Pisco" roles: @@ -261,6 +261,6 @@ info: output: "/home/runner/work/openpipeline/openpipeline/target/docker/mapping/cellranger_count" executable: "/home/runner/work/openpipeline/openpipeline/target/docker/mapping/cellranger_count/cellranger_count" viash_version: "0.7.5" - git_commit: "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" + git_commit: "a075b9f384e200b357c4c85801062a980ddb3383" git_remote: "https://github.com/openpipelines-bio/openpipeline" - git_tag: "0.12.2-3-g827d483cf7" + git_tag: "0.12.3-3-ga075b9f384" diff --git a/target/docker/mapping/cellranger_count/cellranger_count b/target/docker/mapping/cellranger_count/cellranger_count index 45bd87d3a16..8aa688967d3 100755 --- a/target/docker/mapping/cellranger_count/cellranger_count +++ b/target/docker/mapping/cellranger_count/cellranger_count @@ -1,6 +1,6 @@ #!/usr/bin/env bash -# cellranger_count 0.12.3 +# cellranger_count 0.12.4 # # This wrapper script is auto-generated by viash 0.7.5 and is thus a derivative # work thereof. This software comes with ABSOLUTELY NO WARRANTY from Data @@ -160,7 +160,7 @@ VIASH_META_TEMP_DIR="$VIASH_TEMP" # ViashHelp: Display helpful explanation about this executable function ViashHelp { - echo "cellranger_count 0.12.3" + echo "cellranger_count 0.12.4" echo "" echo "Align fastq files using Cell Ranger count." 
echo "" @@ -451,10 +451,10 @@ ENTRYPOINT [] RUN apt update && apt upgrade -y LABEL org.opencontainers.image.authors="Angela Oliveira Pisco, Samuel D'Souza, Robrecht Cannoodt" LABEL org.opencontainers.image.description="Companion container for running component mapping cellranger_count" -LABEL org.opencontainers.image.created="2024-01-25T10:13:57Z" +LABEL org.opencontainers.image.created="2024-01-31T09:08:32Z" LABEL org.opencontainers.image.source="https://github.com/openpipelines-bio/openpipeline" -LABEL org.opencontainers.image.revision="827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" -LABEL org.opencontainers.image.version="0.12.3" +LABEL org.opencontainers.image.revision="a075b9f384e200b357c4c85801062a980ddb3383" +LABEL org.opencontainers.image.version="0.12.4" VIASHDOCKER } @@ -605,7 +605,7 @@ while [[ $# -gt 0 ]]; do shift 1 ;; --version) - echo "cellranger_count 0.12.3" + echo "cellranger_count 0.12.4" exit ;; --input) diff --git a/target/docker/mapping/cellranger_count_split/.config.vsh.yaml b/target/docker/mapping/cellranger_count_split/.config.vsh.yaml index 18b8445f5cf..2ad5c0f0b1d 100644 --- a/target/docker/mapping/cellranger_count_split/.config.vsh.yaml +++ b/target/docker/mapping/cellranger_count_split/.config.vsh.yaml @@ -1,7 +1,7 @@ functionality: name: "cellranger_count_split" namespace: "mapping" - version: "0.12.3" + version: "0.12.4" authors: - name: "Angela Oliveira Pisco" roles: @@ -213,6 +213,6 @@ info: output: "/home/runner/work/openpipeline/openpipeline/target/docker/mapping/cellranger_count_split" executable: "/home/runner/work/openpipeline/openpipeline/target/docker/mapping/cellranger_count_split/cellranger_count_split" viash_version: "0.7.5" - git_commit: "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" + git_commit: "a075b9f384e200b357c4c85801062a980ddb3383" git_remote: "https://github.com/openpipelines-bio/openpipeline" - git_tag: "0.12.2-3-g827d483cf7" + git_tag: "0.12.3-3-ga075b9f384" diff --git a/target/docker/mapping/cellranger_count_split/cellranger_count_split b/target/docker/mapping/cellranger_count_split/cellranger_count_split index 2efd9a25590..413dbf8e9bc 100755 --- a/target/docker/mapping/cellranger_count_split/cellranger_count_split +++ b/target/docker/mapping/cellranger_count_split/cellranger_count_split @@ -1,6 +1,6 @@ #!/usr/bin/env bash -# cellranger_count_split 0.12.3 +# cellranger_count_split 0.12.4 # # This wrapper script is auto-generated by viash 0.7.5 and is thus a derivative # work thereof. This software comes with ABSOLUTELY NO WARRANTY from Data @@ -160,7 +160,7 @@ VIASH_META_TEMP_DIR="$VIASH_TEMP" # ViashHelp: Display helpful explanation about this executable function ViashHelp { - echo "cellranger_count_split 0.12.3" + echo "cellranger_count_split 0.12.4" echo "" echo "Split 10x Cell Ranger output directory into separate output fields." 
echo "" @@ -418,10 +418,10 @@ ENTRYPOINT [] RUN apt update && apt upgrade -y LABEL org.opencontainers.image.authors="Angela Oliveira Pisco, Samuel D'Souza, Robrecht Cannoodt" LABEL org.opencontainers.image.description="Companion container for running component mapping cellranger_count_split" -LABEL org.opencontainers.image.created="2024-01-25T10:13:55Z" +LABEL org.opencontainers.image.created="2024-01-31T09:08:32Z" LABEL org.opencontainers.image.source="https://github.com/openpipelines-bio/openpipeline" -LABEL org.opencontainers.image.revision="827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" -LABEL org.opencontainers.image.version="0.12.3" +LABEL org.opencontainers.image.revision="a075b9f384e200b357c4c85801062a980ddb3383" +LABEL org.opencontainers.image.version="0.12.4" VIASHDOCKER } @@ -572,7 +572,7 @@ while [[ $# -gt 0 ]]; do shift 1 ;; --version) - echo "cellranger_count_split 0.12.3" + echo "cellranger_count_split 0.12.4" exit ;; --input) diff --git a/target/docker/mapping/cellranger_multi/.config.vsh.yaml b/target/docker/mapping/cellranger_multi/.config.vsh.yaml index 3829e2bcd8d..f593c404969 100644 --- a/target/docker/mapping/cellranger_multi/.config.vsh.yaml +++ b/target/docker/mapping/cellranger_multi/.config.vsh.yaml @@ -1,7 +1,7 @@ functionality: name: "cellranger_multi" namespace: "mapping" - version: "0.12.3" + version: "0.12.4" authors: - name: "Angela Oliveira Pisco" roles: @@ -418,6 +418,6 @@ info: output: "/home/runner/work/openpipeline/openpipeline/target/docker/mapping/cellranger_multi" executable: "/home/runner/work/openpipeline/openpipeline/target/docker/mapping/cellranger_multi/cellranger_multi" viash_version: "0.7.5" - git_commit: "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" + git_commit: "a075b9f384e200b357c4c85801062a980ddb3383" git_remote: "https://github.com/openpipelines-bio/openpipeline" - git_tag: "0.12.2-3-g827d483cf7" + git_tag: "0.12.3-3-ga075b9f384" diff --git a/target/docker/mapping/cellranger_multi/cellranger_multi b/target/docker/mapping/cellranger_multi/cellranger_multi index 16128636037..9822c2b2112 100755 --- a/target/docker/mapping/cellranger_multi/cellranger_multi +++ b/target/docker/mapping/cellranger_multi/cellranger_multi @@ -1,6 +1,6 @@ #!/usr/bin/env bash -# cellranger_multi 0.12.3 +# cellranger_multi 0.12.4 # # This wrapper script is auto-generated by viash 0.7.5 and is thus a derivative # work thereof. This software comes with ABSOLUTELY NO WARRANTY from Data @@ -160,7 +160,7 @@ VIASH_META_TEMP_DIR="$VIASH_TEMP" # ViashHelp: Display helpful explanation about this executable function ViashHelp { - echo "cellranger_multi 0.12.3" + echo "cellranger_multi 0.12.4" echo "" echo "Align fastq files using Cell Ranger multi." 
echo "" @@ -532,10 +532,10 @@ RUN pip install --upgrade pip && \ LABEL org.opencontainers.image.authors="Angela Oliveira Pisco, Robrecht Cannoodt, Dries De Maeyer" LABEL org.opencontainers.image.description="Companion container for running component mapping cellranger_multi" -LABEL org.opencontainers.image.created="2024-01-25T10:13:55Z" +LABEL org.opencontainers.image.created="2024-01-31T09:08:33Z" LABEL org.opencontainers.image.source="https://github.com/openpipelines-bio/openpipeline" -LABEL org.opencontainers.image.revision="827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" -LABEL org.opencontainers.image.version="0.12.3" +LABEL org.opencontainers.image.revision="a075b9f384e200b357c4c85801062a980ddb3383" +LABEL org.opencontainers.image.version="0.12.4" VIASHDOCKER } @@ -686,7 +686,7 @@ while [[ $# -gt 0 ]]; do shift 1 ;; --version) - echo "cellranger_multi 0.12.3" + echo "cellranger_multi 0.12.4" exit ;; --input) diff --git a/target/docker/mapping/htseq_count/.config.vsh.yaml b/target/docker/mapping/htseq_count/.config.vsh.yaml index 1bd957c81c8..9a52757cf62 100644 --- a/target/docker/mapping/htseq_count/.config.vsh.yaml +++ b/target/docker/mapping/htseq_count/.config.vsh.yaml @@ -1,7 +1,7 @@ functionality: name: "htseq_count" namespace: "mapping" - version: "0.12.3" + version: "0.12.4" authors: - name: "Robrecht Cannoodt" roles: @@ -413,6 +413,6 @@ info: output: "/home/runner/work/openpipeline/openpipeline/target/docker/mapping/htseq_count" executable: "/home/runner/work/openpipeline/openpipeline/target/docker/mapping/htseq_count/htseq_count" viash_version: "0.7.5" - git_commit: "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" + git_commit: "a075b9f384e200b357c4c85801062a980ddb3383" git_remote: "https://github.com/openpipelines-bio/openpipeline" - git_tag: "0.12.2-3-g827d483cf7" + git_tag: "0.12.3-3-ga075b9f384" diff --git a/target/docker/mapping/htseq_count/htseq_count b/target/docker/mapping/htseq_count/htseq_count index 2399b9f2a37..32d499c22f2 100755 --- a/target/docker/mapping/htseq_count/htseq_count +++ b/target/docker/mapping/htseq_count/htseq_count @@ -1,6 +1,6 @@ #!/usr/bin/env bash -# htseq_count 0.12.3 +# htseq_count 0.12.4 # # This wrapper script is auto-generated by viash 0.7.5 and is thus a derivative # work thereof. This software comes with ABSOLUTELY NO WARRANTY from Data @@ -159,7 +159,7 @@ VIASH_META_TEMP_DIR="$VIASH_TEMP" # ViashHelp: Display helpful explanation about this executable function ViashHelp { - echo "htseq_count 0.12.3" + echo "htseq_count 0.12.4" echo "" echo "Quantify gene expression for subsequent testing for differential expression." 
echo "" @@ -526,10 +526,10 @@ RUN pip install --upgrade pip && \ LABEL org.opencontainers.image.authors="Robrecht Cannoodt, Angela Oliveira Pisco" LABEL org.opencontainers.image.description="Companion container for running component mapping htseq_count" -LABEL org.opencontainers.image.created="2024-01-25T10:14:00Z" +LABEL org.opencontainers.image.created="2024-01-31T09:08:36Z" LABEL org.opencontainers.image.source="https://github.com/openpipelines-bio/openpipeline" -LABEL org.opencontainers.image.revision="827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" -LABEL org.opencontainers.image.version="0.12.3" +LABEL org.opencontainers.image.revision="a075b9f384e200b357c4c85801062a980ddb3383" +LABEL org.opencontainers.image.version="0.12.4" VIASHDOCKER } @@ -680,7 +680,7 @@ while [[ $# -gt 0 ]]; do shift 1 ;; --version) - echo "htseq_count 0.12.3" + echo "htseq_count 0.12.4" exit ;; --input) diff --git a/target/docker/mapping/htseq_count_to_h5mu/.config.vsh.yaml b/target/docker/mapping/htseq_count_to_h5mu/.config.vsh.yaml index f08aa9ac37f..8589683cbae 100644 --- a/target/docker/mapping/htseq_count_to_h5mu/.config.vsh.yaml +++ b/target/docker/mapping/htseq_count_to_h5mu/.config.vsh.yaml @@ -1,7 +1,7 @@ functionality: name: "htseq_count_to_h5mu" namespace: "mapping" - version: "0.12.3" + version: "0.12.4" authors: - name: "Robrecht Cannoodt" roles: @@ -204,6 +204,6 @@ info: output: "/home/runner/work/openpipeline/openpipeline/target/docker/mapping/htseq_count_to_h5mu" executable: "/home/runner/work/openpipeline/openpipeline/target/docker/mapping/htseq_count_to_h5mu/htseq_count_to_h5mu" viash_version: "0.7.5" - git_commit: "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" + git_commit: "a075b9f384e200b357c4c85801062a980ddb3383" git_remote: "https://github.com/openpipelines-bio/openpipeline" - git_tag: "0.12.2-3-g827d483cf7" + git_tag: "0.12.3-3-ga075b9f384" diff --git a/target/docker/mapping/htseq_count_to_h5mu/htseq_count_to_h5mu b/target/docker/mapping/htseq_count_to_h5mu/htseq_count_to_h5mu index f75d95ef0dd..3b68d87030c 100755 --- a/target/docker/mapping/htseq_count_to_h5mu/htseq_count_to_h5mu +++ b/target/docker/mapping/htseq_count_to_h5mu/htseq_count_to_h5mu @@ -1,6 +1,6 @@ #!/usr/bin/env bash -# htseq_count_to_h5mu 0.12.3 +# htseq_count_to_h5mu 0.12.4 # # This wrapper script is auto-generated by viash 0.7.5 and is thus a derivative # work thereof. This software comes with ABSOLUTELY NO WARRANTY from Data @@ -159,7 +159,7 @@ VIASH_META_TEMP_DIR="$VIASH_TEMP" # ViashHelp: Display helpful explanation about this executable function ViashHelp { - echo "htseq_count_to_h5mu 0.12.3" + echo "htseq_count_to_h5mu 0.12.4" echo "" echo "Convert the htseq table to a h5mu." 
echo "" @@ -421,10 +421,10 @@ RUN pip install --upgrade pip && \ LABEL org.opencontainers.image.authors="Robrecht Cannoodt, Angela Oliveira Pisco" LABEL org.opencontainers.image.description="Companion container for running component mapping htseq_count_to_h5mu" -LABEL org.opencontainers.image.created="2024-01-25T10:13:59Z" +LABEL org.opencontainers.image.created="2024-01-31T09:08:36Z" LABEL org.opencontainers.image.source="https://github.com/openpipelines-bio/openpipeline" -LABEL org.opencontainers.image.revision="827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" -LABEL org.opencontainers.image.version="0.12.3" +LABEL org.opencontainers.image.revision="a075b9f384e200b357c4c85801062a980ddb3383" +LABEL org.opencontainers.image.version="0.12.4" VIASHDOCKER } @@ -575,7 +575,7 @@ while [[ $# -gt 0 ]]; do shift 1 ;; --version) - echo "htseq_count_to_h5mu 0.12.3" + echo "htseq_count_to_h5mu 0.12.4" exit ;; --input_id) diff --git a/target/docker/mapping/multi_star/.config.vsh.yaml b/target/docker/mapping/multi_star/.config.vsh.yaml index e66b519f23d..8c53bc3ffcd 100644 --- a/target/docker/mapping/multi_star/.config.vsh.yaml +++ b/target/docker/mapping/multi_star/.config.vsh.yaml @@ -1,7 +1,7 @@ functionality: name: "multi_star" namespace: "mapping" - version: "0.12.3" + version: "0.12.4" authors: - name: "Angela Oliveira Pisco" roles: @@ -3075,6 +3075,6 @@ info: output: "/home/runner/work/openpipeline/openpipeline/target/docker/mapping/multi_star" executable: "/home/runner/work/openpipeline/openpipeline/target/docker/mapping/multi_star/multi_star" viash_version: "0.7.5" - git_commit: "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" + git_commit: "a075b9f384e200b357c4c85801062a980ddb3383" git_remote: "https://github.com/openpipelines-bio/openpipeline" - git_tag: "0.12.2-3-g827d483cf7" + git_tag: "0.12.3-3-ga075b9f384" diff --git a/target/docker/mapping/multi_star/multi_star b/target/docker/mapping/multi_star/multi_star index 236aecae1eb..067d52b4274 100755 --- a/target/docker/mapping/multi_star/multi_star +++ b/target/docker/mapping/multi_star/multi_star @@ -1,6 +1,6 @@ #!/usr/bin/env bash -# multi_star 0.12.3 +# multi_star 0.12.4 # # This wrapper script is auto-generated by viash 0.7.5 and is thus a derivative # work thereof. This software comes with ABSOLUTELY NO WARRANTY from Data @@ -159,7 +159,7 @@ VIASH_META_TEMP_DIR="$VIASH_TEMP" # ViashHelp: Display helpful explanation about this executable function ViashHelp { - echo "multi_star 0.12.3" + echo "multi_star 0.12.4" echo "" echo "Align fastq files using STAR." 
echo "" @@ -1861,10 +1861,10 @@ RUN pip install --upgrade pip && \ LABEL org.opencontainers.image.authors="Angela Oliveira Pisco, Robrecht Cannoodt" LABEL org.opencontainers.image.description="Companion container for running component mapping multi_star" -LABEL org.opencontainers.image.created="2024-01-25T10:13:56Z" +LABEL org.opencontainers.image.created="2024-01-31T09:08:31Z" LABEL org.opencontainers.image.source="https://github.com/openpipelines-bio/openpipeline" -LABEL org.opencontainers.image.revision="827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" -LABEL org.opencontainers.image.version="0.12.3" +LABEL org.opencontainers.image.revision="a075b9f384e200b357c4c85801062a980ddb3383" +LABEL org.opencontainers.image.version="0.12.4" VIASHDOCKER } @@ -2015,7 +2015,7 @@ while [[ $# -gt 0 ]]; do shift 1 ;; --version) - echo "multi_star 0.12.3" + echo "multi_star 0.12.4" exit ;; --input_id) diff --git a/target/docker/mapping/multi_star_to_h5mu/.config.vsh.yaml b/target/docker/mapping/multi_star_to_h5mu/.config.vsh.yaml index c0f10e359c8..650943f4a64 100644 --- a/target/docker/mapping/multi_star_to_h5mu/.config.vsh.yaml +++ b/target/docker/mapping/multi_star_to_h5mu/.config.vsh.yaml @@ -1,7 +1,7 @@ functionality: name: "multi_star_to_h5mu" namespace: "mapping" - version: "0.12.3" + version: "0.12.4" authors: - name: "Robrecht Cannoodt" roles: @@ -174,6 +174,6 @@ info: output: "/home/runner/work/openpipeline/openpipeline/target/docker/mapping/multi_star_to_h5mu" executable: "/home/runner/work/openpipeline/openpipeline/target/docker/mapping/multi_star_to_h5mu/multi_star_to_h5mu" viash_version: "0.7.5" - git_commit: "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" + git_commit: "a075b9f384e200b357c4c85801062a980ddb3383" git_remote: "https://github.com/openpipelines-bio/openpipeline" - git_tag: "0.12.2-3-g827d483cf7" + git_tag: "0.12.3-3-ga075b9f384" diff --git a/target/docker/mapping/multi_star_to_h5mu/multi_star_to_h5mu b/target/docker/mapping/multi_star_to_h5mu/multi_star_to_h5mu index d9c9fb15726..cc20dfe2277 100755 --- a/target/docker/mapping/multi_star_to_h5mu/multi_star_to_h5mu +++ b/target/docker/mapping/multi_star_to_h5mu/multi_star_to_h5mu @@ -1,6 +1,6 @@ #!/usr/bin/env bash -# multi_star_to_h5mu 0.12.3 +# multi_star_to_h5mu 0.12.4 # # This wrapper script is auto-generated by viash 0.7.5 and is thus a derivative # work thereof. This software comes with ABSOLUTELY NO WARRANTY from Data @@ -159,7 +159,7 @@ VIASH_META_TEMP_DIR="$VIASH_TEMP" # ViashHelp: Display helpful explanation about this executable function ViashHelp { - echo "multi_star_to_h5mu 0.12.3" + echo "multi_star_to_h5mu 0.12.4" echo "" echo "Convert the output of \`multi_star\` to a h5mu." 
echo "" @@ -410,10 +410,10 @@ RUN pip install --upgrade pip && \ LABEL org.opencontainers.image.authors="Robrecht Cannoodt, Angela Oliveira Pisco" LABEL org.opencontainers.image.description="Companion container for running component mapping multi_star_to_h5mu" -LABEL org.opencontainers.image.created="2024-01-25T10:13:55Z" +LABEL org.opencontainers.image.created="2024-01-31T09:08:36Z" LABEL org.opencontainers.image.source="https://github.com/openpipelines-bio/openpipeline" -LABEL org.opencontainers.image.revision="827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" -LABEL org.opencontainers.image.version="0.12.3" +LABEL org.opencontainers.image.revision="a075b9f384e200b357c4c85801062a980ddb3383" +LABEL org.opencontainers.image.version="0.12.4" VIASHDOCKER } @@ -564,7 +564,7 @@ while [[ $# -gt 0 ]]; do shift 1 ;; --version) - echo "multi_star_to_h5mu 0.12.3" + echo "multi_star_to_h5mu 0.12.4" exit ;; --input) diff --git a/target/docker/mapping/samtools_sort/.config.vsh.yaml b/target/docker/mapping/samtools_sort/.config.vsh.yaml index 3aec09b023a..6d9b998df68 100644 --- a/target/docker/mapping/samtools_sort/.config.vsh.yaml +++ b/target/docker/mapping/samtools_sort/.config.vsh.yaml @@ -1,7 +1,7 @@ functionality: name: "samtools_sort" namespace: "mapping" - version: "0.12.3" + version: "0.12.4" authors: - name: "Robrecht Cannoodt" roles: @@ -265,6 +265,6 @@ info: output: "/home/runner/work/openpipeline/openpipeline/target/docker/mapping/samtools_sort" executable: "/home/runner/work/openpipeline/openpipeline/target/docker/mapping/samtools_sort/samtools_sort" viash_version: "0.7.5" - git_commit: "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" + git_commit: "a075b9f384e200b357c4c85801062a980ddb3383" git_remote: "https://github.com/openpipelines-bio/openpipeline" - git_tag: "0.12.2-3-g827d483cf7" + git_tag: "0.12.3-3-ga075b9f384" diff --git a/target/docker/mapping/samtools_sort/samtools_sort b/target/docker/mapping/samtools_sort/samtools_sort index a85d26a34ed..72d60fdc9b4 100755 --- a/target/docker/mapping/samtools_sort/samtools_sort +++ b/target/docker/mapping/samtools_sort/samtools_sort @@ -1,6 +1,6 @@ #!/usr/bin/env bash -# samtools_sort 0.12.3 +# samtools_sort 0.12.4 # # This wrapper script is auto-generated by viash 0.7.5 and is thus a derivative # work thereof. This software comes with ABSOLUTELY NO WARRANTY from Data @@ -159,7 +159,7 @@ VIASH_META_TEMP_DIR="$VIASH_TEMP" # ViashHelp: Display helpful explanation about this executable function ViashHelp { - echo "samtools_sort 0.12.3" + echo "samtools_sort 0.12.4" echo "" echo "Sort and (optionally) index alignments." 
echo "" @@ -466,10 +466,10 @@ RUN pip install --upgrade pip && \ LABEL org.opencontainers.image.authors="Robrecht Cannoodt, Angela Oliveira Pisco" LABEL org.opencontainers.image.description="Companion container for running component mapping samtools_sort" -LABEL org.opencontainers.image.created="2024-01-25T10:13:55Z" +LABEL org.opencontainers.image.created="2024-01-31T09:08:31Z" LABEL org.opencontainers.image.source="https://github.com/openpipelines-bio/openpipeline" -LABEL org.opencontainers.image.revision="827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" -LABEL org.opencontainers.image.version="0.12.3" +LABEL org.opencontainers.image.revision="a075b9f384e200b357c4c85801062a980ddb3383" +LABEL org.opencontainers.image.version="0.12.4" VIASHDOCKER } @@ -620,7 +620,7 @@ while [[ $# -gt 0 ]]; do shift 1 ;; --version) - echo "samtools_sort 0.12.3" + echo "samtools_sort 0.12.4" exit ;; --input) diff --git a/target/docker/mapping/star_align/.config.vsh.yaml b/target/docker/mapping/star_align/.config.vsh.yaml index 1787020a5c0..b88e343adcd 100644 --- a/target/docker/mapping/star_align/.config.vsh.yaml +++ b/target/docker/mapping/star_align/.config.vsh.yaml @@ -1,7 +1,7 @@ functionality: name: "star_align" namespace: "mapping" - version: "0.12.3" + version: "0.12.4" authors: - name: "Angela Oliveira Pisco" roles: @@ -2530,6 +2530,6 @@ info: output: "/home/runner/work/openpipeline/openpipeline/target/docker/mapping/star_align" executable: "/home/runner/work/openpipeline/openpipeline/target/docker/mapping/star_align/star_align" viash_version: "0.7.5" - git_commit: "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" + git_commit: "a075b9f384e200b357c4c85801062a980ddb3383" git_remote: "https://github.com/openpipelines-bio/openpipeline" - git_tag: "0.12.2-3-g827d483cf7" + git_tag: "0.12.3-3-ga075b9f384" diff --git a/target/docker/mapping/star_align/star_align b/target/docker/mapping/star_align/star_align index b05bbaaa1e4..655d3b08039 100755 --- a/target/docker/mapping/star_align/star_align +++ b/target/docker/mapping/star_align/star_align @@ -1,6 +1,6 @@ #!/usr/bin/env bash -# star_align 0.12.3 +# star_align 0.12.4 # # This wrapper script is auto-generated by viash 0.7.5 and is thus a derivative # work thereof. This software comes with ABSOLUTELY NO WARRANTY from Data @@ -159,7 +159,7 @@ VIASH_META_TEMP_DIR="$VIASH_TEMP" # ViashHelp: Display helpful explanation about this executable function ViashHelp { - echo "star_align 0.12.3" + echo "star_align 0.12.4" echo "" echo "Align fastq files using STAR." 
echo "" @@ -1789,10 +1789,10 @@ RUN apt-get update && \ LABEL org.opencontainers.image.authors="Angela Oliveira Pisco, Robrecht Cannoodt" LABEL org.opencontainers.image.description="Companion container for running component mapping star_align" -LABEL org.opencontainers.image.created="2024-01-25T10:13:56Z" +LABEL org.opencontainers.image.created="2024-01-31T09:08:32Z" LABEL org.opencontainers.image.source="https://github.com/openpipelines-bio/openpipeline" -LABEL org.opencontainers.image.revision="827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" -LABEL org.opencontainers.image.version="0.12.3" +LABEL org.opencontainers.image.revision="a075b9f384e200b357c4c85801062a980ddb3383" +LABEL org.opencontainers.image.version="0.12.4" VIASHDOCKER } @@ -1943,7 +1943,7 @@ while [[ $# -gt 0 ]]; do shift 1 ;; --version) - echo "star_align 0.12.3" + echo "star_align 0.12.4" exit ;; --input) diff --git a/target/docker/mapping/star_align_v273a/.config.vsh.yaml b/target/docker/mapping/star_align_v273a/.config.vsh.yaml index 15a0341df41..6553ece2d26 100644 --- a/target/docker/mapping/star_align_v273a/.config.vsh.yaml +++ b/target/docker/mapping/star_align_v273a/.config.vsh.yaml @@ -1,7 +1,7 @@ functionality: name: "star_align_v273a" namespace: "mapping" - version: "0.12.3" + version: "0.12.4" authors: - name: "Angela Oliveira Pisco" roles: @@ -2530,6 +2530,6 @@ info: output: "/home/runner/work/openpipeline/openpipeline/target/docker/mapping/star_align_v273a" executable: "/home/runner/work/openpipeline/openpipeline/target/docker/mapping/star_align_v273a/star_align_v273a" viash_version: "0.7.5" - git_commit: "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" + git_commit: "a075b9f384e200b357c4c85801062a980ddb3383" git_remote: "https://github.com/openpipelines-bio/openpipeline" - git_tag: "0.12.2-3-g827d483cf7" + git_tag: "0.12.3-3-ga075b9f384" diff --git a/target/docker/mapping/star_align_v273a/star_align_v273a b/target/docker/mapping/star_align_v273a/star_align_v273a index 3e3d0f0f339..e4ecaa46e27 100755 --- a/target/docker/mapping/star_align_v273a/star_align_v273a +++ b/target/docker/mapping/star_align_v273a/star_align_v273a @@ -1,6 +1,6 @@ #!/usr/bin/env bash -# star_align_v273a 0.12.3 +# star_align_v273a 0.12.4 # # This wrapper script is auto-generated by viash 0.7.5 and is thus a derivative # work thereof. This software comes with ABSOLUTELY NO WARRANTY from Data @@ -159,7 +159,7 @@ VIASH_META_TEMP_DIR="$VIASH_TEMP" # ViashHelp: Display helpful explanation about this executable function ViashHelp { - echo "star_align_v273a 0.12.3" + echo "star_align_v273a 0.12.4" echo "" echo "Align fastq files using STAR." 
echo "" @@ -1789,10 +1789,10 @@ RUN apt-get update && \ LABEL org.opencontainers.image.authors="Angela Oliveira Pisco, Robrecht Cannoodt" LABEL org.opencontainers.image.description="Companion container for running component mapping star_align_v273a" -LABEL org.opencontainers.image.created="2024-01-25T10:13:56Z" +LABEL org.opencontainers.image.created="2024-01-31T09:08:35Z" LABEL org.opencontainers.image.source="https://github.com/openpipelines-bio/openpipeline" -LABEL org.opencontainers.image.revision="827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" -LABEL org.opencontainers.image.version="0.12.3" +LABEL org.opencontainers.image.revision="a075b9f384e200b357c4c85801062a980ddb3383" +LABEL org.opencontainers.image.version="0.12.4" VIASHDOCKER } @@ -1943,7 +1943,7 @@ while [[ $# -gt 0 ]]; do shift 1 ;; --version) - echo "star_align_v273a 0.12.3" + echo "star_align_v273a 0.12.4" exit ;; --input) diff --git a/target/docker/mapping/star_build_reference/.config.vsh.yaml b/target/docker/mapping/star_build_reference/.config.vsh.yaml index 27a4226e047..a13c0be97f5 100644 --- a/target/docker/mapping/star_build_reference/.config.vsh.yaml +++ b/target/docker/mapping/star_build_reference/.config.vsh.yaml @@ -1,7 +1,7 @@ functionality: name: "star_build_reference" namespace: "mapping" - version: "0.12.3" + version: "0.12.4" authors: - name: "Dries Schaumont" roles: @@ -185,6 +185,6 @@ info: output: "/home/runner/work/openpipeline/openpipeline/target/docker/mapping/star_build_reference" executable: "/home/runner/work/openpipeline/openpipeline/target/docker/mapping/star_build_reference/star_build_reference" viash_version: "0.7.5" - git_commit: "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" + git_commit: "a075b9f384e200b357c4c85801062a980ddb3383" git_remote: "https://github.com/openpipelines-bio/openpipeline" - git_tag: "0.12.2-3-g827d483cf7" + git_tag: "0.12.3-3-ga075b9f384" diff --git a/target/docker/mapping/star_build_reference/star_build_reference b/target/docker/mapping/star_build_reference/star_build_reference index 7e74bc45069..698f440b73e 100755 --- a/target/docker/mapping/star_build_reference/star_build_reference +++ b/target/docker/mapping/star_build_reference/star_build_reference @@ -1,6 +1,6 @@ #!/usr/bin/env bash -# star_build_reference 0.12.3 +# star_build_reference 0.12.4 # # This wrapper script is auto-generated by viash 0.7.5 and is thus a derivative # work thereof. This software comes with ABSOLUTELY NO WARRANTY from Data @@ -158,7 +158,7 @@ VIASH_META_TEMP_DIR="$VIASH_TEMP" # ViashHelp: Display helpful explanation about this executable function ViashHelp { - echo "star_build_reference 0.12.3" + echo "star_build_reference 0.12.4" echo "" echo "Create a reference for STAR from a set of fasta files." 
echo "" @@ -437,10 +437,10 @@ RUN apt-get update && \ LABEL org.opencontainers.image.authors="Dries Schaumont" LABEL org.opencontainers.image.description="Companion container for running component mapping star_build_reference" -LABEL org.opencontainers.image.created="2024-01-25T10:14:00Z" +LABEL org.opencontainers.image.created="2024-01-31T09:08:33Z" LABEL org.opencontainers.image.source="https://github.com/openpipelines-bio/openpipeline" -LABEL org.opencontainers.image.revision="827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" -LABEL org.opencontainers.image.version="0.12.3" +LABEL org.opencontainers.image.revision="a075b9f384e200b357c4c85801062a980ddb3383" +LABEL org.opencontainers.image.version="0.12.4" VIASHDOCKER } @@ -591,7 +591,7 @@ while [[ $# -gt 0 ]]; do shift 1 ;; --version) - echo "star_build_reference 0.12.3" + echo "star_build_reference 0.12.4" exit ;; --genome_fasta) diff --git a/target/docker/metadata/add_id/.config.vsh.yaml b/target/docker/metadata/add_id/.config.vsh.yaml index d8140155307..6ba77619407 100644 --- a/target/docker/metadata/add_id/.config.vsh.yaml +++ b/target/docker/metadata/add_id/.config.vsh.yaml @@ -1,7 +1,7 @@ functionality: name: "add_id" namespace: "metadata" - version: "0.12.3" + version: "0.12.4" authors: - name: "Dries Schaumont" roles: @@ -192,6 +192,6 @@ info: output: "/home/runner/work/openpipeline/openpipeline/target/docker/metadata/add_id" executable: "/home/runner/work/openpipeline/openpipeline/target/docker/metadata/add_id/add_id" viash_version: "0.7.5" - git_commit: "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" + git_commit: "a075b9f384e200b357c4c85801062a980ddb3383" git_remote: "https://github.com/openpipelines-bio/openpipeline" - git_tag: "0.12.2-3-g827d483cf7" + git_tag: "0.12.3-3-ga075b9f384" diff --git a/target/docker/metadata/add_id/add_id b/target/docker/metadata/add_id/add_id index 663e58f4349..8e74aaf6664 100755 --- a/target/docker/metadata/add_id/add_id +++ b/target/docker/metadata/add_id/add_id @@ -1,6 +1,6 @@ #!/usr/bin/env bash -# add_id 0.12.3 +# add_id 0.12.4 # # This wrapper script is auto-generated by viash 0.7.5 and is thus a derivative # work thereof. This software comes with ABSOLUTELY NO WARRANTY from Data @@ -158,7 +158,7 @@ VIASH_META_TEMP_DIR="$VIASH_TEMP" # ViashHelp: Display helpful explanation about this executable function ViashHelp { - echo "add_id 0.12.3" + echo "add_id 0.12.4" echo "" echo "Add id of .obs. Also allows to make .obs_names (the .obs index) unique" echo "by prefixing the values with an unique id per .h5mu file." 
@@ -422,10 +422,10 @@ RUN pip install --upgrade pip && \ LABEL org.opencontainers.image.authors="Dries Schaumont" LABEL org.opencontainers.image.description="Companion container for running component metadata add_id" -LABEL org.opencontainers.image.created="2024-01-25T10:13:59Z" +LABEL org.opencontainers.image.created="2024-01-31T09:08:32Z" LABEL org.opencontainers.image.source="https://github.com/openpipelines-bio/openpipeline" -LABEL org.opencontainers.image.revision="827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" -LABEL org.opencontainers.image.version="0.12.3" +LABEL org.opencontainers.image.revision="a075b9f384e200b357c4c85801062a980ddb3383" +LABEL org.opencontainers.image.version="0.12.4" VIASHDOCKER } @@ -576,7 +576,7 @@ while [[ $# -gt 0 ]]; do shift 1 ;; --version) - echo "add_id 0.12.3" + echo "add_id 0.12.4" exit ;; --input) diff --git a/target/docker/metadata/grep_annotation_column/.config.vsh.yaml b/target/docker/metadata/grep_annotation_column/.config.vsh.yaml index 594bfcfaa9e..72ca34538cf 100644 --- a/target/docker/metadata/grep_annotation_column/.config.vsh.yaml +++ b/target/docker/metadata/grep_annotation_column/.config.vsh.yaml @@ -1,7 +1,7 @@ functionality: name: "grep_annotation_column" namespace: "metadata" - version: "0.12.3" + version: "0.12.4" authors: - name: "Dries Schaumont" roles: @@ -239,6 +239,6 @@ info: output: "/home/runner/work/openpipeline/openpipeline/target/docker/metadata/grep_annotation_column" executable: "/home/runner/work/openpipeline/openpipeline/target/docker/metadata/grep_annotation_column/grep_annotation_column" viash_version: "0.7.5" - git_commit: "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" + git_commit: "a075b9f384e200b357c4c85801062a980ddb3383" git_remote: "https://github.com/openpipelines-bio/openpipeline" - git_tag: "0.12.2-3-g827d483cf7" + git_tag: "0.12.3-3-ga075b9f384" diff --git a/target/docker/metadata/grep_annotation_column/grep_annotation_column b/target/docker/metadata/grep_annotation_column/grep_annotation_column index 39e2db2b16d..05d31ccc9fd 100755 --- a/target/docker/metadata/grep_annotation_column/grep_annotation_column +++ b/target/docker/metadata/grep_annotation_column/grep_annotation_column @@ -1,6 +1,6 @@ #!/usr/bin/env bash -# grep_annotation_column 0.12.3 +# grep_annotation_column 0.12.4 # # This wrapper script is auto-generated by viash 0.7.5 and is thus a derivative # work thereof. This software comes with ABSOLUTELY NO WARRANTY from Data @@ -158,7 +158,7 @@ VIASH_META_TEMP_DIR="$VIASH_TEMP" # ViashHelp: Display helpful explanation about this executable function ViashHelp { - echo "grep_annotation_column 0.12.3" + echo "grep_annotation_column 0.12.4" echo "" echo "Perform a regex lookup on a column from the annotation matrices .obs or .var." 
echo "The annotation matrix can originate from either a modality, or all modalities" @@ -448,10 +448,10 @@ RUN pip install --upgrade pip && \ LABEL org.opencontainers.image.authors="Dries Schaumont" LABEL org.opencontainers.image.description="Companion container for running component metadata grep_annotation_column" -LABEL org.opencontainers.image.created="2024-01-25T10:13:58Z" +LABEL org.opencontainers.image.created="2024-01-31T09:08:33Z" LABEL org.opencontainers.image.source="https://github.com/openpipelines-bio/openpipeline" -LABEL org.opencontainers.image.revision="827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" -LABEL org.opencontainers.image.version="0.12.3" +LABEL org.opencontainers.image.revision="a075b9f384e200b357c4c85801062a980ddb3383" +LABEL org.opencontainers.image.version="0.12.4" VIASHDOCKER } @@ -602,7 +602,7 @@ while [[ $# -gt 0 ]]; do shift 1 ;; --version) - echo "grep_annotation_column 0.12.3" + echo "grep_annotation_column 0.12.4" exit ;; --input) diff --git a/target/docker/metadata/join_csv/.config.vsh.yaml b/target/docker/metadata/join_csv/.config.vsh.yaml index beac67bda0a..54de10b4438 100644 --- a/target/docker/metadata/join_csv/.config.vsh.yaml +++ b/target/docker/metadata/join_csv/.config.vsh.yaml @@ -1,7 +1,7 @@ functionality: name: "join_csv" namespace: "metadata" - version: "0.12.3" + version: "0.12.4" authors: - name: "Dries Schaumont" roles: @@ -224,6 +224,6 @@ info: output: "/home/runner/work/openpipeline/openpipeline/target/docker/metadata/join_csv" executable: "/home/runner/work/openpipeline/openpipeline/target/docker/metadata/join_csv/join_csv" viash_version: "0.7.5" - git_commit: "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" + git_commit: "a075b9f384e200b357c4c85801062a980ddb3383" git_remote: "https://github.com/openpipelines-bio/openpipeline" - git_tag: "0.12.2-3-g827d483cf7" + git_tag: "0.12.3-3-ga075b9f384" diff --git a/target/docker/metadata/join_csv/join_csv b/target/docker/metadata/join_csv/join_csv index 5fac93215fd..79307c1ff13 100755 --- a/target/docker/metadata/join_csv/join_csv +++ b/target/docker/metadata/join_csv/join_csv @@ -1,6 +1,6 @@ #!/usr/bin/env bash -# join_csv 0.12.3 +# join_csv 0.12.4 # # This wrapper script is auto-generated by viash 0.7.5 and is thus a derivative # work thereof. This software comes with ABSOLUTELY NO WARRANTY from Data @@ -158,7 +158,7 @@ VIASH_META_TEMP_DIR="$VIASH_TEMP" # ViashHelp: Display helpful explanation about this executable function ViashHelp { - echo "join_csv 0.12.3" + echo "join_csv 0.12.4" echo "" echo "Join a csv containing metadata to the .obs or .var field of a mudata file." 
echo "" @@ -438,10 +438,10 @@ RUN pip install --upgrade pip && \ LABEL org.opencontainers.image.authors="Dries Schaumont" LABEL org.opencontainers.image.description="Companion container for running component metadata join_csv" -LABEL org.opencontainers.image.created="2024-01-25T10:13:59Z" +LABEL org.opencontainers.image.created="2024-01-31T09:08:32Z" LABEL org.opencontainers.image.source="https://github.com/openpipelines-bio/openpipeline" -LABEL org.opencontainers.image.revision="827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" -LABEL org.opencontainers.image.version="0.12.3" +LABEL org.opencontainers.image.revision="a075b9f384e200b357c4c85801062a980ddb3383" +LABEL org.opencontainers.image.version="0.12.4" VIASHDOCKER } @@ -592,7 +592,7 @@ while [[ $# -gt 0 ]]; do shift 1 ;; --version) - echo "join_csv 0.12.3" + echo "join_csv 0.12.4" exit ;; --input) diff --git a/target/docker/metadata/join_uns_to_obs/.config.vsh.yaml b/target/docker/metadata/join_uns_to_obs/.config.vsh.yaml index 6efbf4fd030..be91ea1b603 100644 --- a/target/docker/metadata/join_uns_to_obs/.config.vsh.yaml +++ b/target/docker/metadata/join_uns_to_obs/.config.vsh.yaml @@ -1,7 +1,7 @@ functionality: name: "join_uns_to_obs" namespace: "metadata" - version: "0.12.3" + version: "0.12.4" arguments: - type: "file" name: "--input" @@ -166,6 +166,6 @@ info: output: "/home/runner/work/openpipeline/openpipeline/target/docker/metadata/join_uns_to_obs" executable: "/home/runner/work/openpipeline/openpipeline/target/docker/metadata/join_uns_to_obs/join_uns_to_obs" viash_version: "0.7.5" - git_commit: "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" + git_commit: "a075b9f384e200b357c4c85801062a980ddb3383" git_remote: "https://github.com/openpipelines-bio/openpipeline" - git_tag: "0.12.2-3-g827d483cf7" + git_tag: "0.12.3-3-ga075b9f384" diff --git a/target/docker/metadata/join_uns_to_obs/join_uns_to_obs b/target/docker/metadata/join_uns_to_obs/join_uns_to_obs index e59ad3a0701..f0a3260d1d2 100755 --- a/target/docker/metadata/join_uns_to_obs/join_uns_to_obs +++ b/target/docker/metadata/join_uns_to_obs/join_uns_to_obs @@ -1,6 +1,6 @@ #!/usr/bin/env bash -# join_uns_to_obs 0.12.3 +# join_uns_to_obs 0.12.4 # # This wrapper script is auto-generated by viash 0.7.5 and is thus a derivative # work thereof. This software comes with ABSOLUTELY NO WARRANTY from Data @@ -155,7 +155,7 @@ VIASH_META_TEMP_DIR="$VIASH_TEMP" # ViashHelp: Display helpful explanation about this executable function ViashHelp { - echo "join_uns_to_obs 0.12.3" + echo "join_uns_to_obs 0.12.4" echo "" echo "Join a data frame of length 1 (1 row index value) in .uns containing metadata to" echo "the .obs of a mudata file." 
@@ -413,10 +413,10 @@ RUN pip install --upgrade pip && \ pip install --upgrade --no-cache-dir "mudata~=0.2.3" "anndata~=0.9.1" LABEL org.opencontainers.image.description="Companion container for running component metadata join_uns_to_obs" -LABEL org.opencontainers.image.created="2024-01-25T10:13:59Z" +LABEL org.opencontainers.image.created="2024-01-31T09:08:32Z" LABEL org.opencontainers.image.source="https://github.com/openpipelines-bio/openpipeline" -LABEL org.opencontainers.image.revision="827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" -LABEL org.opencontainers.image.version="0.12.3" +LABEL org.opencontainers.image.revision="a075b9f384e200b357c4c85801062a980ddb3383" +LABEL org.opencontainers.image.version="0.12.4" VIASHDOCKER } @@ -567,7 +567,7 @@ while [[ $# -gt 0 ]]; do shift 1 ;; --version) - echo "join_uns_to_obs 0.12.3" + echo "join_uns_to_obs 0.12.4" exit ;; --input) diff --git a/target/docker/metadata/move_obsm_to_obs/.config.vsh.yaml b/target/docker/metadata/move_obsm_to_obs/.config.vsh.yaml index 121582d4551..fc8f7351b44 100644 --- a/target/docker/metadata/move_obsm_to_obs/.config.vsh.yaml +++ b/target/docker/metadata/move_obsm_to_obs/.config.vsh.yaml @@ -1,7 +1,7 @@ functionality: name: "move_obsm_to_obs" namespace: "metadata" - version: "0.12.3" + version: "0.12.4" authors: - name: "Dries Schaumont" roles: @@ -187,6 +187,6 @@ info: output: "/home/runner/work/openpipeline/openpipeline/target/docker/metadata/move_obsm_to_obs" executable: "/home/runner/work/openpipeline/openpipeline/target/docker/metadata/move_obsm_to_obs/move_obsm_to_obs" viash_version: "0.7.5" - git_commit: "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" + git_commit: "a075b9f384e200b357c4c85801062a980ddb3383" git_remote: "https://github.com/openpipelines-bio/openpipeline" - git_tag: "0.12.2-3-g827d483cf7" + git_tag: "0.12.3-3-ga075b9f384" diff --git a/target/docker/metadata/move_obsm_to_obs/move_obsm_to_obs b/target/docker/metadata/move_obsm_to_obs/move_obsm_to_obs index 9a91bbbad0d..9e3bfd667a7 100755 --- a/target/docker/metadata/move_obsm_to_obs/move_obsm_to_obs +++ b/target/docker/metadata/move_obsm_to_obs/move_obsm_to_obs @@ -1,6 +1,6 @@ #!/usr/bin/env bash -# move_obsm_to_obs 0.12.3 +# move_obsm_to_obs 0.12.4 # # This wrapper script is auto-generated by viash 0.7.5 and is thus a derivative # work thereof. This software comes with ABSOLUTELY NO WARRANTY from Data @@ -158,7 +158,7 @@ VIASH_META_TEMP_DIR="$VIASH_TEMP" # ViashHelp: Display helpful explanation about this executable function ViashHelp { - echo "move_obsm_to_obs 0.12.3" + echo "move_obsm_to_obs 0.12.4" echo "" echo "Move a matrix from .obsm to .obs. 
Newly created columns in .obs will" echo "be created from the .obsm key suffixed with an underscore and the name of the" @@ -421,10 +421,10 @@ RUN pip install --upgrade pip && \ LABEL org.opencontainers.image.authors="Dries Schaumont" LABEL org.opencontainers.image.description="Companion container for running component metadata move_obsm_to_obs" -LABEL org.opencontainers.image.created="2024-01-25T10:13:59Z" +LABEL org.opencontainers.image.created="2024-01-31T09:08:33Z" LABEL org.opencontainers.image.source="https://github.com/openpipelines-bio/openpipeline" -LABEL org.opencontainers.image.revision="827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" -LABEL org.opencontainers.image.version="0.12.3" +LABEL org.opencontainers.image.revision="a075b9f384e200b357c4c85801062a980ddb3383" +LABEL org.opencontainers.image.version="0.12.4" VIASHDOCKER } @@ -575,7 +575,7 @@ while [[ $# -gt 0 ]]; do shift 1 ;; --version) - echo "move_obsm_to_obs 0.12.3" + echo "move_obsm_to_obs 0.12.4" exit ;; --input) diff --git a/target/docker/neighbors/bbknn/.config.vsh.yaml b/target/docker/neighbors/bbknn/.config.vsh.yaml index 87dff641d9b..db545e6970d 100644 --- a/target/docker/neighbors/bbknn/.config.vsh.yaml +++ b/target/docker/neighbors/bbknn/.config.vsh.yaml @@ -1,7 +1,7 @@ functionality: name: "bbknn" namespace: "neighbors" - version: "0.12.3" + version: "0.12.4" authors: - name: "Dries De Maeyer" roles: @@ -284,6 +284,6 @@ info: output: "/home/runner/work/openpipeline/openpipeline/target/docker/neighbors/bbknn" executable: "/home/runner/work/openpipeline/openpipeline/target/docker/neighbors/bbknn/bbknn" viash_version: "0.7.5" - git_commit: "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" + git_commit: "a075b9f384e200b357c4c85801062a980ddb3383" git_remote: "https://github.com/openpipelines-bio/openpipeline" - git_tag: "0.12.2-3-g827d483cf7" + git_tag: "0.12.3-3-ga075b9f384" diff --git a/target/docker/neighbors/bbknn/bbknn b/target/docker/neighbors/bbknn/bbknn index 12cada2b7c8..ba17ae94b90 100755 --- a/target/docker/neighbors/bbknn/bbknn +++ b/target/docker/neighbors/bbknn/bbknn @@ -1,6 +1,6 @@ #!/usr/bin/env bash -# bbknn 0.12.3 +# bbknn 0.12.4 # # This wrapper script is auto-generated by viash 0.7.5 and is thus a derivative # work thereof. 
This software comes with ABSOLUTELY NO WARRANTY from Data @@ -159,7 +159,7 @@ VIASH_META_TEMP_DIR="$VIASH_TEMP" # ViashHelp: Display helpful explanation about this executable function ViashHelp { - echo "bbknn 0.12.3" + echo "bbknn 0.12.4" echo "" echo "BBKNN network generation" echo "" @@ -463,10 +463,10 @@ RUN pip install --upgrade pip && \ LABEL org.opencontainers.image.authors="Dries De Maeyer, Dries Schaumont" LABEL org.opencontainers.image.description="Companion container for running component neighbors bbknn" -LABEL org.opencontainers.image.created="2024-01-25T10:14:00Z" +LABEL org.opencontainers.image.created="2024-01-31T09:08:34Z" LABEL org.opencontainers.image.source="https://github.com/openpipelines-bio/openpipeline" -LABEL org.opencontainers.image.revision="827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" -LABEL org.opencontainers.image.version="0.12.3" +LABEL org.opencontainers.image.revision="a075b9f384e200b357c4c85801062a980ddb3383" +LABEL org.opencontainers.image.version="0.12.4" VIASHDOCKER } @@ -617,7 +617,7 @@ while [[ $# -gt 0 ]]; do shift 1 ;; --version) - echo "bbknn 0.12.3" + echo "bbknn 0.12.4" exit ;; --input) diff --git a/target/docker/neighbors/find_neighbors/.config.vsh.yaml b/target/docker/neighbors/find_neighbors/.config.vsh.yaml index 4aaa7d85d25..6419d2b4187 100644 --- a/target/docker/neighbors/find_neighbors/.config.vsh.yaml +++ b/target/docker/neighbors/find_neighbors/.config.vsh.yaml @@ -1,7 +1,7 @@ functionality: name: "find_neighbors" namespace: "neighbors" - version: "0.12.3" + version: "0.12.4" authors: - name: "Dries De Maeyer" roles: @@ -304,6 +304,6 @@ info: output: "/home/runner/work/openpipeline/openpipeline/target/docker/neighbors/find_neighbors" executable: "/home/runner/work/openpipeline/openpipeline/target/docker/neighbors/find_neighbors/find_neighbors" viash_version: "0.7.5" - git_commit: "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" + git_commit: "a075b9f384e200b357c4c85801062a980ddb3383" git_remote: "https://github.com/openpipelines-bio/openpipeline" - git_tag: "0.12.2-3-g827d483cf7" + git_tag: "0.12.3-3-ga075b9f384" diff --git a/target/docker/neighbors/find_neighbors/find_neighbors b/target/docker/neighbors/find_neighbors/find_neighbors index 110a9f5995e..e915895639e 100755 --- a/target/docker/neighbors/find_neighbors/find_neighbors +++ b/target/docker/neighbors/find_neighbors/find_neighbors @@ -1,6 +1,6 @@ #!/usr/bin/env bash -# find_neighbors 0.12.3 +# find_neighbors 0.12.4 # # This wrapper script is auto-generated by viash 0.7.5 and is thus a derivative # work thereof. This software comes with ABSOLUTELY NO WARRANTY from Data @@ -159,7 +159,7 @@ VIASH_META_TEMP_DIR="$VIASH_TEMP" # ViashHelp: Display helpful explanation about this executable function ViashHelp { - echo "find_neighbors 0.12.3" + echo "find_neighbors 0.12.4" echo "" echo "Compute a neighborhood graph of observations [McInnes18]." 
echo "" @@ -468,10 +468,10 @@ RUN pip install --upgrade pip && \ LABEL org.opencontainers.image.authors="Dries De Maeyer, Robrecht Cannoodt" LABEL org.opencontainers.image.description="Companion container for running component neighbors find_neighbors" -LABEL org.opencontainers.image.created="2024-01-25T10:14:00Z" +LABEL org.opencontainers.image.created="2024-01-31T09:08:34Z" LABEL org.opencontainers.image.source="https://github.com/openpipelines-bio/openpipeline" -LABEL org.opencontainers.image.revision="827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" -LABEL org.opencontainers.image.version="0.12.3" +LABEL org.opencontainers.image.revision="a075b9f384e200b357c4c85801062a980ddb3383" +LABEL org.opencontainers.image.version="0.12.4" VIASHDOCKER } @@ -622,7 +622,7 @@ while [[ $# -gt 0 ]]; do shift 1 ;; --version) - echo "find_neighbors 0.12.3" + echo "find_neighbors 0.12.4" exit ;; --input) diff --git a/target/docker/process_10xh5/filter_10xh5/.config.vsh.yaml b/target/docker/process_10xh5/filter_10xh5/.config.vsh.yaml index 44bf64da637..0eb1d0d237a 100644 --- a/target/docker/process_10xh5/filter_10xh5/.config.vsh.yaml +++ b/target/docker/process_10xh5/filter_10xh5/.config.vsh.yaml @@ -1,7 +1,7 @@ functionality: name: "filter_10xh5" namespace: "process_10xh5" - version: "0.12.3" + version: "0.12.4" authors: - name: "Robrecht Cannoodt" roles: @@ -190,6 +190,6 @@ info: output: "/home/runner/work/openpipeline/openpipeline/target/docker/process_10xh5/filter_10xh5" executable: "/home/runner/work/openpipeline/openpipeline/target/docker/process_10xh5/filter_10xh5/filter_10xh5" viash_version: "0.7.5" - git_commit: "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" + git_commit: "a075b9f384e200b357c4c85801062a980ddb3383" git_remote: "https://github.com/openpipelines-bio/openpipeline" - git_tag: "0.12.2-3-g827d483cf7" + git_tag: "0.12.3-3-ga075b9f384" diff --git a/target/docker/process_10xh5/filter_10xh5/filter_10xh5 b/target/docker/process_10xh5/filter_10xh5/filter_10xh5 index c8c56f847ba..91c73c35138 100755 --- a/target/docker/process_10xh5/filter_10xh5/filter_10xh5 +++ b/target/docker/process_10xh5/filter_10xh5/filter_10xh5 @@ -1,6 +1,6 @@ #!/usr/bin/env bash -# filter_10xh5 0.12.3 +# filter_10xh5 0.12.4 # # This wrapper script is auto-generated by viash 0.7.5 and is thus a derivative # work thereof. This software comes with ABSOLUTELY NO WARRANTY from Data @@ -158,7 +158,7 @@ VIASH_META_TEMP_DIR="$VIASH_TEMP" # ViashHelp: Display helpful explanation about this executable function ViashHelp { - echo "filter_10xh5 0.12.3" + echo "filter_10xh5 0.12.4" echo "" echo "Filter a 10x h5 dataset." 
echo "" @@ -431,10 +431,10 @@ RUN Rscript -e 'if (!requireNamespace("remotes", quietly = TRUE)) install.packag LABEL org.opencontainers.image.authors="Robrecht Cannoodt" LABEL org.opencontainers.image.description="Companion container for running component process_10xh5 filter_10xh5" -LABEL org.opencontainers.image.created="2024-01-25T10:13:55Z" +LABEL org.opencontainers.image.created="2024-01-31T09:08:36Z" LABEL org.opencontainers.image.source="https://github.com/openpipelines-bio/openpipeline" -LABEL org.opencontainers.image.revision="827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" -LABEL org.opencontainers.image.version="0.12.3" +LABEL org.opencontainers.image.revision="a075b9f384e200b357c4c85801062a980ddb3383" +LABEL org.opencontainers.image.version="0.12.4" VIASHDOCKER } @@ -585,7 +585,7 @@ while [[ $# -gt 0 ]]; do shift 1 ;; --version) - echo "filter_10xh5 0.12.3" + echo "filter_10xh5 0.12.4" exit ;; --input) diff --git a/target/docker/qc/calculate_qc_metrics/.config.vsh.yaml b/target/docker/qc/calculate_qc_metrics/.config.vsh.yaml index 8e31355e8d3..a8a836e46d5 100644 --- a/target/docker/qc/calculate_qc_metrics/.config.vsh.yaml +++ b/target/docker/qc/calculate_qc_metrics/.config.vsh.yaml @@ -1,7 +1,7 @@ functionality: name: "calculate_qc_metrics" namespace: "qc" - version: "0.12.3" + version: "0.12.4" authors: - name: "Dries Schaumont" roles: @@ -230,6 +230,6 @@ info: output: "/home/runner/work/openpipeline/openpipeline/target/docker/qc/calculate_qc_metrics" executable: "/home/runner/work/openpipeline/openpipeline/target/docker/qc/calculate_qc_metrics/calculate_qc_metrics" viash_version: "0.7.5" - git_commit: "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" + git_commit: "a075b9f384e200b357c4c85801062a980ddb3383" git_remote: "https://github.com/openpipelines-bio/openpipeline" - git_tag: "0.12.2-3-g827d483cf7" + git_tag: "0.12.3-3-ga075b9f384" diff --git a/target/docker/qc/calculate_qc_metrics/calculate_qc_metrics b/target/docker/qc/calculate_qc_metrics/calculate_qc_metrics index ac2dc69a960..893b303ce13 100755 --- a/target/docker/qc/calculate_qc_metrics/calculate_qc_metrics +++ b/target/docker/qc/calculate_qc_metrics/calculate_qc_metrics @@ -1,6 +1,6 @@ #!/usr/bin/env bash -# calculate_qc_metrics 0.12.3 +# calculate_qc_metrics 0.12.4 # # This wrapper script is auto-generated by viash 0.7.5 and is thus a derivative # work thereof. This software comes with ABSOLUTELY NO WARRANTY from Data @@ -158,7 +158,7 @@ VIASH_META_TEMP_DIR="$VIASH_TEMP" # ViashHelp: Display helpful explanation about this executable function ViashHelp { - echo "calculate_qc_metrics 0.12.3" + echo "calculate_qc_metrics 0.12.4" echo "" echo "Add basic quality control metrics to an .h5mu file." 
echo "" @@ -457,10 +457,10 @@ RUN pip install --upgrade pip && \ LABEL org.opencontainers.image.authors="Dries Schaumont" LABEL org.opencontainers.image.description="Companion container for running component qc calculate_qc_metrics" -LABEL org.opencontainers.image.created="2024-01-25T10:13:56Z" +LABEL org.opencontainers.image.created="2024-01-31T09:08:32Z" LABEL org.opencontainers.image.source="https://github.com/openpipelines-bio/openpipeline" -LABEL org.opencontainers.image.revision="827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" -LABEL org.opencontainers.image.version="0.12.3" +LABEL org.opencontainers.image.revision="a075b9f384e200b357c4c85801062a980ddb3383" +LABEL org.opencontainers.image.version="0.12.4" VIASHDOCKER } @@ -611,7 +611,7 @@ while [[ $# -gt 0 ]]; do shift 1 ;; --version) - echo "calculate_qc_metrics 0.12.3" + echo "calculate_qc_metrics 0.12.4" exit ;; --input) diff --git a/target/docker/qc/fastqc/.config.vsh.yaml b/target/docker/qc/fastqc/.config.vsh.yaml index 5b9bc7c206c..9c3584841ef 100644 --- a/target/docker/qc/fastqc/.config.vsh.yaml +++ b/target/docker/qc/fastqc/.config.vsh.yaml @@ -1,7 +1,7 @@ functionality: name: "fastqc" namespace: "qc" - version: "0.12.3" + version: "0.12.4" arguments: - type: "string" name: "--mode" @@ -151,6 +151,6 @@ info: output: "/home/runner/work/openpipeline/openpipeline/target/docker/qc/fastqc" executable: "/home/runner/work/openpipeline/openpipeline/target/docker/qc/fastqc/fastqc" viash_version: "0.7.5" - git_commit: "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" + git_commit: "a075b9f384e200b357c4c85801062a980ddb3383" git_remote: "https://github.com/openpipelines-bio/openpipeline" - git_tag: "0.12.2-3-g827d483cf7" + git_tag: "0.12.3-3-ga075b9f384" diff --git a/target/docker/qc/fastqc/fastqc b/target/docker/qc/fastqc/fastqc index 7cfbcb08d90..886de003bf4 100755 --- a/target/docker/qc/fastqc/fastqc +++ b/target/docker/qc/fastqc/fastqc @@ -1,6 +1,6 @@ #!/usr/bin/env bash -# fastqc 0.12.3 +# fastqc 0.12.4 # # This wrapper script is auto-generated by viash 0.7.5 and is thus a derivative # work thereof. This software comes with ABSOLUTELY NO WARRANTY from Data @@ -155,7 +155,7 @@ VIASH_META_TEMP_DIR="$VIASH_TEMP" # ViashHelp: Display helpful explanation about this executable function ViashHelp { - echo "fastqc 0.12.3" + echo "fastqc 0.12.4" echo "" echo "Fastqc component, please see" echo "https://www.bioinformatics.babraham.ac.uk/projects/fastqc/. 
This component can" @@ -410,10 +410,10 @@ RUN apt-get update && \ rm -rf /var/lib/apt/lists/* LABEL org.opencontainers.image.description="Companion container for running component qc fastqc" -LABEL org.opencontainers.image.created="2024-01-25T10:13:56Z" +LABEL org.opencontainers.image.created="2024-01-31T09:08:32Z" LABEL org.opencontainers.image.source="https://github.com/openpipelines-bio/openpipeline" -LABEL org.opencontainers.image.revision="827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" -LABEL org.opencontainers.image.version="0.12.3" +LABEL org.opencontainers.image.revision="a075b9f384e200b357c4c85801062a980ddb3383" +LABEL org.opencontainers.image.version="0.12.4" VIASHDOCKER } @@ -564,7 +564,7 @@ while [[ $# -gt 0 ]]; do shift 1 ;; --version) - echo "fastqc 0.12.3" + echo "fastqc 0.12.4" exit ;; --mode) diff --git a/target/docker/qc/multiqc/.config.vsh.yaml b/target/docker/qc/multiqc/.config.vsh.yaml index 29ac6ccd722..783bf195eab 100644 --- a/target/docker/qc/multiqc/.config.vsh.yaml +++ b/target/docker/qc/multiqc/.config.vsh.yaml @@ -1,7 +1,7 @@ functionality: name: "multiqc" namespace: "qc" - version: "0.12.3" + version: "0.12.4" arguments: - type: "file" name: "--input" @@ -135,6 +135,6 @@ info: output: "/home/runner/work/openpipeline/openpipeline/target/docker/qc/multiqc" executable: "/home/runner/work/openpipeline/openpipeline/target/docker/qc/multiqc/multiqc" viash_version: "0.7.5" - git_commit: "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" + git_commit: "a075b9f384e200b357c4c85801062a980ddb3383" git_remote: "https://github.com/openpipelines-bio/openpipeline" - git_tag: "0.12.2-3-g827d483cf7" + git_tag: "0.12.3-3-ga075b9f384" diff --git a/target/docker/qc/multiqc/multiqc b/target/docker/qc/multiqc/multiqc index 78551b56046..042737333e5 100755 --- a/target/docker/qc/multiqc/multiqc +++ b/target/docker/qc/multiqc/multiqc @@ -1,6 +1,6 @@ #!/usr/bin/env bash -# multiqc 0.12.3 +# multiqc 0.12.4 # # This wrapper script is auto-generated by viash 0.7.5 and is thus a derivative # work thereof. This software comes with ABSOLUTELY NO WARRANTY from Data @@ -155,7 +155,7 @@ VIASH_META_TEMP_DIR="$VIASH_TEMP" # ViashHelp: Display helpful explanation about this executable function ViashHelp { - echo "multiqc 0.12.3" + echo "multiqc 0.12.4" echo "" echo "MultiQC aggregates results from bioinformatics analyses across many samples into" echo "a single report." 
@@ -403,10 +403,10 @@ RUN pip install --upgrade pip && \ pip install --upgrade --no-cache-dir "multiqc" LABEL org.opencontainers.image.description="Companion container for running component qc multiqc" -LABEL org.opencontainers.image.created="2024-01-25T10:13:56Z" +LABEL org.opencontainers.image.created="2024-01-31T09:08:32Z" LABEL org.opencontainers.image.source="https://github.com/openpipelines-bio/openpipeline" -LABEL org.opencontainers.image.revision="827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" -LABEL org.opencontainers.image.version="0.12.3" +LABEL org.opencontainers.image.revision="a075b9f384e200b357c4c85801062a980ddb3383" +LABEL org.opencontainers.image.version="0.12.4" VIASHDOCKER } @@ -557,7 +557,7 @@ while [[ $# -gt 0 ]]; do shift 1 ;; --version) - echo "multiqc 0.12.3" + echo "multiqc 0.12.4" exit ;; --input) diff --git a/target/docker/query/cellxgene_census/.config.vsh.yaml b/target/docker/query/cellxgene_census/.config.vsh.yaml index 5d70f14e279..f09ceac9ce5 100644 --- a/target/docker/query/cellxgene_census/.config.vsh.yaml +++ b/target/docker/query/cellxgene_census/.config.vsh.yaml @@ -1,7 +1,7 @@ functionality: name: "cellxgene_census" namespace: "query" - version: "0.12.3" + version: "0.12.4" authors: - name: "Matthias Beyens" info: @@ -255,6 +255,6 @@ info: output: "/home/runner/work/openpipeline/openpipeline/target/docker/query/cellxgene_census" executable: "/home/runner/work/openpipeline/openpipeline/target/docker/query/cellxgene_census/cellxgene_census" viash_version: "0.7.5" - git_commit: "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" + git_commit: "a075b9f384e200b357c4c85801062a980ddb3383" git_remote: "https://github.com/openpipelines-bio/openpipeline" - git_tag: "0.12.2-3-g827d483cf7" + git_tag: "0.12.3-3-ga075b9f384" diff --git a/target/docker/query/cellxgene_census/cellxgene_census b/target/docker/query/cellxgene_census/cellxgene_census index 216f6adccf7..996cd214d1b 100755 --- a/target/docker/query/cellxgene_census/cellxgene_census +++ b/target/docker/query/cellxgene_census/cellxgene_census @@ -1,6 +1,6 @@ #!/usr/bin/env bash -# cellxgene_census 0.12.3 +# cellxgene_census 0.12.4 # # This wrapper script is auto-generated by viash 0.7.5 and is thus a derivative # work thereof. This software comes with ABSOLUTELY NO WARRANTY from Data @@ -159,7 +159,7 @@ VIASH_META_TEMP_DIR="$VIASH_TEMP" # ViashHelp: Display helpful explanation about this executable function ViashHelp { - echo "cellxgene_census 0.12.3" + echo "cellxgene_census 0.12.4" echo "" echo "Query CellxGene Census or user-specified TileDBSoma object, and eventually fetch" echo "cell and gene metadata or/and expression counts." 
@@ -452,10 +452,10 @@ RUN pip install --upgrade pip && \ LABEL org.opencontainers.image.authors="Matthias Beyens, Dries De Maeyer" LABEL org.opencontainers.image.description="Companion container for running component query cellxgene_census" -LABEL org.opencontainers.image.created="2024-01-25T10:13:59Z" +LABEL org.opencontainers.image.created="2024-01-31T09:08:36Z" LABEL org.opencontainers.image.source="https://github.com/openpipelines-bio/openpipeline" -LABEL org.opencontainers.image.revision="827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" -LABEL org.opencontainers.image.version="0.12.3" +LABEL org.opencontainers.image.revision="a075b9f384e200b357c4c85801062a980ddb3383" +LABEL org.opencontainers.image.version="0.12.4" VIASHDOCKER } @@ -606,7 +606,7 @@ while [[ $# -gt 0 ]]; do shift 1 ;; --version) - echo "cellxgene_census 0.12.3" + echo "cellxgene_census 0.12.4" exit ;; --input_database) diff --git a/target/docker/reference/build_bdrhap_reference/.config.vsh.yaml b/target/docker/reference/build_bdrhap_reference/.config.vsh.yaml index b5a7481b59d..69818247350 100644 --- a/target/docker/reference/build_bdrhap_reference/.config.vsh.yaml +++ b/target/docker/reference/build_bdrhap_reference/.config.vsh.yaml @@ -1,7 +1,7 @@ functionality: name: "build_bdrhap_reference" namespace: "reference" - version: "0.12.3" + version: "0.12.4" authors: - name: "Angela Oliveira Pisco" roles: @@ -181,6 +181,6 @@ info: output: "/home/runner/work/openpipeline/openpipeline/target/docker/reference/build_bdrhap_reference" executable: "/home/runner/work/openpipeline/openpipeline/target/docker/reference/build_bdrhap_reference/build_bdrhap_reference" viash_version: "0.7.5" - git_commit: "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" + git_commit: "a075b9f384e200b357c4c85801062a980ddb3383" git_remote: "https://github.com/openpipelines-bio/openpipeline" - git_tag: "0.12.2-3-g827d483cf7" + git_tag: "0.12.3-3-ga075b9f384" diff --git a/target/docker/reference/build_bdrhap_reference/build_bdrhap_reference b/target/docker/reference/build_bdrhap_reference/build_bdrhap_reference index e5be7c7e6c9..0b546f14ddd 100755 --- a/target/docker/reference/build_bdrhap_reference/build_bdrhap_reference +++ b/target/docker/reference/build_bdrhap_reference/build_bdrhap_reference @@ -1,6 +1,6 @@ #!/usr/bin/env bash -# build_bdrhap_reference 0.12.3 +# build_bdrhap_reference 0.12.4 # # This wrapper script is auto-generated by viash 0.7.5 and is thus a derivative # work thereof. This software comes with ABSOLUTELY NO WARRANTY from Data @@ -159,7 +159,7 @@ VIASH_META_TEMP_DIR="$VIASH_TEMP" # ViashHelp: Display helpful explanation about this executable function ViashHelp { - echo "build_bdrhap_reference 0.12.3" + echo "build_bdrhap_reference 0.12.4" echo "" echo "Compile a reference into a STAR index compatible with the BD Rhapsody pipeline." 
echo "" @@ -406,10 +406,10 @@ RUN apt-get update && \ LABEL org.opencontainers.image.authors="Angela Oliveira Pisco, Robrecht Cannoodt" LABEL org.opencontainers.image.description="Companion container for running component reference build_bdrhap_reference" -LABEL org.opencontainers.image.created="2024-01-25T10:13:59Z" +LABEL org.opencontainers.image.created="2024-01-31T09:08:35Z" LABEL org.opencontainers.image.source="https://github.com/openpipelines-bio/openpipeline" -LABEL org.opencontainers.image.revision="827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" -LABEL org.opencontainers.image.version="0.12.3" +LABEL org.opencontainers.image.revision="a075b9f384e200b357c4c85801062a980ddb3383" +LABEL org.opencontainers.image.version="0.12.4" VIASHDOCKER } @@ -560,7 +560,7 @@ while [[ $# -gt 0 ]]; do shift 1 ;; --version) - echo "build_bdrhap_reference 0.12.3" + echo "build_bdrhap_reference 0.12.4" exit ;; --genome_fasta) diff --git a/target/docker/reference/build_cellranger_reference/.config.vsh.yaml b/target/docker/reference/build_cellranger_reference/.config.vsh.yaml index 37e084ee631..01c85c3b5f3 100644 --- a/target/docker/reference/build_cellranger_reference/.config.vsh.yaml +++ b/target/docker/reference/build_cellranger_reference/.config.vsh.yaml @@ -1,7 +1,7 @@ functionality: name: "build_cellranger_reference" namespace: "reference" - version: "0.12.3" + version: "0.12.4" authors: - name: "Angela Oliveira Pisco" roles: @@ -182,6 +182,6 @@ info: output: "/home/runner/work/openpipeline/openpipeline/target/docker/reference/build_cellranger_reference" executable: "/home/runner/work/openpipeline/openpipeline/target/docker/reference/build_cellranger_reference/build_cellranger_reference" viash_version: "0.7.5" - git_commit: "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" + git_commit: "a075b9f384e200b357c4c85801062a980ddb3383" git_remote: "https://github.com/openpipelines-bio/openpipeline" - git_tag: "0.12.2-3-g827d483cf7" + git_tag: "0.12.3-3-ga075b9f384" diff --git a/target/docker/reference/build_cellranger_reference/build_cellranger_reference b/target/docker/reference/build_cellranger_reference/build_cellranger_reference index b9488372256..894c0d9411a 100755 --- a/target/docker/reference/build_cellranger_reference/build_cellranger_reference +++ b/target/docker/reference/build_cellranger_reference/build_cellranger_reference @@ -1,6 +1,6 @@ #!/usr/bin/env bash -# build_cellranger_reference 0.12.3 +# build_cellranger_reference 0.12.4 # # This wrapper script is auto-generated by viash 0.7.5 and is thus a derivative # work thereof. This software comes with ABSOLUTELY NO WARRANTY from Data @@ -159,7 +159,7 @@ VIASH_META_TEMP_DIR="$VIASH_TEMP" # ViashHelp: Display helpful explanation about this executable function ViashHelp { - echo "build_cellranger_reference 0.12.3" + echo "build_cellranger_reference 0.12.4" echo "" echo "Build a Cell Ranger-compatible reference folder from user-supplied genome FASTA" echo "and gene GTF files. Creates a new folder named after the genome." 
@@ -407,10 +407,10 @@ RUN apt-get update && \ LABEL org.opencontainers.image.authors="Angela Oliveira Pisco, Robrecht Cannoodt" LABEL org.opencontainers.image.description="Companion container for running component reference build_cellranger_reference" -LABEL org.opencontainers.image.created="2024-01-25T10:13:58Z" +LABEL org.opencontainers.image.created="2024-01-31T09:08:35Z" LABEL org.opencontainers.image.source="https://github.com/openpipelines-bio/openpipeline" -LABEL org.opencontainers.image.revision="827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" -LABEL org.opencontainers.image.version="0.12.3" +LABEL org.opencontainers.image.revision="a075b9f384e200b357c4c85801062a980ddb3383" +LABEL org.opencontainers.image.version="0.12.4" VIASHDOCKER } @@ -561,7 +561,7 @@ while [[ $# -gt 0 ]]; do shift 1 ;; --version) - echo "build_cellranger_reference 0.12.3" + echo "build_cellranger_reference 0.12.4" exit ;; --genome_fasta) diff --git a/target/docker/reference/make_reference/.config.vsh.yaml b/target/docker/reference/make_reference/.config.vsh.yaml index 96b46117140..7efc898dde8 100644 --- a/target/docker/reference/make_reference/.config.vsh.yaml +++ b/target/docker/reference/make_reference/.config.vsh.yaml @@ -1,7 +1,7 @@ functionality: name: "make_reference" namespace: "reference" - version: "0.12.3" + version: "0.12.4" authors: - name: "Angela Oliveira Pisco" roles: @@ -207,6 +207,6 @@ info: output: "/home/runner/work/openpipeline/openpipeline/target/docker/reference/make_reference" executable: "/home/runner/work/openpipeline/openpipeline/target/docker/reference/make_reference/make_reference" viash_version: "0.7.5" - git_commit: "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" + git_commit: "a075b9f384e200b357c4c85801062a980ddb3383" git_remote: "https://github.com/openpipelines-bio/openpipeline" - git_tag: "0.12.2-3-g827d483cf7" + git_tag: "0.12.3-3-ga075b9f384" diff --git a/target/docker/reference/make_reference/make_reference b/target/docker/reference/make_reference/make_reference index e42d725f88d..856d22f21fb 100755 --- a/target/docker/reference/make_reference/make_reference +++ b/target/docker/reference/make_reference/make_reference @@ -1,6 +1,6 @@ #!/usr/bin/env bash -# make_reference 0.12.3 +# make_reference 0.12.4 # # This wrapper script is auto-generated by viash 0.7.5 and is thus a derivative # work thereof. This software comes with ABSOLUTELY NO WARRANTY from Data @@ -159,7 +159,7 @@ VIASH_META_TEMP_DIR="$VIASH_TEMP" # ViashHelp: Display helpful explanation about this executable function ViashHelp { - echo "make_reference 0.12.3" + echo "make_reference 0.12.4" echo "" echo "Preprocess and build a transcriptome reference." 
echo "" @@ -428,10 +428,10 @@ RUN apt-get update && \ LABEL org.opencontainers.image.authors="Angela Oliveira Pisco, Robrecht Cannoodt" LABEL org.opencontainers.image.description="Companion container for running component reference make_reference" -LABEL org.opencontainers.image.created="2024-01-25T10:13:58Z" +LABEL org.opencontainers.image.created="2024-01-31T09:08:35Z" LABEL org.opencontainers.image.source="https://github.com/openpipelines-bio/openpipeline" -LABEL org.opencontainers.image.revision="827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" -LABEL org.opencontainers.image.version="0.12.3" +LABEL org.opencontainers.image.revision="a075b9f384e200b357c4c85801062a980ddb3383" +LABEL org.opencontainers.image.version="0.12.4" VIASHDOCKER } @@ -582,7 +582,7 @@ while [[ $# -gt 0 ]]; do shift 1 ;; --version) - echo "make_reference 0.12.3" + echo "make_reference 0.12.4" exit ;; --genome_fasta) diff --git a/target/docker/report/mermaid/.config.vsh.yaml b/target/docker/report/mermaid/.config.vsh.yaml index 37e563fa2ed..41d1813b2c0 100644 --- a/target/docker/report/mermaid/.config.vsh.yaml +++ b/target/docker/report/mermaid/.config.vsh.yaml @@ -1,7 +1,7 @@ functionality: name: "mermaid" namespace: "report" - version: "0.12.3" + version: "0.12.4" authors: - name: "Dries De Maeyer" roles: @@ -180,6 +180,6 @@ info: output: "/home/runner/work/openpipeline/openpipeline/target/docker/report/mermaid" executable: "/home/runner/work/openpipeline/openpipeline/target/docker/report/mermaid/mermaid" viash_version: "0.7.5" - git_commit: "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" + git_commit: "a075b9f384e200b357c4c85801062a980ddb3383" git_remote: "https://github.com/openpipelines-bio/openpipeline" - git_tag: "0.12.2-3-g827d483cf7" + git_tag: "0.12.3-3-ga075b9f384" diff --git a/target/docker/report/mermaid/mermaid b/target/docker/report/mermaid/mermaid index 77fec013964..5b031628abf 100755 --- a/target/docker/report/mermaid/mermaid +++ b/target/docker/report/mermaid/mermaid @@ -1,6 +1,6 @@ #!/usr/bin/env bash -# mermaid 0.12.3 +# mermaid 0.12.4 # # This wrapper script is auto-generated by viash 0.7.5 and is thus a derivative # work thereof. This software comes with ABSOLUTELY NO WARRANTY from Data @@ -158,7 +158,7 @@ VIASH_META_TEMP_DIR="$VIASH_TEMP" # ViashHelp: Display helpful explanation about this executable function ViashHelp { - echo "mermaid 0.12.3" + echo "mermaid 0.12.4" echo "" echo "Generates a network from mermaid code." 
echo "" @@ -423,10 +423,10 @@ RUN apt-get update && \ LABEL org.opencontainers.image.authors="Dries De Maeyer" LABEL org.opencontainers.image.description="Companion container for running component report mermaid" -LABEL org.opencontainers.image.created="2024-01-25T10:13:57Z" +LABEL org.opencontainers.image.created="2024-01-31T09:08:32Z" LABEL org.opencontainers.image.source="https://github.com/openpipelines-bio/openpipeline" -LABEL org.opencontainers.image.revision="827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" -LABEL org.opencontainers.image.version="0.12.3" +LABEL org.opencontainers.image.revision="a075b9f384e200b357c4c85801062a980ddb3383" +LABEL org.opencontainers.image.version="0.12.4" VIASHDOCKER } @@ -577,7 +577,7 @@ while [[ $# -gt 0 ]]; do shift 1 ;; --version) - echo "mermaid 0.12.3" + echo "mermaid 0.12.4" exit ;; --input) diff --git a/target/docker/transfer/publish/.config.vsh.yaml b/target/docker/transfer/publish/.config.vsh.yaml index d52f774e80f..5c781ff2f79 100644 --- a/target/docker/transfer/publish/.config.vsh.yaml +++ b/target/docker/transfer/publish/.config.vsh.yaml @@ -1,7 +1,7 @@ functionality: name: "publish" namespace: "transfer" - version: "0.12.3" + version: "0.12.4" authors: - name: "Toni Verbeiren" roles: @@ -120,6 +120,6 @@ info: output: "/home/runner/work/openpipeline/openpipeline/target/docker/transfer/publish" executable: "/home/runner/work/openpipeline/openpipeline/target/docker/transfer/publish/publish" viash_version: "0.7.5" - git_commit: "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" + git_commit: "a075b9f384e200b357c4c85801062a980ddb3383" git_remote: "https://github.com/openpipelines-bio/openpipeline" - git_tag: "0.12.2-3-g827d483cf7" + git_tag: "0.12.3-3-ga075b9f384" diff --git a/target/docker/transfer/publish/publish b/target/docker/transfer/publish/publish index d6f5f305068..6abf7e1016f 100755 --- a/target/docker/transfer/publish/publish +++ b/target/docker/transfer/publish/publish @@ -1,6 +1,6 @@ #!/usr/bin/env bash -# publish 0.12.3 +# publish 0.12.4 # # This wrapper script is auto-generated by viash 0.7.5 and is thus a derivative # work thereof. 
This software comes with ABSOLUTELY NO WARRANTY from Data @@ -158,7 +158,7 @@ VIASH_META_TEMP_DIR="$VIASH_TEMP" # ViashHelp: Display helpful explanation about this executable function ViashHelp { - echo "publish 0.12.3" + echo "publish 0.12.4" echo "" echo "Publish an artifact and optionally rename with parameters" echo "" @@ -395,10 +395,10 @@ ENTRYPOINT [] RUN : LABEL org.opencontainers.image.authors="Toni Verbeiren" LABEL org.opencontainers.image.description="Companion container for running component transfer publish" -LABEL org.opencontainers.image.created="2024-01-25T10:13:59Z" +LABEL org.opencontainers.image.created="2024-01-31T09:08:33Z" LABEL org.opencontainers.image.source="https://github.com/openpipelines-bio/openpipeline" -LABEL org.opencontainers.image.revision="827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" -LABEL org.opencontainers.image.version="0.12.3" +LABEL org.opencontainers.image.revision="a075b9f384e200b357c4c85801062a980ddb3383" +LABEL org.opencontainers.image.version="0.12.4" VIASHDOCKER } @@ -549,7 +549,7 @@ while [[ $# -gt 0 ]]; do shift 1 ;; --version) - echo "publish 0.12.3" + echo "publish 0.12.4" exit ;; --input) diff --git a/target/docker/transform/clr/.config.vsh.yaml b/target/docker/transform/clr/.config.vsh.yaml index 3943f5cddda..ef4880b8baa 100644 --- a/target/docker/transform/clr/.config.vsh.yaml +++ b/target/docker/transform/clr/.config.vsh.yaml @@ -1,7 +1,7 @@ functionality: name: "clr" namespace: "transform" - version: "0.12.3" + version: "0.12.4" authors: - name: "Dries Schaumont" roles: @@ -183,6 +183,6 @@ info: output: "/home/runner/work/openpipeline/openpipeline/target/docker/transform/clr" executable: "/home/runner/work/openpipeline/openpipeline/target/docker/transform/clr/clr" viash_version: "0.7.5" - git_commit: "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" + git_commit: "a075b9f384e200b357c4c85801062a980ddb3383" git_remote: "https://github.com/openpipelines-bio/openpipeline" - git_tag: "0.12.2-3-g827d483cf7" + git_tag: "0.12.3-3-ga075b9f384" diff --git a/target/docker/transform/clr/clr b/target/docker/transform/clr/clr index 65f8f44ee05..02405a94a19 100755 --- a/target/docker/transform/clr/clr +++ b/target/docker/transform/clr/clr @@ -1,6 +1,6 @@ #!/usr/bin/env bash -# clr 0.12.3 +# clr 0.12.4 # # This wrapper script is auto-generated by viash 0.7.5 and is thus a derivative # work thereof. This software comes with ABSOLUTELY NO WARRANTY from Data @@ -158,7 +158,7 @@ VIASH_META_TEMP_DIR="$VIASH_TEMP" # ViashHelp: Display helpful explanation about this executable function ViashHelp { - echo "clr 0.12.3" + echo "clr 0.12.4" echo "" echo "Perform CLR normalization on CITE-seq data (Stoeckius et al., 2017)." 
echo "" @@ -417,10 +417,10 @@ RUN pip install --upgrade pip && \ LABEL org.opencontainers.image.authors="Dries Schaumont" LABEL org.opencontainers.image.description="Companion container for running component transform clr" -LABEL org.opencontainers.image.created="2024-01-25T10:13:54Z" +LABEL org.opencontainers.image.created="2024-01-31T09:08:34Z" LABEL org.opencontainers.image.source="https://github.com/openpipelines-bio/openpipeline" -LABEL org.opencontainers.image.revision="827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" -LABEL org.opencontainers.image.version="0.12.3" +LABEL org.opencontainers.image.revision="a075b9f384e200b357c4c85801062a980ddb3383" +LABEL org.opencontainers.image.version="0.12.4" VIASHDOCKER } @@ -571,7 +571,7 @@ while [[ $# -gt 0 ]]; do shift 1 ;; --version) - echo "clr 0.12.3" + echo "clr 0.12.4" exit ;; --input) diff --git a/target/docker/transform/delete_layer/.config.vsh.yaml b/target/docker/transform/delete_layer/.config.vsh.yaml index 3ee51374380..ae03b21b777 100644 --- a/target/docker/transform/delete_layer/.config.vsh.yaml +++ b/target/docker/transform/delete_layer/.config.vsh.yaml @@ -1,7 +1,7 @@ functionality: name: "delete_layer" namespace: "transform" - version: "0.12.3" + version: "0.12.4" authors: - name: "Dries Schaumont" roles: @@ -191,6 +191,6 @@ info: output: "/home/runner/work/openpipeline/openpipeline/target/docker/transform/delete_layer" executable: "/home/runner/work/openpipeline/openpipeline/target/docker/transform/delete_layer/delete_layer" viash_version: "0.7.5" - git_commit: "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" + git_commit: "a075b9f384e200b357c4c85801062a980ddb3383" git_remote: "https://github.com/openpipelines-bio/openpipeline" - git_tag: "0.12.2-3-g827d483cf7" + git_tag: "0.12.3-3-ga075b9f384" diff --git a/target/docker/transform/delete_layer/delete_layer b/target/docker/transform/delete_layer/delete_layer index 326a2403ebb..77aeb3f0801 100755 --- a/target/docker/transform/delete_layer/delete_layer +++ b/target/docker/transform/delete_layer/delete_layer @@ -1,6 +1,6 @@ #!/usr/bin/env bash -# delete_layer 0.12.3 +# delete_layer 0.12.4 # # This wrapper script is auto-generated by viash 0.7.5 and is thus a derivative # work thereof. This software comes with ABSOLUTELY NO WARRANTY from Data @@ -158,7 +158,7 @@ VIASH_META_TEMP_DIR="$VIASH_TEMP" # ViashHelp: Display helpful explanation about this executable function ViashHelp { - echo "delete_layer 0.12.3" + echo "delete_layer 0.12.4" echo "" echo "Delete an anndata layer from one or more modalities." 
echo "" @@ -421,10 +421,10 @@ RUN pip install --upgrade pip && \ LABEL org.opencontainers.image.authors="Dries Schaumont" LABEL org.opencontainers.image.description="Companion container for running component transform delete_layer" -LABEL org.opencontainers.image.created="2024-01-25T10:13:55Z" +LABEL org.opencontainers.image.created="2024-01-31T09:08:36Z" LABEL org.opencontainers.image.source="https://github.com/openpipelines-bio/openpipeline" -LABEL org.opencontainers.image.revision="827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" -LABEL org.opencontainers.image.version="0.12.3" +LABEL org.opencontainers.image.revision="a075b9f384e200b357c4c85801062a980ddb3383" +LABEL org.opencontainers.image.version="0.12.4" VIASHDOCKER } @@ -575,7 +575,7 @@ while [[ $# -gt 0 ]]; do shift 1 ;; --version) - echo "delete_layer 0.12.3" + echo "delete_layer 0.12.4" exit ;; --input) diff --git a/target/docker/transform/log1p/.config.vsh.yaml b/target/docker/transform/log1p/.config.vsh.yaml index 2399737e5c9..a238fff4438 100644 --- a/target/docker/transform/log1p/.config.vsh.yaml +++ b/target/docker/transform/log1p/.config.vsh.yaml @@ -1,7 +1,7 @@ functionality: name: "log1p" namespace: "transform" - version: "0.12.3" + version: "0.12.4" authors: - name: "Dries De Maeyer" roles: @@ -220,6 +220,6 @@ info: output: "/home/runner/work/openpipeline/openpipeline/target/docker/transform/log1p" executable: "/home/runner/work/openpipeline/openpipeline/target/docker/transform/log1p/log1p" viash_version: "0.7.5" - git_commit: "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" + git_commit: "a075b9f384e200b357c4c85801062a980ddb3383" git_remote: "https://github.com/openpipelines-bio/openpipeline" - git_tag: "0.12.2-3-g827d483cf7" + git_tag: "0.12.3-3-ga075b9f384" diff --git a/target/docker/transform/log1p/log1p b/target/docker/transform/log1p/log1p index 545cf35f8bd..0ef73146337 100755 --- a/target/docker/transform/log1p/log1p +++ b/target/docker/transform/log1p/log1p @@ -1,6 +1,6 @@ #!/usr/bin/env bash -# log1p 0.12.3 +# log1p 0.12.4 # # This wrapper script is auto-generated by viash 0.7.5 and is thus a derivative # work thereof. This software comes with ABSOLUTELY NO WARRANTY from Data @@ -159,7 +159,7 @@ VIASH_META_TEMP_DIR="$VIASH_TEMP" # ViashHelp: Display helpful explanation about this executable function ViashHelp { - echo "log1p 0.12.3" + echo "log1p 0.12.4" echo "" echo "Logarithmize the data matrix. Computes X = log(X + 1), where log denotes the" echo "natural logarithm unless a different base is given." 
@@ -427,10 +427,10 @@ RUN pip install --upgrade pip && \
 LABEL org.opencontainers.image.authors="Dries De Maeyer, Robrecht Cannoodt"
 LABEL org.opencontainers.image.description="Companion container for running component transform log1p"
-LABEL org.opencontainers.image.created="2024-01-25T10:13:55Z"
+LABEL org.opencontainers.image.created="2024-01-31T09:08:36Z"
 LABEL org.opencontainers.image.source="https://github.com/openpipelines-bio/openpipeline"
-LABEL org.opencontainers.image.revision="827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f"
-LABEL org.opencontainers.image.version="0.12.3"
+LABEL org.opencontainers.image.revision="a075b9f384e200b357c4c85801062a980ddb3383"
+LABEL org.opencontainers.image.version="0.12.4"

VIASHDOCKER
}
@@ -581,7 +581,7 @@ while [[ $# -gt 0 ]]; do
            shift 1
            ;;
        --version)
-            echo "log1p 0.12.3"
+            echo "log1p 0.12.4"
            exit
            ;;
        --input)
@@ -965,6 +965,7 @@ trap interrupt INT SIGINT
cat > "\$tempscript" << 'VIASHMAIN'
import scanpy as sc
import mudata as mu
+import anndata as ad
import sys

## VIASH START
@@ -1021,12 +1022,24 @@ mdata.var_names_make_unique()

mod = par["modality"]
logger.info("Performing log transformation on modality %s", mod)
data = mdata.mod[mod]
-new_layer = sc.pp.log1p(data,
-                        base=par["base"],
-                        copy=True if par['output_layer'] else False)
-if new_layer:
-    data.layers[par['output_layer']] = new_layer.X
-    data.uns['log1p'] = new_layer.uns['log1p']
+
+# Make our own copy with not a lot of data
+# this avoid excessive memory usage and accidental overwrites
+input_layer = data.layers[par["input_layer"]] \\
+    if par["input_layer"] else data.X
+data_for_scanpy = ad.AnnData(X=input_layer.copy())
+sc.pp.log1p(data_for_scanpy,
+            base=par["base"],
+            layer=None, # use X
+            copy=False) # allow overwrites in the copy that was made
+
+# Scanpy will overwrite the input layer.
+# So fetch input layer from the copy and use it to populate the output slot
+if par["output_layer"]:
+    data.layers[par["output_layer"]] = data_for_scanpy.X
+else:
+    data.X = data_for_scanpy.X
+data.uns['log1p'] = data_for_scanpy.uns['log1p'].copy()
logger.info("Writing to file %s", par["output"])
mdata.write_h5mu(filename=par["output"], compression=par["output_compression"])
diff --git a/target/docker/transform/normalize_total/.config.vsh.yaml b/target/docker/transform/normalize_total/.config.vsh.yaml
index 251aa7c1ece..169fe0cb648 100644
--- a/target/docker/transform/normalize_total/.config.vsh.yaml
+++ b/target/docker/transform/normalize_total/.config.vsh.yaml
@@ -1,7 +1,7 @@
 functionality:
   name: "normalize_total"
   namespace: "transform"
-  version: "0.12.3"
+  version: "0.12.4"
   authors:
   - name: "Dries De Maeyer"
     roles:
@@ -237,6 +237,6 @@ info:
   output: "/home/runner/work/openpipeline/openpipeline/target/docker/transform/normalize_total"
   executable: "/home/runner/work/openpipeline/openpipeline/target/docker/transform/normalize_total/normalize_total"
   viash_version: "0.7.5"
-  git_commit: "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f"
+  git_commit: "a075b9f384e200b357c4c85801062a980ddb3383"
   git_remote: "https://github.com/openpipelines-bio/openpipeline"
-  git_tag: "0.12.2-3-g827d483cf7"
+  git_tag: "0.12.3-3-ga075b9f384"
diff --git a/target/docker/transform/normalize_total/normalize_total b/target/docker/transform/normalize_total/normalize_total
index bb35c7c2a20..05ad7cedbba 100755
--- a/target/docker/transform/normalize_total/normalize_total
+++ b/target/docker/transform/normalize_total/normalize_total
@@ -1,6 +1,6 @@
 #!/usr/bin/env bash

-# normalize_total 0.12.3
+# normalize_total 0.12.4
 #
 # This wrapper script is auto-generated by viash 0.7.5 and is thus a derivative
 # work thereof. This software comes with ABSOLUTELY NO WARRANTY from Data
@@ -159,7 +159,7 @@ VIASH_META_TEMP_DIR="$VIASH_TEMP"

 # ViashHelp: Display helpful explanation about this executable
 function ViashHelp {
-   echo "normalize_total 0.12.3"
+   echo "normalize_total 0.12.4"
   echo ""
   echo "Normalize counts per cell."
echo "" @@ -445,10 +445,10 @@ RUN pip install --upgrade pip && \ LABEL org.opencontainers.image.authors="Dries De Maeyer, Robrecht Cannoodt" LABEL org.opencontainers.image.description="Companion container for running component transform normalize_total" -LABEL org.opencontainers.image.created="2024-01-25T10:13:59Z" +LABEL org.opencontainers.image.created="2024-01-31T09:08:36Z" LABEL org.opencontainers.image.source="https://github.com/openpipelines-bio/openpipeline" -LABEL org.opencontainers.image.revision="827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" -LABEL org.opencontainers.image.version="0.12.3" +LABEL org.opencontainers.image.revision="a075b9f384e200b357c4c85801062a980ddb3383" +LABEL org.opencontainers.image.version="0.12.4" VIASHDOCKER } @@ -599,7 +599,7 @@ while [[ $# -gt 0 ]]; do shift 1 ;; --version) - echo "normalize_total 0.12.3" + echo "normalize_total 0.12.4" exit ;; --input) diff --git a/target/docker/transform/regress_out/.config.vsh.yaml b/target/docker/transform/regress_out/.config.vsh.yaml index 855d304927a..201da61a153 100644 --- a/target/docker/transform/regress_out/.config.vsh.yaml +++ b/target/docker/transform/regress_out/.config.vsh.yaml @@ -1,7 +1,7 @@ functionality: name: "regress_out" namespace: "transform" - version: "0.12.3" + version: "0.12.4" authors: - name: "Robrecht Cannoodt" roles: @@ -190,6 +190,6 @@ info: output: "/home/runner/work/openpipeline/openpipeline/target/docker/transform/regress_out" executable: "/home/runner/work/openpipeline/openpipeline/target/docker/transform/regress_out/regress_out" viash_version: "0.7.5" - git_commit: "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" + git_commit: "a075b9f384e200b357c4c85801062a980ddb3383" git_remote: "https://github.com/openpipelines-bio/openpipeline" - git_tag: "0.12.2-3-g827d483cf7" + git_tag: "0.12.3-3-ga075b9f384" diff --git a/target/docker/transform/regress_out/regress_out b/target/docker/transform/regress_out/regress_out index 847f0f0405f..6397997c031 100755 --- a/target/docker/transform/regress_out/regress_out +++ b/target/docker/transform/regress_out/regress_out @@ -1,6 +1,6 @@ #!/usr/bin/env bash -# regress_out 0.12.3 +# regress_out 0.12.4 # # This wrapper script is auto-generated by viash 0.7.5 and is thus a derivative # work thereof. This software comes with ABSOLUTELY NO WARRANTY from Data @@ -158,7 +158,7 @@ VIASH_META_TEMP_DIR="$VIASH_TEMP" # ViashHelp: Display helpful explanation about this executable function ViashHelp { - echo "regress_out 0.12.3" + echo "regress_out 0.12.4" echo "" echo "Regress out (mostly) unwanted sources of variation." echo "Uses simple linear regression. 
This is inspired by Seurat's regressOut function" @@ -423,10 +423,10 @@ RUN pip install --upgrade pip && \ LABEL org.opencontainers.image.authors="Robrecht Cannoodt" LABEL org.opencontainers.image.description="Companion container for running component transform regress_out" -LABEL org.opencontainers.image.created="2024-01-25T10:13:59Z" +LABEL org.opencontainers.image.created="2024-01-31T09:08:35Z" LABEL org.opencontainers.image.source="https://github.com/openpipelines-bio/openpipeline" -LABEL org.opencontainers.image.revision="827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" -LABEL org.opencontainers.image.version="0.12.3" +LABEL org.opencontainers.image.revision="a075b9f384e200b357c4c85801062a980ddb3383" +LABEL org.opencontainers.image.version="0.12.4" VIASHDOCKER } @@ -577,7 +577,7 @@ while [[ $# -gt 0 ]]; do shift 1 ;; --version) - echo "regress_out 0.12.3" + echo "regress_out 0.12.4" exit ;; --input) diff --git a/target/docker/transform/scale/.config.vsh.yaml b/target/docker/transform/scale/.config.vsh.yaml index d41bde0466b..f6f4229ab84 100644 --- a/target/docker/transform/scale/.config.vsh.yaml +++ b/target/docker/transform/scale/.config.vsh.yaml @@ -1,7 +1,7 @@ functionality: name: "scale" namespace: "transform" - version: "0.12.3" + version: "0.12.4" authors: - name: "Dries Schaumont" roles: @@ -200,6 +200,6 @@ info: output: "/home/runner/work/openpipeline/openpipeline/target/docker/transform/scale" executable: "/home/runner/work/openpipeline/openpipeline/target/docker/transform/scale/scale" viash_version: "0.7.5" - git_commit: "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" + git_commit: "a075b9f384e200b357c4c85801062a980ddb3383" git_remote: "https://github.com/openpipelines-bio/openpipeline" - git_tag: "0.12.2-3-g827d483cf7" + git_tag: "0.12.3-3-ga075b9f384" diff --git a/target/docker/transform/scale/scale b/target/docker/transform/scale/scale index ffa4c94baef..10b71ccc27d 100755 --- a/target/docker/transform/scale/scale +++ b/target/docker/transform/scale/scale @@ -1,6 +1,6 @@ #!/usr/bin/env bash -# scale 0.12.3 +# scale 0.12.4 # # This wrapper script is auto-generated by viash 0.7.5 and is thus a derivative # work thereof. This software comes with ABSOLUTELY NO WARRANTY from Data @@ -158,7 +158,7 @@ VIASH_META_TEMP_DIR="$VIASH_TEMP" # ViashHelp: Display helpful explanation about this executable function ViashHelp { - echo "scale 0.12.3" + echo "scale 0.12.4" echo "" echo "Scale data to unit variance and zero mean." 
echo "" @@ -424,10 +424,10 @@ RUN pip install --upgrade pip && \ LABEL org.opencontainers.image.authors="Dries Schaumont" LABEL org.opencontainers.image.description="Companion container for running component transform scale" -LABEL org.opencontainers.image.created="2024-01-25T10:14:00Z" +LABEL org.opencontainers.image.created="2024-01-31T09:08:34Z" LABEL org.opencontainers.image.source="https://github.com/openpipelines-bio/openpipeline" -LABEL org.opencontainers.image.revision="827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" -LABEL org.opencontainers.image.version="0.12.3" +LABEL org.opencontainers.image.revision="a075b9f384e200b357c4c85801062a980ddb3383" +LABEL org.opencontainers.image.version="0.12.4" VIASHDOCKER } @@ -578,7 +578,7 @@ while [[ $# -gt 0 ]]; do shift 1 ;; --version) - echo "scale 0.12.3" + echo "scale 0.12.4" exit ;; --input) diff --git a/target/docker/velocity/scvelo/.config.vsh.yaml b/target/docker/velocity/scvelo/.config.vsh.yaml index 15437092c10..33086cd345a 100644 --- a/target/docker/velocity/scvelo/.config.vsh.yaml +++ b/target/docker/velocity/scvelo/.config.vsh.yaml @@ -1,7 +1,7 @@ functionality: name: "scvelo" namespace: "velocity" - version: "0.12.3" + version: "0.12.4" authors: - name: "Dries Schaumont" roles: @@ -271,6 +271,6 @@ info: output: "/home/runner/work/openpipeline/openpipeline/target/docker/velocity/scvelo" executable: "/home/runner/work/openpipeline/openpipeline/target/docker/velocity/scvelo/scvelo" viash_version: "0.7.5" - git_commit: "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" + git_commit: "a075b9f384e200b357c4c85801062a980ddb3383" git_remote: "https://github.com/openpipelines-bio/openpipeline" - git_tag: "0.12.2-3-g827d483cf7" + git_tag: "0.12.3-3-ga075b9f384" diff --git a/target/docker/velocity/scvelo/scvelo b/target/docker/velocity/scvelo/scvelo index 21662a9f061..66876000ed6 100755 --- a/target/docker/velocity/scvelo/scvelo +++ b/target/docker/velocity/scvelo/scvelo @@ -1,6 +1,6 @@ #!/usr/bin/env bash -# scvelo 0.12.3 +# scvelo 0.12.4 # # This wrapper script is auto-generated by viash 0.7.5 and is thus a derivative # work thereof. 
This software comes with ABSOLUTELY NO WARRANTY from Data @@ -158,7 +158,7 @@ VIASH_META_TEMP_DIR="$VIASH_TEMP" # ViashHelp: Display helpful explanation about this executable function ViashHelp { - echo "scvelo 0.12.3" + echo "scvelo 0.12.4" echo "" echo "Inputs:" echo " --input" @@ -464,10 +464,10 @@ RUN pip install --upgrade pip && \ LABEL org.opencontainers.image.authors="Dries Schaumont" LABEL org.opencontainers.image.description="Companion container for running component velocity scvelo" -LABEL org.opencontainers.image.created="2024-01-25T10:13:57Z" +LABEL org.opencontainers.image.created="2024-01-31T09:08:34Z" LABEL org.opencontainers.image.source="https://github.com/openpipelines-bio/openpipeline" -LABEL org.opencontainers.image.revision="827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" -LABEL org.opencontainers.image.version="0.12.3" +LABEL org.opencontainers.image.revision="a075b9f384e200b357c4c85801062a980ddb3383" +LABEL org.opencontainers.image.version="0.12.4" VIASHDOCKER } @@ -618,7 +618,7 @@ while [[ $# -gt 0 ]]; do shift 1 ;; --version) - echo "scvelo 0.12.3" + echo "scvelo 0.12.4" exit ;; --input) diff --git a/target/docker/velocity/velocyto/.config.vsh.yaml b/target/docker/velocity/velocyto/.config.vsh.yaml index ab3aaef9215..bbbced35d7f 100644 --- a/target/docker/velocity/velocyto/.config.vsh.yaml +++ b/target/docker/velocity/velocyto/.config.vsh.yaml @@ -1,7 +1,7 @@ functionality: name: "velocyto" namespace: "velocity" - version: "0.12.3" + version: "0.12.4" authors: - name: "Robrecht Cannoodt" roles: @@ -220,6 +220,6 @@ info: output: "/home/runner/work/openpipeline/openpipeline/target/docker/velocity/velocyto" executable: "/home/runner/work/openpipeline/openpipeline/target/docker/velocity/velocyto/velocyto" viash_version: "0.7.5" - git_commit: "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" + git_commit: "a075b9f384e200b357c4c85801062a980ddb3383" git_remote: "https://github.com/openpipelines-bio/openpipeline" - git_tag: "0.12.2-3-g827d483cf7" + git_tag: "0.12.3-3-ga075b9f384" diff --git a/target/docker/velocity/velocyto/velocyto b/target/docker/velocity/velocyto/velocyto index d01f23cb3ed..d6ba0d392cf 100755 --- a/target/docker/velocity/velocyto/velocyto +++ b/target/docker/velocity/velocyto/velocyto @@ -1,6 +1,6 @@ #!/usr/bin/env bash -# velocyto 0.12.3 +# velocyto 0.12.4 # # This wrapper script is auto-generated by viash 0.7.5 and is thus a derivative # work thereof. This software comes with ABSOLUTELY NO WARRANTY from Data @@ -158,7 +158,7 @@ VIASH_META_TEMP_DIR="$VIASH_TEMP" # ViashHelp: Display helpful explanation about this executable function ViashHelp { - echo "velocyto 0.12.3" + echo "velocyto 0.12.4" echo "" echo "Runs the velocity analysis on a BAM file, outputting a loom file." 
echo "" @@ -430,10 +430,10 @@ RUN apt-get update && \ LABEL org.opencontainers.image.authors="Robrecht Cannoodt" LABEL org.opencontainers.image.description="Companion container for running component velocity velocyto" -LABEL org.opencontainers.image.created="2024-01-25T10:13:58Z" +LABEL org.opencontainers.image.created="2024-01-31T09:08:34Z" LABEL org.opencontainers.image.source="https://github.com/openpipelines-bio/openpipeline" -LABEL org.opencontainers.image.revision="827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" -LABEL org.opencontainers.image.version="0.12.3" +LABEL org.opencontainers.image.revision="a075b9f384e200b357c4c85801062a980ddb3383" +LABEL org.opencontainers.image.version="0.12.4" VIASHDOCKER } @@ -584,7 +584,7 @@ while [[ $# -gt 0 ]]; do shift 1 ;; --version) - echo "velocyto 0.12.3" + echo "velocyto 0.12.4" exit ;; --input) diff --git a/target/native/compression/compress_h5mu/.config.vsh.yaml b/target/native/compression/compress_h5mu/.config.vsh.yaml index 796957e4b21..6b026bef48e 100644 --- a/target/native/compression/compress_h5mu/.config.vsh.yaml +++ b/target/native/compression/compress_h5mu/.config.vsh.yaml @@ -1,7 +1,7 @@ functionality: name: "compress_h5mu" namespace: "compression" - version: "0.12.3" + version: "0.12.4" authors: - name: "Dries Schaumont" roles: @@ -162,6 +162,6 @@ info: output: "/home/runner/work/openpipeline/openpipeline/target/native/compression/compress_h5mu" executable: "/home/runner/work/openpipeline/openpipeline/target/native/compression/compress_h5mu/compress_h5mu" viash_version: "0.7.5" - git_commit: "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" + git_commit: "a075b9f384e200b357c4c85801062a980ddb3383" git_remote: "https://github.com/openpipelines-bio/openpipeline" - git_tag: "0.12.2-3-g827d483cf7" + git_tag: "0.12.3-3-ga075b9f384" diff --git a/target/native/compression/compress_h5mu/compress_h5mu b/target/native/compression/compress_h5mu/compress_h5mu index 71f693b7868..243c31599f1 100755 --- a/target/native/compression/compress_h5mu/compress_h5mu +++ b/target/native/compression/compress_h5mu/compress_h5mu @@ -1,6 +1,6 @@ #!/usr/bin/env bash -# compress_h5mu 0.12.3 +# compress_h5mu 0.12.4 # # This wrapper script is auto-generated by viash 0.7.5 and is thus a derivative # work thereof. This software comes with ABSOLUTELY NO WARRANTY from Data @@ -158,7 +158,7 @@ VIASH_META_TEMP_DIR="$VIASH_TEMP" # ViashHelp: Display helpful explanation about this executable function ViashHelp { - echo "compress_h5mu 0.12.3" + echo "compress_h5mu 0.12.4" echo "" echo "Compress a MuData file." 
echo "" @@ -202,7 +202,7 @@ while [[ $# -gt 0 ]]; do shift 1 ;; --version) - echo "compress_h5mu 0.12.3" + echo "compress_h5mu 0.12.4" exit ;; --input) diff --git a/target/native/compression/tar_extract/.config.vsh.yaml b/target/native/compression/tar_extract/.config.vsh.yaml index 54961584036..a8d02187d2b 100644 --- a/target/native/compression/tar_extract/.config.vsh.yaml +++ b/target/native/compression/tar_extract/.config.vsh.yaml @@ -1,7 +1,7 @@ functionality: name: "tar_extract" namespace: "compression" - version: "0.12.3" + version: "0.12.4" arguments: - type: "file" name: "--input" @@ -101,6 +101,6 @@ info: output: "/home/runner/work/openpipeline/openpipeline/target/native/compression/tar_extract" executable: "/home/runner/work/openpipeline/openpipeline/target/native/compression/tar_extract/tar_extract" viash_version: "0.7.5" - git_commit: "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" + git_commit: "a075b9f384e200b357c4c85801062a980ddb3383" git_remote: "https://github.com/openpipelines-bio/openpipeline" - git_tag: "0.12.2-3-g827d483cf7" + git_tag: "0.12.3-3-ga075b9f384" diff --git a/target/native/compression/tar_extract/tar_extract b/target/native/compression/tar_extract/tar_extract index 166c374c0a7..bd4457f7d71 100755 --- a/target/native/compression/tar_extract/tar_extract +++ b/target/native/compression/tar_extract/tar_extract @@ -1,6 +1,6 @@ #!/usr/bin/env bash -# tar_extract 0.12.3 +# tar_extract 0.12.4 # # This wrapper script is auto-generated by viash 0.7.5 and is thus a derivative # work thereof. This software comes with ABSOLUTELY NO WARRANTY from Data @@ -155,7 +155,7 @@ VIASH_META_TEMP_DIR="$VIASH_TEMP" # ViashHelp: Display helpful explanation about this executable function ViashHelp { - echo "tar_extract 0.12.3" + echo "tar_extract 0.12.4" echo "" echo "Extract files from a tar archive" echo "" @@ -207,7 +207,7 @@ while [[ $# -gt 0 ]]; do shift 1 ;; --version) - echo "tar_extract 0.12.3" + echo "tar_extract 0.12.4" exit ;; --input) diff --git a/target/native/dataflow/concat/.config.vsh.yaml b/target/native/dataflow/concat/.config.vsh.yaml index a8c84537551..c5c31e51ea8 100644 --- a/target/native/dataflow/concat/.config.vsh.yaml +++ b/target/native/dataflow/concat/.config.vsh.yaml @@ -1,7 +1,7 @@ functionality: name: "concat" namespace: "dataflow" - version: "0.12.3" + version: "0.12.4" authors: - name: "Dries Schaumont" roles: @@ -217,6 +217,6 @@ info: output: "/home/runner/work/openpipeline/openpipeline/target/native/dataflow/concat" executable: "/home/runner/work/openpipeline/openpipeline/target/native/dataflow/concat/concat" viash_version: "0.7.5" - git_commit: "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" + git_commit: "a075b9f384e200b357c4c85801062a980ddb3383" git_remote: "https://github.com/openpipelines-bio/openpipeline" - git_tag: "0.12.2-3-g827d483cf7" + git_tag: "0.12.3-3-ga075b9f384" diff --git a/target/native/dataflow/concat/concat b/target/native/dataflow/concat/concat index bf13d879091..e7aa2a1172f 100755 --- a/target/native/dataflow/concat/concat +++ b/target/native/dataflow/concat/concat @@ -1,6 +1,6 @@ #!/usr/bin/env bash -# concat 0.12.3 +# concat 0.12.4 # # This wrapper script is auto-generated by viash 0.7.5 and is thus a derivative # work thereof. 
This software comes with ABSOLUTELY NO WARRANTY from Data @@ -158,7 +158,7 @@ VIASH_META_TEMP_DIR="$VIASH_TEMP" # ViashHelp: Display helpful explanation about this executable function ViashHelp { - echo "concat 0.12.3" + echo "concat 0.12.4" echo "" echo "Concatenates several uni-modal samples in .h5mu files into a single file." echo "" @@ -231,7 +231,7 @@ while [[ $# -gt 0 ]]; do shift 1 ;; --version) - echo "concat 0.12.3" + echo "concat 0.12.4" exit ;; --input) diff --git a/target/native/dataflow/merge/.config.vsh.yaml b/target/native/dataflow/merge/.config.vsh.yaml index ae5c396225e..31a20481cd7 100644 --- a/target/native/dataflow/merge/.config.vsh.yaml +++ b/target/native/dataflow/merge/.config.vsh.yaml @@ -1,7 +1,7 @@ functionality: name: "merge" namespace: "dataflow" - version: "0.12.3" + version: "0.12.4" authors: - name: "Dries Schaumont" roles: @@ -170,6 +170,6 @@ info: output: "/home/runner/work/openpipeline/openpipeline/target/native/dataflow/merge" executable: "/home/runner/work/openpipeline/openpipeline/target/native/dataflow/merge/merge" viash_version: "0.7.5" - git_commit: "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" + git_commit: "a075b9f384e200b357c4c85801062a980ddb3383" git_remote: "https://github.com/openpipelines-bio/openpipeline" - git_tag: "0.12.2-3-g827d483cf7" + git_tag: "0.12.3-3-ga075b9f384" diff --git a/target/native/dataflow/merge/merge b/target/native/dataflow/merge/merge index 19c8c65e64a..d4173d9a2d0 100755 --- a/target/native/dataflow/merge/merge +++ b/target/native/dataflow/merge/merge @@ -1,6 +1,6 @@ #!/usr/bin/env bash -# merge 0.12.3 +# merge 0.12.4 # # This wrapper script is auto-generated by viash 0.7.5 and is thus a derivative # work thereof. This software comes with ABSOLUTELY NO WARRANTY from Data @@ -158,7 +158,7 @@ VIASH_META_TEMP_DIR="$VIASH_TEMP" # ViashHelp: Display helpful explanation about this executable function ViashHelp { - echo "merge 0.12.3" + echo "merge 0.12.4" echo "" echo "Combine one or more single-modality .h5mu files together into one .h5mu file." 
echo "" @@ -203,7 +203,7 @@ while [[ $# -gt 0 ]]; do shift 1 ;; --version) - echo "merge 0.12.3" + echo "merge 0.12.4" exit ;; --input) diff --git a/target/native/dataflow/split_modalities/.config.vsh.yaml b/target/native/dataflow/split_modalities/.config.vsh.yaml index 9e246e14997..0db6e52f07f 100644 --- a/target/native/dataflow/split_modalities/.config.vsh.yaml +++ b/target/native/dataflow/split_modalities/.config.vsh.yaml @@ -1,7 +1,7 @@ functionality: name: "split_modalities" namespace: "dataflow" - version: "0.12.3" + version: "0.12.4" authors: - name: "Dries Schaumont" roles: @@ -209,6 +209,6 @@ info: output: "/home/runner/work/openpipeline/openpipeline/target/native/dataflow/split_modalities" executable: "/home/runner/work/openpipeline/openpipeline/target/native/dataflow/split_modalities/split_modalities" viash_version: "0.7.5" - git_commit: "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" + git_commit: "a075b9f384e200b357c4c85801062a980ddb3383" git_remote: "https://github.com/openpipelines-bio/openpipeline" - git_tag: "0.12.2-3-g827d483cf7" + git_tag: "0.12.3-3-ga075b9f384" diff --git a/target/native/dataflow/split_modalities/split_modalities b/target/native/dataflow/split_modalities/split_modalities index 0f9a5adcc6d..88ccf44939f 100755 --- a/target/native/dataflow/split_modalities/split_modalities +++ b/target/native/dataflow/split_modalities/split_modalities @@ -1,6 +1,6 @@ #!/usr/bin/env bash -# split_modalities 0.12.3 +# split_modalities 0.12.4 # # This wrapper script is auto-generated by viash 0.7.5 and is thus a derivative # work thereof. This software comes with ABSOLUTELY NO WARRANTY from Data @@ -159,7 +159,7 @@ VIASH_META_TEMP_DIR="$VIASH_TEMP" # ViashHelp: Display helpful explanation about this executable function ViashHelp { - echo "split_modalities 0.12.3" + echo "split_modalities 0.12.4" echo "" echo "Split the modalities from a single .h5mu multimodal sample into seperate .h5mu" echo "files." 
@@ -215,7 +215,7 @@ while [[ $# -gt 0 ]]; do shift 1 ;; --version) - echo "split_modalities 0.12.3" + echo "split_modalities 0.12.4" exit ;; --input) diff --git a/target/native/download/sync_test_resources/.config.vsh.yaml b/target/native/download/sync_test_resources/.config.vsh.yaml index 00597bdad52..abb700f24ee 100644 --- a/target/native/download/sync_test_resources/.config.vsh.yaml +++ b/target/native/download/sync_test_resources/.config.vsh.yaml @@ -1,7 +1,7 @@ functionality: name: "sync_test_resources" namespace: "download" - version: "0.12.3" + version: "0.12.4" authors: - name: "Robrecht Cannoodt" roles: @@ -165,6 +165,6 @@ info: output: "/home/runner/work/openpipeline/openpipeline/target/native/download/sync_test_resources" executable: "/home/runner/work/openpipeline/openpipeline/target/native/download/sync_test_resources/sync_test_resources" viash_version: "0.7.5" - git_commit: "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" + git_commit: "a075b9f384e200b357c4c85801062a980ddb3383" git_remote: "https://github.com/openpipelines-bio/openpipeline" - git_tag: "0.12.2-3-g827d483cf7" + git_tag: "0.12.3-3-ga075b9f384" diff --git a/target/native/download/sync_test_resources/sync_test_resources b/target/native/download/sync_test_resources/sync_test_resources index 519ee9a476f..c4f466060d6 100755 --- a/target/native/download/sync_test_resources/sync_test_resources +++ b/target/native/download/sync_test_resources/sync_test_resources @@ -1,6 +1,6 @@ #!/usr/bin/env bash -# sync_test_resources 0.12.3 +# sync_test_resources 0.12.4 # # This wrapper script is auto-generated by viash 0.7.5 and is thus a derivative # work thereof. This software comes with ABSOLUTELY NO WARRANTY from Data @@ -158,7 +158,7 @@ VIASH_META_TEMP_DIR="$VIASH_TEMP" # ViashHelp: Display helpful explanation about this executable function ViashHelp { - echo "sync_test_resources 0.12.3" + echo "sync_test_resources 0.12.4" echo "" echo "Synchronise the test resources from s3://openpipelines-data to resources_test" echo "" @@ -220,7 +220,7 @@ while [[ $# -gt 0 ]]; do shift 1 ;; --version) - echo "sync_test_resources 0.12.3" + echo "sync_test_resources 0.12.4" exit ;; --input) diff --git a/target/native/integrate/scarches/.config.vsh.yaml b/target/native/integrate/scarches/.config.vsh.yaml index 5cd42ef7185..cbd87585423 100644 --- a/target/native/integrate/scarches/.config.vsh.yaml +++ b/target/native/integrate/scarches/.config.vsh.yaml @@ -1,7 +1,7 @@ functionality: name: "scarches" namespace: "integrate" - version: "0.12.3" + version: "0.12.4" authors: - name: "Vladimir Shitov" info: @@ -326,6 +326,6 @@ info: output: "/home/runner/work/openpipeline/openpipeline/target/native/integrate/scarches" executable: "/home/runner/work/openpipeline/openpipeline/target/native/integrate/scarches/scarches" viash_version: "0.7.5" - git_commit: "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" + git_commit: "a075b9f384e200b357c4c85801062a980ddb3383" git_remote: "https://github.com/openpipelines-bio/openpipeline" - git_tag: "0.12.2-3-g827d483cf7" + git_tag: "0.12.3-3-ga075b9f384" diff --git a/target/native/integrate/scarches/scarches b/target/native/integrate/scarches/scarches index 50478f24076..6b20b89e070 100755 --- a/target/native/integrate/scarches/scarches +++ b/target/native/integrate/scarches/scarches @@ -1,6 +1,6 @@ #!/usr/bin/env bash -# scarches 0.12.3 +# scarches 0.12.4 # # This wrapper script is auto-generated by viash 0.7.5 and is thus a derivative # work thereof. 
This software comes with ABSOLUTELY NO WARRANTY from Data @@ -158,7 +158,7 @@ VIASH_META_TEMP_DIR="$VIASH_TEMP" # ViashHelp: Display helpful explanation about this executable function ViashHelp { - echo "scarches 0.12.3" + echo "scarches 0.12.4" echo "" echo "Performs reference mapping with scArches" echo "" @@ -279,7 +279,7 @@ while [[ $# -gt 0 ]]; do shift 1 ;; --version) - echo "scarches 0.12.3" + echo "scarches 0.12.4" exit ;; --input) diff --git a/target/native/integrate/totalvi/.config.vsh.yaml b/target/native/integrate/totalvi/.config.vsh.yaml index 54eeb4e7f9b..5745ee4f154 100644 --- a/target/native/integrate/totalvi/.config.vsh.yaml +++ b/target/native/integrate/totalvi/.config.vsh.yaml @@ -1,7 +1,7 @@ functionality: name: "totalvi" namespace: "integrate" - version: "0.12.3" + version: "0.12.4" authors: - name: "Vladimir Shitov" info: @@ -343,6 +343,6 @@ info: output: "/home/runner/work/openpipeline/openpipeline/target/native/integrate/totalvi" executable: "/home/runner/work/openpipeline/openpipeline/target/native/integrate/totalvi/totalvi" viash_version: "0.7.5" - git_commit: "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" + git_commit: "a075b9f384e200b357c4c85801062a980ddb3383" git_remote: "https://github.com/openpipelines-bio/openpipeline" - git_tag: "0.12.2-3-g827d483cf7" + git_tag: "0.12.3-3-ga075b9f384" diff --git a/target/native/integrate/totalvi/totalvi b/target/native/integrate/totalvi/totalvi index a6e4b85ef67..2ee6f3964b5 100755 --- a/target/native/integrate/totalvi/totalvi +++ b/target/native/integrate/totalvi/totalvi @@ -1,6 +1,6 @@ #!/usr/bin/env bash -# totalvi 0.12.3 +# totalvi 0.12.4 # # This wrapper script is auto-generated by viash 0.7.5 and is thus a derivative # work thereof. This software comes with ABSOLUTELY NO WARRANTY from Data @@ -158,7 +158,7 @@ VIASH_META_TEMP_DIR="$VIASH_TEMP" # ViashHelp: Display helpful explanation about this executable function ViashHelp { - echo "totalvi 0.12.3" + echo "totalvi 0.12.4" echo "" echo "Performs mapping to the reference by totalvi model:" echo "https://docs.scvi-tools.org/en/stable/tutorials/notebooks/scarches_scvi_tools.html#Reference-mapping-with-TOTALVI" @@ -279,7 +279,7 @@ while [[ $# -gt 0 ]]; do shift 1 ;; --version) - echo "totalvi 0.12.3" + echo "totalvi 0.12.4" exit ;; --input) diff --git a/target/native/labels_transfer/knn/.config.vsh.yaml b/target/native/labels_transfer/knn/.config.vsh.yaml index 0b6913a03af..1a0c760789a 100644 --- a/target/native/labels_transfer/knn/.config.vsh.yaml +++ b/target/native/labels_transfer/knn/.config.vsh.yaml @@ -1,7 +1,7 @@ functionality: name: "knn" namespace: "labels_transfer" - version: "0.12.3" + version: "0.12.4" authors: - name: "Vladimir Shitov" roles: @@ -374,6 +374,6 @@ info: output: "/home/runner/work/openpipeline/openpipeline/target/native/labels_transfer/knn" executable: "/home/runner/work/openpipeline/openpipeline/target/native/labels_transfer/knn/knn" viash_version: "0.7.5" - git_commit: "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" + git_commit: "a075b9f384e200b357c4c85801062a980ddb3383" git_remote: "https://github.com/openpipelines-bio/openpipeline" - git_tag: "0.12.2-3-g827d483cf7" + git_tag: "0.12.3-3-ga075b9f384" diff --git a/target/native/labels_transfer/knn/knn b/target/native/labels_transfer/knn/knn index ac701e326a7..36a8e31d880 100755 --- a/target/native/labels_transfer/knn/knn +++ b/target/native/labels_transfer/knn/knn @@ -1,6 +1,6 @@ #!/usr/bin/env bash -# knn 0.12.3 +# knn 0.12.4 # # This wrapper script is auto-generated by viash 0.7.5 and is thus a 
derivative # work thereof. This software comes with ABSOLUTELY NO WARRANTY from Data @@ -158,7 +158,7 @@ VIASH_META_TEMP_DIR="$VIASH_TEMP" # ViashHelp: Display helpful explanation about this executable function ViashHelp { - echo "knn 0.12.3" + echo "knn 0.12.4" echo "" echo "Performs label transfer from reference to query using KNN classifier" echo "" @@ -255,7 +255,7 @@ while [[ $# -gt 0 ]]; do shift 1 ;; --version) - echo "knn 0.12.3" + echo "knn 0.12.4" exit ;; --input) diff --git a/target/native/labels_transfer/xgboost/.config.vsh.yaml b/target/native/labels_transfer/xgboost/.config.vsh.yaml index 73880630f3e..3dac82b291e 100644 --- a/target/native/labels_transfer/xgboost/.config.vsh.yaml +++ b/target/native/labels_transfer/xgboost/.config.vsh.yaml @@ -1,7 +1,7 @@ functionality: name: "xgboost" namespace: "labels_transfer" - version: "0.12.3" + version: "0.12.4" authors: - name: "Vladimir Shitov" roles: @@ -589,6 +589,6 @@ info: output: "/home/runner/work/openpipeline/openpipeline/target/native/labels_transfer/xgboost" executable: "/home/runner/work/openpipeline/openpipeline/target/native/labels_transfer/xgboost/xgboost" viash_version: "0.7.5" - git_commit: "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" + git_commit: "a075b9f384e200b357c4c85801062a980ddb3383" git_remote: "https://github.com/openpipelines-bio/openpipeline" - git_tag: "0.12.2-3-g827d483cf7" + git_tag: "0.12.3-3-ga075b9f384" diff --git a/target/native/labels_transfer/xgboost/xgboost b/target/native/labels_transfer/xgboost/xgboost index b71872e8cca..a11c2cf1841 100755 --- a/target/native/labels_transfer/xgboost/xgboost +++ b/target/native/labels_transfer/xgboost/xgboost @@ -1,6 +1,6 @@ #!/usr/bin/env bash -# xgboost 0.12.3 +# xgboost 0.12.4 # # This wrapper script is auto-generated by viash 0.7.5 and is thus a derivative # work thereof. 
This software comes with ABSOLUTELY NO WARRANTY from Data @@ -158,7 +158,7 @@ VIASH_META_TEMP_DIR="$VIASH_TEMP" # ViashHelp: Display helpful explanation about this executable function ViashHelp { - echo "xgboost 0.12.3" + echo "xgboost 0.12.4" echo "" echo "Performs label transfer from reference to query using XGBoost classifier" echo "" @@ -382,7 +382,7 @@ while [[ $# -gt 0 ]]; do shift 1 ;; --version) - echo "xgboost 0.12.3" + echo "xgboost 0.12.4" exit ;; --input) diff --git a/target/native/metadata/add_id/.config.vsh.yaml b/target/native/metadata/add_id/.config.vsh.yaml index 5f6d540e16b..23e8b23932e 100644 --- a/target/native/metadata/add_id/.config.vsh.yaml +++ b/target/native/metadata/add_id/.config.vsh.yaml @@ -1,7 +1,7 @@ functionality: name: "add_id" namespace: "metadata" - version: "0.12.3" + version: "0.12.4" authors: - name: "Dries Schaumont" roles: @@ -192,6 +192,6 @@ info: output: "/home/runner/work/openpipeline/openpipeline/target/native/metadata/add_id" executable: "/home/runner/work/openpipeline/openpipeline/target/native/metadata/add_id/add_id" viash_version: "0.7.5" - git_commit: "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" + git_commit: "a075b9f384e200b357c4c85801062a980ddb3383" git_remote: "https://github.com/openpipelines-bio/openpipeline" - git_tag: "0.12.2-3-g827d483cf7" + git_tag: "0.12.3-3-ga075b9f384" diff --git a/target/native/metadata/add_id/add_id b/target/native/metadata/add_id/add_id index 7f1a1c3f839..820a157e10f 100755 --- a/target/native/metadata/add_id/add_id +++ b/target/native/metadata/add_id/add_id @@ -1,6 +1,6 @@ #!/usr/bin/env bash -# add_id 0.12.3 +# add_id 0.12.4 # # This wrapper script is auto-generated by viash 0.7.5 and is thus a derivative # work thereof. This software comes with ABSOLUTELY NO WARRANTY from Data @@ -158,7 +158,7 @@ VIASH_META_TEMP_DIR="$VIASH_TEMP" # ViashHelp: Display helpful explanation about this executable function ViashHelp { - echo "add_id 0.12.3" + echo "add_id 0.12.4" echo "" echo "Add id of .obs. Also allows to make .obs_names (the .obs index) unique" echo "by prefixing the values with an unique id per .h5mu file." 
@@ -216,7 +216,7 @@ while [[ $# -gt 0 ]]; do shift 1 ;; --version) - echo "add_id 0.12.3" + echo "add_id 0.12.4" exit ;; --input) diff --git a/target/native/metadata/grep_annotation_column/.config.vsh.yaml b/target/native/metadata/grep_annotation_column/.config.vsh.yaml index 68ce018021c..2570eb739ad 100644 --- a/target/native/metadata/grep_annotation_column/.config.vsh.yaml +++ b/target/native/metadata/grep_annotation_column/.config.vsh.yaml @@ -1,7 +1,7 @@ functionality: name: "grep_annotation_column" namespace: "metadata" - version: "0.12.3" + version: "0.12.4" authors: - name: "Dries Schaumont" roles: @@ -239,6 +239,6 @@ info: output: "/home/runner/work/openpipeline/openpipeline/target/native/metadata/grep_annotation_column" executable: "/home/runner/work/openpipeline/openpipeline/target/native/metadata/grep_annotation_column/grep_annotation_column" viash_version: "0.7.5" - git_commit: "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" + git_commit: "a075b9f384e200b357c4c85801062a980ddb3383" git_remote: "https://github.com/openpipelines-bio/openpipeline" - git_tag: "0.12.2-3-g827d483cf7" + git_tag: "0.12.3-3-ga075b9f384" diff --git a/target/native/metadata/grep_annotation_column/grep_annotation_column b/target/native/metadata/grep_annotation_column/grep_annotation_column index 804df779336..6d2acbcc2db 100755 --- a/target/native/metadata/grep_annotation_column/grep_annotation_column +++ b/target/native/metadata/grep_annotation_column/grep_annotation_column @@ -1,6 +1,6 @@ #!/usr/bin/env bash -# grep_annotation_column 0.12.3 +# grep_annotation_column 0.12.4 # # This wrapper script is auto-generated by viash 0.7.5 and is thus a derivative # work thereof. This software comes with ABSOLUTELY NO WARRANTY from Data @@ -158,7 +158,7 @@ VIASH_META_TEMP_DIR="$VIASH_TEMP" # ViashHelp: Display helpful explanation about this executable function ViashHelp { - echo "grep_annotation_column 0.12.3" + echo "grep_annotation_column 0.12.4" echo "" echo "Perform a regex lookup on a column from the annotation matrices .obs or .var." echo "The annotation matrix can originate from either a modality, or all modalities" @@ -242,7 +242,7 @@ while [[ $# -gt 0 ]]; do shift 1 ;; --version) - echo "grep_annotation_column 0.12.3" + echo "grep_annotation_column 0.12.4" exit ;; --input) diff --git a/target/native/transform/scale/.config.vsh.yaml b/target/native/transform/scale/.config.vsh.yaml index fb77d3602b9..8d4174da81c 100644 --- a/target/native/transform/scale/.config.vsh.yaml +++ b/target/native/transform/scale/.config.vsh.yaml @@ -1,7 +1,7 @@ functionality: name: "scale" namespace: "transform" - version: "0.12.3" + version: "0.12.4" authors: - name: "Dries Schaumont" roles: @@ -200,6 +200,6 @@ info: output: "/home/runner/work/openpipeline/openpipeline/target/native/transform/scale" executable: "/home/runner/work/openpipeline/openpipeline/target/native/transform/scale/scale" viash_version: "0.7.5" - git_commit: "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" + git_commit: "a075b9f384e200b357c4c85801062a980ddb3383" git_remote: "https://github.com/openpipelines-bio/openpipeline" - git_tag: "0.12.2-3-g827d483cf7" + git_tag: "0.12.3-3-ga075b9f384" diff --git a/target/native/transform/scale/scale b/target/native/transform/scale/scale index 3d52ae6be83..ac5ad7f7631 100755 --- a/target/native/transform/scale/scale +++ b/target/native/transform/scale/scale @@ -1,6 +1,6 @@ #!/usr/bin/env bash -# scale 0.12.3 +# scale 0.12.4 # # This wrapper script is auto-generated by viash 0.7.5 and is thus a derivative # work thereof. 
This software comes with ABSOLUTELY NO WARRANTY from Data @@ -158,7 +158,7 @@ VIASH_META_TEMP_DIR="$VIASH_TEMP" # ViashHelp: Display helpful explanation about this executable function ViashHelp { - echo "scale 0.12.3" + echo "scale 0.12.4" echo "" echo "Scale data to unit variance and zero mean." echo "" @@ -218,7 +218,7 @@ while [[ $# -gt 0 ]]; do shift 1 ;; --version) - echo "scale 0.12.3" + echo "scale 0.12.4" exit ;; --input) diff --git a/target/native/velocity/scvelo/.config.vsh.yaml b/target/native/velocity/scvelo/.config.vsh.yaml index 6675c39e806..29da507ebb1 100644 --- a/target/native/velocity/scvelo/.config.vsh.yaml +++ b/target/native/velocity/scvelo/.config.vsh.yaml @@ -1,7 +1,7 @@ functionality: name: "scvelo" namespace: "velocity" - version: "0.12.3" + version: "0.12.4" authors: - name: "Dries Schaumont" roles: @@ -271,6 +271,6 @@ info: output: "/home/runner/work/openpipeline/openpipeline/target/native/velocity/scvelo" executable: "/home/runner/work/openpipeline/openpipeline/target/native/velocity/scvelo/scvelo" viash_version: "0.7.5" - git_commit: "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" + git_commit: "a075b9f384e200b357c4c85801062a980ddb3383" git_remote: "https://github.com/openpipelines-bio/openpipeline" - git_tag: "0.12.2-3-g827d483cf7" + git_tag: "0.12.3-3-ga075b9f384" diff --git a/target/native/velocity/scvelo/scvelo b/target/native/velocity/scvelo/scvelo index 43d2bcb2f8b..164b1dfd685 100755 --- a/target/native/velocity/scvelo/scvelo +++ b/target/native/velocity/scvelo/scvelo @@ -1,6 +1,6 @@ #!/usr/bin/env bash -# scvelo 0.12.3 +# scvelo 0.12.4 # # This wrapper script is auto-generated by viash 0.7.5 and is thus a derivative # work thereof. This software comes with ABSOLUTELY NO WARRANTY from Data @@ -158,7 +158,7 @@ VIASH_META_TEMP_DIR="$VIASH_TEMP" # ViashHelp: Display helpful explanation about this executable function ViashHelp { - echo "scvelo 0.12.3" + echo "scvelo 0.12.4" echo "" echo "Inputs:" echo " --input" @@ -258,7 +258,7 @@ while [[ $# -gt 0 ]]; do shift 1 ;; --version) - echo "scvelo 0.12.3" + echo "scvelo 0.12.4" exit ;; --input) diff --git a/target/native/velocity/velocyto/.config.vsh.yaml b/target/native/velocity/velocyto/.config.vsh.yaml index c0ad44c1c4e..aa6436d9c6e 100644 --- a/target/native/velocity/velocyto/.config.vsh.yaml +++ b/target/native/velocity/velocyto/.config.vsh.yaml @@ -1,7 +1,7 @@ functionality: name: "velocyto" namespace: "velocity" - version: "0.12.3" + version: "0.12.4" authors: - name: "Robrecht Cannoodt" roles: @@ -220,6 +220,6 @@ info: output: "/home/runner/work/openpipeline/openpipeline/target/native/velocity/velocyto" executable: "/home/runner/work/openpipeline/openpipeline/target/native/velocity/velocyto/velocyto" viash_version: "0.7.5" - git_commit: "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" + git_commit: "a075b9f384e200b357c4c85801062a980ddb3383" git_remote: "https://github.com/openpipelines-bio/openpipeline" - git_tag: "0.12.2-3-g827d483cf7" + git_tag: "0.12.3-3-ga075b9f384" diff --git a/target/native/velocity/velocyto/velocyto b/target/native/velocity/velocyto/velocyto index 252d813ba0f..b23f53938a7 100755 --- a/target/native/velocity/velocyto/velocyto +++ b/target/native/velocity/velocyto/velocyto @@ -1,6 +1,6 @@ #!/usr/bin/env bash -# velocyto 0.12.3 +# velocyto 0.12.4 # # This wrapper script is auto-generated by viash 0.7.5 and is thus a derivative # work thereof. 
This software comes with ABSOLUTELY NO WARRANTY from Data @@ -158,7 +158,7 @@ VIASH_META_TEMP_DIR="$VIASH_TEMP" # ViashHelp: Display helpful explanation about this executable function ViashHelp { - echo "velocyto 0.12.3" + echo "velocyto 0.12.4" echo "" echo "Runs the velocity analysis on a BAM file, outputting a loom file." echo "" @@ -217,7 +217,7 @@ while [[ $# -gt 0 ]]; do shift 1 ;; --version) - echo "velocyto 0.12.3" + echo "velocyto 0.12.4" exit ;; --input) diff --git a/target/nextflow/annotate/popv/.config.vsh.yaml b/target/nextflow/annotate/popv/.config.vsh.yaml index 7518edac8f4..7d13e317a86 100644 --- a/target/nextflow/annotate/popv/.config.vsh.yaml +++ b/target/nextflow/annotate/popv/.config.vsh.yaml @@ -1,7 +1,7 @@ functionality: name: "popv" namespace: "annotate" - version: "0.12.3" + version: "0.12.4" authors: - name: "Matthias Beyens" roles: @@ -341,6 +341,6 @@ info: output: "/home/runner/work/openpipeline/openpipeline/target/nextflow/annotate/popv" executable: "/home/runner/work/openpipeline/openpipeline/target/nextflow/annotate/popv/popv" viash_version: "0.7.5" - git_commit: "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" + git_commit: "a075b9f384e200b357c4c85801062a980ddb3383" git_remote: "https://github.com/openpipelines-bio/openpipeline" - git_tag: "0.12.2-3-g827d483cf7" + git_tag: "0.12.3-3-ga075b9f384" diff --git a/target/nextflow/annotate/popv/main.nf b/target/nextflow/annotate/popv/main.nf index f950802b925..e0bf5943414 100644 --- a/target/nextflow/annotate/popv/main.nf +++ b/target/nextflow/annotate/popv/main.nf @@ -1,4 +1,4 @@ -// popv 0.12.3 +// popv 0.12.4 // // This wrapper script is auto-generated by viash 0.7.5 and is thus a derivative // work thereof. This software comes with ABSOLUTELY NO WARRANTY from Data @@ -28,7 +28,7 @@ thisConfig = processConfig(jsonSlurper.parseText('''{ "functionality" : { "name" : "popv", "namespace" : "annotate", - "version" : "0.12.3", + "version" : "0.12.4", "authors" : [ { "name" : "Matthias Beyens", @@ -459,9 +459,9 @@ thisConfig = processConfig(jsonSlurper.parseText('''{ "platform" : "nextflow", "output" : "/home/runner/work/openpipeline/openpipeline/target/nextflow/annotate/popv", "viash_version" : "0.7.5", - "git_commit" : "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f", + "git_commit" : "a075b9f384e200b357c4c85801062a980ddb3383", "git_remote" : "https://github.com/openpipelines-bio/openpipeline", - "git_tag" : "0.12.2-3-g827d483cf7" + "git_tag" : "0.12.3-3-ga075b9f384" } }''')) diff --git a/target/nextflow/annotate/popv/nextflow.config b/target/nextflow/annotate/popv/nextflow.config index a4a942690c4..d618d269b9b 100644 --- a/target/nextflow/annotate/popv/nextflow.config +++ b/target/nextflow/annotate/popv/nextflow.config @@ -2,7 +2,7 @@ manifest { name = 'popv' mainScript = 'main.nf' nextflowVersion = '!>=20.12.1-edge' - version = '0.12.3' + version = '0.12.4' description = 'Performs popular major vote cell typing on single cell sequence data using multiple algorithms. Note that this is a one-shot version of PopV.' 
author = 'Matthias Beyens, Robrecht Cannoodt' } diff --git a/target/nextflow/annotate/popv/nextflow_schema.json b/target/nextflow/annotate/popv/nextflow_schema.json index a610d2385f8..0749ce0b0fd 100644 --- a/target/nextflow/annotate/popv/nextflow_schema.json +++ b/target/nextflow/annotate/popv/nextflow_schema.json @@ -1,171 +1,251 @@ { - "$schema": "http://json-schema.org/draft-07/schema", - "title": "popv", - "description": "Performs popular major vote cell typing on single cell sequence data using multiple algorithms. Note that this is a one-shot version of PopV.", +"$schema": "http://json-schema.org/draft-07/schema", +"title": "popv", +"description": "Performs popular major vote cell typing on single cell sequence data using multiple algorithms. Note that this is a one-shot version of PopV.", +"type": "object", +"definitions": { + + + + "inputs" : { + "title": "Inputs", "type": "object", - "definitions": { - "inputs" : { - "title": "Inputs", - "type": "object", - "description": "Arguments related to the input (aka query) dataset.", - "properties": { - - "input": { - "type": "string", - "description": "Type: `file`, required, example: `input.h5mu`. Input h5mu file", - "help_text": "Type: `file`, required, example: `input.h5mu`. Input h5mu file." - }, - - "modality": { - "type": "string", - "description": "Type: `string`, default: `rna`. Which modality to process", - "help_text": "Type: `string`, default: `rna`. Which modality to process.", - "default": "rna" - }, - - "input_layer": { - "type": "string", - "description": "Type: `string`. Which layer to use", - "help_text": "Type: `string`. Which layer to use. If no value is provided, the counts are assumed to be in the `.X` slot. Otherwise, count data is expected to be in `.layers[input_layer]`." - }, - - "input_obs_batch": { - "type": "string", - "description": "Type: `string`. Key in obs field of input adata for batch information", - "help_text": "Type: `string`. Key in obs field of input adata for batch information. If no value is provided, batch label is assumed to be unknown." - }, - - "input_var_subset": { - "type": "string", - "description": "Type: `string`. Subset the input object with this column", - "help_text": "Type: `string`. Subset the input object with this column." - }, - - "input_obs_label": { - "type": "string", - "description": "Type: `string`. Key in obs field of input adata for label information", - "help_text": "Type: `string`. Key in obs field of input adata for label information. This is only used for training scANVI. Unlabelled cells should be set to `\"unknown_celltype_label\"`." - }, - - "unknown_celltype_label": { - "type": "string", - "description": "Type: `string`, default: `unknown`. If `input_obs_label` is specified, cells with this value will be treated as unknown and will be predicted by the model", - "help_text": "Type: `string`, default: `unknown`. If `input_obs_label` is specified, cells with this value will be treated as unknown and will be predicted by the model.", - "default": "unknown" - } - - } - }, - "outputs" : { - "title": "Outputs", - "type": "object", - "description": "Output arguments.", - "properties": { - - "output": { - "type": "string", - "description": "Type: `file`, required, default: `$id.$key.output.h5mu`, example: `output.h5mu`. Output h5mu file", - "help_text": "Type: `file`, required, default: `$id.$key.output.h5mu`, example: `output.h5mu`. 
Output h5mu file.", - "default": "$id.$key.output.h5mu" - }, - - "output_compression": { - "type": "string", - "description": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. ", - "help_text": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. ", - "enum": ["gzip", "lzf"] + "description": "Arguments related to the input (aka query) dataset.", + "properties": { + + + "input": { + "type": + "string", + "description": "Type: `file`, required, example: `input.h5mu`. Input h5mu file", + "help_text": "Type: `file`, required, example: `input.h5mu`. Input h5mu file." - } - - } - }, - "arguments" : { - "title": "Arguments", - "type": "object", - "description": "Other arguments.", - "properties": { - - "methods": { - "type": "string", - "description": "Type: List of `string`, required, example: `knn_on_scvi:scanvi`, multiple_sep: `\":\"`, choices: ``celltypist`, `knn_on_bbknn`, `knn_on_scanorama`, `knn_on_scvi`, `onclass`, `rf`, `scanvi`, `svm``. Methods to call cell types", - "help_text": "Type: List of `string`, required, example: `knn_on_scvi:scanvi`, multiple_sep: `\":\"`, choices: ``celltypist`, `knn_on_bbknn`, `knn_on_scanorama`, `knn_on_scvi`, `onclass`, `rf`, `scanvi`, `svm``. Methods to call cell types. By default, runs to knn_on_scvi and scanvi.", - "enum": ["celltypist", "knn_on_bbknn", "knn_on_scanorama", "knn_on_scvi", "onclass", "rf", "scanvi", "svm"] + } + + + , + "modality": { + "type": + "string", + "description": "Type: `string`, default: `rna`. Which modality to process", + "help_text": "Type: `string`, default: `rna`. Which modality to process." + , + "default": "rna" + } + + + , + "input_layer": { + "type": + "string", + "description": "Type: `string`. Which layer to use", + "help_text": "Type: `string`. Which layer to use. If no value is provided, the counts are assumed to be in the `.X` slot. Otherwise, count data is expected to be in `.layers[input_layer]`." - } - - } - }, - "reference" : { - "title": "Reference", - "type": "object", - "description": "Arguments related to the reference dataset.", - "properties": { - - "reference": { - "type": "string", - "description": "Type: `file`, required, example: `TS_Bladder_filtered.h5ad`. User-provided reference tissue", - "help_text": "Type: `file`, required, example: `TS_Bladder_filtered.h5ad`. User-provided reference tissue. The data that will be used as reference to call cell types." - }, - - "reference_layer": { - "type": "string", - "description": "Type: `string`. Which layer to use", - "help_text": "Type: `string`. Which layer to use. If no value is provided, the counts are assumed to be in the `.X` slot. Otherwise, count data is expected to be in `.layers[reference_layer]`." - }, - - "reference_obs_label": { - "type": "string", - "description": "Type: `string`, default: `cell_ontology_class`. Key in obs field of reference AnnData with cell-type information", - "help_text": "Type: `string`, default: `cell_ontology_class`. Key in obs field of reference AnnData with cell-type information.", - "default": "cell_ontology_class" - }, - - "reference_obs_batch": { - "type": "string", - "description": "Type: `string`, default: `donor_assay`. Key in obs field of input adata for batch information", - "help_text": "Type: `string`, default: `donor_assay`. Key in obs field of input adata for batch information.", - "default": "donor_assay" - } - - } - }, - "nextflow input-output arguments" : { - "title": "Nextflow input-output arguments", - "type": "object", - "description": "Input/output parameters for Nextflow itself. 
Please note that both publishDir and publish_dir are supported but at least one has to be configured.", - "properties": { - - "publish_dir": { - "type": "string", - "description": "Type: `string`, required, example: `output/`. Path to an output directory", - "help_text": "Type: `string`, required, example: `output/`. Path to an output directory." - }, - - "param_list": { - "type": "string", - "description": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel", - "help_text": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel. A `param_list` can either be a list of maps, a csv file, a json file, a yaml file, or simply a yaml blob.\n\n* A list of maps (as-is) where the keys of each map corresponds to the arguments of the pipeline. Example: in a `nextflow.config` file: `param_list: [ [\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027], [\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027] ]`.\n* A csv file should have column names which correspond to the different arguments of this pipeline. Example: `--param_list data.csv` with columns `id,input`.\n* A json or a yaml file should be a list of maps, each of which has keys corresponding to the arguments of the pipeline. Example: `--param_list data.json` with contents `[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]`.\n* A yaml blob can also be passed directly as a string. Example: `--param_list \"[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]\"`.\n\nWhen passing a csv, json or yaml file, relative path names are relativized to the location of the parameter file. No relativation is performed when `param_list` is a list of maps (as-is) or a yaml blob.", - "hidden": true - } - - } - } + } + + + , + "input_obs_batch": { + "type": + "string", + "description": "Type: `string`. Key in obs field of input adata for batch information", + "help_text": "Type: `string`. Key in obs field of input adata for batch information. If no value is provided, batch label is assumed to be unknown." + + } + + + , + "input_var_subset": { + "type": + "string", + "description": "Type: `string`. Subset the input object with this column", + "help_text": "Type: `string`. Subset the input object with this column." + + } + + + , + "input_obs_label": { + "type": + "string", + "description": "Type: `string`. Key in obs field of input adata for label information", + "help_text": "Type: `string`. Key in obs field of input adata for label information. This is only used for training scANVI. Unlabelled cells should be set to `\"unknown_celltype_label\"`." + + } + + + , + "unknown_celltype_label": { + "type": + "string", + "description": "Type: `string`, default: `unknown`. If `input_obs_label` is specified, cells with this value will be treated as unknown and will be predicted by the model", + "help_text": "Type: `string`, default: `unknown`. If `input_obs_label` is specified, cells with this value will be treated as unknown and will be predicted by the model." 
+ , + "default": "unknown" + } + + +} +}, + + + "outputs" : { + "title": "Outputs", + "type": "object", + "description": "Output arguments.", + "properties": { + + + "output": { + "type": + "string", + "description": "Type: `file`, required, default: `$id.$key.output.h5mu`, example: `output.h5mu`. Output h5mu file", + "help_text": "Type: `file`, required, default: `$id.$key.output.h5mu`, example: `output.h5mu`. Output h5mu file." + , + "default": "$id.$key.output.h5mu" + } + + + , + "output_compression": { + "type": + "string", + "description": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. ", + "help_text": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. ", + "enum": ["gzip", "lzf"] + + + } + + +} +}, + + + "arguments" : { + "title": "Arguments", + "type": "object", + "description": "Other arguments.", + "properties": { + + + "methods": { + "type": + "string", + "description": "Type: List of `string`, required, example: `knn_on_scvi:scanvi`, multiple_sep: `\":\"`, choices: ``celltypist`, `knn_on_bbknn`, `knn_on_scanorama`, `knn_on_scvi`, `onclass`, `rf`, `scanvi`, `svm``. Methods to call cell types", + "help_text": "Type: List of `string`, required, example: `knn_on_scvi:scanvi`, multiple_sep: `\":\"`, choices: ``celltypist`, `knn_on_bbknn`, `knn_on_scanorama`, `knn_on_scvi`, `onclass`, `rf`, `scanvi`, `svm``. Methods to call cell types. By default, runs to knn_on_scvi and scanvi.", + "enum": ["celltypist", "knn_on_bbknn", "knn_on_scanorama", "knn_on_scvi", "onclass", "rf", "scanvi", "svm"] + + + } + + +} +}, + + + "reference" : { + "title": "Reference", + "type": "object", + "description": "Arguments related to the reference dataset.", + "properties": { + + + "reference": { + "type": + "string", + "description": "Type: `file`, required, example: `TS_Bladder_filtered.h5ad`. User-provided reference tissue", + "help_text": "Type: `file`, required, example: `TS_Bladder_filtered.h5ad`. User-provided reference tissue. The data that will be used as reference to call cell types." + + } + + + , + "reference_layer": { + "type": + "string", + "description": "Type: `string`. Which layer to use", + "help_text": "Type: `string`. Which layer to use. If no value is provided, the counts are assumed to be in the `.X` slot. Otherwise, count data is expected to be in `.layers[reference_layer]`." + + } + + + , + "reference_obs_label": { + "type": + "string", + "description": "Type: `string`, default: `cell_ontology_class`. Key in obs field of reference AnnData with cell-type information", + "help_text": "Type: `string`, default: `cell_ontology_class`. Key in obs field of reference AnnData with cell-type information." + , + "default": "cell_ontology_class" + } + + + , + "reference_obs_batch": { + "type": + "string", + "description": "Type: `string`, default: `donor_assay`. Key in obs field of input adata for batch information", + "help_text": "Type: `string`, default: `donor_assay`. Key in obs field of input adata for batch information." + , + "default": "donor_assay" + } + + +} +}, + + + "nextflow input-output arguments" : { + "title": "Nextflow input-output arguments", + "type": "object", + "description": "Input/output parameters for Nextflow itself. Please note that both publishDir and publish_dir are supported but at least one has to be configured.", + "properties": { + + + "publish_dir": { + "type": + "string", + "description": "Type: `string`, required, example: `output/`. Path to an output directory", + "help_text": "Type: `string`, required, example: `output/`. 
Path to an output directory." + + } + + + , + "param_list": { + "type": + "string", + "description": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel", + "help_text": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel. A `param_list` can either be a list of maps, a csv file, a json file, a yaml file, or simply a yaml blob.\n\n* A list of maps (as-is) where the keys of each map corresponds to the arguments of the pipeline. Example: in a `nextflow.config` file: `param_list: [ [\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027], [\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027] ]`.\n* A csv file should have column names which correspond to the different arguments of this pipeline. Example: `--param_list data.csv` with columns `id,input`.\n* A json or a yaml file should be a list of maps, each of which has keys corresponding to the arguments of the pipeline. Example: `--param_list data.json` with contents `[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]`.\n* A yaml blob can also be passed directly as a string. Example: `--param_list \"[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]\"`.\n\nWhen passing a csv, json or yaml file, relative path names are relativized to the location of the parameter file. No relativation is performed when `param_list` is a list of maps (as-is) or a yaml blob.", + "hidden": true + + } + + +} +} +}, +"allOf": [ + + { + "$ref": "#/definitions/inputs" + }, + + { + "$ref": "#/definitions/outputs" + }, + + { + "$ref": "#/definitions/arguments" + }, + + { + "$ref": "#/definitions/reference" }, - "allOf": [ - { - "$ref": "#/definitions/inputs" - }, - { - "$ref": "#/definitions/outputs" - }, - { - "$ref": "#/definitions/arguments" - }, - { - "$ref": "#/definitions/reference" - }, - { - "$ref": "#/definitions/nextflow input-output arguments" - } - ] + + { + "$ref": "#/definitions/nextflow input-output arguments" + } +] } diff --git a/target/nextflow/cluster/leiden/.config.vsh.yaml b/target/nextflow/cluster/leiden/.config.vsh.yaml index d25c28179c2..447c044a7a5 100644 --- a/target/nextflow/cluster/leiden/.config.vsh.yaml +++ b/target/nextflow/cluster/leiden/.config.vsh.yaml @@ -1,7 +1,7 @@ functionality: name: "leiden" namespace: "cluster" - version: "0.12.3" + version: "0.12.4" authors: - name: "Dries De Maeyer" roles: @@ -214,6 +214,6 @@ info: output: "/home/runner/work/openpipeline/openpipeline/target/nextflow/cluster/leiden" executable: "/home/runner/work/openpipeline/openpipeline/target/nextflow/cluster/leiden/leiden" viash_version: "0.7.5" - git_commit: "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" + git_commit: "a075b9f384e200b357c4c85801062a980ddb3383" git_remote: "https://github.com/openpipelines-bio/openpipeline" - git_tag: "0.12.2-3-g827d483cf7" + git_tag: "0.12.3-3-ga075b9f384" diff --git a/target/nextflow/cluster/leiden/main.nf b/target/nextflow/cluster/leiden/main.nf index cdc45b85237..50eb4aec860 100644 --- a/target/nextflow/cluster/leiden/main.nf +++ b/target/nextflow/cluster/leiden/main.nf @@ -1,4 +1,4 @@ -// leiden 0.12.3 +// leiden 0.12.4 // // This wrapper script is auto-generated by viash 0.7.5 and is thus a derivative // work thereof. 
This software comes with ABSOLUTELY NO WARRANTY from Data @@ -27,7 +27,7 @@ thisConfig = processConfig(jsonSlurper.parseText('''{ "functionality" : { "name" : "leiden", "namespace" : "cluster", - "version" : "0.12.3", + "version" : "0.12.4", "authors" : [ { "name" : "Dries De Maeyer", @@ -285,9 +285,9 @@ thisConfig = processConfig(jsonSlurper.parseText('''{ "platform" : "nextflow", "output" : "/home/runner/work/openpipeline/openpipeline/target/nextflow/cluster/leiden", "viash_version" : "0.7.5", - "git_commit" : "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f", + "git_commit" : "a075b9f384e200b357c4c85801062a980ddb3383", "git_remote" : "https://github.com/openpipelines-bio/openpipeline", - "git_tag" : "0.12.2-3-g827d483cf7" + "git_tag" : "0.12.3-3-ga075b9f384" } }''')) diff --git a/target/nextflow/cluster/leiden/nextflow.config b/target/nextflow/cluster/leiden/nextflow.config index a5b92bcbd4c..a8c7150d552 100644 --- a/target/nextflow/cluster/leiden/nextflow.config +++ b/target/nextflow/cluster/leiden/nextflow.config @@ -2,7 +2,7 @@ manifest { name = 'leiden' mainScript = 'main.nf' nextflowVersion = '!>=20.12.1-edge' - version = '0.12.3' + version = '0.12.4' description = 'Cluster cells using the Leiden algorithm [Traag18] implemented in the Scanpy framework [Wolf18]. \nLeiden is an improved version of the Louvain algorithm [Blondel08]. \nIt has been proposed for single-cell analysis by [Levine15]. \nThis requires having ran `neighbors/find_neighbors` or `neighbors/bbknn` first.\n\nBlondel08: Blondel et al. (2008), Fast unfolding of communities in large networks, J. Stat. Mech. \nLevine15: Levine et al. (2015), Data-Driven Phenotypic Dissection of AML Reveals Progenitor-like Cells that Correlate with Prognosis, Cell. \nTraag18: Traag et al. (2018), From Louvain to Leiden: guaranteeing well-connected communities arXiv. \nWolf18: Wolf et al. (2018), Scanpy: large-scale single-cell gene expression data analysis, Genome Biology. \n' author = 'Dries De Maeyer' } diff --git a/target/nextflow/cluster/leiden/nextflow_schema.json b/target/nextflow/cluster/leiden/nextflow_schema.json index 4da3785970a..cc9d8382dae 100644 --- a/target/nextflow/cluster/leiden/nextflow_schema.json +++ b/target/nextflow/cluster/leiden/nextflow_schema.json @@ -1,94 +1,137 @@ { - "$schema": "http://json-schema.org/draft-07/schema", - "title": "leiden", - "description": "Cluster cells using the Leiden algorithm [Traag18] implemented in the Scanpy framework [Wolf18]. \nLeiden is an improved version of the Louvain algorithm [Blondel08]. \nIt has been proposed for single-cell analysis by [Levine15]. \nThis requires having ran `neighbors/find_neighbors` or `neighbors/bbknn` first.\n\nBlondel08: Blondel et al. (2008), Fast unfolding of communities in large networks, J. Stat. Mech. \nLevine15: Levine et al. (2015), Data-Driven Phenotypic Dissection of AML Reveals Progenitor-like Cells that Correlate with Prognosis, Cell. \nTraag18: Traag et al. (2018), From Louvain to Leiden: guaranteeing well-connected communities arXiv. \nWolf18: Wolf et al. (2018), Scanpy: large-scale single-cell gene expression data analysis, Genome Biology. \n", +"$schema": "http://json-schema.org/draft-07/schema", +"title": "leiden", +"description": "Cluster cells using the Leiden algorithm [Traag18] implemented in the Scanpy framework [Wolf18]. \nLeiden is an improved version of the Louvain algorithm [Blondel08]. \nIt has been proposed for single-cell analysis by [Levine15]. 
\nThis requires having ran `neighbors/find_neighbors` or `neighbors/bbknn` first.\n\nBlondel08: Blondel et al. (2008), Fast unfolding of communities in large networks, J. Stat. Mech. \nLevine15: Levine et al. (2015), Data-Driven Phenotypic Dissection of AML Reveals Progenitor-like Cells that Correlate with Prognosis, Cell. \nTraag18: Traag et al. (2018), From Louvain to Leiden: guaranteeing well-connected communities arXiv. \nWolf18: Wolf et al. (2018), Scanpy: large-scale single-cell gene expression data analysis, Genome Biology. \n", +"type": "object", +"definitions": { + + + + "arguments" : { + "title": "Arguments", "type": "object", - "definitions": { - "arguments" : { - "title": "Arguments", - "type": "object", - "description": "No description", - "properties": { - - "input": { - "type": "string", - "description": "Type: `file`, required, example: `input.h5mu`. Input file", - "help_text": "Type: `file`, required, example: `input.h5mu`. Input file." - }, - - "modality": { - "type": "string", - "description": "Type: `string`, default: `rna`. ", - "help_text": "Type: `string`, default: `rna`. ", - "default": "rna" - }, - - "obsp_connectivities": { - "type": "string", - "description": "Type: `string`, default: `connectivities`. In which ", - "help_text": "Type: `string`, default: `connectivities`. In which .obsp slot the neighbor connectivities can be found.", - "default": "connectivities" - }, - - "output": { - "type": "string", - "description": "Type: `file`, required, default: `$id.$key.output.h5mu`, example: `output.h5mu`. Output file", - "help_text": "Type: `file`, required, default: `$id.$key.output.h5mu`, example: `output.h5mu`. Output file.", - "default": "$id.$key.output.h5mu" - }, - - "output_compression": { - "type": "string", - "description": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. ", - "help_text": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. ", - "enum": ["gzip", "lzf"] + "description": "No description", + "properties": { + + + "input": { + "type": + "string", + "description": "Type: `file`, required, example: `input.h5mu`. Input file", + "help_text": "Type: `file`, required, example: `input.h5mu`. Input file." - }, - - "obsm_name": { - "type": "string", - "description": "Type: `string`, default: `leiden`. Name of the ", - "help_text": "Type: `string`, default: `leiden`. Name of the .obsm key under which to add the cluster labels.\nThe name of the columns in the matrix will correspond to the resolutions.\n", - "default": "leiden" - }, - - "resolution": { - "type": "string", - "description": "Type: List of `double`, required, default: `1`, multiple_sep: `\":\"`. A parameter value controlling the coarseness of the clustering", - "help_text": "Type: List of `double`, required, default: `1`, multiple_sep: `\":\"`. A parameter value controlling the coarseness of the clustering. Higher values lead to more clusters.\nMultiple values will result in clustering being performed multiple times.\n", - "default": "1" - } - - } - }, - "nextflow input-output arguments" : { - "title": "Nextflow input-output arguments", - "type": "object", - "description": "Input/output parameters for Nextflow itself. Please note that both publishDir and publish_dir are supported but at least one has to be configured.", - "properties": { - - "publish_dir": { - "type": "string", - "description": "Type: `string`, required, example: `output/`. Path to an output directory", - "help_text": "Type: `string`, required, example: `output/`. Path to an output directory." 
- }, - - "param_list": { - "type": "string", - "description": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel", - "help_text": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel. A `param_list` can either be a list of maps, a csv file, a json file, a yaml file, or simply a yaml blob.\n\n* A list of maps (as-is) where the keys of each map corresponds to the arguments of the pipeline. Example: in a `nextflow.config` file: `param_list: [ [\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027], [\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027] ]`.\n* A csv file should have column names which correspond to the different arguments of this pipeline. Example: `--param_list data.csv` with columns `id,input`.\n* A json or a yaml file should be a list of maps, each of which has keys corresponding to the arguments of the pipeline. Example: `--param_list data.json` with contents `[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]`.\n* A yaml blob can also be passed directly as a string. Example: `--param_list \"[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]\"`.\n\nWhen passing a csv, json or yaml file, relative path names are relativized to the location of the parameter file. No relativation is performed when `param_list` is a list of maps (as-is) or a yaml blob.", - "hidden": true - } - - } - } + } + + + , + "modality": { + "type": + "string", + "description": "Type: `string`, default: `rna`. ", + "help_text": "Type: `string`, default: `rna`. " + , + "default": "rna" + } + + + , + "obsp_connectivities": { + "type": + "string", + "description": "Type: `string`, default: `connectivities`. In which ", + "help_text": "Type: `string`, default: `connectivities`. In which .obsp slot the neighbor connectivities can be found." + , + "default": "connectivities" + } + + + , + "output": { + "type": + "string", + "description": "Type: `file`, required, default: `$id.$key.output.h5mu`, example: `output.h5mu`. Output file", + "help_text": "Type: `file`, required, default: `$id.$key.output.h5mu`, example: `output.h5mu`. Output file." + , + "default": "$id.$key.output.h5mu" + } + + + , + "output_compression": { + "type": + "string", + "description": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. ", + "help_text": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. ", + "enum": ["gzip", "lzf"] + + + } + + + , + "obsm_name": { + "type": + "string", + "description": "Type: `string`, default: `leiden`. Name of the ", + "help_text": "Type: `string`, default: `leiden`. Name of the .obsm key under which to add the cluster labels.\nThe name of the columns in the matrix will correspond to the resolutions.\n" + , + "default": "leiden" + } + + + , + "resolution": { + "type": + "string", + "description": "Type: List of `double`, required, default: `1`, multiple_sep: `\":\"`. A parameter value controlling the coarseness of the clustering", + "help_text": "Type: List of `double`, required, default: `1`, multiple_sep: `\":\"`. A parameter value controlling the coarseness of the clustering. 
Higher values lead to more clusters.\nMultiple values will result in clustering being performed multiple times.\n" + , + "default": "1" + } + + +} +}, + + + "nextflow input-output arguments" : { + "title": "Nextflow input-output arguments", + "type": "object", + "description": "Input/output parameters for Nextflow itself. Please note that both publishDir and publish_dir are supported but at least one has to be configured.", + "properties": { + + + "publish_dir": { + "type": + "string", + "description": "Type: `string`, required, example: `output/`. Path to an output directory", + "help_text": "Type: `string`, required, example: `output/`. Path to an output directory." + + } + + + , + "param_list": { + "type": + "string", + "description": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel", + "help_text": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel. A `param_list` can either be a list of maps, a csv file, a json file, a yaml file, or simply a yaml blob.\n\n* A list of maps (as-is) where the keys of each map corresponds to the arguments of the pipeline. Example: in a `nextflow.config` file: `param_list: [ [\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027], [\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027] ]`.\n* A csv file should have column names which correspond to the different arguments of this pipeline. Example: `--param_list data.csv` with columns `id,input`.\n* A json or a yaml file should be a list of maps, each of which has keys corresponding to the arguments of the pipeline. Example: `--param_list data.json` with contents `[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]`.\n* A yaml blob can also be passed directly as a string. Example: `--param_list \"[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]\"`.\n\nWhen passing a csv, json or yaml file, relative path names are relativized to the location of the parameter file. 
No relativation is performed when `param_list` is a list of maps (as-is) or a yaml blob.", + "hidden": true + + } + + +} +} +}, +"allOf": [ + + { + "$ref": "#/definitions/arguments" }, - "allOf": [ - { - "$ref": "#/definitions/arguments" - }, - { - "$ref": "#/definitions/nextflow input-output arguments" - } - ] + + { + "$ref": "#/definitions/nextflow input-output arguments" + } +] } diff --git a/target/nextflow/compression/compress_h5mu/.config.vsh.yaml b/target/nextflow/compression/compress_h5mu/.config.vsh.yaml index 925a68624c3..15d89236355 100644 --- a/target/nextflow/compression/compress_h5mu/.config.vsh.yaml +++ b/target/nextflow/compression/compress_h5mu/.config.vsh.yaml @@ -1,7 +1,7 @@ functionality: name: "compress_h5mu" namespace: "compression" - version: "0.12.3" + version: "0.12.4" authors: - name: "Dries Schaumont" roles: @@ -162,6 +162,6 @@ info: output: "/home/runner/work/openpipeline/openpipeline/target/nextflow/compression/compress_h5mu" executable: "/home/runner/work/openpipeline/openpipeline/target/nextflow/compression/compress_h5mu/compress_h5mu" viash_version: "0.7.5" - git_commit: "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" + git_commit: "a075b9f384e200b357c4c85801062a980ddb3383" git_remote: "https://github.com/openpipelines-bio/openpipeline" - git_tag: "0.12.2-3-g827d483cf7" + git_tag: "0.12.3-3-ga075b9f384" diff --git a/target/nextflow/compression/compress_h5mu/main.nf b/target/nextflow/compression/compress_h5mu/main.nf index 8254d02b797..b3d9bb5ab84 100644 --- a/target/nextflow/compression/compress_h5mu/main.nf +++ b/target/nextflow/compression/compress_h5mu/main.nf @@ -1,4 +1,4 @@ -// compress_h5mu 0.12.3 +// compress_h5mu 0.12.4 // // This wrapper script is auto-generated by viash 0.7.5 and is thus a derivative // work thereof. This software comes with ABSOLUTELY NO WARRANTY from Data @@ -27,7 +27,7 @@ thisConfig = processConfig(jsonSlurper.parseText('''{ "functionality" : { "name" : "compress_h5mu", "namespace" : "compression", - "version" : "0.12.3", + "version" : "0.12.4", "authors" : [ { "name" : "Dries Schaumont", @@ -241,9 +241,9 @@ thisConfig = processConfig(jsonSlurper.parseText('''{ "platform" : "nextflow", "output" : "/home/runner/work/openpipeline/openpipeline/target/nextflow/compression/compress_h5mu", "viash_version" : "0.7.5", - "git_commit" : "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f", + "git_commit" : "a075b9f384e200b357c4c85801062a980ddb3383", "git_remote" : "https://github.com/openpipelines-bio/openpipeline", - "git_tag" : "0.12.2-3-g827d483cf7" + "git_tag" : "0.12.3-3-ga075b9f384" } }''')) diff --git a/target/nextflow/compression/compress_h5mu/nextflow.config b/target/nextflow/compression/compress_h5mu/nextflow.config index d5ef2529754..f5e1490099d 100644 --- a/target/nextflow/compression/compress_h5mu/nextflow.config +++ b/target/nextflow/compression/compress_h5mu/nextflow.config @@ -2,7 +2,7 @@ manifest { name = 'compress_h5mu' mainScript = 'main.nf' nextflowVersion = '!>=20.12.1-edge' - version = '0.12.3' + version = '0.12.4' description = 'Compress a MuData file. 
\n' author = 'Dries Schaumont' } diff --git a/target/nextflow/compression/compress_h5mu/nextflow_schema.json b/target/nextflow/compression/compress_h5mu/nextflow_schema.json index 6f15cbff98d..1160871a2a7 100644 --- a/target/nextflow/compression/compress_h5mu/nextflow_schema.json +++ b/target/nextflow/compression/compress_h5mu/nextflow_schema.json @@ -1,67 +1,94 @@ { - "$schema": "http://json-schema.org/draft-07/schema", - "title": "compress_h5mu", - "description": "Compress a MuData file. \n", +"$schema": "http://json-schema.org/draft-07/schema", +"title": "compress_h5mu", +"description": "Compress a MuData file. \n", +"type": "object", +"definitions": { + + + + "arguments" : { + "title": "Arguments", "type": "object", - "definitions": { - "arguments" : { - "title": "Arguments", - "type": "object", - "description": "No description", - "properties": { - - "input": { - "type": "string", - "description": "Type: `file`, required, example: `sample_path`. Path to the input ", - "help_text": "Type: `file`, required, example: `sample_path`. Path to the input .h5mu." - }, - - "output": { - "type": "string", - "description": "Type: `file`, required, default: `$id.$key.output.output`. location of output file", - "help_text": "Type: `file`, required, default: `$id.$key.output.output`. location of output file.", - "default": "$id.$key.output.output" - }, - - "compression": { - "type": "string", - "description": "Type: `string`, default: `gzip`, choices: ``lzf`, `gzip``. Compression type", - "help_text": "Type: `string`, default: `gzip`, choices: ``lzf`, `gzip``. Compression type.", - "enum": ["lzf", "gzip"] + "description": "No description", + "properties": { + + + "input": { + "type": + "string", + "description": "Type: `file`, required, example: `sample_path`. Path to the input ", + "help_text": "Type: `file`, required, example: `sample_path`. Path to the input .h5mu." + + } + + + , + "output": { + "type": + "string", + "description": "Type: `file`, required, default: `$id.$key.output.output`. location of output file", + "help_text": "Type: `file`, required, default: `$id.$key.output.output`. location of output file." , - "default": "gzip" - } - - } - }, - "nextflow input-output arguments" : { - "title": "Nextflow input-output arguments", - "type": "object", - "description": "Input/output parameters for Nextflow itself. Please note that both publishDir and publish_dir are supported but at least one has to be configured.", - "properties": { - - "publish_dir": { - "type": "string", - "description": "Type: `string`, required, example: `output/`. Path to an output directory", - "help_text": "Type: `string`, required, example: `output/`. Path to an output directory." - }, - - "param_list": { - "type": "string", - "description": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel", - "help_text": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel. A `param_list` can either be a list of maps, a csv file, a json file, a yaml file, or simply a yaml blob.\n\n* A list of maps (as-is) where the keys of each map corresponds to the arguments of the pipeline. Example: in a `nextflow.config` file: `param_list: [ [\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027], [\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027] ]`.\n* A csv file should have column names which correspond to the different arguments of this pipeline. 
Example: `--param_list data.csv` with columns `id,input`.\n* A json or a yaml file should be a list of maps, each of which has keys corresponding to the arguments of the pipeline. Example: `--param_list data.json` with contents `[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]`.\n* A yaml blob can also be passed directly as a string. Example: `--param_list \"[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]\"`.\n\nWhen passing a csv, json or yaml file, relative path names are relativized to the location of the parameter file. No relativation is performed when `param_list` is a list of maps (as-is) or a yaml blob.", - "hidden": true - } - - } - } + "default": "$id.$key.output.output" + } + + + , + "compression": { + "type": + "string", + "description": "Type: `string`, default: `gzip`, choices: ``lzf`, `gzip``. Compression type", + "help_text": "Type: `string`, default: `gzip`, choices: ``lzf`, `gzip``. Compression type.", + "enum": ["lzf", "gzip"] + + , + "default": "gzip" + } + + +} +}, + + + "nextflow input-output arguments" : { + "title": "Nextflow input-output arguments", + "type": "object", + "description": "Input/output parameters for Nextflow itself. Please note that both publishDir and publish_dir are supported but at least one has to be configured.", + "properties": { + + + "publish_dir": { + "type": + "string", + "description": "Type: `string`, required, example: `output/`. Path to an output directory", + "help_text": "Type: `string`, required, example: `output/`. Path to an output directory." + + } + + + , + "param_list": { + "type": + "string", + "description": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel", + "help_text": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel. A `param_list` can either be a list of maps, a csv file, a json file, a yaml file, or simply a yaml blob.\n\n* A list of maps (as-is) where the keys of each map corresponds to the arguments of the pipeline. Example: in a `nextflow.config` file: `param_list: [ [\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027], [\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027] ]`.\n* A csv file should have column names which correspond to the different arguments of this pipeline. Example: `--param_list data.csv` with columns `id,input`.\n* A json or a yaml file should be a list of maps, each of which has keys corresponding to the arguments of the pipeline. Example: `--param_list data.json` with contents `[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]`.\n* A yaml blob can also be passed directly as a string. Example: `--param_list \"[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]\"`.\n\nWhen passing a csv, json or yaml file, relative path names are relativized to the location of the parameter file. 
No relativation is performed when `param_list` is a list of maps (as-is) or a yaml blob.", + "hidden": true + + } + + +} +} +}, +"allOf": [ + + { + "$ref": "#/definitions/arguments" }, - "allOf": [ - { - "$ref": "#/definitions/arguments" - }, - { - "$ref": "#/definitions/nextflow input-output arguments" - } - ] + + { + "$ref": "#/definitions/nextflow input-output arguments" + } +] } diff --git a/target/nextflow/convert/from_10xh5_to_h5mu/.config.vsh.yaml b/target/nextflow/convert/from_10xh5_to_h5mu/.config.vsh.yaml index b951eb52bcc..63290facddc 100644 --- a/target/nextflow/convert/from_10xh5_to_h5mu/.config.vsh.yaml +++ b/target/nextflow/convert/from_10xh5_to_h5mu/.config.vsh.yaml @@ -1,7 +1,7 @@ functionality: name: "from_10xh5_to_h5mu" namespace: "convert" - version: "0.12.3" + version: "0.12.4" authors: - name: "Robrecht Cannoodt" roles: @@ -267,6 +267,6 @@ info: output: "/home/runner/work/openpipeline/openpipeline/target/nextflow/convert/from_10xh5_to_h5mu" executable: "/home/runner/work/openpipeline/openpipeline/target/nextflow/convert/from_10xh5_to_h5mu/from_10xh5_to_h5mu" viash_version: "0.7.5" - git_commit: "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" + git_commit: "a075b9f384e200b357c4c85801062a980ddb3383" git_remote: "https://github.com/openpipelines-bio/openpipeline" - git_tag: "0.12.2-3-g827d483cf7" + git_tag: "0.12.3-3-ga075b9f384" diff --git a/target/nextflow/convert/from_10xh5_to_h5mu/main.nf b/target/nextflow/convert/from_10xh5_to_h5mu/main.nf index 7cbe56f7886..d79cb70ce42 100644 --- a/target/nextflow/convert/from_10xh5_to_h5mu/main.nf +++ b/target/nextflow/convert/from_10xh5_to_h5mu/main.nf @@ -1,4 +1,4 @@ -// from_10xh5_to_h5mu 0.12.3 +// from_10xh5_to_h5mu 0.12.4 // // This wrapper script is auto-generated by viash 0.7.5 and is thus a derivative // work thereof. 
This software comes with ABSOLUTELY NO WARRANTY from Data @@ -27,7 +27,7 @@ thisConfig = processConfig(jsonSlurper.parseText('''{ "functionality" : { "name" : "from_10xh5_to_h5mu", "namespace" : "convert", - "version" : "0.12.3", + "version" : "0.12.4", "authors" : [ { "name" : "Robrecht Cannoodt", @@ -394,9 +394,9 @@ thisConfig = processConfig(jsonSlurper.parseText('''{ "platform" : "nextflow", "output" : "/home/runner/work/openpipeline/openpipeline/target/nextflow/convert/from_10xh5_to_h5mu", "viash_version" : "0.7.5", - "git_commit" : "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f", + "git_commit" : "a075b9f384e200b357c4c85801062a980ddb3383", "git_remote" : "https://github.com/openpipelines-bio/openpipeline", - "git_tag" : "0.12.2-3-g827d483cf7" + "git_tag" : "0.12.3-3-ga075b9f384" } }''')) diff --git a/target/nextflow/convert/from_10xh5_to_h5mu/nextflow.config b/target/nextflow/convert/from_10xh5_to_h5mu/nextflow.config index 8339a73c46f..c1b6673bca3 100644 --- a/target/nextflow/convert/from_10xh5_to_h5mu/nextflow.config +++ b/target/nextflow/convert/from_10xh5_to_h5mu/nextflow.config @@ -2,7 +2,7 @@ manifest { name = 'from_10xh5_to_h5mu' mainScript = 'main.nf' nextflowVersion = '!>=20.12.1-edge' - version = '0.12.3' + version = '0.12.4' description = 'Converts a 10x h5 into an h5mu file.\n' author = 'Robrecht Cannoodt' } diff --git a/target/nextflow/convert/from_10xh5_to_h5mu/nextflow_schema.json b/target/nextflow/convert/from_10xh5_to_h5mu/nextflow_schema.json index 394644d23fa..adaaccceefd 100644 --- a/target/nextflow/convert/from_10xh5_to_h5mu/nextflow_schema.json +++ b/target/nextflow/convert/from_10xh5_to_h5mu/nextflow_schema.json @@ -1,113 +1,162 @@ { - "$schema": "http://json-schema.org/draft-07/schema", - "title": "from_10xh5_to_h5mu", - "description": "Converts a 10x h5 into an h5mu file.\n", +"$schema": "http://json-schema.org/draft-07/schema", +"title": "from_10xh5_to_h5mu", +"description": "Converts a 10x h5 into an h5mu file.\n", +"type": "object", +"definitions": { + + + + "inputs" : { + "title": "Inputs", "type": "object", - "definitions": { - "inputs" : { - "title": "Inputs", - "type": "object", - "description": "No description", - "properties": { - - "input": { - "type": "string", - "description": "Type: `file`, required, example: `raw_feature_bc_matrix.h5`. A 10x h5 file as generated by Cell Ranger", - "help_text": "Type: `file`, required, example: `raw_feature_bc_matrix.h5`. A 10x h5 file as generated by Cell Ranger." - }, - - "input_metrics_summary": { - "type": "string", - "description": "Type: `file`, example: `metrics_cellranger.h5`. A metrics summary csv file as generated by Cell Ranger", - "help_text": "Type: `file`, example: `metrics_cellranger.h5`. A metrics summary csv file as generated by Cell Ranger." - } - - } - }, - "outputs" : { - "title": "Outputs", - "type": "object", - "description": "No description", - "properties": { - - "output": { - "type": "string", - "description": "Type: `file`, default: `$id.$key.output.h5mu`, example: `output.h5mu`. Output h5mu file", - "help_text": "Type: `file`, default: `$id.$key.output.h5mu`, example: `output.h5mu`. Output h5mu file.", - "default": "$id.$key.output.h5mu" - }, - - "output_compression": { - "type": "string", - "description": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. ", - "help_text": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. 
", - "enum": ["gzip", "lzf"] + "description": "No description", + "properties": { + + + "input": { + "type": + "string", + "description": "Type: `file`, required, example: `raw_feature_bc_matrix.h5`. A 10x h5 file as generated by Cell Ranger", + "help_text": "Type: `file`, required, example: `raw_feature_bc_matrix.h5`. A 10x h5 file as generated by Cell Ranger." - }, - - "uns_metrics": { - "type": "string", - "description": "Type: `string`, default: `metrics_cellranger`. Name of the ", - "help_text": "Type: `string`, default: `metrics_cellranger`. Name of the .uns slot under which to QC metrics (if any).", - "default": "metrics_cellranger" - } - - } - }, - "arguments" : { - "title": "Arguments", - "type": "object", - "description": "No description", - "properties": { - - "min_genes": { - "type": "integer", - "description": "Type: `integer`, example: `100`. Minimum number of counts required for a cell to pass filtering", - "help_text": "Type: `integer`, example: `100`. Minimum number of counts required for a cell to pass filtering." - }, - - "min_counts": { - "type": "integer", - "description": "Type: `integer`, example: `1000`. Minimum number of genes expressed required for a cell to pass filtering", - "help_text": "Type: `integer`, example: `1000`. Minimum number of genes expressed required for a cell to pass filtering." - } - - } - }, - "nextflow input-output arguments" : { - "title": "Nextflow input-output arguments", - "type": "object", - "description": "Input/output parameters for Nextflow itself. Please note that both publishDir and publish_dir are supported but at least one has to be configured.", - "properties": { - - "publish_dir": { - "type": "string", - "description": "Type: `string`, required, example: `output/`. Path to an output directory", - "help_text": "Type: `string`, required, example: `output/`. Path to an output directory." - }, - - "param_list": { - "type": "string", - "description": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel", - "help_text": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel. A `param_list` can either be a list of maps, a csv file, a json file, a yaml file, or simply a yaml blob.\n\n* A list of maps (as-is) where the keys of each map corresponds to the arguments of the pipeline. Example: in a `nextflow.config` file: `param_list: [ [\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027], [\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027] ]`.\n* A csv file should have column names which correspond to the different arguments of this pipeline. Example: `--param_list data.csv` with columns `id,input`.\n* A json or a yaml file should be a list of maps, each of which has keys corresponding to the arguments of the pipeline. Example: `--param_list data.json` with contents `[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]`.\n* A yaml blob can also be passed directly as a string. Example: `--param_list \"[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]\"`.\n\nWhen passing a csv, json or yaml file, relative path names are relativized to the location of the parameter file. 
No relativation is performed when `param_list` is a list of maps (as-is) or a yaml blob.", - "hidden": true - } - - } - } + } + + + , + "input_metrics_summary": { + "type": + "string", + "description": "Type: `file`, example: `metrics_cellranger.h5`. A metrics summary csv file as generated by Cell Ranger", + "help_text": "Type: `file`, example: `metrics_cellranger.h5`. A metrics summary csv file as generated by Cell Ranger." + + } + + +} +}, + + + "outputs" : { + "title": "Outputs", + "type": "object", + "description": "No description", + "properties": { + + + "output": { + "type": + "string", + "description": "Type: `file`, default: `$id.$key.output.h5mu`, example: `output.h5mu`. Output h5mu file", + "help_text": "Type: `file`, default: `$id.$key.output.h5mu`, example: `output.h5mu`. Output h5mu file." + , + "default": "$id.$key.output.h5mu" + } + + + , + "output_compression": { + "type": + "string", + "description": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. ", + "help_text": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. ", + "enum": ["gzip", "lzf"] + + + } + + + , + "uns_metrics": { + "type": + "string", + "description": "Type: `string`, default: `metrics_cellranger`. Name of the ", + "help_text": "Type: `string`, default: `metrics_cellranger`. Name of the .uns slot under which to QC metrics (if any)." + , + "default": "metrics_cellranger" + } + + +} +}, + + + "arguments" : { + "title": "Arguments", + "type": "object", + "description": "No description", + "properties": { + + + "min_genes": { + "type": + "integer", + "description": "Type: `integer`, example: `100`. Minimum number of counts required for a cell to pass filtering", + "help_text": "Type: `integer`, example: `100`. Minimum number of counts required for a cell to pass filtering." + + } + + + , + "min_counts": { + "type": + "integer", + "description": "Type: `integer`, example: `1000`. Minimum number of genes expressed required for a cell to pass filtering", + "help_text": "Type: `integer`, example: `1000`. Minimum number of genes expressed required for a cell to pass filtering." + + } + + +} +}, + + + "nextflow input-output arguments" : { + "title": "Nextflow input-output arguments", + "type": "object", + "description": "Input/output parameters for Nextflow itself. Please note that both publishDir and publish_dir are supported but at least one has to be configured.", + "properties": { + + + "publish_dir": { + "type": + "string", + "description": "Type: `string`, required, example: `output/`. Path to an output directory", + "help_text": "Type: `string`, required, example: `output/`. Path to an output directory." + + } + + + , + "param_list": { + "type": + "string", + "description": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel", + "help_text": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel. A `param_list` can either be a list of maps, a csv file, a json file, a yaml file, or simply a yaml blob.\n\n* A list of maps (as-is) where the keys of each map corresponds to the arguments of the pipeline. Example: in a `nextflow.config` file: `param_list: [ [\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027], [\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027] ]`.\n* A csv file should have column names which correspond to the different arguments of this pipeline. 
Example: `--param_list data.csv` with columns `id,input`.\n* A json or a yaml file should be a list of maps, each of which has keys corresponding to the arguments of the pipeline. Example: `--param_list data.json` with contents `[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]`.\n* A yaml blob can also be passed directly as a string. Example: `--param_list \"[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]\"`.\n\nWhen passing a csv, json or yaml file, relative path names are relativized to the location of the parameter file. No relativation is performed when `param_list` is a list of maps (as-is) or a yaml blob.", + "hidden": true + + } + + +} +} +}, +"allOf": [ + + { + "$ref": "#/definitions/inputs" + }, + + { + "$ref": "#/definitions/outputs" + }, + + { + "$ref": "#/definitions/arguments" }, - "allOf": [ - { - "$ref": "#/definitions/inputs" - }, - { - "$ref": "#/definitions/outputs" - }, - { - "$ref": "#/definitions/arguments" - }, - { - "$ref": "#/definitions/nextflow input-output arguments" - } - ] + + { + "$ref": "#/definitions/nextflow input-output arguments" + } +] } diff --git a/target/nextflow/convert/from_10xmtx_to_h5mu/.config.vsh.yaml b/target/nextflow/convert/from_10xmtx_to_h5mu/.config.vsh.yaml index 86116d80b03..f3ef72393c7 100644 --- a/target/nextflow/convert/from_10xmtx_to_h5mu/.config.vsh.yaml +++ b/target/nextflow/convert/from_10xmtx_to_h5mu/.config.vsh.yaml @@ -1,7 +1,7 @@ functionality: name: "from_10xmtx_to_h5mu" namespace: "convert" - version: "0.12.3" + version: "0.12.4" authors: - name: "Robrecht Cannoodt" roles: @@ -161,6 +161,6 @@ info: output: "/home/runner/work/openpipeline/openpipeline/target/nextflow/convert/from_10xmtx_to_h5mu" executable: "/home/runner/work/openpipeline/openpipeline/target/nextflow/convert/from_10xmtx_to_h5mu/from_10xmtx_to_h5mu" viash_version: "0.7.5" - git_commit: "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" + git_commit: "a075b9f384e200b357c4c85801062a980ddb3383" git_remote: "https://github.com/openpipelines-bio/openpipeline" - git_tag: "0.12.2-3-g827d483cf7" + git_tag: "0.12.3-3-ga075b9f384" diff --git a/target/nextflow/convert/from_10xmtx_to_h5mu/main.nf b/target/nextflow/convert/from_10xmtx_to_h5mu/main.nf index 523ad8fcdf0..58116551ddd 100644 --- a/target/nextflow/convert/from_10xmtx_to_h5mu/main.nf +++ b/target/nextflow/convert/from_10xmtx_to_h5mu/main.nf @@ -1,4 +1,4 @@ -// from_10xmtx_to_h5mu 0.12.3 +// from_10xmtx_to_h5mu 0.12.4 // // This wrapper script is auto-generated by viash 0.7.5 and is thus a derivative // work thereof. 
This software comes with ABSOLUTELY NO WARRANTY from Data @@ -27,7 +27,7 @@ thisConfig = processConfig(jsonSlurper.parseText('''{ "functionality" : { "name" : "from_10xmtx_to_h5mu", "namespace" : "convert", - "version" : "0.12.3", + "version" : "0.12.4", "authors" : [ { "name" : "Robrecht Cannoodt", @@ -238,9 +238,9 @@ thisConfig = processConfig(jsonSlurper.parseText('''{ "platform" : "nextflow", "output" : "/home/runner/work/openpipeline/openpipeline/target/nextflow/convert/from_10xmtx_to_h5mu", "viash_version" : "0.7.5", - "git_commit" : "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f", + "git_commit" : "a075b9f384e200b357c4c85801062a980ddb3383", "git_remote" : "https://github.com/openpipelines-bio/openpipeline", - "git_tag" : "0.12.2-3-g827d483cf7" + "git_tag" : "0.12.3-3-ga075b9f384" } }''')) diff --git a/target/nextflow/convert/from_10xmtx_to_h5mu/nextflow.config b/target/nextflow/convert/from_10xmtx_to_h5mu/nextflow.config index 0e1499d8295..d4664dd39df 100644 --- a/target/nextflow/convert/from_10xmtx_to_h5mu/nextflow.config +++ b/target/nextflow/convert/from_10xmtx_to_h5mu/nextflow.config @@ -2,7 +2,7 @@ manifest { name = 'from_10xmtx_to_h5mu' mainScript = 'main.nf' nextflowVersion = '!>=20.12.1-edge' - version = '0.12.3' + version = '0.12.4' description = 'Converts a 10x mtx into an h5mu file.\n' author = 'Robrecht Cannoodt' } diff --git a/target/nextflow/convert/from_10xmtx_to_h5mu/nextflow_schema.json b/target/nextflow/convert/from_10xmtx_to_h5mu/nextflow_schema.json index 374aff5dd93..0ddcb2e45f3 100644 --- a/target/nextflow/convert/from_10xmtx_to_h5mu/nextflow_schema.json +++ b/target/nextflow/convert/from_10xmtx_to_h5mu/nextflow_schema.json @@ -1,66 +1,93 @@ { - "$schema": "http://json-schema.org/draft-07/schema", - "title": "from_10xmtx_to_h5mu", - "description": "Converts a 10x mtx into an h5mu file.\n", +"$schema": "http://json-schema.org/draft-07/schema", +"title": "from_10xmtx_to_h5mu", +"description": "Converts a 10x mtx into an h5mu file.\n", +"type": "object", +"definitions": { + + + + "arguments" : { + "title": "Arguments", "type": "object", - "definitions": { - "arguments" : { - "title": "Arguments", - "type": "object", - "description": "No description", - "properties": { - - "input": { - "type": "string", - "description": "Type: `file`, required, example: `input_dir_containing_gz_files`. Input mtx folder", - "help_text": "Type: `file`, required, example: `input_dir_containing_gz_files`. Input mtx folder" - }, - - "output": { - "type": "string", - "description": "Type: `file`, default: `$id.$key.output.h5mu`, example: `output.h5mu`. Output h5mu file", - "help_text": "Type: `file`, default: `$id.$key.output.h5mu`, example: `output.h5mu`. Output h5mu file.", - "default": "$id.$key.output.h5mu" - }, - - "output_compression": { - "type": "string", - "description": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. ", - "help_text": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. ", - "enum": ["gzip", "lzf"] + "description": "No description", + "properties": { + + + "input": { + "type": + "string", + "description": "Type: `file`, required, example: `input_dir_containing_gz_files`. Input mtx folder", + "help_text": "Type: `file`, required, example: `input_dir_containing_gz_files`. Input mtx folder" - } - - } - }, - "nextflow input-output arguments" : { - "title": "Nextflow input-output arguments", - "type": "object", - "description": "Input/output parameters for Nextflow itself. 
Please note that both publishDir and publish_dir are supported but at least one has to be configured.", - "properties": { - - "publish_dir": { - "type": "string", - "description": "Type: `string`, required, example: `output/`. Path to an output directory", - "help_text": "Type: `string`, required, example: `output/`. Path to an output directory." - }, - - "param_list": { - "type": "string", - "description": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel", - "help_text": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel. A `param_list` can either be a list of maps, a csv file, a json file, a yaml file, or simply a yaml blob.\n\n* A list of maps (as-is) where the keys of each map corresponds to the arguments of the pipeline. Example: in a `nextflow.config` file: `param_list: [ [\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027], [\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027] ]`.\n* A csv file should have column names which correspond to the different arguments of this pipeline. Example: `--param_list data.csv` with columns `id,input`.\n* A json or a yaml file should be a list of maps, each of which has keys corresponding to the arguments of the pipeline. Example: `--param_list data.json` with contents `[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]`.\n* A yaml blob can also be passed directly as a string. Example: `--param_list \"[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]\"`.\n\nWhen passing a csv, json or yaml file, relative path names are relativized to the location of the parameter file. No relativation is performed when `param_list` is a list of maps (as-is) or a yaml blob.", - "hidden": true - } - - } - } + } + + + , + "output": { + "type": + "string", + "description": "Type: `file`, default: `$id.$key.output.h5mu`, example: `output.h5mu`. Output h5mu file", + "help_text": "Type: `file`, default: `$id.$key.output.h5mu`, example: `output.h5mu`. Output h5mu file." + , + "default": "$id.$key.output.h5mu" + } + + + , + "output_compression": { + "type": + "string", + "description": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. ", + "help_text": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. ", + "enum": ["gzip", "lzf"] + + + } + + +} +}, + + + "nextflow input-output arguments" : { + "title": "Nextflow input-output arguments", + "type": "object", + "description": "Input/output parameters for Nextflow itself. Please note that both publishDir and publish_dir are supported but at least one has to be configured.", + "properties": { + + + "publish_dir": { + "type": + "string", + "description": "Type: `string`, required, example: `output/`. Path to an output directory", + "help_text": "Type: `string`, required, example: `output/`. Path to an output directory." + + } + + + , + "param_list": { + "type": + "string", + "description": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel", + "help_text": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel. 
A `param_list` can either be a list of maps, a csv file, a json file, a yaml file, or simply a yaml blob.\n\n* A list of maps (as-is) where the keys of each map corresponds to the arguments of the pipeline. Example: in a `nextflow.config` file: `param_list: [ [\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027], [\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027] ]`.\n* A csv file should have column names which correspond to the different arguments of this pipeline. Example: `--param_list data.csv` with columns `id,input`.\n* A json or a yaml file should be a list of maps, each of which has keys corresponding to the arguments of the pipeline. Example: `--param_list data.json` with contents `[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]`.\n* A yaml blob can also be passed directly as a string. Example: `--param_list \"[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]\"`.\n\nWhen passing a csv, json or yaml file, relative path names are relativized to the location of the parameter file. No relativation is performed when `param_list` is a list of maps (as-is) or a yaml blob.", + "hidden": true + + } + + +} +} +}, +"allOf": [ + + { + "$ref": "#/definitions/arguments" }, - "allOf": [ - { - "$ref": "#/definitions/arguments" - }, - { - "$ref": "#/definitions/nextflow input-output arguments" - } - ] + + { + "$ref": "#/definitions/nextflow input-output arguments" + } +] } diff --git a/target/nextflow/convert/from_bd_to_10x_molecular_barcode_tags/.config.vsh.yaml b/target/nextflow/convert/from_bd_to_10x_molecular_barcode_tags/.config.vsh.yaml index 05904731e0e..f8957c86293 100644 --- a/target/nextflow/convert/from_bd_to_10x_molecular_barcode_tags/.config.vsh.yaml +++ b/target/nextflow/convert/from_bd_to_10x_molecular_barcode_tags/.config.vsh.yaml @@ -1,7 +1,7 @@ functionality: name: "from_bd_to_10x_molecular_barcode_tags" namespace: "convert" - version: "0.12.3" + version: "0.12.4" authors: - name: "Dries Schaumont" roles: @@ -154,6 +154,6 @@ info: output: "/home/runner/work/openpipeline/openpipeline/target/nextflow/convert/from_bd_to_10x_molecular_barcode_tags" executable: "/home/runner/work/openpipeline/openpipeline/target/nextflow/convert/from_bd_to_10x_molecular_barcode_tags/from_bd_to_10x_molecular_barcode_tags" viash_version: "0.7.5" - git_commit: "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" + git_commit: "a075b9f384e200b357c4c85801062a980ddb3383" git_remote: "https://github.com/openpipelines-bio/openpipeline" - git_tag: "0.12.2-3-g827d483cf7" + git_tag: "0.12.3-3-ga075b9f384" diff --git a/target/nextflow/convert/from_bd_to_10x_molecular_barcode_tags/main.nf b/target/nextflow/convert/from_bd_to_10x_molecular_barcode_tags/main.nf index 2b89e2d05fb..db8ae9f8601 100644 --- a/target/nextflow/convert/from_bd_to_10x_molecular_barcode_tags/main.nf +++ b/target/nextflow/convert/from_bd_to_10x_molecular_barcode_tags/main.nf @@ -1,4 +1,4 @@ -// from_bd_to_10x_molecular_barcode_tags 0.12.3 +// from_bd_to_10x_molecular_barcode_tags 0.12.4 // // This wrapper script is auto-generated by viash 0.7.5 and is thus a derivative // work thereof. 
This software comes with ABSOLUTELY NO WARRANTY from Data @@ -27,7 +27,7 @@ thisConfig = processConfig(jsonSlurper.parseText('''{ "functionality" : { "name" : "from_bd_to_10x_molecular_barcode_tags", "namespace" : "convert", - "version" : "0.12.3", + "version" : "0.12.4", "authors" : [ { "name" : "Dries Schaumont", @@ -222,9 +222,9 @@ thisConfig = processConfig(jsonSlurper.parseText('''{ "platform" : "nextflow", "output" : "/home/runner/work/openpipeline/openpipeline/target/nextflow/convert/from_bd_to_10x_molecular_barcode_tags", "viash_version" : "0.7.5", - "git_commit" : "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f", + "git_commit" : "a075b9f384e200b357c4c85801062a980ddb3383", "git_remote" : "https://github.com/openpipelines-bio/openpipeline", - "git_tag" : "0.12.2-3-g827d483cf7" + "git_tag" : "0.12.3-3-ga075b9f384" } }''')) diff --git a/target/nextflow/convert/from_bd_to_10x_molecular_barcode_tags/nextflow.config b/target/nextflow/convert/from_bd_to_10x_molecular_barcode_tags/nextflow.config index 0c032b2e72f..d2d37f774ed 100644 --- a/target/nextflow/convert/from_bd_to_10x_molecular_barcode_tags/nextflow.config +++ b/target/nextflow/convert/from_bd_to_10x_molecular_barcode_tags/nextflow.config @@ -2,7 +2,7 @@ manifest { name = 'from_bd_to_10x_molecular_barcode_tags' mainScript = 'main.nf' nextflowVersion = '!>=20.12.1-edge' - version = '0.12.3' + version = '0.12.4' description = 'Convert the molecular barcode sequence SAM tag from BD format (MA) to 10X format (UB).\n' author = 'Dries Schaumont' } diff --git a/target/nextflow/convert/from_bd_to_10x_molecular_barcode_tags/nextflow_schema.json b/target/nextflow/convert/from_bd_to_10x_molecular_barcode_tags/nextflow_schema.json index 76c9c72277d..14124b81b14 100644 --- a/target/nextflow/convert/from_bd_to_10x_molecular_barcode_tags/nextflow_schema.json +++ b/target/nextflow/convert/from_bd_to_10x_molecular_barcode_tags/nextflow_schema.json @@ -1,71 +1,102 @@ { - "$schema": "http://json-schema.org/draft-07/schema", - "title": "from_bd_to_10x_molecular_barcode_tags", - "description": "Convert the molecular barcode sequence SAM tag from BD format (MA) to 10X format (UB).\n", +"$schema": "http://json-schema.org/draft-07/schema", +"title": "from_bd_to_10x_molecular_barcode_tags", +"description": "Convert the molecular barcode sequence SAM tag from BD format (MA) to 10X format (UB).\n", +"type": "object", +"definitions": { + + + + "arguments" : { + "title": "Arguments", "type": "object", - "definitions": { - "arguments" : { - "title": "Arguments", - "type": "object", - "description": "No description", - "properties": { - - "input": { - "type": "string", - "description": "Type: `file`, required, example: `input.bam`. Input SAM or BAM file", - "help_text": "Type: `file`, required, example: `input.bam`. Input SAM or BAM file." - }, - - "output": { - "type": "string", - "description": "Type: `file`, default: `$id.$key.output.sam`, example: `output.sam`. Output alignment file", - "help_text": "Type: `file`, default: `$id.$key.output.sam`, example: `output.sam`. Output alignment file.", - "default": "$id.$key.output.sam" - }, - - "bam": { - "type": "boolean", - "description": "Type: `boolean_true`, default: `false`. Output a BAM file", - "help_text": "Type: `boolean_true`, default: `false`. Output a BAM file.", - "default": "False" - }, - - "threads": { - "type": "integer", - "description": "Type: `integer`. Number of threads", - "help_text": "Type: `integer`. 
Number of threads" - } - - } - }, - "nextflow input-output arguments" : { - "title": "Nextflow input-output arguments", - "type": "object", - "description": "Input/output parameters for Nextflow itself. Please note that both publishDir and publish_dir are supported but at least one has to be configured.", - "properties": { - - "publish_dir": { - "type": "string", - "description": "Type: `string`, required, example: `output/`. Path to an output directory", - "help_text": "Type: `string`, required, example: `output/`. Path to an output directory." - }, - - "param_list": { - "type": "string", - "description": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel", - "help_text": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel. A `param_list` can either be a list of maps, a csv file, a json file, a yaml file, or simply a yaml blob.\n\n* A list of maps (as-is) where the keys of each map corresponds to the arguments of the pipeline. Example: in a `nextflow.config` file: `param_list: [ [\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027], [\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027] ]`.\n* A csv file should have column names which correspond to the different arguments of this pipeline. Example: `--param_list data.csv` with columns `id,input`.\n* A json or a yaml file should be a list of maps, each of which has keys corresponding to the arguments of the pipeline. Example: `--param_list data.json` with contents `[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]`.\n* A yaml blob can also be passed directly as a string. Example: `--param_list \"[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]\"`.\n\nWhen passing a csv, json or yaml file, relative path names are relativized to the location of the parameter file. No relativation is performed when `param_list` is a list of maps (as-is) or a yaml blob.", - "hidden": true - } - - } - } + "description": "No description", + "properties": { + + + "input": { + "type": + "string", + "description": "Type: `file`, required, example: `input.bam`. Input SAM or BAM file", + "help_text": "Type: `file`, required, example: `input.bam`. Input SAM or BAM file." + + } + + + , + "output": { + "type": + "string", + "description": "Type: `file`, default: `$id.$key.output.sam`, example: `output.sam`. Output alignment file", + "help_text": "Type: `file`, default: `$id.$key.output.sam`, example: `output.sam`. Output alignment file." + , + "default": "$id.$key.output.sam" + } + + + , + "bam": { + "type": + "boolean", + "description": "Type: `boolean_true`, default: `false`. Output a BAM file", + "help_text": "Type: `boolean_true`, default: `false`. Output a BAM file." + , + "default": "False" + } + + + , + "threads": { + "type": + "integer", + "description": "Type: `integer`. Number of threads", + "help_text": "Type: `integer`. Number of threads" + + } + + +} +}, + + + "nextflow input-output arguments" : { + "title": "Nextflow input-output arguments", + "type": "object", + "description": "Input/output parameters for Nextflow itself. 
Please note that both publishDir and publish_dir are supported but at least one has to be configured.", + "properties": { + + + "publish_dir": { + "type": + "string", + "description": "Type: `string`, required, example: `output/`. Path to an output directory", + "help_text": "Type: `string`, required, example: `output/`. Path to an output directory." + + } + + + , + "param_list": { + "type": + "string", + "description": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel", + "help_text": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel. A `param_list` can either be a list of maps, a csv file, a json file, a yaml file, or simply a yaml blob.\n\n* A list of maps (as-is) where the keys of each map corresponds to the arguments of the pipeline. Example: in a `nextflow.config` file: `param_list: [ [\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027], [\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027] ]`.\n* A csv file should have column names which correspond to the different arguments of this pipeline. Example: `--param_list data.csv` with columns `id,input`.\n* A json or a yaml file should be a list of maps, each of which has keys corresponding to the arguments of the pipeline. Example: `--param_list data.json` with contents `[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]`.\n* A yaml blob can also be passed directly as a string. Example: `--param_list \"[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]\"`.\n\nWhen passing a csv, json or yaml file, relative path names are relativized to the location of the parameter file. 
No relativation is performed when `param_list` is a list of maps (as-is) or a yaml blob.", + "hidden": true + + } + + +} +} +}, +"allOf": [ + + { + "$ref": "#/definitions/arguments" }, - "allOf": [ - { - "$ref": "#/definitions/arguments" - }, - { - "$ref": "#/definitions/nextflow input-output arguments" - } - ] + + { + "$ref": "#/definitions/nextflow input-output arguments" + } +] } diff --git a/target/nextflow/convert/from_bdrhap_to_h5mu/.config.vsh.yaml b/target/nextflow/convert/from_bdrhap_to_h5mu/.config.vsh.yaml index 2eb7adf87f1..6b153d04dbf 100644 --- a/target/nextflow/convert/from_bdrhap_to_h5mu/.config.vsh.yaml +++ b/target/nextflow/convert/from_bdrhap_to_h5mu/.config.vsh.yaml @@ -1,7 +1,7 @@ functionality: name: "from_bdrhap_to_h5mu" namespace: "convert" - version: "0.12.3" + version: "0.12.4" authors: - name: "Robrecht Cannoodt" roles: @@ -176,6 +176,6 @@ info: output: "/home/runner/work/openpipeline/openpipeline/target/nextflow/convert/from_bdrhap_to_h5mu" executable: "/home/runner/work/openpipeline/openpipeline/target/nextflow/convert/from_bdrhap_to_h5mu/from_bdrhap_to_h5mu" viash_version: "0.7.5" - git_commit: "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" + git_commit: "a075b9f384e200b357c4c85801062a980ddb3383" git_remote: "https://github.com/openpipelines-bio/openpipeline" - git_tag: "0.12.2-3-g827d483cf7" + git_tag: "0.12.3-3-ga075b9f384" diff --git a/target/nextflow/convert/from_bdrhap_to_h5mu/main.nf b/target/nextflow/convert/from_bdrhap_to_h5mu/main.nf index 18e88385f54..b17ff3b49c0 100644 --- a/target/nextflow/convert/from_bdrhap_to_h5mu/main.nf +++ b/target/nextflow/convert/from_bdrhap_to_h5mu/main.nf @@ -1,4 +1,4 @@ -// from_bdrhap_to_h5mu 0.12.3 +// from_bdrhap_to_h5mu 0.12.4 // // This wrapper script is auto-generated by viash 0.7.5 and is thus a derivative // work thereof. 
This software comes with ABSOLUTELY NO WARRANTY from Data @@ -27,7 +27,7 @@ thisConfig = processConfig(jsonSlurper.parseText('''{ "functionality" : { "name" : "from_bdrhap_to_h5mu", "namespace" : "convert", - "version" : "0.12.3", + "version" : "0.12.4", "authors" : [ { "name" : "Robrecht Cannoodt", @@ -261,9 +261,9 @@ thisConfig = processConfig(jsonSlurper.parseText('''{ "platform" : "nextflow", "output" : "/home/runner/work/openpipeline/openpipeline/target/nextflow/convert/from_bdrhap_to_h5mu", "viash_version" : "0.7.5", - "git_commit" : "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f", + "git_commit" : "a075b9f384e200b357c4c85801062a980ddb3383", "git_remote" : "https://github.com/openpipelines-bio/openpipeline", - "git_tag" : "0.12.2-3-g827d483cf7" + "git_tag" : "0.12.3-3-ga075b9f384" } }''')) diff --git a/target/nextflow/convert/from_bdrhap_to_h5mu/nextflow.config b/target/nextflow/convert/from_bdrhap_to_h5mu/nextflow.config index 17c07a7c158..d7e032ca5aa 100644 --- a/target/nextflow/convert/from_bdrhap_to_h5mu/nextflow.config +++ b/target/nextflow/convert/from_bdrhap_to_h5mu/nextflow.config @@ -2,7 +2,7 @@ manifest { name = 'from_bdrhap_to_h5mu' mainScript = 'main.nf' nextflowVersion = '!>=20.12.1-edge' - version = '0.12.3' + version = '0.12.4' description = 'Convert the output of a BD Rhapsody WTA pipeline to a MuData h5 file.\n' author = 'Robrecht Cannoodt' } diff --git a/target/nextflow/convert/from_bdrhap_to_h5mu/nextflow_schema.json b/target/nextflow/convert/from_bdrhap_to_h5mu/nextflow_schema.json index 54cf703bb73..58616764b07 100644 --- a/target/nextflow/convert/from_bdrhap_to_h5mu/nextflow_schema.json +++ b/target/nextflow/convert/from_bdrhap_to_h5mu/nextflow_schema.json @@ -1,83 +1,117 @@ { - "$schema": "http://json-schema.org/draft-07/schema", - "title": "from_bdrhap_to_h5mu", - "description": "Convert the output of a BD Rhapsody WTA pipeline to a MuData h5 file.\n", +"$schema": "http://json-schema.org/draft-07/schema", +"title": "from_bdrhap_to_h5mu", +"description": "Convert the output of a BD Rhapsody WTA pipeline to a MuData h5 file.\n", +"type": "object", +"definitions": { + + + + "inputs" : { + "title": "Inputs", "type": "object", - "definitions": { - "inputs" : { - "title": "Inputs", - "type": "object", - "description": "No description", - "properties": { - - "id": { - "type": "string", - "description": "Type: `string`, required, example: `my_id`. A sample ID", - "help_text": "Type: `string`, required, example: `my_id`. A sample ID." - }, - - "input": { - "type": "string", - "description": "Type: `file`, required, example: `input_dir/`. The output of a BD Rhapsody workflow", - "help_text": "Type: `file`, required, example: `input_dir/`. The output of a BD Rhapsody workflow." - } - - } - }, - "outputs" : { - "title": "Outputs", - "type": "object", - "description": "No description", - "properties": { - - "output": { - "type": "string", - "description": "Type: `file`, required, default: `$id.$key.output.h5mu`, example: `output.h5mu`. Output h5mu file", - "help_text": "Type: `file`, required, default: `$id.$key.output.h5mu`, example: `output.h5mu`. Output h5mu file.", - "default": "$id.$key.output.h5mu" - }, - - "output_compression": { - "type": "string", - "description": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. ", - "help_text": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. 
", - "enum": ["gzip", "lzf"] + "description": "No description", + "properties": { + + + "id": { + "type": + "string", + "description": "Type: `string`, required, example: `my_id`. A sample ID", + "help_text": "Type: `string`, required, example: `my_id`. A sample ID." - } - - } - }, - "nextflow input-output arguments" : { - "title": "Nextflow input-output arguments", - "type": "object", - "description": "Input/output parameters for Nextflow itself. Please note that both publishDir and publish_dir are supported but at least one has to be configured.", - "properties": { - - "publish_dir": { - "type": "string", - "description": "Type: `string`, required, example: `output/`. Path to an output directory", - "help_text": "Type: `string`, required, example: `output/`. Path to an output directory." - }, - - "param_list": { - "type": "string", - "description": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel", - "help_text": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel. A `param_list` can either be a list of maps, a csv file, a json file, a yaml file, or simply a yaml blob.\n\n* A list of maps (as-is) where the keys of each map corresponds to the arguments of the pipeline. Example: in a `nextflow.config` file: `param_list: [ [\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027], [\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027] ]`.\n* A csv file should have column names which correspond to the different arguments of this pipeline. Example: `--param_list data.csv` with columns `id,input`.\n* A json or a yaml file should be a list of maps, each of which has keys corresponding to the arguments of the pipeline. Example: `--param_list data.json` with contents `[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]`.\n* A yaml blob can also be passed directly as a string. Example: `--param_list \"[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]\"`.\n\nWhen passing a csv, json or yaml file, relative path names are relativized to the location of the parameter file. No relativation is performed when `param_list` is a list of maps (as-is) or a yaml blob.", - "hidden": true - } - - } - } + } + + + , + "input": { + "type": + "string", + "description": "Type: `file`, required, example: `input_dir/`. The output of a BD Rhapsody workflow", + "help_text": "Type: `file`, required, example: `input_dir/`. The output of a BD Rhapsody workflow." + + } + + +} +}, + + + "outputs" : { + "title": "Outputs", + "type": "object", + "description": "No description", + "properties": { + + + "output": { + "type": + "string", + "description": "Type: `file`, required, default: `$id.$key.output.h5mu`, example: `output.h5mu`. Output h5mu file", + "help_text": "Type: `file`, required, default: `$id.$key.output.h5mu`, example: `output.h5mu`. Output h5mu file." + , + "default": "$id.$key.output.h5mu" + } + + + , + "output_compression": { + "type": + "string", + "description": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. ", + "help_text": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. 
", + "enum": ["gzip", "lzf"] + + + } + + +} +}, + + + "nextflow input-output arguments" : { + "title": "Nextflow input-output arguments", + "type": "object", + "description": "Input/output parameters for Nextflow itself. Please note that both publishDir and publish_dir are supported but at least one has to be configured.", + "properties": { + + + "publish_dir": { + "type": + "string", + "description": "Type: `string`, required, example: `output/`. Path to an output directory", + "help_text": "Type: `string`, required, example: `output/`. Path to an output directory." + + } + + + , + "param_list": { + "type": + "string", + "description": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel", + "help_text": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel. A `param_list` can either be a list of maps, a csv file, a json file, a yaml file, or simply a yaml blob.\n\n* A list of maps (as-is) where the keys of each map corresponds to the arguments of the pipeline. Example: in a `nextflow.config` file: `param_list: [ [\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027], [\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027] ]`.\n* A csv file should have column names which correspond to the different arguments of this pipeline. Example: `--param_list data.csv` with columns `id,input`.\n* A json or a yaml file should be a list of maps, each of which has keys corresponding to the arguments of the pipeline. Example: `--param_list data.json` with contents `[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]`.\n* A yaml blob can also be passed directly as a string. Example: `--param_list \"[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]\"`.\n\nWhen passing a csv, json or yaml file, relative path names are relativized to the location of the parameter file. 
No relativation is performed when `param_list` is a list of maps (as-is) or a yaml blob.", + "hidden": true + + } + + +} +} +}, +"allOf": [ + + { + "$ref": "#/definitions/inputs" + }, + + { + "$ref": "#/definitions/outputs" }, - "allOf": [ - { - "$ref": "#/definitions/inputs" - }, - { - "$ref": "#/definitions/outputs" - }, - { - "$ref": "#/definitions/nextflow input-output arguments" - } - ] + + { + "$ref": "#/definitions/nextflow input-output arguments" + } +] } diff --git a/target/nextflow/convert/from_cellranger_multi_to_h5mu/.config.vsh.yaml b/target/nextflow/convert/from_cellranger_multi_to_h5mu/.config.vsh.yaml index 3637e858568..ab95a739da8 100644 --- a/target/nextflow/convert/from_cellranger_multi_to_h5mu/.config.vsh.yaml +++ b/target/nextflow/convert/from_cellranger_multi_to_h5mu/.config.vsh.yaml @@ -1,7 +1,7 @@ functionality: name: "from_cellranger_multi_to_h5mu" namespace: "convert" - version: "0.12.3" + version: "0.12.4" authors: - name: "Dries Schaumont" roles: @@ -185,6 +185,6 @@ info: output: "/home/runner/work/openpipeline/openpipeline/target/nextflow/convert/from_cellranger_multi_to_h5mu" executable: "/home/runner/work/openpipeline/openpipeline/target/nextflow/convert/from_cellranger_multi_to_h5mu/from_cellranger_multi_to_h5mu" viash_version: "0.7.5" - git_commit: "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" + git_commit: "a075b9f384e200b357c4c85801062a980ddb3383" git_remote: "https://github.com/openpipelines-bio/openpipeline" - git_tag: "0.12.2-3-g827d483cf7" + git_tag: "0.12.3-3-ga075b9f384" diff --git a/target/nextflow/convert/from_cellranger_multi_to_h5mu/main.nf b/target/nextflow/convert/from_cellranger_multi_to_h5mu/main.nf index 51e2c6b7e1c..7dd685c75ea 100644 --- a/target/nextflow/convert/from_cellranger_multi_to_h5mu/main.nf +++ b/target/nextflow/convert/from_cellranger_multi_to_h5mu/main.nf @@ -1,4 +1,4 @@ -// from_cellranger_multi_to_h5mu 0.12.3 +// from_cellranger_multi_to_h5mu 0.12.4 // // This wrapper script is auto-generated by viash 0.7.5 and is thus a derivative // work thereof. 
This software comes with ABSOLUTELY NO WARRANTY from Data @@ -27,7 +27,7 @@ thisConfig = processConfig(jsonSlurper.parseText('''{ "functionality" : { "name" : "from_cellranger_multi_to_h5mu", "namespace" : "convert", - "version" : "0.12.3", + "version" : "0.12.4", "authors" : [ { "name" : "Dries Schaumont", @@ -263,9 +263,9 @@ thisConfig = processConfig(jsonSlurper.parseText('''{ "platform" : "nextflow", "output" : "/home/runner/work/openpipeline/openpipeline/target/nextflow/convert/from_cellranger_multi_to_h5mu", "viash_version" : "0.7.5", - "git_commit" : "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f", + "git_commit" : "a075b9f384e200b357c4c85801062a980ddb3383", "git_remote" : "https://github.com/openpipelines-bio/openpipeline", - "git_tag" : "0.12.2-3-g827d483cf7" + "git_tag" : "0.12.3-3-ga075b9f384" } }''')) diff --git a/target/nextflow/convert/from_cellranger_multi_to_h5mu/nextflow.config b/target/nextflow/convert/from_cellranger_multi_to_h5mu/nextflow.config index 3a4fb9b8e62..59e1bd5b075 100644 --- a/target/nextflow/convert/from_cellranger_multi_to_h5mu/nextflow.config +++ b/target/nextflow/convert/from_cellranger_multi_to_h5mu/nextflow.config @@ -2,7 +2,7 @@ manifest { name = 'from_cellranger_multi_to_h5mu' mainScript = 'main.nf' nextflowVersion = '!>=20.12.1-edge' - version = '0.12.3' + version = '0.12.4' description = 'Converts the output from cellranger multi to a single .h5mu file.\nBy default, will map the following library type names to modality names:\n - Gene Expression: rna\n - Peaks: atac\n - Antibody Capture: prot\n - VDJ: vdj\n - VDJ-T: vdj_t\n - VDJ-B: vdj_b\n - CRISPR Guide Capture: crispr\n - Multiplexing Capture: hashing\n\nOther library types have their whitepace removed and dashes replaced by\nunderscores to generate the modality name.\n\nCurrently does not allow parsing the output from cell barcode demultiplexing.\n' author = 'Dries Schaumont' } diff --git a/target/nextflow/convert/from_cellranger_multi_to_h5mu/nextflow_schema.json b/target/nextflow/convert/from_cellranger_multi_to_h5mu/nextflow_schema.json index aea87733525..bbe7a4d4618 100644 --- a/target/nextflow/convert/from_cellranger_multi_to_h5mu/nextflow_schema.json +++ b/target/nextflow/convert/from_cellranger_multi_to_h5mu/nextflow_schema.json @@ -1,73 +1,104 @@ { - "$schema": "http://json-schema.org/draft-07/schema", - "title": "from_cellranger_multi_to_h5mu", - "description": "Converts the output from cellranger multi to a single .h5mu file.\nBy default, will map the following library type names to modality names:\n - Gene Expression: rna\n - Peaks: atac\n - Antibody Capture: prot\n - VDJ: vdj\n - VDJ-T: vdj_t\n - VDJ-B: vdj_b\n - CRISPR Guide Capture: crispr\n - Multiplexing Capture: hashing\n\nOther library types have their whitepace removed and dashes replaced by\nunderscores to generate the modality name.\n\nCurrently does not allow parsing the output from cell barcode demultiplexing.\n", +"$schema": "http://json-schema.org/draft-07/schema", +"title": "from_cellranger_multi_to_h5mu", +"description": "Converts the output from cellranger multi to a single .h5mu file.\nBy default, will map the following library type names to modality names:\n - Gene Expression: rna\n - Peaks: atac\n - Antibody Capture: prot\n - VDJ: vdj\n - VDJ-T: vdj_t\n - VDJ-B: vdj_b\n - CRISPR Guide Capture: crispr\n - Multiplexing Capture: hashing\n\nOther library types have their whitepace removed and dashes replaced by\nunderscores to generate the modality name.\n\nCurrently does not allow parsing the output from cell barcode 
demultiplexing.\n", +"type": "object", +"definitions": { + + + + "arguments" : { + "title": "Arguments", "type": "object", - "definitions": { - "arguments" : { - "title": "Arguments", - "type": "object", - "description": "No description", - "properties": { - - "input": { - "type": "string", - "description": "Type: `file`, required, example: `input_dir_containing_modalities`. Input folder", - "help_text": "Type: `file`, required, example: `input_dir_containing_modalities`. Input folder. Must contain the output from a cellranger multi run." - }, - - "output": { - "type": "string", - "description": "Type: `file`, default: `$id.$key.output.h5mu`, example: `output.h5mu`. Output h5mu file", - "help_text": "Type: `file`, default: `$id.$key.output.h5mu`, example: `output.h5mu`. Output h5mu file.", - "default": "$id.$key.output.h5mu" - }, - - "output_compression": { - "type": "string", - "description": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. ", - "help_text": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. ", - "enum": ["gzip", "lzf"] + "description": "No description", + "properties": { + + + "input": { + "type": + "string", + "description": "Type: `file`, required, example: `input_dir_containing_modalities`. Input folder", + "help_text": "Type: `file`, required, example: `input_dir_containing_modalities`. Input folder. Must contain the output from a cellranger multi run." - }, - - "uns_metrics": { - "type": "string", - "description": "Type: `string`, default: `metrics_cellranger`. Name of the ", - "help_text": "Type: `string`, default: `metrics_cellranger`. Name of the .uns slot under which to QC metrics (if any).", - "default": "metrics_cellranger" - } - - } - }, - "nextflow input-output arguments" : { - "title": "Nextflow input-output arguments", - "type": "object", - "description": "Input/output parameters for Nextflow itself. Please note that both publishDir and publish_dir are supported but at least one has to be configured.", - "properties": { - - "publish_dir": { - "type": "string", - "description": "Type: `string`, required, example: `output/`. Path to an output directory", - "help_text": "Type: `string`, required, example: `output/`. Path to an output directory." - }, - - "param_list": { - "type": "string", - "description": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel", - "help_text": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel. A `param_list` can either be a list of maps, a csv file, a json file, a yaml file, or simply a yaml blob.\n\n* A list of maps (as-is) where the keys of each map corresponds to the arguments of the pipeline. Example: in a `nextflow.config` file: `param_list: [ [\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027], [\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027] ]`.\n* A csv file should have column names which correspond to the different arguments of this pipeline. Example: `--param_list data.csv` with columns `id,input`.\n* A json or a yaml file should be a list of maps, each of which has keys corresponding to the arguments of the pipeline. Example: `--param_list data.json` with contents `[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]`.\n* A yaml blob can also be passed directly as a string. 
Example: `--param_list \"[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]\"`.\n\nWhen passing a csv, json or yaml file, relative path names are relativized to the location of the parameter file. No relativation is performed when `param_list` is a list of maps (as-is) or a yaml blob.", - "hidden": true - } - - } - } + } + + + , + "output": { + "type": + "string", + "description": "Type: `file`, default: `$id.$key.output.h5mu`, example: `output.h5mu`. Output h5mu file", + "help_text": "Type: `file`, default: `$id.$key.output.h5mu`, example: `output.h5mu`. Output h5mu file." + , + "default": "$id.$key.output.h5mu" + } + + + , + "output_compression": { + "type": + "string", + "description": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. ", + "help_text": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. ", + "enum": ["gzip", "lzf"] + + + } + + + , + "uns_metrics": { + "type": + "string", + "description": "Type: `string`, default: `metrics_cellranger`. Name of the ", + "help_text": "Type: `string`, default: `metrics_cellranger`. Name of the .uns slot under which to QC metrics (if any)." + , + "default": "metrics_cellranger" + } + + +} +}, + + + "nextflow input-output arguments" : { + "title": "Nextflow input-output arguments", + "type": "object", + "description": "Input/output parameters for Nextflow itself. Please note that both publishDir and publish_dir are supported but at least one has to be configured.", + "properties": { + + + "publish_dir": { + "type": + "string", + "description": "Type: `string`, required, example: `output/`. Path to an output directory", + "help_text": "Type: `string`, required, example: `output/`. Path to an output directory." + + } + + + , + "param_list": { + "type": + "string", + "description": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel", + "help_text": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel. A `param_list` can either be a list of maps, a csv file, a json file, a yaml file, or simply a yaml blob.\n\n* A list of maps (as-is) where the keys of each map corresponds to the arguments of the pipeline. Example: in a `nextflow.config` file: `param_list: [ [\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027], [\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027] ]`.\n* A csv file should have column names which correspond to the different arguments of this pipeline. Example: `--param_list data.csv` with columns `id,input`.\n* A json or a yaml file should be a list of maps, each of which has keys corresponding to the arguments of the pipeline. Example: `--param_list data.json` with contents `[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]`.\n* A yaml blob can also be passed directly as a string. Example: `--param_list \"[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]\"`.\n\nWhen passing a csv, json or yaml file, relative path names are relativized to the location of the parameter file. 
No relativation is performed when `param_list` is a list of maps (as-is) or a yaml blob.", + "hidden": true + + } + + +} +} +}, +"allOf": [ + + { + "$ref": "#/definitions/arguments" }, - "allOf": [ - { - "$ref": "#/definitions/arguments" - }, - { - "$ref": "#/definitions/nextflow input-output arguments" - } - ] + + { + "$ref": "#/definitions/nextflow input-output arguments" + } +] } diff --git a/target/nextflow/convert/from_h5ad_to_h5mu/.config.vsh.yaml b/target/nextflow/convert/from_h5ad_to_h5mu/.config.vsh.yaml index debbdefa32d..93c279f3d96 100644 --- a/target/nextflow/convert/from_h5ad_to_h5mu/.config.vsh.yaml +++ b/target/nextflow/convert/from_h5ad_to_h5mu/.config.vsh.yaml @@ -1,7 +1,7 @@ functionality: name: "from_h5ad_to_h5mu" namespace: "convert" - version: "0.12.3" + version: "0.12.4" authors: - name: "Dries De Maeyer" roles: @@ -172,6 +172,6 @@ info: output: "/home/runner/work/openpipeline/openpipeline/target/nextflow/convert/from_h5ad_to_h5mu" executable: "/home/runner/work/openpipeline/openpipeline/target/nextflow/convert/from_h5ad_to_h5mu/from_h5ad_to_h5mu" viash_version: "0.7.5" - git_commit: "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" + git_commit: "a075b9f384e200b357c4c85801062a980ddb3383" git_remote: "https://github.com/openpipelines-bio/openpipeline" - git_tag: "0.12.2-3-g827d483cf7" + git_tag: "0.12.3-3-ga075b9f384" diff --git a/target/nextflow/convert/from_h5ad_to_h5mu/main.nf b/target/nextflow/convert/from_h5ad_to_h5mu/main.nf index db66231f3de..03e89999922 100644 --- a/target/nextflow/convert/from_h5ad_to_h5mu/main.nf +++ b/target/nextflow/convert/from_h5ad_to_h5mu/main.nf @@ -1,4 +1,4 @@ -// from_h5ad_to_h5mu 0.12.3 +// from_h5ad_to_h5mu 0.12.4 // // This wrapper script is auto-generated by viash 0.7.5 and is thus a derivative // work thereof. 
This software comes with ABSOLUTELY NO WARRANTY from Data @@ -27,7 +27,7 @@ thisConfig = processConfig(jsonSlurper.parseText('''{ "functionality" : { "name" : "from_h5ad_to_h5mu", "namespace" : "convert", - "version" : "0.12.3", + "version" : "0.12.4", "authors" : [ { "name" : "Dries De Maeyer", @@ -253,9 +253,9 @@ thisConfig = processConfig(jsonSlurper.parseText('''{ "platform" : "nextflow", "output" : "/home/runner/work/openpipeline/openpipeline/target/nextflow/convert/from_h5ad_to_h5mu", "viash_version" : "0.7.5", - "git_commit" : "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f", + "git_commit" : "a075b9f384e200b357c4c85801062a980ddb3383", "git_remote" : "https://github.com/openpipelines-bio/openpipeline", - "git_tag" : "0.12.2-3-g827d483cf7" + "git_tag" : "0.12.3-3-ga075b9f384" } }''')) diff --git a/target/nextflow/convert/from_h5ad_to_h5mu/nextflow.config b/target/nextflow/convert/from_h5ad_to_h5mu/nextflow.config index 80292de0c4a..fef430246d9 100644 --- a/target/nextflow/convert/from_h5ad_to_h5mu/nextflow.config +++ b/target/nextflow/convert/from_h5ad_to_h5mu/nextflow.config @@ -2,7 +2,7 @@ manifest { name = 'from_h5ad_to_h5mu' mainScript = 'main.nf' nextflowVersion = '!>=20.12.1-edge' - version = '0.12.3' + version = '0.12.4' description = 'Converts a single layer h5ad file into a single MuData object\n' author = 'Dries De Maeyer' } diff --git a/target/nextflow/convert/from_h5ad_to_h5mu/nextflow_schema.json b/target/nextflow/convert/from_h5ad_to_h5mu/nextflow_schema.json index 685e9bedaf3..8c302ca4a51 100644 --- a/target/nextflow/convert/from_h5ad_to_h5mu/nextflow_schema.json +++ b/target/nextflow/convert/from_h5ad_to_h5mu/nextflow_schema.json @@ -1,74 +1,105 @@ { - "$schema": "http://json-schema.org/draft-07/schema", - "title": "from_h5ad_to_h5mu", - "description": "Converts a single layer h5ad file into a single MuData object\n", +"$schema": "http://json-schema.org/draft-07/schema", +"title": "from_h5ad_to_h5mu", +"description": "Converts a single layer h5ad file into a single MuData object\n", +"type": "object", +"definitions": { + + + + "arguments" : { + "title": "Arguments", "type": "object", - "definitions": { - "arguments" : { - "title": "Arguments", - "type": "object", - "description": "No description", - "properties": { - - "input": { - "type": "string", - "description": "Type: List of `file`, required, default: `input.h5ad`, multiple_sep: `\":\"`. Input h5ad files", - "help_text": "Type: List of `file`, required, default: `input.h5ad`, multiple_sep: `\":\"`. Input h5ad files", - "default": "input.h5ad" - }, - - "modality": { - "type": "string", - "description": "Type: List of `string`, default: `rna`, multiple_sep: `\":\"`. ", - "help_text": "Type: List of `string`, default: `rna`, multiple_sep: `\":\"`. ", - "default": "rna" - }, - - "output": { - "type": "string", - "description": "Type: `file`, default: `$id.$key.output.h5mu`. Output MuData file", - "help_text": "Type: `file`, default: `$id.$key.output.h5mu`. Output MuData file.", - "default": "$id.$key.output.h5mu" - }, - - "output_compression": { - "type": "string", - "description": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. ", - "help_text": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. ", - "enum": ["gzip", "lzf"] + "description": "No description", + "properties": { + + + "input": { + "type": + "string", + "description": "Type: List of `file`, required, default: `input.h5ad`, multiple_sep: `\":\"`. 
Input h5ad files", + "help_text": "Type: List of `file`, required, default: `input.h5ad`, multiple_sep: `\":\"`. Input h5ad files" + , + "default": "input.h5ad" + } + + + , + "modality": { + "type": + "string", + "description": "Type: List of `string`, default: `rna`, multiple_sep: `\":\"`. ", + "help_text": "Type: List of `string`, default: `rna`, multiple_sep: `\":\"`. " + , + "default": "rna" + } + + + , + "output": { + "type": + "string", + "description": "Type: `file`, default: `$id.$key.output.h5mu`. Output MuData file", + "help_text": "Type: `file`, default: `$id.$key.output.h5mu`. Output MuData file." + , + "default": "$id.$key.output.h5mu" + } + + + , + "output_compression": { + "type": + "string", + "description": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. ", + "help_text": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. ", + "enum": ["gzip", "lzf"] - } - - } - }, - "nextflow input-output arguments" : { - "title": "Nextflow input-output arguments", - "type": "object", - "description": "Input/output parameters for Nextflow itself. Please note that both publishDir and publish_dir are supported but at least one has to be configured.", - "properties": { - - "publish_dir": { - "type": "string", - "description": "Type: `string`, required, example: `output/`. Path to an output directory", - "help_text": "Type: `string`, required, example: `output/`. Path to an output directory." - }, - - "param_list": { - "type": "string", - "description": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel", - "help_text": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel. A `param_list` can either be a list of maps, a csv file, a json file, a yaml file, or simply a yaml blob.\n\n* A list of maps (as-is) where the keys of each map corresponds to the arguments of the pipeline. Example: in a `nextflow.config` file: `param_list: [ [\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027], [\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027] ]`.\n* A csv file should have column names which correspond to the different arguments of this pipeline. Example: `--param_list data.csv` with columns `id,input`.\n* A json or a yaml file should be a list of maps, each of which has keys corresponding to the arguments of the pipeline. Example: `--param_list data.json` with contents `[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]`.\n* A yaml blob can also be passed directly as a string. Example: `--param_list \"[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]\"`.\n\nWhen passing a csv, json or yaml file, relative path names are relativized to the location of the parameter file. No relativation is performed when `param_list` is a list of maps (as-is) or a yaml blob.", - "hidden": true - } - - } - } + + } + + +} +}, + + + "nextflow input-output arguments" : { + "title": "Nextflow input-output arguments", + "type": "object", + "description": "Input/output parameters for Nextflow itself. Please note that both publishDir and publish_dir are supported but at least one has to be configured.", + "properties": { + + + "publish_dir": { + "type": + "string", + "description": "Type: `string`, required, example: `output/`. 
Path to an output directory", + "help_text": "Type: `string`, required, example: `output/`. Path to an output directory." + + } + + + , + "param_list": { + "type": + "string", + "description": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel", + "help_text": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel. A `param_list` can either be a list of maps, a csv file, a json file, a yaml file, or simply a yaml blob.\n\n* A list of maps (as-is) where the keys of each map corresponds to the arguments of the pipeline. Example: in a `nextflow.config` file: `param_list: [ [\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027], [\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027] ]`.\n* A csv file should have column names which correspond to the different arguments of this pipeline. Example: `--param_list data.csv` with columns `id,input`.\n* A json or a yaml file should be a list of maps, each of which has keys corresponding to the arguments of the pipeline. Example: `--param_list data.json` with contents `[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]`.\n* A yaml blob can also be passed directly as a string. Example: `--param_list \"[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]\"`.\n\nWhen passing a csv, json or yaml file, relative path names are relativized to the location of the parameter file. No relativation is performed when `param_list` is a list of maps (as-is) or a yaml blob.", + "hidden": true + + } + + +} +} +}, +"allOf": [ + + { + "$ref": "#/definitions/arguments" }, - "allOf": [ - { - "$ref": "#/definitions/arguments" - }, - { - "$ref": "#/definitions/nextflow input-output arguments" - } - ] + + { + "$ref": "#/definitions/nextflow input-output arguments" + } +] } diff --git a/target/nextflow/convert/from_h5mu_to_h5ad/.config.vsh.yaml b/target/nextflow/convert/from_h5mu_to_h5ad/.config.vsh.yaml index 4625071bb70..4bf16bd4493 100644 --- a/target/nextflow/convert/from_h5mu_to_h5ad/.config.vsh.yaml +++ b/target/nextflow/convert/from_h5mu_to_h5ad/.config.vsh.yaml @@ -1,7 +1,7 @@ functionality: name: "from_h5mu_to_h5ad" namespace: "convert" - version: "0.12.3" + version: "0.12.4" authors: - name: "Robrecht Cannoodt" roles: @@ -177,6 +177,6 @@ info: output: "/home/runner/work/openpipeline/openpipeline/target/nextflow/convert/from_h5mu_to_h5ad" executable: "/home/runner/work/openpipeline/openpipeline/target/nextflow/convert/from_h5mu_to_h5ad/from_h5mu_to_h5ad" viash_version: "0.7.5" - git_commit: "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" + git_commit: "a075b9f384e200b357c4c85801062a980ddb3383" git_remote: "https://github.com/openpipelines-bio/openpipeline" - git_tag: "0.12.2-3-g827d483cf7" + git_tag: "0.12.3-3-ga075b9f384" diff --git a/target/nextflow/convert/from_h5mu_to_h5ad/main.nf b/target/nextflow/convert/from_h5mu_to_h5ad/main.nf index f96ba6d0319..2ede870aeee 100644 --- a/target/nextflow/convert/from_h5mu_to_h5ad/main.nf +++ b/target/nextflow/convert/from_h5mu_to_h5ad/main.nf @@ -1,4 +1,4 @@ -// from_h5mu_to_h5ad 0.12.3 +// from_h5mu_to_h5ad 0.12.4 // // This wrapper script is auto-generated by viash 0.7.5 and is thus a derivative // work thereof. 
This software comes with ABSOLUTELY NO WARRANTY from Data @@ -27,7 +27,7 @@ thisConfig = processConfig(jsonSlurper.parseText('''{ "functionality" : { "name" : "from_h5mu_to_h5ad", "namespace" : "convert", - "version" : "0.12.3", + "version" : "0.12.4", "authors" : [ { "name" : "Robrecht Cannoodt", @@ -260,9 +260,9 @@ thisConfig = processConfig(jsonSlurper.parseText('''{ "platform" : "nextflow", "output" : "/home/runner/work/openpipeline/openpipeline/target/nextflow/convert/from_h5mu_to_h5ad", "viash_version" : "0.7.5", - "git_commit" : "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f", + "git_commit" : "a075b9f384e200b357c4c85801062a980ddb3383", "git_remote" : "https://github.com/openpipelines-bio/openpipeline", - "git_tag" : "0.12.2-3-g827d483cf7" + "git_tag" : "0.12.3-3-ga075b9f384" } }''')) diff --git a/target/nextflow/convert/from_h5mu_to_h5ad/nextflow.config b/target/nextflow/convert/from_h5mu_to_h5ad/nextflow.config index 94c50f449cd..d73324a8309 100644 --- a/target/nextflow/convert/from_h5mu_to_h5ad/nextflow.config +++ b/target/nextflow/convert/from_h5mu_to_h5ad/nextflow.config @@ -2,7 +2,7 @@ manifest { name = 'from_h5mu_to_h5ad' mainScript = 'main.nf' nextflowVersion = '!>=20.12.1-edge' - version = '0.12.3' + version = '0.12.4' description = 'Converts a h5mu file into a h5ad file.\n' author = 'Robrecht Cannoodt' } diff --git a/target/nextflow/convert/from_h5mu_to_h5ad/nextflow_schema.json b/target/nextflow/convert/from_h5mu_to_h5ad/nextflow_schema.json index 06fe54649d8..b192426b3e3 100644 --- a/target/nextflow/convert/from_h5mu_to_h5ad/nextflow_schema.json +++ b/target/nextflow/convert/from_h5mu_to_h5ad/nextflow_schema.json @@ -1,75 +1,106 @@ { - "$schema": "http://json-schema.org/draft-07/schema", - "title": "from_h5mu_to_h5ad", - "description": "Converts a h5mu file into a h5ad file.\n", +"$schema": "http://json-schema.org/draft-07/schema", +"title": "from_h5mu_to_h5ad", +"description": "Converts a h5mu file into a h5ad file.\n", +"type": "object", +"definitions": { + + + + "arguments" : { + "title": "Arguments", "type": "object", - "definitions": { - "arguments" : { - "title": "Arguments", - "type": "object", - "description": "No description", - "properties": { - - "input": { - "type": "string", - "description": "Type: `file`, required, default: `input.h5mu`. Input MuData file", - "help_text": "Type: `file`, required, default: `input.h5mu`. Input MuData file", - "default": "input.h5mu" - }, - - "modality": { - "type": "string", - "description": "Type: `string`, default: `rna`. ", - "help_text": "Type: `string`, default: `rna`. ", - "default": "rna" - }, - - "output": { - "type": "string", - "description": "Type: `file`, default: `$id.$key.output.h5ad`. Output AnnData file", - "help_text": "Type: `file`, default: `$id.$key.output.h5ad`. Output AnnData file.", - "default": "$id.$key.output.h5ad" - }, - - "output_compression": { - "type": "string", - "description": "Type: `string`, default: `gzip`, choices: ``gzip`, `lzf``. The compression format to be used on the final h5ad object", - "help_text": "Type: `string`, default: `gzip`, choices: ``gzip`, `lzf``. The compression format to be used on the final h5ad object.", - "enum": ["gzip", "lzf"] + "description": "No description", + "properties": { + + + "input": { + "type": + "string", + "description": "Type: `file`, required, default: `input.h5mu`. Input MuData file", + "help_text": "Type: `file`, required, default: `input.h5mu`. 
Input MuData file" , - "default": "gzip" - } - - } - }, - "nextflow input-output arguments" : { - "title": "Nextflow input-output arguments", - "type": "object", - "description": "Input/output parameters for Nextflow itself. Please note that both publishDir and publish_dir are supported but at least one has to be configured.", - "properties": { - - "publish_dir": { - "type": "string", - "description": "Type: `string`, required, example: `output/`. Path to an output directory", - "help_text": "Type: `string`, required, example: `output/`. Path to an output directory." - }, - - "param_list": { - "type": "string", - "description": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel", - "help_text": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel. A `param_list` can either be a list of maps, a csv file, a json file, a yaml file, or simply a yaml blob.\n\n* A list of maps (as-is) where the keys of each map corresponds to the arguments of the pipeline. Example: in a `nextflow.config` file: `param_list: [ [\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027], [\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027] ]`.\n* A csv file should have column names which correspond to the different arguments of this pipeline. Example: `--param_list data.csv` with columns `id,input`.\n* A json or a yaml file should be a list of maps, each of which has keys corresponding to the arguments of the pipeline. Example: `--param_list data.json` with contents `[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]`.\n* A yaml blob can also be passed directly as a string. Example: `--param_list \"[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]\"`.\n\nWhen passing a csv, json or yaml file, relative path names are relativized to the location of the parameter file. No relativation is performed when `param_list` is a list of maps (as-is) or a yaml blob.", - "hidden": true - } - - } - } + "default": "input.h5mu" + } + + + , + "modality": { + "type": + "string", + "description": "Type: `string`, default: `rna`. ", + "help_text": "Type: `string`, default: `rna`. " + , + "default": "rna" + } + + + , + "output": { + "type": + "string", + "description": "Type: `file`, default: `$id.$key.output.h5ad`. Output AnnData file", + "help_text": "Type: `file`, default: `$id.$key.output.h5ad`. Output AnnData file." + , + "default": "$id.$key.output.h5ad" + } + + + , + "output_compression": { + "type": + "string", + "description": "Type: `string`, default: `gzip`, choices: ``gzip`, `lzf``. The compression format to be used on the final h5ad object", + "help_text": "Type: `string`, default: `gzip`, choices: ``gzip`, `lzf``. The compression format to be used on the final h5ad object.", + "enum": ["gzip", "lzf"] + + , + "default": "gzip" + } + + +} +}, + + + "nextflow input-output arguments" : { + "title": "Nextflow input-output arguments", + "type": "object", + "description": "Input/output parameters for Nextflow itself. Please note that both publishDir and publish_dir are supported but at least one has to be configured.", + "properties": { + + + "publish_dir": { + "type": + "string", + "description": "Type: `string`, required, example: `output/`. 
Path to an output directory", + "help_text": "Type: `string`, required, example: `output/`. Path to an output directory." + + } + + + , + "param_list": { + "type": + "string", + "description": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel", + "help_text": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel. A `param_list` can either be a list of maps, a csv file, a json file, a yaml file, or simply a yaml blob.\n\n* A list of maps (as-is) where the keys of each map corresponds to the arguments of the pipeline. Example: in a `nextflow.config` file: `param_list: [ [\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027], [\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027] ]`.\n* A csv file should have column names which correspond to the different arguments of this pipeline. Example: `--param_list data.csv` with columns `id,input`.\n* A json or a yaml file should be a list of maps, each of which has keys corresponding to the arguments of the pipeline. Example: `--param_list data.json` with contents `[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]`.\n* A yaml blob can also be passed directly as a string. Example: `--param_list \"[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]\"`.\n\nWhen passing a csv, json or yaml file, relative path names are relativized to the location of the parameter file. No relativation is performed when `param_list` is a list of maps (as-is) or a yaml blob.", + "hidden": true + + } + + +} +} +}, +"allOf": [ + + { + "$ref": "#/definitions/arguments" }, - "allOf": [ - { - "$ref": "#/definitions/arguments" - }, - { - "$ref": "#/definitions/nextflow input-output arguments" - } - ] + + { + "$ref": "#/definitions/nextflow input-output arguments" + } +] } diff --git a/target/nextflow/convert/velocyto_to_h5mu/.config.vsh.yaml b/target/nextflow/convert/velocyto_to_h5mu/.config.vsh.yaml index a86e9fae886..61a1cb5d5ff 100644 --- a/target/nextflow/convert/velocyto_to_h5mu/.config.vsh.yaml +++ b/target/nextflow/convert/velocyto_to_h5mu/.config.vsh.yaml @@ -1,7 +1,7 @@ functionality: name: "velocyto_to_h5mu" namespace: "convert" - version: "0.12.3" + version: "0.12.4" authors: - name: "Dries Schaumont" roles: @@ -250,6 +250,6 @@ info: output: "/home/runner/work/openpipeline/openpipeline/target/nextflow/convert/velocyto_to_h5mu" executable: "/home/runner/work/openpipeline/openpipeline/target/nextflow/convert/velocyto_to_h5mu/velocyto_to_h5mu" viash_version: "0.7.5" - git_commit: "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" + git_commit: "a075b9f384e200b357c4c85801062a980ddb3383" git_remote: "https://github.com/openpipelines-bio/openpipeline" - git_tag: "0.12.2-3-g827d483cf7" + git_tag: "0.12.3-3-ga075b9f384" diff --git a/target/nextflow/convert/velocyto_to_h5mu/main.nf b/target/nextflow/convert/velocyto_to_h5mu/main.nf index bf693750051..42a73828637 100644 --- a/target/nextflow/convert/velocyto_to_h5mu/main.nf +++ b/target/nextflow/convert/velocyto_to_h5mu/main.nf @@ -1,4 +1,4 @@ -// velocyto_to_h5mu 0.12.3 +// velocyto_to_h5mu 0.12.4 // // This wrapper script is auto-generated by viash 0.7.5 and is thus a derivative // work thereof. 
This software comes with ABSOLUTELY NO WARRANTY from Data @@ -29,7 +29,7 @@ thisConfig = processConfig(jsonSlurper.parseText('''{ "functionality" : { "name" : "velocyto_to_h5mu", "namespace" : "convert", - "version" : "0.12.3", + "version" : "0.12.4", "authors" : [ { "name" : "Dries Schaumont", @@ -357,9 +357,9 @@ thisConfig = processConfig(jsonSlurper.parseText('''{ "platform" : "nextflow", "output" : "/home/runner/work/openpipeline/openpipeline/target/nextflow/convert/velocyto_to_h5mu", "viash_version" : "0.7.5", - "git_commit" : "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f", + "git_commit" : "a075b9f384e200b357c4c85801062a980ddb3383", "git_remote" : "https://github.com/openpipelines-bio/openpipeline", - "git_tag" : "0.12.2-3-g827d483cf7" + "git_tag" : "0.12.3-3-ga075b9f384" } }''')) diff --git a/target/nextflow/convert/velocyto_to_h5mu/nextflow.config b/target/nextflow/convert/velocyto_to_h5mu/nextflow.config index 9dfcb671ec2..520bd2417d6 100644 --- a/target/nextflow/convert/velocyto_to_h5mu/nextflow.config +++ b/target/nextflow/convert/velocyto_to_h5mu/nextflow.config @@ -2,7 +2,7 @@ manifest { name = 'velocyto_to_h5mu' mainScript = 'main.nf' nextflowVersion = '!>=20.12.1-edge' - version = '0.12.3' + version = '0.12.4' description = 'Convert a velocyto loom file to a h5mu file.\n\nIf an input h5mu file is also provided, the velocity\nh5ad object will get added to that h5mu instead.\n' author = 'Dries Schaumont, Robrecht Cannoodt, Angela Oliveira Pisco' } diff --git a/target/nextflow/convert/velocyto_to_h5mu/nextflow_schema.json b/target/nextflow/convert/velocyto_to_h5mu/nextflow_schema.json index ea1fdf4fffb..33e9b9f9059 100644 --- a/target/nextflow/convert/velocyto_to_h5mu/nextflow_schema.json +++ b/target/nextflow/convert/velocyto_to_h5mu/nextflow_schema.json @@ -1,111 +1,161 @@ { - "$schema": "http://json-schema.org/draft-07/schema", - "title": "velocyto_to_h5mu", - "description": "Convert a velocyto loom file to a h5mu file.\n\nIf an input h5mu file is also provided, the velocity\nh5ad object will get added to that h5mu instead.\n", +"$schema": "http://json-schema.org/draft-07/schema", +"title": "velocyto_to_h5mu", +"description": "Convert a velocyto loom file to a h5mu file.\n\nIf an input h5mu file is also provided, the velocity\nh5ad object will get added to that h5mu instead.\n", +"type": "object", +"definitions": { + + + + "inputs" : { + "title": "Inputs", "type": "object", - "definitions": { - "inputs" : { - "title": "Inputs", - "type": "object", - "description": "No description", - "properties": { - - "input_loom": { - "type": "string", - "description": "Type: `file`, required, example: `input.loom`. Path to the input loom file", - "help_text": "Type: `file`, required, example: `input.loom`. Path to the input loom file." - }, - - "input_h5mu": { - "type": "string", - "description": "Type: `file`, example: `input.h5mu`. If a MuData file is provided,", - "help_text": "Type: `file`, example: `input.h5mu`. If a MuData file is provided," - }, - - "modality": { - "type": "string", - "description": "Type: `string`, default: `rna_velocity`. The name of the modality to operate on", - "help_text": "Type: `string`, default: `rna_velocity`. The name of the modality to operate on.", - "default": "rna_velocity" - } - - } - }, - "outputs" : { - "title": "Outputs", - "type": "object", - "description": "No description", - "properties": { - - "output": { - "type": "string", - "description": "Type: `file`, default: `$id.$key.output.h5mu`, example: `output.h5mu`. 
Path to the output MuData file", - "help_text": "Type: `file`, default: `$id.$key.output.h5mu`, example: `output.h5mu`. Path to the output MuData file.", - "default": "$id.$key.output.h5mu" - }, - - "output_compression": { - "type": "string", - "description": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. The compression format to be used on the output h5mu object", - "help_text": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. The compression format to be used on the output h5mu object.", - "enum": ["gzip", "lzf"] + "description": "No description", + "properties": { + + + "input_loom": { + "type": + "string", + "description": "Type: `file`, required, example: `input.loom`. Path to the input loom file", + "help_text": "Type: `file`, required, example: `input.loom`. Path to the input loom file." - }, - - "layer_spliced": { - "type": "string", - "description": "Type: `string`, default: `velo_spliced`. Output layer for the spliced reads", - "help_text": "Type: `string`, default: `velo_spliced`. Output layer for the spliced reads.", - "default": "velo_spliced" - }, - - "layer_unspliced": { - "type": "string", - "description": "Type: `string`, default: `velo_unspliced`. Output layer for the unspliced reads", - "help_text": "Type: `string`, default: `velo_unspliced`. Output layer for the unspliced reads.", - "default": "velo_unspliced" - }, - - "layer_ambiguous": { - "type": "string", - "description": "Type: `string`, default: `velo_ambiguous`. Output layer for the ambiguous reads", - "help_text": "Type: `string`, default: `velo_ambiguous`. Output layer for the ambiguous reads.", - "default": "velo_ambiguous" - } - - } - }, - "nextflow input-output arguments" : { - "title": "Nextflow input-output arguments", - "type": "object", - "description": "Input/output parameters for Nextflow itself. Please note that both publishDir and publish_dir are supported but at least one has to be configured.", - "properties": { - - "publish_dir": { - "type": "string", - "description": "Type: `string`, required, example: `output/`. Path to an output directory", - "help_text": "Type: `string`, required, example: `output/`. Path to an output directory." - }, - - "param_list": { - "type": "string", - "description": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel", - "help_text": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel. A `param_list` can either be a list of maps, a csv file, a json file, a yaml file, or simply a yaml blob.\n\n* A list of maps (as-is) where the keys of each map corresponds to the arguments of the pipeline. Example: in a `nextflow.config` file: `param_list: [ [\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027], [\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027] ]`.\n* A csv file should have column names which correspond to the different arguments of this pipeline. Example: `--param_list data.csv` with columns `id,input`.\n* A json or a yaml file should be a list of maps, each of which has keys corresponding to the arguments of the pipeline. Example: `--param_list data.json` with contents `[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]`.\n* A yaml blob can also be passed directly as a string. 
Example: `--param_list \"[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]\"`.\n\nWhen passing a csv, json or yaml file, relative path names are relativized to the location of the parameter file. No relativation is performed when `param_list` is a list of maps (as-is) or a yaml blob.", - "hidden": true - } - - } - } + } + + + , + "input_h5mu": { + "type": + "string", + "description": "Type: `file`, example: `input.h5mu`. If a MuData file is provided,", + "help_text": "Type: `file`, example: `input.h5mu`. If a MuData file is provided," + + } + + + , + "modality": { + "type": + "string", + "description": "Type: `string`, default: `rna_velocity`. The name of the modality to operate on", + "help_text": "Type: `string`, default: `rna_velocity`. The name of the modality to operate on." + , + "default": "rna_velocity" + } + + +} +}, + + + "outputs" : { + "title": "Outputs", + "type": "object", + "description": "No description", + "properties": { + + + "output": { + "type": + "string", + "description": "Type: `file`, default: `$id.$key.output.h5mu`, example: `output.h5mu`. Path to the output MuData file", + "help_text": "Type: `file`, default: `$id.$key.output.h5mu`, example: `output.h5mu`. Path to the output MuData file." + , + "default": "$id.$key.output.h5mu" + } + + + , + "output_compression": { + "type": + "string", + "description": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. The compression format to be used on the output h5mu object", + "help_text": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. The compression format to be used on the output h5mu object.", + "enum": ["gzip", "lzf"] + + + } + + + , + "layer_spliced": { + "type": + "string", + "description": "Type: `string`, default: `velo_spliced`. Output layer for the spliced reads", + "help_text": "Type: `string`, default: `velo_spliced`. Output layer for the spliced reads." + , + "default": "velo_spliced" + } + + + , + "layer_unspliced": { + "type": + "string", + "description": "Type: `string`, default: `velo_unspliced`. Output layer for the unspliced reads", + "help_text": "Type: `string`, default: `velo_unspliced`. Output layer for the unspliced reads." + , + "default": "velo_unspliced" + } + + + , + "layer_ambiguous": { + "type": + "string", + "description": "Type: `string`, default: `velo_ambiguous`. Output layer for the ambiguous reads", + "help_text": "Type: `string`, default: `velo_ambiguous`. Output layer for the ambiguous reads." + , + "default": "velo_ambiguous" + } + + +} +}, + + + "nextflow input-output arguments" : { + "title": "Nextflow input-output arguments", + "type": "object", + "description": "Input/output parameters for Nextflow itself. Please note that both publishDir and publish_dir are supported but at least one has to be configured.", + "properties": { + + + "publish_dir": { + "type": + "string", + "description": "Type: `string`, required, example: `output/`. Path to an output directory", + "help_text": "Type: `string`, required, example: `output/`. Path to an output directory." + + } + + + , + "param_list": { + "type": + "string", + "description": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel", + "help_text": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel. 
A `param_list` can either be a list of maps, a csv file, a json file, a yaml file, or simply a yaml blob.\n\n* A list of maps (as-is) where the keys of each map corresponds to the arguments of the pipeline. Example: in a `nextflow.config` file: `param_list: [ [\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027], [\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027] ]`.\n* A csv file should have column names which correspond to the different arguments of this pipeline. Example: `--param_list data.csv` with columns `id,input`.\n* A json or a yaml file should be a list of maps, each of which has keys corresponding to the arguments of the pipeline. Example: `--param_list data.json` with contents `[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]`.\n* A yaml blob can also be passed directly as a string. Example: `--param_list \"[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]\"`.\n\nWhen passing a csv, json or yaml file, relative path names are relativized to the location of the parameter file. No relativation is performed when `param_list` is a list of maps (as-is) or a yaml blob.", + "hidden": true + + } + + +} +} +}, +"allOf": [ + + { + "$ref": "#/definitions/inputs" + }, + + { + "$ref": "#/definitions/outputs" }, - "allOf": [ - { - "$ref": "#/definitions/inputs" - }, - { - "$ref": "#/definitions/outputs" - }, - { - "$ref": "#/definitions/nextflow input-output arguments" - } - ] + + { + "$ref": "#/definitions/nextflow input-output arguments" + } +] } diff --git a/target/nextflow/correction/cellbender_remove_background/.config.vsh.yaml b/target/nextflow/correction/cellbender_remove_background/.config.vsh.yaml index e07121119b5..cb466880229 100644 --- a/target/nextflow/correction/cellbender_remove_background/.config.vsh.yaml +++ b/target/nextflow/correction/cellbender_remove_background/.config.vsh.yaml @@ -1,7 +1,7 @@ functionality: name: "cellbender_remove_background" namespace: "correction" - version: "0.12.3" + version: "0.12.4" argument_groups: - name: "Inputs" arguments: @@ -632,6 +632,6 @@ info: output: "/home/runner/work/openpipeline/openpipeline/target/nextflow/correction/cellbender_remove_background" executable: "/home/runner/work/openpipeline/openpipeline/target/nextflow/correction/cellbender_remove_background/cellbender_remove_background" viash_version: "0.7.5" - git_commit: "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" + git_commit: "a075b9f384e200b357c4c85801062a980ddb3383" git_remote: "https://github.com/openpipelines-bio/openpipeline" - git_tag: "0.12.2-3-g827d483cf7" + git_tag: "0.12.3-3-ga075b9f384" diff --git a/target/nextflow/correction/cellbender_remove_background/main.nf b/target/nextflow/correction/cellbender_remove_background/main.nf index 7fea2cc19c3..cb704b48201 100644 --- a/target/nextflow/correction/cellbender_remove_background/main.nf +++ b/target/nextflow/correction/cellbender_remove_background/main.nf @@ -1,4 +1,4 @@ -// cellbender_remove_background 0.12.3 +// cellbender_remove_background 0.12.4 // // This wrapper script is auto-generated by viash 0.7.5 and is thus a derivative // work thereof. 
This software comes with ABSOLUTELY NO WARRANTY from Data @@ -24,7 +24,7 @@ thisConfig = processConfig(jsonSlurper.parseText('''{ "functionality" : { "name" : "cellbender_remove_background", "namespace" : "correction", - "version" : "0.12.3", + "version" : "0.12.4", "argument_groups" : [ { "name" : "Inputs", @@ -706,9 +706,9 @@ thisConfig = processConfig(jsonSlurper.parseText('''{ "platform" : "nextflow", "output" : "/home/runner/work/openpipeline/openpipeline/target/nextflow/correction/cellbender_remove_background", "viash_version" : "0.7.5", - "git_commit" : "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f", + "git_commit" : "a075b9f384e200b357c4c85801062a980ddb3383", "git_remote" : "https://github.com/openpipelines-bio/openpipeline", - "git_tag" : "0.12.2-3-g827d483cf7" + "git_tag" : "0.12.3-3-ga075b9f384" } }''')) diff --git a/target/nextflow/correction/cellbender_remove_background/nextflow.config b/target/nextflow/correction/cellbender_remove_background/nextflow.config index 5576a3d41f7..a6f4c6ec272 100644 --- a/target/nextflow/correction/cellbender_remove_background/nextflow.config +++ b/target/nextflow/correction/cellbender_remove_background/nextflow.config @@ -2,7 +2,7 @@ manifest { name = 'cellbender_remove_background' mainScript = 'main.nf' nextflowVersion = '!>=20.12.1-edge' - version = '0.12.3' + version = '0.12.4' description = 'Eliminating technical artifacts from high-throughput single-cell RNA sequencing data.\n\nThis module removes counts due to ambient RNA molecules and random barcode swapping from (raw) UMI-based scRNA-seq count matrices. \nAt the moment, only the count matrices produced by the CellRanger count pipeline is supported. Support for additional tools and protocols \nwill be added in the future. A quick start tutorial can be found here.\n\nFleming et al. 2022, bioRxiv.\n' } diff --git a/target/nextflow/correction/cellbender_remove_background/nextflow_schema.json b/target/nextflow/correction/cellbender_remove_background/nextflow_schema.json index 2d91eda68a5..10ec3f879d5 100644 --- a/target/nextflow/correction/cellbender_remove_background/nextflow_schema.json +++ b/target/nextflow/correction/cellbender_remove_background/nextflow_schema.json @@ -1,355 +1,544 @@ { - "$schema": "http://json-schema.org/draft-07/schema", - "title": "cellbender_remove_background", - "description": "Eliminating technical artifacts from high-throughput single-cell RNA sequencing data.\n\nThis module removes counts due to ambient RNA molecules and random barcode swapping from (raw) UMI-based scRNA-seq count matrices. \nAt the moment, only the count matrices produced by the CellRanger count pipeline is supported. Support for additional tools and protocols \nwill be added in the future. A quick start tutorial can be found here.\n\nFleming et al. 2022, bioRxiv.\n", +"$schema": "http://json-schema.org/draft-07/schema", +"title": "cellbender_remove_background", +"description": "Eliminating technical artifacts from high-throughput single-cell RNA sequencing data.\n\nThis module removes counts due to ambient RNA molecules and random barcode swapping from (raw) UMI-based scRNA-seq count matrices. \nAt the moment, only the count matrices produced by the CellRanger count pipeline is supported. Support for additional tools and protocols \nwill be added in the future. A quick start tutorial can be found here.\n\nFleming et al. 
2022, bioRxiv.\n", +"type": "object", +"definitions": { + + + + "inputs" : { + "title": "Inputs", "type": "object", - "definitions": { - "inputs" : { - "title": "Inputs", - "type": "object", - "description": "No description", - "properties": { - - "input": { - "type": "string", - "description": "Type: `file`, required, example: `input.h5mu`. Input h5mu file", - "help_text": "Type: `file`, required, example: `input.h5mu`. Input h5mu file. Data file on which to run tool. Data must be un-filtered: it should include empty droplets." - }, - - "modality": { - "type": "string", - "description": "Type: `string`, default: `rna`. List of modalities to process", - "help_text": "Type: `string`, default: `rna`. List of modalities to process.", - "default": "rna" - } - - } - }, - "outputs" : { - "title": "Outputs", - "type": "object", - "description": "No description", - "properties": { - - "output": { - "type": "string", - "description": "Type: `file`, required, default: `$id.$key.output.h5mu`, example: `output.h5mu`. Full count matrix as an h5mu file, with background RNA removed", - "help_text": "Type: `file`, required, default: `$id.$key.output.h5mu`, example: `output.h5mu`. Full count matrix as an h5mu file, with background RNA removed. This file contains all the original droplet barcodes.", - "default": "$id.$key.output.h5mu" - }, - - "output_compression": { - "type": "string", - "description": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. ", - "help_text": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. ", - "enum": ["gzip", "lzf"] + "description": "No description", + "properties": { + + + "input": { + "type": + "string", + "description": "Type: `file`, required, example: `input.h5mu`. Input h5mu file", + "help_text": "Type: `file`, required, example: `input.h5mu`. Input h5mu file. Data file on which to run tool. Data must be un-filtered: it should include empty droplets." - }, - - "layer_output": { - "type": "string", - "description": "Type: `string`, default: `cellbender_corrected`. Output layer", - "help_text": "Type: `string`, default: `cellbender_corrected`. Output layer", - "default": "cellbender_corrected" - }, - - "obs_background_fraction": { - "type": "string", - "description": "Type: `string`, default: `cellbender_background_fraction`. ", - "help_text": "Type: `string`, default: `cellbender_background_fraction`. ", - "default": "cellbender_background_fraction" - }, - - "obs_cell_probability": { - "type": "string", - "description": "Type: `string`, default: `cellbender_cell_probability`. ", - "help_text": "Type: `string`, default: `cellbender_cell_probability`. ", - "default": "cellbender_cell_probability" - }, - - "obs_cell_size": { - "type": "string", - "description": "Type: `string`, default: `cellbender_cell_size`. ", - "help_text": "Type: `string`, default: `cellbender_cell_size`. ", - "default": "cellbender_cell_size" - }, - - "obs_droplet_efficiency": { - "type": "string", - "description": "Type: `string`, default: `cellbender_droplet_efficiency`. ", - "help_text": "Type: `string`, default: `cellbender_droplet_efficiency`. ", - "default": "cellbender_droplet_efficiency" - }, - - "obs_latent_scale": { - "type": "string", - "description": "Type: `string`, default: `cellbender_latent_scale`. ", - "help_text": "Type: `string`, default: `cellbender_latent_scale`. ", - "default": "cellbender_latent_scale" - }, - - "var_ambient_expression": { - "type": "string", - "description": "Type: `string`, default: `cellbender_ambient_expression`. 
", - "help_text": "Type: `string`, default: `cellbender_ambient_expression`. ", - "default": "cellbender_ambient_expression" - }, - - "obsm_gene_expression_encoding": { - "type": "string", - "description": "Type: `string`, default: `cellbender_gene_expression_encoding`. ", - "help_text": "Type: `string`, default: `cellbender_gene_expression_encoding`. ", - "default": "cellbender_gene_expression_encoding" - } - - } - }, - "arguments" : { - "title": "Arguments", - "type": "object", - "description": "No description", - "properties": { - - "expected_cells_from_qc": { - "type": "boolean", - "description": "Type: `boolean`, default: `false`. Will use the Cell Ranger QC to determine the estimated number of cells", - "help_text": "Type: `boolean`, default: `false`. Will use the Cell Ranger QC to determine the estimated number of cells", - "default": "False" - }, - - "expected_cells": { - "type": "integer", - "description": "Type: `integer`, example: `1000`. Number of cells expected in the dataset (a rough estimate within a factor of 2 is sufficient)", - "help_text": "Type: `integer`, example: `1000`. Number of cells expected in the dataset (a rough estimate within a factor of 2 is sufficient)." - }, - - "total_droplets_included": { - "type": "integer", - "description": "Type: `integer`, example: `25000`. The number of droplets from the rank-ordered UMI plot\nthat will have their cell probabilities inferred as an\noutput", - "help_text": "Type: `integer`, example: `25000`. The number of droplets from the rank-ordered UMI plot\nthat will have their cell probabilities inferred as an\noutput. Include the droplets which might contain cells.\nDroplets beyond TOTAL_DROPLETS_INCLUDED should be\n\u0027surely empty\u0027 droplets.\n" - }, - - "force_cell_umi_prior": { - "type": "integer", - "description": "Type: `integer`. Ignore CellBender\u0027s heuristic prior estimation, and use this prior for UMI counts in cells", - "help_text": "Type: `integer`. Ignore CellBender\u0027s heuristic prior estimation, and use this prior for UMI counts in cells." - }, - - "force_empty_umi_prior": { - "type": "integer", - "description": "Type: `integer`. Ignore CellBender\u0027s heuristic prior estimation, and use this prior for UMI counts in empty droplets", - "help_text": "Type: `integer`. Ignore CellBender\u0027s heuristic prior estimation, and use this prior for UMI counts in empty droplets." - }, - - "model": { - "type": "string", - "description": "Type: `string`, default: `full`, choices: ``naive`, `simple`, `ambient`, `swapping`, `full``. Which model is being used for count data", - "help_text": "Type: `string`, default: `full`, choices: ``naive`, `simple`, `ambient`, `swapping`, `full``. Which model is being used for count data.\n\n* \u0027naive\u0027 subtracts the estimated ambient profile.\n* \u0027simple\u0027 does not model either ambient RNA or random barcode swapping (for debugging purposes -- not recommended).\n* \u0027ambient\u0027 assumes background RNA is incorporated into droplets.\n* \u0027swapping\u0027 assumes background RNA comes from random barcode swapping (via PCR chimeras).\n* \u0027full\u0027 uses a combined ambient and swapping model.\n", - "enum": ["naive", "simple", "ambient", "swapping", "full"] + } + + + , + "modality": { + "type": + "string", + "description": "Type: `string`, default: `rna`. List of modalities to process", + "help_text": "Type: `string`, default: `rna`. List of modalities to process." 
, - "default": "full" - }, - - "epochs": { - "type": "integer", - "description": "Type: `integer`, default: `150`. Number of epochs to train", - "help_text": "Type: `integer`, default: `150`. Number of epochs to train.", - "default": "150" - }, - - "low_count_threshold": { - "type": "integer", - "description": "Type: `integer`, default: `5`. Droplets with UMI counts below this number are completely \nexcluded from the analysis", - "help_text": "Type: `integer`, default: `5`. Droplets with UMI counts below this number are completely \nexcluded from the analysis. This can help identify the correct \nprior for empty droplet counts in the rare case where empty \ncounts are extremely high (over 200).\n", - "default": "5" - }, - - "z_dim": { - "type": "integer", - "description": "Type: `integer`, default: `64`. Dimension of latent variable z", - "help_text": "Type: `integer`, default: `64`. Dimension of latent variable z.\n", - "default": "64" - }, - - "z_layers": { - "type": "string", - "description": "Type: List of `integer`, default: `512`, multiple_sep: `\":\"`. Dimension of hidden layers in the encoder for z", - "help_text": "Type: List of `integer`, default: `512`, multiple_sep: `\":\"`. Dimension of hidden layers in the encoder for z.\n", - "default": "512" - }, - - "training_fraction": { - "type": "number", - "description": "Type: `double`, default: `0.9`. Training detail: the fraction of the data used for training", - "help_text": "Type: `double`, default: `0.9`. Training detail: the fraction of the data used for training.\nThe rest is never seen by the inference algorithm. Speeds up learning.\n", - "default": "0.9" - }, - - "empty_drop_training_fraction": { - "type": "number", - "description": "Type: `double`, default: `0.2`. Training detail: the fraction of the training data each epoch that \nis drawn (randomly sampled) from surely empty droplets", - "help_text": "Type: `double`, default: `0.2`. Training detail: the fraction of the training data each epoch that \nis drawn (randomly sampled) from surely empty droplets.\n", - "default": "0.2" - }, - - "ignore_features": { - "type": "string", - "description": "Type: List of `integer`, multiple_sep: `\":\"`. Integer indices of features to ignore entirely", - "help_text": "Type: List of `integer`, multiple_sep: `\":\"`. Integer indices of features to ignore entirely. In the output\ncount matrix, the counts for these features will be unchanged.\n" - }, - - "fpr": { - "type": "string", - "description": "Type: List of `double`, default: `0.01`, multiple_sep: `\":\"`. Target \u0027delta\u0027 false positive rate in [0, 1)", - "help_text": "Type: List of `double`, default: `0.01`, multiple_sep: `\":\"`. Target \u0027delta\u0027 false positive rate in [0, 1). Use 0 for a cohort\nof samples which will be jointly analyzed for differential expression.\nA false positive is a true signal count that is erroneously removed.\nMore background removal is accompanied by more signal removal at\nhigh values of FPR. You can specify multiple values, which will\ncreate multiple output files.\n", - "default": "0.01" - }, - - "exclude_feature_types": { - "type": "string", - "description": "Type: List of `string`, multiple_sep: `\":\"`. Feature types to ignore during the analysis", - "help_text": "Type: List of `string`, multiple_sep: `\":\"`. Feature types to ignore during the analysis. These features will\nbe left unchanged in the output file.\n" - }, - - "projected_ambient_count_threshold": { - "type": "number", - "description": "Type: `double`, default: `0.1`. 
Controls how many features are included in the analysis, which\ncan lead to a large speedup", - "help_text": "Type: `double`, default: `0.1`. Controls how many features are included in the analysis, which\ncan lead to a large speedup. If a feature is expected to have less\nthan PROJECTED_AMBIENT_COUNT_THRESHOLD counts total in all cells\n(summed), then that gene is excluded, and it will be unchanged\nin the output count matrix. For example, \nPROJECTED_AMBIENT_COUNT_THRESHOLD = 0 will include all features\nwhich have even a single count in any empty droplet.\n", - "default": "0.1" - }, - - "learning_rate": { - "type": "number", - "description": "Type: `double`, default: `1.0E-4`. Training detail: lower learning rate for inference", - "help_text": "Type: `double`, default: `1.0E-4`. Training detail: lower learning rate for inference.\nA OneCycle learning rate schedule is used, where the\nupper learning rate is ten times this value. (For this\nvalue, probably do not exceed 1e-3).\n", - "default": "0.0001" - }, - - "final_elbo_fail_fraction": { - "type": "number", - "description": "Type: `double`. Training is considered to have failed if \n(best_test_ELBO - final_test_ELBO)/(best_test_ELBO - initial_test_ELBO) \u003e FINAL_ELBO_FAIL_FRACTION", - "help_text": "Type: `double`. Training is considered to have failed if \n(best_test_ELBO - final_test_ELBO)/(best_test_ELBO - initial_test_ELBO) \u003e FINAL_ELBO_FAIL_FRACTION.\nTraining will automatically re-run if --num-training-tries \u003e 1.\nBy default, will not fail training based on final_training_ELBO.\n" - }, - - "epoch_elbo_fail_fraction": { - "type": "number", - "description": "Type: `double`. Training is considered to have failed if \n(previous_epoch_test_ELBO - current_epoch_test_ELBO)/(previous_epoch_test_ELBO - initial_train_ELBO) \u003e EPOCH_ELBO_FAIL_FRACTION", - "help_text": "Type: `double`. Training is considered to have failed if \n(previous_epoch_test_ELBO - current_epoch_test_ELBO)/(previous_epoch_test_ELBO - initial_train_ELBO) \u003e EPOCH_ELBO_FAIL_FRACTION.\nTraining will automatically re-run if --num-training-tries \u003e 1.\nBy default, will not fail training based on epoch_training_ELBO.\n" - }, - - "num_training_tries": { - "type": "integer", - "description": "Type: `integer`, default: `1`. Number of times to attempt to train the model", - "help_text": "Type: `integer`, default: `1`. Number of times to attempt to train the model. At each subsequent attempt,\nthe learning rate is multiplied by LEARNING_RATE_RETRY_MULT.\n", - "default": "1" - }, - - "learning_rate_retry_mult": { - "type": "number", - "description": "Type: `double`, default: `0.2`. Learning rate is multiplied by this amount each time a new training\nattempt is made", - "help_text": "Type: `double`, default: `0.2`. Learning rate is multiplied by this amount each time a new training\nattempt is made. (This parameter is only used if training fails based\non EPOCH_ELBO_FAIL_FRACTION or FINAL_ELBO_FAIL_FRACTION and\nNUM_TRAINING_TRIES is \u003e 1.) \n", - "default": "0.2" - }, - - "posterior_batch_size": { - "type": "integer", - "description": "Type: `integer`, default: `128`. Training detail: size of batches when creating the posterior", - "help_text": "Type: `integer`, default: `128`. 
Training detail: size of batches when creating the posterior.\nReduce this to avoid running out of GPU memory creating the posterior\n(will be slower).\n", - "default": "128" - }, - - "posterior_regulation": { - "type": "string", - "description": "Type: `string`, choices: ``PRq`, `PRmu`, `PRmu_gene``. Posterior regularization method", - "help_text": "Type: `string`, choices: ``PRq`, `PRmu`, `PRmu_gene``. Posterior regularization method. (For experts: not required for normal usage,\nsee documentation). \n\n* PRq is approximate quantile-targeting.\n* PRmu is approximate mean-targeting aggregated over genes (behavior of v0.2.0).\n* PRmu_gene is approximate mean-targeting per gene.\n", - "enum": ["PRq", "PRmu", "PRmu_gene"] + "default": "rna" + } + + +} +}, + + + "outputs" : { + "title": "Outputs", + "type": "object", + "description": "No description", + "properties": { + + + "output": { + "type": + "string", + "description": "Type: `file`, required, default: `$id.$key.output.h5mu`, example: `output.h5mu`. Full count matrix as an h5mu file, with background RNA removed", + "help_text": "Type: `file`, required, default: `$id.$key.output.h5mu`, example: `output.h5mu`. Full count matrix as an h5mu file, with background RNA removed. This file contains all the original droplet barcodes." + , + "default": "$id.$key.output.h5mu" + } + + + , + "output_compression": { + "type": + "string", + "description": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. ", + "help_text": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. ", + "enum": ["gzip", "lzf"] + + + } + + + , + "layer_output": { + "type": + "string", + "description": "Type: `string`, default: `cellbender_corrected`. Output layer", + "help_text": "Type: `string`, default: `cellbender_corrected`. Output layer" + , + "default": "cellbender_corrected" + } + + + , + "obs_background_fraction": { + "type": + "string", + "description": "Type: `string`, default: `cellbender_background_fraction`. ", + "help_text": "Type: `string`, default: `cellbender_background_fraction`. " + , + "default": "cellbender_background_fraction" + } + + + , + "obs_cell_probability": { + "type": + "string", + "description": "Type: `string`, default: `cellbender_cell_probability`. ", + "help_text": "Type: `string`, default: `cellbender_cell_probability`. " + , + "default": "cellbender_cell_probability" + } + + + , + "obs_cell_size": { + "type": + "string", + "description": "Type: `string`, default: `cellbender_cell_size`. ", + "help_text": "Type: `string`, default: `cellbender_cell_size`. " + , + "default": "cellbender_cell_size" + } + + + , + "obs_droplet_efficiency": { + "type": + "string", + "description": "Type: `string`, default: `cellbender_droplet_efficiency`. ", + "help_text": "Type: `string`, default: `cellbender_droplet_efficiency`. " + , + "default": "cellbender_droplet_efficiency" + } + + + , + "obs_latent_scale": { + "type": + "string", + "description": "Type: `string`, default: `cellbender_latent_scale`. ", + "help_text": "Type: `string`, default: `cellbender_latent_scale`. " + , + "default": "cellbender_latent_scale" + } + + + , + "var_ambient_expression": { + "type": + "string", + "description": "Type: `string`, default: `cellbender_ambient_expression`. ", + "help_text": "Type: `string`, default: `cellbender_ambient_expression`. " + , + "default": "cellbender_ambient_expression" + } + + + , + "obsm_gene_expression_encoding": { + "type": + "string", + "description": "Type: `string`, default: `cellbender_gene_expression_encoding`. 
", + "help_text": "Type: `string`, default: `cellbender_gene_expression_encoding`. " + , + "default": "cellbender_gene_expression_encoding" + } + + +} +}, + + + "arguments" : { + "title": "Arguments", + "type": "object", + "description": "No description", + "properties": { + + + "expected_cells_from_qc": { + "type": + "boolean", + "description": "Type: `boolean`, default: `false`. Will use the Cell Ranger QC to determine the estimated number of cells", + "help_text": "Type: `boolean`, default: `false`. Will use the Cell Ranger QC to determine the estimated number of cells" + , + "default": "False" + } + + + , + "expected_cells": { + "type": + "integer", + "description": "Type: `integer`, example: `1000`. Number of cells expected in the dataset (a rough estimate within a factor of 2 is sufficient)", + "help_text": "Type: `integer`, example: `1000`. Number of cells expected in the dataset (a rough estimate within a factor of 2 is sufficient)." + + } + + + , + "total_droplets_included": { + "type": + "integer", + "description": "Type: `integer`, example: `25000`. The number of droplets from the rank-ordered UMI plot\nthat will have their cell probabilities inferred as an\noutput", + "help_text": "Type: `integer`, example: `25000`. The number of droplets from the rank-ordered UMI plot\nthat will have their cell probabilities inferred as an\noutput. Include the droplets which might contain cells.\nDroplets beyond TOTAL_DROPLETS_INCLUDED should be\n\u0027surely empty\u0027 droplets.\n" + + } + + + , + "force_cell_umi_prior": { + "type": + "integer", + "description": "Type: `integer`. Ignore CellBender\u0027s heuristic prior estimation, and use this prior for UMI counts in cells", + "help_text": "Type: `integer`. Ignore CellBender\u0027s heuristic prior estimation, and use this prior for UMI counts in cells." + + } + + + , + "force_empty_umi_prior": { + "type": + "integer", + "description": "Type: `integer`. Ignore CellBender\u0027s heuristic prior estimation, and use this prior for UMI counts in empty droplets", + "help_text": "Type: `integer`. Ignore CellBender\u0027s heuristic prior estimation, and use this prior for UMI counts in empty droplets." + + } + + + , + "model": { + "type": + "string", + "description": "Type: `string`, default: `full`, choices: ``naive`, `simple`, `ambient`, `swapping`, `full``. Which model is being used for count data", + "help_text": "Type: `string`, default: `full`, choices: ``naive`, `simple`, `ambient`, `swapping`, `full``. Which model is being used for count data.\n\n* \u0027naive\u0027 subtracts the estimated ambient profile.\n* \u0027simple\u0027 does not model either ambient RNA or random barcode swapping (for debugging purposes -- not recommended).\n* \u0027ambient\u0027 assumes background RNA is incorporated into droplets.\n* \u0027swapping\u0027 assumes background RNA comes from random barcode swapping (via PCR chimeras).\n* \u0027full\u0027 uses a combined ambient and swapping model.\n", + "enum": ["naive", "simple", "ambient", "swapping", "full"] + + , + "default": "full" + } + + + , + "epochs": { + "type": + "integer", + "description": "Type: `integer`, default: `150`. Number of epochs to train", + "help_text": "Type: `integer`, default: `150`. Number of epochs to train." + , + "default": "150" + } + + + , + "low_count_threshold": { + "type": + "integer", + "description": "Type: `integer`, default: `5`. Droplets with UMI counts below this number are completely \nexcluded from the analysis", + "help_text": "Type: `integer`, default: `5`. 
Droplets with UMI counts below this number are completely \nexcluded from the analysis. This can help identify the correct \nprior for empty droplet counts in the rare case where empty \ncounts are extremely high (over 200).\n" + , + "default": "5" + } + + + , + "z_dim": { + "type": + "integer", + "description": "Type: `integer`, default: `64`. Dimension of latent variable z", + "help_text": "Type: `integer`, default: `64`. Dimension of latent variable z.\n" + , + "default": "64" + } + + + , + "z_layers": { + "type": + "string", + "description": "Type: List of `integer`, default: `512`, multiple_sep: `\":\"`. Dimension of hidden layers in the encoder for z", + "help_text": "Type: List of `integer`, default: `512`, multiple_sep: `\":\"`. Dimension of hidden layers in the encoder for z.\n" + , + "default": "512" + } + + + , + "training_fraction": { + "type": + "number", + "description": "Type: `double`, default: `0.9`. Training detail: the fraction of the data used for training", + "help_text": "Type: `double`, default: `0.9`. Training detail: the fraction of the data used for training.\nThe rest is never seen by the inference algorithm. Speeds up learning.\n" + , + "default": "0.9" + } + + + , + "empty_drop_training_fraction": { + "type": + "number", + "description": "Type: `double`, default: `0.2`. Training detail: the fraction of the training data each epoch that \nis drawn (randomly sampled) from surely empty droplets", + "help_text": "Type: `double`, default: `0.2`. Training detail: the fraction of the training data each epoch that \nis drawn (randomly sampled) from surely empty droplets.\n" + , + "default": "0.2" + } + + + , + "ignore_features": { + "type": + "string", + "description": "Type: List of `integer`, multiple_sep: `\":\"`. Integer indices of features to ignore entirely", + "help_text": "Type: List of `integer`, multiple_sep: `\":\"`. Integer indices of features to ignore entirely. In the output\ncount matrix, the counts for these features will be unchanged.\n" + + } + + + , + "fpr": { + "type": + "string", + "description": "Type: List of `double`, default: `0.01`, multiple_sep: `\":\"`. Target \u0027delta\u0027 false positive rate in [0, 1)", + "help_text": "Type: List of `double`, default: `0.01`, multiple_sep: `\":\"`. Target \u0027delta\u0027 false positive rate in [0, 1). Use 0 for a cohort\nof samples which will be jointly analyzed for differential expression.\nA false positive is a true signal count that is erroneously removed.\nMore background removal is accompanied by more signal removal at\nhigh values of FPR. You can specify multiple values, which will\ncreate multiple output files.\n" + , + "default": "0.01" + } + + + , + "exclude_feature_types": { + "type": + "string", + "description": "Type: List of `string`, multiple_sep: `\":\"`. Feature types to ignore during the analysis", + "help_text": "Type: List of `string`, multiple_sep: `\":\"`. Feature types to ignore during the analysis. These features will\nbe left unchanged in the output file.\n" + + } + + + , + "projected_ambient_count_threshold": { + "type": + "number", + "description": "Type: `double`, default: `0.1`. Controls how many features are included in the analysis, which\ncan lead to a large speedup", + "help_text": "Type: `double`, default: `0.1`. Controls how many features are included in the analysis, which\ncan lead to a large speedup. 
If a feature is expected to have less\nthan PROJECTED_AMBIENT_COUNT_THRESHOLD counts total in all cells\n(summed), then that gene is excluded, and it will be unchanged\nin the output count matrix. For example, \nPROJECTED_AMBIENT_COUNT_THRESHOLD = 0 will include all features\nwhich have even a single count in any empty droplet.\n" + , + "default": "0.1" + } + + + , + "learning_rate": { + "type": + "number", + "description": "Type: `double`, default: `1.0E-4`. Training detail: lower learning rate for inference", + "help_text": "Type: `double`, default: `1.0E-4`. Training detail: lower learning rate for inference.\nA OneCycle learning rate schedule is used, where the\nupper learning rate is ten times this value. (For this\nvalue, probably do not exceed 1e-3).\n" + , + "default": "0.0001" + } + + + , + "final_elbo_fail_fraction": { + "type": + "number", + "description": "Type: `double`. Training is considered to have failed if \n(best_test_ELBO - final_test_ELBO)/(best_test_ELBO - initial_test_ELBO) \u003e FINAL_ELBO_FAIL_FRACTION", + "help_text": "Type: `double`. Training is considered to have failed if \n(best_test_ELBO - final_test_ELBO)/(best_test_ELBO - initial_test_ELBO) \u003e FINAL_ELBO_FAIL_FRACTION.\nTraining will automatically re-run if --num-training-tries \u003e 1.\nBy default, will not fail training based on final_training_ELBO.\n" + + } + + + , + "epoch_elbo_fail_fraction": { + "type": + "number", + "description": "Type: `double`. Training is considered to have failed if \n(previous_epoch_test_ELBO - current_epoch_test_ELBO)/(previous_epoch_test_ELBO - initial_train_ELBO) \u003e EPOCH_ELBO_FAIL_FRACTION", + "help_text": "Type: `double`. Training is considered to have failed if \n(previous_epoch_test_ELBO - current_epoch_test_ELBO)/(previous_epoch_test_ELBO - initial_train_ELBO) \u003e EPOCH_ELBO_FAIL_FRACTION.\nTraining will automatically re-run if --num-training-tries \u003e 1.\nBy default, will not fail training based on epoch_training_ELBO.\n" - }, - - "alpha": { - "type": "number", - "description": "Type: `double`. Tunable parameter alpha for the PRq posterior regularization method\n(not normally used: see documentation)", - "help_text": "Type: `double`. Tunable parameter alpha for the PRq posterior regularization method\n(not normally used: see documentation).\n" - }, - - "q": { - "type": "number", - "description": "Type: `double`. Tunable parameter q for the CDF threshold estimation method (not\nnormally used: see documentation)", - "help_text": "Type: `double`. Tunable parameter q for the CDF threshold estimation method (not\nnormally used: see documentation).\n" - }, - - "estimator": { - "type": "string", - "description": "Type: `string`, default: `mckp`, choices: ``map`, `mean`, `cdf`, `sample`, `mckp``. Output denoised count estimation method", - "help_text": "Type: `string`, default: `mckp`, choices: ``map`, `mean`, `cdf`, `sample`, `mckp``. Output denoised count estimation method. (For experts: not required\nfor normal usage, see documentation).\n", - "enum": ["map", "mean", "cdf", "sample", "mckp"] + } + + + , + "num_training_tries": { + "type": + "integer", + "description": "Type: `integer`, default: `1`. Number of times to attempt to train the model", + "help_text": "Type: `integer`, default: `1`. Number of times to attempt to train the model. 
At each subsequent attempt,\nthe learning rate is multiplied by LEARNING_RATE_RETRY_MULT.\n" , - "default": "mckp" - }, - - "estimator_multiple_cpu": { - "type": "boolean", - "description": "Type: `boolean_true`, default: `false`. Including the flag --estimator-multiple-cpu will use more than one\nCPU to compute the MCKP output count estimator in parallel (does nothing\nfor other estimators)", - "help_text": "Type: `boolean_true`, default: `false`. Including the flag --estimator-multiple-cpu will use more than one\nCPU to compute the MCKP output count estimator in parallel (does nothing\nfor other estimators).\n", - "default": "False" - }, - - "constant_learning_rate": { - "type": "boolean", - "description": "Type: `boolean`. Including the flag --constant-learning-rate will use the ClippedAdam\noptimizer instead of the OneCycleLR learning rate schedule, which is\nthe default", - "help_text": "Type: `boolean`. Including the flag --constant-learning-rate will use the ClippedAdam\noptimizer instead of the OneCycleLR learning rate schedule, which is\nthe default. Learning is faster with the OneCycleLR schedule.\nHowever, training can easily be continued from a checkpoint for more\nepochs than the initial command specified when using ClippedAdam. On\nthe other hand, if using the OneCycleLR schedule with 150 epochs\nspecified, it is not possible to pick up from that final checkpoint\nand continue training until 250 epochs.\n" - }, - - "debug": { - "type": "boolean", - "description": "Type: `boolean_true`, default: `false`. Including the flag --debug will log extra messages useful for debugging", - "help_text": "Type: `boolean_true`, default: `false`. Including the flag --debug will log extra messages useful for debugging.\n", - "default": "False" - }, - - "cuda": { - "type": "boolean", - "description": "Type: `boolean_true`, default: `false`. Including the flag --cuda will run the inference on a\nGPU", - "help_text": "Type: `boolean_true`, default: `false`. Including the flag --cuda will run the inference on a\nGPU.\n", - "default": "False" - } - - } - }, - "nextflow input-output arguments" : { - "title": "Nextflow input-output arguments", - "type": "object", - "description": "Input/output parameters for Nextflow itself. Please note that both publishDir and publish_dir are supported but at least one has to be configured.", - "properties": { - - "publish_dir": { - "type": "string", - "description": "Type: `string`, required, example: `output/`. Path to an output directory", - "help_text": "Type: `string`, required, example: `output/`. Path to an output directory." - }, - - "param_list": { - "type": "string", - "description": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel", - "help_text": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel. A `param_list` can either be a list of maps, a csv file, a json file, a yaml file, or simply a yaml blob.\n\n* A list of maps (as-is) where the keys of each map corresponds to the arguments of the pipeline. Example: in a `nextflow.config` file: `param_list: [ [\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027], [\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027] ]`.\n* A csv file should have column names which correspond to the different arguments of this pipeline. 
Example: `--param_list data.csv` with columns `id,input`.\n* A json or a yaml file should be a list of maps, each of which has keys corresponding to the arguments of the pipeline. Example: `--param_list data.json` with contents `[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]`.\n* A yaml blob can also be passed directly as a string. Example: `--param_list \"[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]\"`.\n\nWhen passing a csv, json or yaml file, relative path names are relativized to the location of the parameter file. No relativation is performed when `param_list` is a list of maps (as-is) or a yaml blob.", - "hidden": true - } - - } - } + "default": "1" + } + + + , + "learning_rate_retry_mult": { + "type": + "number", + "description": "Type: `double`, default: `0.2`. Learning rate is multiplied by this amount each time a new training\nattempt is made", + "help_text": "Type: `double`, default: `0.2`. Learning rate is multiplied by this amount each time a new training\nattempt is made. (This parameter is only used if training fails based\non EPOCH_ELBO_FAIL_FRACTION or FINAL_ELBO_FAIL_FRACTION and\nNUM_TRAINING_TRIES is \u003e 1.) \n" + , + "default": "0.2" + } + + + , + "posterior_batch_size": { + "type": + "integer", + "description": "Type: `integer`, default: `128`. Training detail: size of batches when creating the posterior", + "help_text": "Type: `integer`, default: `128`. Training detail: size of batches when creating the posterior.\nReduce this to avoid running out of GPU memory creating the posterior\n(will be slower).\n" + , + "default": "128" + } + + + , + "posterior_regulation": { + "type": + "string", + "description": "Type: `string`, choices: ``PRq`, `PRmu`, `PRmu_gene``. Posterior regularization method", + "help_text": "Type: `string`, choices: ``PRq`, `PRmu`, `PRmu_gene``. Posterior regularization method. (For experts: not required for normal usage,\nsee documentation). \n\n* PRq is approximate quantile-targeting.\n* PRmu is approximate mean-targeting aggregated over genes (behavior of v0.2.0).\n* PRmu_gene is approximate mean-targeting per gene.\n", + "enum": ["PRq", "PRmu", "PRmu_gene"] + + + } + + + , + "alpha": { + "type": + "number", + "description": "Type: `double`. Tunable parameter alpha for the PRq posterior regularization method\n(not normally used: see documentation)", + "help_text": "Type: `double`. Tunable parameter alpha for the PRq posterior regularization method\n(not normally used: see documentation).\n" + + } + + + , + "q": { + "type": + "number", + "description": "Type: `double`. Tunable parameter q for the CDF threshold estimation method (not\nnormally used: see documentation)", + "help_text": "Type: `double`. Tunable parameter q for the CDF threshold estimation method (not\nnormally used: see documentation).\n" + + } + + + , + "estimator": { + "type": + "string", + "description": "Type: `string`, default: `mckp`, choices: ``map`, `mean`, `cdf`, `sample`, `mckp``. Output denoised count estimation method", + "help_text": "Type: `string`, default: `mckp`, choices: ``map`, `mean`, `cdf`, `sample`, `mckp``. Output denoised count estimation method. 
(For experts: not required\nfor normal usage, see documentation).\n", + "enum": ["map", "mean", "cdf", "sample", "mckp"] + + , + "default": "mckp" + } + + + , + "estimator_multiple_cpu": { + "type": + "boolean", + "description": "Type: `boolean_true`, default: `false`. Including the flag --estimator-multiple-cpu will use more than one\nCPU to compute the MCKP output count estimator in parallel (does nothing\nfor other estimators)", + "help_text": "Type: `boolean_true`, default: `false`. Including the flag --estimator-multiple-cpu will use more than one\nCPU to compute the MCKP output count estimator in parallel (does nothing\nfor other estimators).\n" + , + "default": "False" + } + + + , + "constant_learning_rate": { + "type": + "boolean", + "description": "Type: `boolean`. Including the flag --constant-learning-rate will use the ClippedAdam\noptimizer instead of the OneCycleLR learning rate schedule, which is\nthe default", + "help_text": "Type: `boolean`. Including the flag --constant-learning-rate will use the ClippedAdam\noptimizer instead of the OneCycleLR learning rate schedule, which is\nthe default. Learning is faster with the OneCycleLR schedule.\nHowever, training can easily be continued from a checkpoint for more\nepochs than the initial command specified when using ClippedAdam. On\nthe other hand, if using the OneCycleLR schedule with 150 epochs\nspecified, it is not possible to pick up from that final checkpoint\nand continue training until 250 epochs.\n" + + } + + + , + "debug": { + "type": + "boolean", + "description": "Type: `boolean_true`, default: `false`. Including the flag --debug will log extra messages useful for debugging", + "help_text": "Type: `boolean_true`, default: `false`. Including the flag --debug will log extra messages useful for debugging.\n" + , + "default": "False" + } + + + , + "cuda": { + "type": + "boolean", + "description": "Type: `boolean_true`, default: `false`. Including the flag --cuda will run the inference on a\nGPU", + "help_text": "Type: `boolean_true`, default: `false`. Including the flag --cuda will run the inference on a\nGPU.\n" + , + "default": "False" + } + + +} +}, + + + "nextflow input-output arguments" : { + "title": "Nextflow input-output arguments", + "type": "object", + "description": "Input/output parameters for Nextflow itself. Please note that both publishDir and publish_dir are supported but at least one has to be configured.", + "properties": { + + + "publish_dir": { + "type": + "string", + "description": "Type: `string`, required, example: `output/`. Path to an output directory", + "help_text": "Type: `string`, required, example: `output/`. Path to an output directory." + + } + + + , + "param_list": { + "type": + "string", + "description": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel", + "help_text": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel. A `param_list` can either be a list of maps, a csv file, a json file, a yaml file, or simply a yaml blob.\n\n* A list of maps (as-is) where the keys of each map corresponds to the arguments of the pipeline. Example: in a `nextflow.config` file: `param_list: [ [\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027], [\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027] ]`.\n* A csv file should have column names which correspond to the different arguments of this pipeline. 
Example: `--param_list data.csv` with columns `id,input`.\n* A json or a yaml file should be a list of maps, each of which has keys corresponding to the arguments of the pipeline. Example: `--param_list data.json` with contents `[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]`.\n* A yaml blob can also be passed directly as a string. Example: `--param_list \"[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]\"`.\n\nWhen passing a csv, json or yaml file, relative path names are relativized to the location of the parameter file. No relativation is performed when `param_list` is a list of maps (as-is) or a yaml blob.", + "hidden": true + + } + + +} +} +}, +"allOf": [ + + { + "$ref": "#/definitions/inputs" + }, + + { + "$ref": "#/definitions/outputs" + }, + + { + "$ref": "#/definitions/arguments" }, - "allOf": [ - { - "$ref": "#/definitions/inputs" - }, - { - "$ref": "#/definitions/outputs" - }, - { - "$ref": "#/definitions/arguments" - }, - { - "$ref": "#/definitions/nextflow input-output arguments" - } - ] + + { + "$ref": "#/definitions/nextflow input-output arguments" + } +] } diff --git a/target/nextflow/correction/cellbender_remove_background_v0_2/.config.vsh.yaml b/target/nextflow/correction/cellbender_remove_background_v0_2/.config.vsh.yaml index 798f58bacca..74ed0c17f62 100644 --- a/target/nextflow/correction/cellbender_remove_background_v0_2/.config.vsh.yaml +++ b/target/nextflow/correction/cellbender_remove_background_v0_2/.config.vsh.yaml @@ -1,7 +1,7 @@ functionality: name: "cellbender_remove_background_v0_2" namespace: "correction" - version: "0.12.3" + version: "0.12.4" argument_groups: - name: "Inputs" arguments: @@ -401,6 +401,6 @@ info: output: "/home/runner/work/openpipeline/openpipeline/target/nextflow/correction/cellbender_remove_background_v0_2" executable: "/home/runner/work/openpipeline/openpipeline/target/nextflow/correction/cellbender_remove_background_v0_2/cellbender_remove_background_v0_2" viash_version: "0.7.5" - git_commit: "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" + git_commit: "a075b9f384e200b357c4c85801062a980ddb3383" git_remote: "https://github.com/openpipelines-bio/openpipeline" - git_tag: "0.12.2-3-g827d483cf7" + git_tag: "0.12.3-3-ga075b9f384" diff --git a/target/nextflow/correction/cellbender_remove_background_v0_2/main.nf b/target/nextflow/correction/cellbender_remove_background_v0_2/main.nf index 7ed3e19518b..c0d54b13f45 100644 --- a/target/nextflow/correction/cellbender_remove_background_v0_2/main.nf +++ b/target/nextflow/correction/cellbender_remove_background_v0_2/main.nf @@ -1,4 +1,4 @@ -// cellbender_remove_background_v0_2 0.12.3 +// cellbender_remove_background_v0_2 0.12.4 // // This wrapper script is auto-generated by viash 0.7.5 and is thus a derivative // work thereof. 
This software comes with ABSOLUTELY NO WARRANTY from Data @@ -24,7 +24,7 @@ thisConfig = processConfig(jsonSlurper.parseText('''{ "functionality" : { "name" : "cellbender_remove_background_v0_2", "namespace" : "correction", - "version" : "0.12.3", + "version" : "0.12.4", "argument_groups" : [ { "name" : "Inputs", @@ -492,9 +492,9 @@ thisConfig = processConfig(jsonSlurper.parseText('''{ "platform" : "nextflow", "output" : "/home/runner/work/openpipeline/openpipeline/target/nextflow/correction/cellbender_remove_background_v0_2", "viash_version" : "0.7.5", - "git_commit" : "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f", + "git_commit" : "a075b9f384e200b357c4c85801062a980ddb3383", "git_remote" : "https://github.com/openpipelines-bio/openpipeline", - "git_tag" : "0.12.2-3-g827d483cf7" + "git_tag" : "0.12.3-3-ga075b9f384" } }''')) diff --git a/target/nextflow/correction/cellbender_remove_background_v0_2/nextflow.config b/target/nextflow/correction/cellbender_remove_background_v0_2/nextflow.config index 259304a26d9..360afb03868 100644 --- a/target/nextflow/correction/cellbender_remove_background_v0_2/nextflow.config +++ b/target/nextflow/correction/cellbender_remove_background_v0_2/nextflow.config @@ -2,7 +2,7 @@ manifest { name = 'cellbender_remove_background_v0_2' mainScript = 'main.nf' nextflowVersion = '!>=20.12.1-edge' - version = '0.12.3' + version = '0.12.4' description = 'Eliminating technical artifacts from high-throughput single-cell RNA sequencing data.\n\nThis module removes counts due to ambient RNA molecules and random barcode swapping from (raw) UMI-based scRNA-seq count matrices. \nAt the moment, only the count matrices produced by the CellRanger count pipeline is supported. Support for additional tools and protocols \nwill be added in the future. A quick start tutorial can be found here.\n\nFleming et al. 2022, bioRxiv.\n' } diff --git a/target/nextflow/correction/cellbender_remove_background_v0_2/nextflow_schema.json b/target/nextflow/correction/cellbender_remove_background_v0_2/nextflow_schema.json index 443371d8e06..0a7b6baa0eb 100644 --- a/target/nextflow/correction/cellbender_remove_background_v0_2/nextflow_schema.json +++ b/target/nextflow/correction/cellbender_remove_background_v0_2/nextflow_schema.json @@ -1,234 +1,351 @@ { - "$schema": "http://json-schema.org/draft-07/schema", - "title": "cellbender_remove_background_v0_2", - "description": "Eliminating technical artifacts from high-throughput single-cell RNA sequencing data.\n\nThis module removes counts due to ambient RNA molecules and random barcode swapping from (raw) UMI-based scRNA-seq count matrices. \nAt the moment, only the count matrices produced by the CellRanger count pipeline is supported. Support for additional tools and protocols \nwill be added in the future. A quick start tutorial can be found here.\n\nFleming et al. 2022, bioRxiv.\n", +"$schema": "http://json-schema.org/draft-07/schema", +"title": "cellbender_remove_background_v0_2", +"description": "Eliminating technical artifacts from high-throughput single-cell RNA sequencing data.\n\nThis module removes counts due to ambient RNA molecules and random barcode swapping from (raw) UMI-based scRNA-seq count matrices. \nAt the moment, only the count matrices produced by the CellRanger count pipeline is supported. Support for additional tools and protocols \nwill be added in the future. A quick start tutorial can be found here.\n\nFleming et al. 
2022, bioRxiv.\n", +"type": "object", +"definitions": { + + + + "inputs" : { + "title": "Inputs", "type": "object", - "definitions": { - "inputs" : { - "title": "Inputs", - "type": "object", - "description": "No description", - "properties": { - - "input": { - "type": "string", - "description": "Type: `file`, required, example: `input.h5mu`. Input h5mu file", - "help_text": "Type: `file`, required, example: `input.h5mu`. Input h5mu file." - }, - - "modality": { - "type": "string", - "description": "Type: `string`, default: `rna`. List of modalities to process", - "help_text": "Type: `string`, default: `rna`. List of modalities to process.", - "default": "rna" - } - - } - }, - "outputs" : { - "title": "Outputs", - "type": "object", - "description": "No description", - "properties": { - - "output": { - "type": "string", - "description": "Type: `file`, required, default: `$id.$key.output.h5mu`, example: `output.h5mu`. Full count matrix as an h5mu file, with background RNA removed", - "help_text": "Type: `file`, required, default: `$id.$key.output.h5mu`, example: `output.h5mu`. Full count matrix as an h5mu file, with background RNA removed. This file contains all the original droplet barcodes.", - "default": "$id.$key.output.h5mu" - }, - - "output_compression": { - "type": "string", - "description": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. ", - "help_text": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. ", - "enum": ["gzip", "lzf"] + "description": "No description", + "properties": { + + + "input": { + "type": + "string", + "description": "Type: `file`, required, example: `input.h5mu`. Input h5mu file", + "help_text": "Type: `file`, required, example: `input.h5mu`. Input h5mu file." - }, - - "layer_output": { - "type": "string", - "description": "Type: `string`, default: `corrected`. Output layer", - "help_text": "Type: `string`, default: `corrected`. Output layer", - "default": "corrected" - }, - - "obs_latent_rt_efficiency": { - "type": "string", - "description": "Type: `string`, default: `latent_rt_efficiency`. ", - "help_text": "Type: `string`, default: `latent_rt_efficiency`. ", - "default": "latent_rt_efficiency" - }, - - "obs_latent_cell_probability": { - "type": "string", - "description": "Type: `string`, default: `latent_cell_probability`. ", - "help_text": "Type: `string`, default: `latent_cell_probability`. ", - "default": "latent_cell_probability" - }, - - "obs_latent_scale": { - "type": "string", - "description": "Type: `string`, default: `latent_scale`. ", - "help_text": "Type: `string`, default: `latent_scale`. ", - "default": "latent_scale" - }, - - "var_ambient_expression": { - "type": "string", - "description": "Type: `string`, default: `ambient_expression`. ", - "help_text": "Type: `string`, default: `ambient_expression`. ", - "default": "ambient_expression" - }, - - "obsm_latent_gene_encoding": { - "type": "string", - "description": "Type: `string`, default: `cellbender_latent_gene_encoding`. ", - "help_text": "Type: `string`, default: `cellbender_latent_gene_encoding`. ", - "default": "cellbender_latent_gene_encoding" - } - - } - }, - "arguments" : { - "title": "Arguments", - "type": "object", - "description": "No description", - "properties": { - - "expected_cells": { - "type": "integer", - "description": "Type: `integer`, example: `1000`. Number of cells expected in the dataset (a rough estimate within a factor of 2 is sufficient)", - "help_text": "Type: `integer`, example: `1000`. 
Number of cells expected in the dataset (a rough estimate within a factor of 2 is sufficient)." - }, - - "total_droplets_included": { - "type": "integer", - "description": "Type: `integer`, example: `25000`. The number of droplets from the rank-ordered UMI plot\nthat will be analyzed", - "help_text": "Type: `integer`, example: `25000`. The number of droplets from the rank-ordered UMI plot\nthat will be analyzed. The largest \u0027total_droplets\u0027\ndroplets will have their cell probabilities inferred\nas an output.\n" - }, - - "expected_cells_from_qc": { - "type": "boolean", - "description": "Type: `boolean`, default: `true`. Will use the Cell Ranger QC to determine the estimated number of cells", - "help_text": "Type: `boolean`, default: `true`. Will use the Cell Ranger QC to determine the estimated number of cells", - "default": "True" - }, - - "model": { - "type": "string", - "description": "Type: `string`, default: `full`, choices: ``simple`, `ambient`, `swapping`, `full``. Which model is being used for count data", - "help_text": "Type: `string`, default: `full`, choices: ``simple`, `ambient`, `swapping`, `full``. Which model is being used for count data. \u0027simple\u0027\ndoes not model either ambient RNA or random barcode\nswapping (for debugging purposes -- not recommended).\n\u0027ambient\u0027 assumes background RNA is incorporated into\ndroplets. \u0027swapping\u0027 assumes background RNA comes from\nrandom barcode swapping. \u0027full\u0027 uses a combined\nambient and swapping model.\n", - "enum": ["simple", "ambient", "swapping", "full"] - , - "default": "full" - }, - - "epochs": { - "type": "integer", - "description": "Type: `integer`, default: `150`. Number of epochs to train", - "help_text": "Type: `integer`, default: `150`. Number of epochs to train.", - "default": "150" - }, - - "low_count_threshold": { - "type": "integer", - "description": "Type: `integer`, default: `15`. Droplets with UMI counts below this number are completely \nexcluded from the analysis", - "help_text": "Type: `integer`, default: `15`. Droplets with UMI counts below this number are completely \nexcluded from the analysis. This can help identify the correct \nprior for empty droplet counts in the rare case where empty \ncounts are extremely high (over 200).\n", - "default": "15" - }, - - "z_dim": { - "type": "integer", - "description": "Type: `integer`, default: `100`. Dimension of latent variable z", - "help_text": "Type: `integer`, default: `100`. Dimension of latent variable z.\n", - "default": "100" - }, - - "z_layers": { - "type": "string", - "description": "Type: List of `integer`, default: `500`, multiple_sep: `\":\"`. Dimension of hidden layers in the encoder for z", - "help_text": "Type: List of `integer`, default: `500`, multiple_sep: `\":\"`. Dimension of hidden layers in the encoder for z.\n", - "default": "500" - }, - - "training_fraction": { - "type": "number", - "description": "Type: `double`, default: `0.9`. Training detail: the fraction of the data used for training", - "help_text": "Type: `double`, default: `0.9`. Training detail: the fraction of the data used for training.\nThe rest is never seen by the inference algorithm. Speeds up learning.\n", - "default": "0.9" - }, - - "empty_drop_training_fraction": { - "type": "number", - "description": "Type: `double`, default: `0.5`. Training detail: the fraction of the training data each epoch that \nis drawn (randomly sampled) from surely empty droplets", - "help_text": "Type: `double`, default: `0.5`. 
Training detail: the fraction of the training data each epoch that \nis drawn (randomly sampled) from surely empty droplets.\n", - "default": "0.5" - }, - - "fpr": { - "type": "string", - "description": "Type: List of `double`, default: `0.01`, multiple_sep: `\":\"`. Target false positive rate in (0, 1)", - "help_text": "Type: List of `double`, default: `0.01`, multiple_sep: `\":\"`. Target false positive rate in (0, 1). A false positive\nis a true signal count that is erroneously removed.\nMore background removal is accompanied by more signal\nremoval at high values of FPR. You can specify\nmultiple values, which will create multiple output\nfiles.\n", - "default": "0.01" - }, - - "exclude_antibody_capture": { - "type": "boolean", - "description": "Type: `boolean_true`, default: `false`. Including the flag --exclude-antibody-capture will\ncause remove-background to operate on gene counts\nonly, ignoring other features", - "help_text": "Type: `boolean_true`, default: `false`. Including the flag --exclude-antibody-capture will\ncause remove-background to operate on gene counts\nonly, ignoring other features.\n", - "default": "False" - }, - - "learning_rate": { - "type": "number", - "description": "Type: `double`, example: `1.0E-4`. Training detail: lower learning rate for inference", - "help_text": "Type: `double`, example: `1.0E-4`. Training detail: lower learning rate for inference. A\nOneCycle learning rate schedule is used, where the\nupper learning rate is ten times this value. (For this\nvalue, probably do not exceed 1e-3).\n" - }, - - "cuda": { - "type": "boolean", - "description": "Type: `boolean_true`, default: `false`. Including the flag --cuda will run the inference on a\nGPU", - "help_text": "Type: `boolean_true`, default: `false`. Including the flag --cuda will run the inference on a\nGPU.\n", - "default": "False" - } - - } - }, - "nextflow input-output arguments" : { - "title": "Nextflow input-output arguments", - "type": "object", - "description": "Input/output parameters for Nextflow itself. Please note that both publishDir and publish_dir are supported but at least one has to be configured.", - "properties": { - - "publish_dir": { - "type": "string", - "description": "Type: `string`, required, example: `output/`. Path to an output directory", - "help_text": "Type: `string`, required, example: `output/`. Path to an output directory." - }, - - "param_list": { - "type": "string", - "description": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel", - "help_text": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel. A `param_list` can either be a list of maps, a csv file, a json file, a yaml file, or simply a yaml blob.\n\n* A list of maps (as-is) where the keys of each map corresponds to the arguments of the pipeline. Example: in a `nextflow.config` file: `param_list: [ [\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027], [\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027] ]`.\n* A csv file should have column names which correspond to the different arguments of this pipeline. Example: `--param_list data.csv` with columns `id,input`.\n* A json or a yaml file should be a list of maps, each of which has keys corresponding to the arguments of the pipeline. 
Example: `--param_list data.json` with contents `[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]`.\n* A yaml blob can also be passed directly as a string. Example: `--param_list \"[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]\"`.\n\nWhen passing a csv, json or yaml file, relative path names are relativized to the location of the parameter file. No relativation is performed when `param_list` is a list of maps (as-is) or a yaml blob.", - "hidden": true - } - - } - } + } + + + , + "modality": { + "type": + "string", + "description": "Type: `string`, default: `rna`. List of modalities to process", + "help_text": "Type: `string`, default: `rna`. List of modalities to process." + , + "default": "rna" + } + + +} +}, + + + "outputs" : { + "title": "Outputs", + "type": "object", + "description": "No description", + "properties": { + + + "output": { + "type": + "string", + "description": "Type: `file`, required, default: `$id.$key.output.h5mu`, example: `output.h5mu`. Full count matrix as an h5mu file, with background RNA removed", + "help_text": "Type: `file`, required, default: `$id.$key.output.h5mu`, example: `output.h5mu`. Full count matrix as an h5mu file, with background RNA removed. This file contains all the original droplet barcodes." + , + "default": "$id.$key.output.h5mu" + } + + + , + "output_compression": { + "type": + "string", + "description": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. ", + "help_text": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. ", + "enum": ["gzip", "lzf"] + + + } + + + , + "layer_output": { + "type": + "string", + "description": "Type: `string`, default: `corrected`. Output layer", + "help_text": "Type: `string`, default: `corrected`. Output layer" + , + "default": "corrected" + } + + + , + "obs_latent_rt_efficiency": { + "type": + "string", + "description": "Type: `string`, default: `latent_rt_efficiency`. ", + "help_text": "Type: `string`, default: `latent_rt_efficiency`. " + , + "default": "latent_rt_efficiency" + } + + + , + "obs_latent_cell_probability": { + "type": + "string", + "description": "Type: `string`, default: `latent_cell_probability`. ", + "help_text": "Type: `string`, default: `latent_cell_probability`. " + , + "default": "latent_cell_probability" + } + + + , + "obs_latent_scale": { + "type": + "string", + "description": "Type: `string`, default: `latent_scale`. ", + "help_text": "Type: `string`, default: `latent_scale`. " + , + "default": "latent_scale" + } + + + , + "var_ambient_expression": { + "type": + "string", + "description": "Type: `string`, default: `ambient_expression`. ", + "help_text": "Type: `string`, default: `ambient_expression`. " + , + "default": "ambient_expression" + } + + + , + "obsm_latent_gene_encoding": { + "type": + "string", + "description": "Type: `string`, default: `cellbender_latent_gene_encoding`. ", + "help_text": "Type: `string`, default: `cellbender_latent_gene_encoding`. " + , + "default": "cellbender_latent_gene_encoding" + } + + +} +}, + + + "arguments" : { + "title": "Arguments", + "type": "object", + "description": "No description", + "properties": { + + + "expected_cells": { + "type": + "integer", + "description": "Type: `integer`, example: `1000`. 
Number of cells expected in the dataset (a rough estimate within a factor of 2 is sufficient)", + "help_text": "Type: `integer`, example: `1000`. Number of cells expected in the dataset (a rough estimate within a factor of 2 is sufficient)." + + } + + + , + "total_droplets_included": { + "type": + "integer", + "description": "Type: `integer`, example: `25000`. The number of droplets from the rank-ordered UMI plot\nthat will be analyzed", + "help_text": "Type: `integer`, example: `25000`. The number of droplets from the rank-ordered UMI plot\nthat will be analyzed. The largest \u0027total_droplets\u0027\ndroplets will have their cell probabilities inferred\nas an output.\n" + + } + + + , + "expected_cells_from_qc": { + "type": + "boolean", + "description": "Type: `boolean`, default: `true`. Will use the Cell Ranger QC to determine the estimated number of cells", + "help_text": "Type: `boolean`, default: `true`. Will use the Cell Ranger QC to determine the estimated number of cells" + , + "default": "True" + } + + + , + "model": { + "type": + "string", + "description": "Type: `string`, default: `full`, choices: ``simple`, `ambient`, `swapping`, `full``. Which model is being used for count data", + "help_text": "Type: `string`, default: `full`, choices: ``simple`, `ambient`, `swapping`, `full``. Which model is being used for count data. \u0027simple\u0027\ndoes not model either ambient RNA or random barcode\nswapping (for debugging purposes -- not recommended).\n\u0027ambient\u0027 assumes background RNA is incorporated into\ndroplets. \u0027swapping\u0027 assumes background RNA comes from\nrandom barcode swapping. \u0027full\u0027 uses a combined\nambient and swapping model.\n", + "enum": ["simple", "ambient", "swapping", "full"] + + , + "default": "full" + } + + + , + "epochs": { + "type": + "integer", + "description": "Type: `integer`, default: `150`. Number of epochs to train", + "help_text": "Type: `integer`, default: `150`. Number of epochs to train." + , + "default": "150" + } + + + , + "low_count_threshold": { + "type": + "integer", + "description": "Type: `integer`, default: `15`. Droplets with UMI counts below this number are completely \nexcluded from the analysis", + "help_text": "Type: `integer`, default: `15`. Droplets with UMI counts below this number are completely \nexcluded from the analysis. This can help identify the correct \nprior for empty droplet counts in the rare case where empty \ncounts are extremely high (over 200).\n" + , + "default": "15" + } + + + , + "z_dim": { + "type": + "integer", + "description": "Type: `integer`, default: `100`. Dimension of latent variable z", + "help_text": "Type: `integer`, default: `100`. Dimension of latent variable z.\n" + , + "default": "100" + } + + + , + "z_layers": { + "type": + "string", + "description": "Type: List of `integer`, default: `500`, multiple_sep: `\":\"`. Dimension of hidden layers in the encoder for z", + "help_text": "Type: List of `integer`, default: `500`, multiple_sep: `\":\"`. Dimension of hidden layers in the encoder for z.\n" + , + "default": "500" + } + + + , + "training_fraction": { + "type": + "number", + "description": "Type: `double`, default: `0.9`. Training detail: the fraction of the data used for training", + "help_text": "Type: `double`, default: `0.9`. Training detail: the fraction of the data used for training.\nThe rest is never seen by the inference algorithm. 
Speeds up learning.\n" + , + "default": "0.9" + } + + + , + "empty_drop_training_fraction": { + "type": + "number", + "description": "Type: `double`, default: `0.5`. Training detail: the fraction of the training data each epoch that \nis drawn (randomly sampled) from surely empty droplets", + "help_text": "Type: `double`, default: `0.5`. Training detail: the fraction of the training data each epoch that \nis drawn (randomly sampled) from surely empty droplets.\n" + , + "default": "0.5" + } + + + , + "fpr": { + "type": + "string", + "description": "Type: List of `double`, default: `0.01`, multiple_sep: `\":\"`. Target false positive rate in (0, 1)", + "help_text": "Type: List of `double`, default: `0.01`, multiple_sep: `\":\"`. Target false positive rate in (0, 1). A false positive\nis a true signal count that is erroneously removed.\nMore background removal is accompanied by more signal\nremoval at high values of FPR. You can specify\nmultiple values, which will create multiple output\nfiles.\n" + , + "default": "0.01" + } + + + , + "exclude_antibody_capture": { + "type": + "boolean", + "description": "Type: `boolean_true`, default: `false`. Including the flag --exclude-antibody-capture will\ncause remove-background to operate on gene counts\nonly, ignoring other features", + "help_text": "Type: `boolean_true`, default: `false`. Including the flag --exclude-antibody-capture will\ncause remove-background to operate on gene counts\nonly, ignoring other features.\n" + , + "default": "False" + } + + + , + "learning_rate": { + "type": + "number", + "description": "Type: `double`, example: `1.0E-4`. Training detail: lower learning rate for inference", + "help_text": "Type: `double`, example: `1.0E-4`. Training detail: lower learning rate for inference. A\nOneCycle learning rate schedule is used, where the\nupper learning rate is ten times this value. (For this\nvalue, probably do not exceed 1e-3).\n" + + } + + + , + "cuda": { + "type": + "boolean", + "description": "Type: `boolean_true`, default: `false`. Including the flag --cuda will run the inference on a\nGPU", + "help_text": "Type: `boolean_true`, default: `false`. Including the flag --cuda will run the inference on a\nGPU.\n" + , + "default": "False" + } + + +} +}, + + + "nextflow input-output arguments" : { + "title": "Nextflow input-output arguments", + "type": "object", + "description": "Input/output parameters for Nextflow itself. Please note that both publishDir and publish_dir are supported but at least one has to be configured.", + "properties": { + + + "publish_dir": { + "type": + "string", + "description": "Type: `string`, required, example: `output/`. Path to an output directory", + "help_text": "Type: `string`, required, example: `output/`. Path to an output directory." + + } + + + , + "param_list": { + "type": + "string", + "description": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel", + "help_text": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel. A `param_list` can either be a list of maps, a csv file, a json file, a yaml file, or simply a yaml blob.\n\n* A list of maps (as-is) where the keys of each map corresponds to the arguments of the pipeline. 
Example: in a `nextflow.config` file: `param_list: [ [\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027], [\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027] ]`.\n* A csv file should have column names which correspond to the different arguments of this pipeline. Example: `--param_list data.csv` with columns `id,input`.\n* A json or a yaml file should be a list of maps, each of which has keys corresponding to the arguments of the pipeline. Example: `--param_list data.json` with contents `[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]`.\n* A yaml blob can also be passed directly as a string. Example: `--param_list \"[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]\"`.\n\nWhen passing a csv, json or yaml file, relative path names are relativized to the location of the parameter file. No relativation is performed when `param_list` is a list of maps (as-is) or a yaml blob.", + "hidden": true + + } + + +} +} +}, +"allOf": [ + + { + "$ref": "#/definitions/inputs" + }, + + { + "$ref": "#/definitions/outputs" + }, + + { + "$ref": "#/definitions/arguments" }, - "allOf": [ - { - "$ref": "#/definitions/inputs" - }, - { - "$ref": "#/definitions/outputs" - }, - { - "$ref": "#/definitions/arguments" - }, - { - "$ref": "#/definitions/nextflow input-output arguments" - } - ] + + { + "$ref": "#/definitions/nextflow input-output arguments" + } +] } diff --git a/target/nextflow/dataflow/concat/.config.vsh.yaml b/target/nextflow/dataflow/concat/.config.vsh.yaml index 3a98bf31ed3..e2c35f34947 100644 --- a/target/nextflow/dataflow/concat/.config.vsh.yaml +++ b/target/nextflow/dataflow/concat/.config.vsh.yaml @@ -1,7 +1,7 @@ functionality: name: "concat" namespace: "dataflow" - version: "0.12.3" + version: "0.12.4" authors: - name: "Dries Schaumont" roles: @@ -217,6 +217,6 @@ info: output: "/home/runner/work/openpipeline/openpipeline/target/nextflow/dataflow/concat" executable: "/home/runner/work/openpipeline/openpipeline/target/nextflow/dataflow/concat/concat" viash_version: "0.7.5" - git_commit: "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" + git_commit: "a075b9f384e200b357c4c85801062a980ddb3383" git_remote: "https://github.com/openpipelines-bio/openpipeline" - git_tag: "0.12.2-3-g827d483cf7" + git_tag: "0.12.3-3-ga075b9f384" diff --git a/target/nextflow/dataflow/concat/main.nf b/target/nextflow/dataflow/concat/main.nf index dfb027d5c2a..4ca228e8537 100644 --- a/target/nextflow/dataflow/concat/main.nf +++ b/target/nextflow/dataflow/concat/main.nf @@ -1,4 +1,4 @@ -// concat 0.12.3 +// concat 0.12.4 // // This wrapper script is auto-generated by viash 0.7.5 and is thus a derivative // work thereof. 
This software comes with ABSOLUTELY NO WARRANTY from Data @@ -27,7 +27,7 @@ thisConfig = processConfig(jsonSlurper.parseText('''{ "functionality" : { "name" : "concat", "namespace" : "dataflow", - "version" : "0.12.3", + "version" : "0.12.4", "authors" : [ { "name" : "Dries Schaumont", @@ -297,9 +297,9 @@ thisConfig = processConfig(jsonSlurper.parseText('''{ "platform" : "nextflow", "output" : "/home/runner/work/openpipeline/openpipeline/target/nextflow/dataflow/concat", "viash_version" : "0.7.5", - "git_commit" : "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f", + "git_commit" : "a075b9f384e200b357c4c85801062a980ddb3383", "git_remote" : "https://github.com/openpipelines-bio/openpipeline", - "git_tag" : "0.12.2-3-g827d483cf7" + "git_tag" : "0.12.3-3-ga075b9f384" } }''')) diff --git a/target/nextflow/dataflow/concat/nextflow.config b/target/nextflow/dataflow/concat/nextflow.config index 70ee3a6e3de..798f70e2b32 100644 --- a/target/nextflow/dataflow/concat/nextflow.config +++ b/target/nextflow/dataflow/concat/nextflow.config @@ -2,7 +2,7 @@ manifest { name = 'concat' mainScript = 'main.nf' nextflowVersion = '!>=20.12.1-edge' - version = '0.12.3' + version = '0.12.4' description = 'Concatenates several uni-modal samples in .h5mu files into a single file.\n' author = 'Dries Schaumont' } diff --git a/target/nextflow/dataflow/concat/nextflow_schema.json b/target/nextflow/dataflow/concat/nextflow_schema.json index f3e68228ccd..101e567e1ce 100644 --- a/target/nextflow/dataflow/concat/nextflow_schema.json +++ b/target/nextflow/dataflow/concat/nextflow_schema.json @@ -1,88 +1,127 @@ { - "$schema": "http://json-schema.org/draft-07/schema", - "title": "concat", - "description": "Concatenates several uni-modal samples in .h5mu files into a single file.\n", +"$schema": "http://json-schema.org/draft-07/schema", +"title": "concat", +"description": "Concatenates several uni-modal samples in .h5mu files into a single file.\n", +"type": "object", +"definitions": { + + + + "arguments" : { + "title": "Arguments", "type": "object", - "definitions": { - "arguments" : { - "title": "Arguments", - "type": "object", - "description": "No description", - "properties": { - - "input": { - "type": "string", - "description": "Type: List of `file`, required, example: `sample_paths`, multiple_sep: `\",\"`. Paths to the different samples to be concatenated", - "help_text": "Type: List of `file`, required, example: `sample_paths`, multiple_sep: `\",\"`. Paths to the different samples to be concatenated." - }, - - "input_id": { - "type": "string", - "description": "Type: List of `string`, multiple_sep: `\",\"`. Names of the different samples that have to be concatenated", - "help_text": "Type: List of `string`, multiple_sep: `\",\"`. Names of the different samples that have to be concatenated. Must be specified when using \u0027--mode move\u0027.\nIn this case, the ids will be used for the columns names of the dataframes registring the conflicts.\nIf specified, must be of same length as `--input`.\n" - }, - - "output": { - "type": "string", - "description": "Type: `file`, default: `$id.$key.output.h5mu`, example: `output.h5mu`. ", - "help_text": "Type: `file`, default: `$id.$key.output.h5mu`, example: `output.h5mu`. ", - "default": "$id.$key.output.h5mu" - }, - - "output_compression": { - "type": "string", - "description": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. The compression format to be used on the output h5mu object", - "help_text": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. 
The compression format to be used on the output h5mu object.", - "enum": ["gzip", "lzf"] + "description": "No description", + "properties": { + + + "input": { + "type": + "string", + "description": "Type: List of `file`, required, example: `sample_paths`, multiple_sep: `\",\"`. Paths to the different samples to be concatenated", + "help_text": "Type: List of `file`, required, example: `sample_paths`, multiple_sep: `\",\"`. Paths to the different samples to be concatenated." + + } + + + , + "input_id": { + "type": + "string", + "description": "Type: List of `string`, multiple_sep: `\",\"`. Names of the different samples that have to be concatenated", + "help_text": "Type: List of `string`, multiple_sep: `\",\"`. Names of the different samples that have to be concatenated. Must be specified when using \u0027--mode move\u0027.\nIn this case, the ids will be used for the columns names of the dataframes registring the conflicts.\nIf specified, must be of same length as `--input`.\n" + + } + + + , + "output": { + "type": + "string", + "description": "Type: `file`, default: `$id.$key.output.h5mu`, example: `output.h5mu`. ", + "help_text": "Type: `file`, default: `$id.$key.output.h5mu`, example: `output.h5mu`. " + , + "default": "$id.$key.output.h5mu" + } + + + , + "output_compression": { + "type": + "string", + "description": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. The compression format to be used on the output h5mu object", + "help_text": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. The compression format to be used on the output h5mu object.", + "enum": ["gzip", "lzf"] + + + } + + + , + "obs_sample_name": { + "type": + "string", + "description": "Type: `string`, default: `sample_id`. Name of the ", + "help_text": "Type: `string`, default: `sample_id`. Name of the .obs key under which to add the sample names." + , + "default": "sample_id" + } + + + , + "other_axis_mode": { + "type": + "string", + "description": "Type: `string`, default: `move`, choices: ``same`, `unique`, `first`, `only`, `concat`, `move``. How to handle the merging of other axis (var, obs, ", + "help_text": "Type: `string`, default: `move`, choices: ``same`, `unique`, `first`, `only`, `concat`, `move``. How to handle the merging of other axis (var, obs, ...).\n\n - None: keep no data\n - same: only keep elements of the matrices which are the same in each of the samples\n - unique: only keep elements for which there is only 1 possible value (1 value that can occur in multiple samples)\n - first: keep the annotation from the first sample\n - only: keep elements that show up in only one of the objects (1 unique element in only 1 sample)\n - move: identical to \u0027same\u0027, but moving the conflicting values to .varm or .obsm\n", + "enum": ["same", "unique", "first", "only", "concat", "move"] - }, - - "obs_sample_name": { - "type": "string", - "description": "Type: `string`, default: `sample_id`. Name of the ", - "help_text": "Type: `string`, default: `sample_id`. Name of the .obs key under which to add the sample names.", - "default": "sample_id" - }, - - "other_axis_mode": { - "type": "string", - "description": "Type: `string`, default: `move`, choices: ``same`, `unique`, `first`, `only`, `concat`, `move``. How to handle the merging of other axis (var, obs, ", - "help_text": "Type: `string`, default: `move`, choices: ``same`, `unique`, `first`, `only`, `concat`, `move``. 
How to handle the merging of other axis (var, obs, ...).\n\n - None: keep no data\n - same: only keep elements of the matrices which are the same in each of the samples\n - unique: only keep elements for which there is only 1 possible value (1 value that can occur in multiple samples)\n - first: keep the annotation from the first sample\n - only: keep elements that show up in only one of the objects (1 unique element in only 1 sample)\n - move: identical to \u0027same\u0027, but moving the conflicting values to .varm or .obsm\n", - "enum": ["same", "unique", "first", "only", "concat", "move"] , - "default": "move" - } - - } - }, - "nextflow input-output arguments" : { - "title": "Nextflow input-output arguments", - "type": "object", - "description": "Input/output parameters for Nextflow itself. Please note that both publishDir and publish_dir are supported but at least one has to be configured.", - "properties": { - - "publish_dir": { - "type": "string", - "description": "Type: `string`, required, example: `output/`. Path to an output directory", - "help_text": "Type: `string`, required, example: `output/`. Path to an output directory." - }, - - "param_list": { - "type": "string", - "description": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel", - "help_text": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel. A `param_list` can either be a list of maps, a csv file, a json file, a yaml file, or simply a yaml blob.\n\n* A list of maps (as-is) where the keys of each map corresponds to the arguments of the pipeline. Example: in a `nextflow.config` file: `param_list: [ [\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027], [\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027] ]`.\n* A csv file should have column names which correspond to the different arguments of this pipeline. Example: `--param_list data.csv` with columns `id,input`.\n* A json or a yaml file should be a list of maps, each of which has keys corresponding to the arguments of the pipeline. Example: `--param_list data.json` with contents `[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]`.\n* A yaml blob can also be passed directly as a string. Example: `--param_list \"[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]\"`.\n\nWhen passing a csv, json or yaml file, relative path names are relativized to the location of the parameter file. No relativation is performed when `param_list` is a list of maps (as-is) or a yaml blob.", - "hidden": true - } - - } - } + "default": "move" + } + + +} +}, + + + "nextflow input-output arguments" : { + "title": "Nextflow input-output arguments", + "type": "object", + "description": "Input/output parameters for Nextflow itself. Please note that both publishDir and publish_dir are supported but at least one has to be configured.", + "properties": { + + + "publish_dir": { + "type": + "string", + "description": "Type: `string`, required, example: `output/`. Path to an output directory", + "help_text": "Type: `string`, required, example: `output/`. Path to an output directory." + + } + + + , + "param_list": { + "type": + "string", + "description": "Type: `string`, example: `my_params.yaml`. 
Allows inputting multiple parameter sets to initialise a Nextflow channel", + "help_text": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel. A `param_list` can either be a list of maps, a csv file, a json file, a yaml file, or simply a yaml blob.\n\n* A list of maps (as-is) where the keys of each map corresponds to the arguments of the pipeline. Example: in a `nextflow.config` file: `param_list: [ [\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027], [\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027] ]`.\n* A csv file should have column names which correspond to the different arguments of this pipeline. Example: `--param_list data.csv` with columns `id,input`.\n* A json or a yaml file should be a list of maps, each of which has keys corresponding to the arguments of the pipeline. Example: `--param_list data.json` with contents `[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]`.\n* A yaml blob can also be passed directly as a string. Example: `--param_list \"[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]\"`.\n\nWhen passing a csv, json or yaml file, relative path names are relativized to the location of the parameter file. No relativation is performed when `param_list` is a list of maps (as-is) or a yaml blob.", + "hidden": true + + } + + +} +} +}, +"allOf": [ + + { + "$ref": "#/definitions/arguments" }, - "allOf": [ - { - "$ref": "#/definitions/arguments" - }, - { - "$ref": "#/definitions/nextflow input-output arguments" - } - ] + + { + "$ref": "#/definitions/nextflow input-output arguments" + } +] } diff --git a/target/nextflow/dataflow/merge/.config.vsh.yaml b/target/nextflow/dataflow/merge/.config.vsh.yaml index 71715a30356..9a87c8835ba 100644 --- a/target/nextflow/dataflow/merge/.config.vsh.yaml +++ b/target/nextflow/dataflow/merge/.config.vsh.yaml @@ -1,7 +1,7 @@ functionality: name: "merge" namespace: "dataflow" - version: "0.12.3" + version: "0.12.4" authors: - name: "Dries Schaumont" roles: @@ -170,6 +170,6 @@ info: output: "/home/runner/work/openpipeline/openpipeline/target/nextflow/dataflow/merge" executable: "/home/runner/work/openpipeline/openpipeline/target/nextflow/dataflow/merge/merge" viash_version: "0.7.5" - git_commit: "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" + git_commit: "a075b9f384e200b357c4c85801062a980ddb3383" git_remote: "https://github.com/openpipelines-bio/openpipeline" - git_tag: "0.12.2-3-g827d483cf7" + git_tag: "0.12.3-3-ga075b9f384" diff --git a/target/nextflow/dataflow/merge/main.nf b/target/nextflow/dataflow/merge/main.nf index 50b5dc02cef..14e6a94765b 100644 --- a/target/nextflow/dataflow/merge/main.nf +++ b/target/nextflow/dataflow/merge/main.nf @@ -1,4 +1,4 @@ -// merge 0.12.3 +// merge 0.12.4 // // This wrapper script is auto-generated by viash 0.7.5 and is thus a derivative // work thereof. 
This software comes with ABSOLUTELY NO WARRANTY from Data @@ -27,7 +27,7 @@ thisConfig = processConfig(jsonSlurper.parseText('''{ "functionality" : { "name" : "merge", "namespace" : "dataflow", - "version" : "0.12.3", + "version" : "0.12.4", "authors" : [ { "name" : "Dries Schaumont", @@ -253,9 +253,9 @@ thisConfig = processConfig(jsonSlurper.parseText('''{ "platform" : "nextflow", "output" : "/home/runner/work/openpipeline/openpipeline/target/nextflow/dataflow/merge", "viash_version" : "0.7.5", - "git_commit" : "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f", + "git_commit" : "a075b9f384e200b357c4c85801062a980ddb3383", "git_remote" : "https://github.com/openpipelines-bio/openpipeline", - "git_tag" : "0.12.2-3-g827d483cf7" + "git_tag" : "0.12.3-3-ga075b9f384" } }''')) diff --git a/target/nextflow/dataflow/merge/nextflow.config b/target/nextflow/dataflow/merge/nextflow.config index 3394916e04a..5067d6a3790 100644 --- a/target/nextflow/dataflow/merge/nextflow.config +++ b/target/nextflow/dataflow/merge/nextflow.config @@ -2,7 +2,7 @@ manifest { name = 'merge' mainScript = 'main.nf' nextflowVersion = '!>=20.12.1-edge' - version = '0.12.3' + version = '0.12.4' description = 'Combine one or more single-modality .h5mu files together into one .h5mu file.\n' author = 'Dries Schaumont' } diff --git a/target/nextflow/dataflow/merge/nextflow_schema.json b/target/nextflow/dataflow/merge/nextflow_schema.json index 32c4ba7f789..c4ef46d5fee 100644 --- a/target/nextflow/dataflow/merge/nextflow_schema.json +++ b/target/nextflow/dataflow/merge/nextflow_schema.json @@ -1,67 +1,94 @@ { - "$schema": "http://json-schema.org/draft-07/schema", - "title": "merge", - "description": "Combine one or more single-modality .h5mu files together into one .h5mu file.\n", +"$schema": "http://json-schema.org/draft-07/schema", +"title": "merge", +"description": "Combine one or more single-modality .h5mu files together into one .h5mu file.\n", +"type": "object", +"definitions": { + + + + "arguments" : { + "title": "Arguments", "type": "object", - "definitions": { - "arguments" : { - "title": "Arguments", - "type": "object", - "description": "No description", - "properties": { - - "input": { - "type": "string", - "description": "Type: List of `file`, required, default: `sample_paths`, multiple_sep: `\",\"`. Paths to the single-modality ", - "help_text": "Type: List of `file`, required, default: `sample_paths`, multiple_sep: `\",\"`. Paths to the single-modality .h5mu files that need to be combined", - "default": "sample_paths" - }, - - "output": { - "type": "string", - "description": "Type: `file`, default: `$id.$key.output.h5mu`. Path to the output file", - "help_text": "Type: `file`, default: `$id.$key.output.h5mu`. Path to the output file.", - "default": "$id.$key.output.h5mu" - }, - - "output_compression": { - "type": "string", - "description": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. The compression format to be used on the output h5mu object", - "help_text": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. The compression format to be used on the output h5mu object.", - "enum": ["gzip", "lzf"] + "description": "No description", + "properties": { + + + "input": { + "type": + "string", + "description": "Type: List of `file`, required, default: `sample_paths`, multiple_sep: `\",\"`. Paths to the single-modality ", + "help_text": "Type: List of `file`, required, default: `sample_paths`, multiple_sep: `\",\"`. 
Paths to the single-modality .h5mu files that need to be combined" + , + "default": "sample_paths" + } + + + , + "output": { + "type": + "string", + "description": "Type: `file`, default: `$id.$key.output.h5mu`. Path to the output file", + "help_text": "Type: `file`, default: `$id.$key.output.h5mu`. Path to the output file." + , + "default": "$id.$key.output.h5mu" + } + + + , + "output_compression": { + "type": + "string", + "description": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. The compression format to be used on the output h5mu object", + "help_text": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. The compression format to be used on the output h5mu object.", + "enum": ["gzip", "lzf"] - } - - } - }, - "nextflow input-output arguments" : { - "title": "Nextflow input-output arguments", - "type": "object", - "description": "Input/output parameters for Nextflow itself. Please note that both publishDir and publish_dir are supported but at least one has to be configured.", - "properties": { - - "publish_dir": { - "type": "string", - "description": "Type: `string`, required, example: `output/`. Path to an output directory", - "help_text": "Type: `string`, required, example: `output/`. Path to an output directory." - }, - - "param_list": { - "type": "string", - "description": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel", - "help_text": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel. A `param_list` can either be a list of maps, a csv file, a json file, a yaml file, or simply a yaml blob.\n\n* A list of maps (as-is) where the keys of each map corresponds to the arguments of the pipeline. Example: in a `nextflow.config` file: `param_list: [ [\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027], [\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027] ]`.\n* A csv file should have column names which correspond to the different arguments of this pipeline. Example: `--param_list data.csv` with columns `id,input`.\n* A json or a yaml file should be a list of maps, each of which has keys corresponding to the arguments of the pipeline. Example: `--param_list data.json` with contents `[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]`.\n* A yaml blob can also be passed directly as a string. Example: `--param_list \"[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]\"`.\n\nWhen passing a csv, json or yaml file, relative path names are relativized to the location of the parameter file. No relativation is performed when `param_list` is a list of maps (as-is) or a yaml blob.", - "hidden": true - } - - } - } + + } + + +} +}, + + + "nextflow input-output arguments" : { + "title": "Nextflow input-output arguments", + "type": "object", + "description": "Input/output parameters for Nextflow itself. Please note that both publishDir and publish_dir are supported but at least one has to be configured.", + "properties": { + + + "publish_dir": { + "type": + "string", + "description": "Type: `string`, required, example: `output/`. Path to an output directory", + "help_text": "Type: `string`, required, example: `output/`. Path to an output directory." 
+ + } + + + , + "param_list": { + "type": + "string", + "description": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel", + "help_text": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel. A `param_list` can either be a list of maps, a csv file, a json file, a yaml file, or simply a yaml blob.\n\n* A list of maps (as-is) where the keys of each map corresponds to the arguments of the pipeline. Example: in a `nextflow.config` file: `param_list: [ [\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027], [\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027] ]`.\n* A csv file should have column names which correspond to the different arguments of this pipeline. Example: `--param_list data.csv` with columns `id,input`.\n* A json or a yaml file should be a list of maps, each of which has keys corresponding to the arguments of the pipeline. Example: `--param_list data.json` with contents `[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]`.\n* A yaml blob can also be passed directly as a string. Example: `--param_list \"[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]\"`.\n\nWhen passing a csv, json or yaml file, relative path names are relativized to the location of the parameter file. No relativation is performed when `param_list` is a list of maps (as-is) or a yaml blob.", + "hidden": true + + } + + +} +} +}, +"allOf": [ + + { + "$ref": "#/definitions/arguments" }, - "allOf": [ - { - "$ref": "#/definitions/arguments" - }, - { - "$ref": "#/definitions/nextflow input-output arguments" - } - ] + + { + "$ref": "#/definitions/nextflow input-output arguments" + } +] } diff --git a/target/nextflow/dataflow/split_modalities/.config.vsh.yaml b/target/nextflow/dataflow/split_modalities/.config.vsh.yaml index 36fd7afc059..292452a61f4 100644 --- a/target/nextflow/dataflow/split_modalities/.config.vsh.yaml +++ b/target/nextflow/dataflow/split_modalities/.config.vsh.yaml @@ -1,7 +1,7 @@ functionality: name: "split_modalities" namespace: "dataflow" - version: "0.12.3" + version: "0.12.4" authors: - name: "Dries Schaumont" roles: @@ -209,6 +209,6 @@ info: output: "/home/runner/work/openpipeline/openpipeline/target/nextflow/dataflow/split_modalities" executable: "/home/runner/work/openpipeline/openpipeline/target/nextflow/dataflow/split_modalities/split_modalities" viash_version: "0.7.5" - git_commit: "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" + git_commit: "a075b9f384e200b357c4c85801062a980ddb3383" git_remote: "https://github.com/openpipelines-bio/openpipeline" - git_tag: "0.12.2-3-g827d483cf7" + git_tag: "0.12.3-3-ga075b9f384" diff --git a/target/nextflow/dataflow/split_modalities/main.nf b/target/nextflow/dataflow/split_modalities/main.nf index d64dd97a720..2c43925cb3a 100644 --- a/target/nextflow/dataflow/split_modalities/main.nf +++ b/target/nextflow/dataflow/split_modalities/main.nf @@ -1,4 +1,4 @@ -// split_modalities 0.12.3 +// split_modalities 0.12.4 // // This wrapper script is auto-generated by viash 0.7.5 and is thus a derivative // work thereof. 
This software comes with ABSOLUTELY NO WARRANTY from Data @@ -28,7 +28,7 @@ thisConfig = processConfig(jsonSlurper.parseText('''{ "functionality" : { "name" : "split_modalities", "namespace" : "dataflow", - "version" : "0.12.3", + "version" : "0.12.4", "authors" : [ { "name" : "Dries Schaumont", @@ -303,9 +303,9 @@ thisConfig = processConfig(jsonSlurper.parseText('''{ "platform" : "nextflow", "output" : "/home/runner/work/openpipeline/openpipeline/target/nextflow/dataflow/split_modalities", "viash_version" : "0.7.5", - "git_commit" : "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f", + "git_commit" : "a075b9f384e200b357c4c85801062a980ddb3383", "git_remote" : "https://github.com/openpipelines-bio/openpipeline", - "git_tag" : "0.12.2-3-g827d483cf7" + "git_tag" : "0.12.3-3-ga075b9f384" } }''')) diff --git a/target/nextflow/dataflow/split_modalities/nextflow.config b/target/nextflow/dataflow/split_modalities/nextflow.config index d52bf76959e..5e522b18a88 100644 --- a/target/nextflow/dataflow/split_modalities/nextflow.config +++ b/target/nextflow/dataflow/split_modalities/nextflow.config @@ -2,7 +2,7 @@ manifest { name = 'split_modalities' mainScript = 'main.nf' nextflowVersion = '!>=20.12.1-edge' - version = '0.12.3' + version = '0.12.4' description = 'Split the modalities from a single .h5mu multimodal sample into seperate .h5mu files. \n' author = 'Dries Schaumont, Robrecht Cannoodt' } diff --git a/target/nextflow/dataflow/split_modalities/nextflow_schema.json b/target/nextflow/dataflow/split_modalities/nextflow_schema.json index c172bd89e8c..c0d287f31e0 100644 --- a/target/nextflow/dataflow/split_modalities/nextflow_schema.json +++ b/target/nextflow/dataflow/split_modalities/nextflow_schema.json @@ -1,81 +1,116 @@ { - "$schema": "http://json-schema.org/draft-07/schema", - "title": "split_modalities", - "description": "Split the modalities from a single .h5mu multimodal sample into seperate .h5mu files. \n", +"$schema": "http://json-schema.org/draft-07/schema", +"title": "split_modalities", +"description": "Split the modalities from a single .h5mu multimodal sample into seperate .h5mu files. \n", +"type": "object", +"definitions": { + + + + "arguments" : { + "title": "Arguments", "type": "object", - "definitions": { - "arguments" : { - "title": "Arguments", - "type": "object", - "description": "No description", - "properties": { - - "input": { - "type": "string", - "description": "Type: `file`, required, default: `sample_path`. Path to a single ", - "help_text": "Type: `file`, required, default: `sample_path`. Path to a single .h5mu file.", - "default": "sample_path" - }, - - "output": { - "type": "string", - "description": "Type: `file`, required, default: `$id.$key.output.output`, example: `/path/to/output`. Output directory containing multiple h5mu files", - "help_text": "Type: `file`, required, default: `$id.$key.output.output`, example: `/path/to/output`. Output directory containing multiple h5mu files.", - "default": "$id.$key.output.output" - }, - - "output_compression": { - "type": "string", - "description": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. The compression format to be used on the output h5mu object", - "help_text": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. The compression format to be used on the output h5mu object.", - "enum": ["gzip", "lzf"] + "description": "No description", + "properties": { + + + "input": { + "type": + "string", + "description": "Type: `file`, required, default: `sample_path`. 
Path to a single ", + "help_text": "Type: `file`, required, default: `sample_path`. Path to a single .h5mu file." + , + "default": "sample_path" + } + + + , + "output": { + "type": + "string", + "description": "Type: `file`, required, default: `$id.$key.output.output`, example: `/path/to/output`. Output directory containing multiple h5mu files", + "help_text": "Type: `file`, required, default: `$id.$key.output.output`, example: `/path/to/output`. Output directory containing multiple h5mu files." + , + "default": "$id.$key.output.output" + } + + + , + "output_compression": { + "type": + "string", + "description": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. The compression format to be used on the output h5mu object", + "help_text": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. The compression format to be used on the output h5mu object.", + "enum": ["gzip", "lzf"] - }, - - "output_types": { - "type": "string", - "description": "Type: `file`, required, default: `$id.$key.output_types.csv`, example: `types.csv`. A csv containing the base filename and modality type per output file", - "help_text": "Type: `file`, required, default: `$id.$key.output_types.csv`, example: `types.csv`. A csv containing the base filename and modality type per output file.", - "default": "$id.$key.output_types.csv" - }, - - "compression": { - "type": "string", - "description": "Type: `string`, default: `gzip`. The compression format to be used on the final h5mu object", - "help_text": "Type: `string`, default: `gzip`. The compression format to be used on the final h5mu object.", - "default": "gzip" - } - - } - }, - "nextflow input-output arguments" : { - "title": "Nextflow input-output arguments", - "type": "object", - "description": "Input/output parameters for Nextflow itself. Please note that both publishDir and publish_dir are supported but at least one has to be configured.", - "properties": { - - "publish_dir": { - "type": "string", - "description": "Type: `string`, required, example: `output/`. Path to an output directory", - "help_text": "Type: `string`, required, example: `output/`. Path to an output directory." - }, - - "param_list": { - "type": "string", - "description": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel", - "help_text": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel. A `param_list` can either be a list of maps, a csv file, a json file, a yaml file, or simply a yaml blob.\n\n* A list of maps (as-is) where the keys of each map corresponds to the arguments of the pipeline. Example: in a `nextflow.config` file: `param_list: [ [\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027], [\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027] ]`.\n* A csv file should have column names which correspond to the different arguments of this pipeline. Example: `--param_list data.csv` with columns `id,input`.\n* A json or a yaml file should be a list of maps, each of which has keys corresponding to the arguments of the pipeline. Example: `--param_list data.json` with contents `[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]`.\n* A yaml blob can also be passed directly as a string. 
Example: `--param_list \"[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]\"`.\n\nWhen passing a csv, json or yaml file, relative path names are relativized to the location of the parameter file. No relativation is performed when `param_list` is a list of maps (as-is) or a yaml blob.", - "hidden": true - } - - } - } + + } + + + , + "output_types": { + "type": + "string", + "description": "Type: `file`, required, default: `$id.$key.output_types.csv`, example: `types.csv`. A csv containing the base filename and modality type per output file", + "help_text": "Type: `file`, required, default: `$id.$key.output_types.csv`, example: `types.csv`. A csv containing the base filename and modality type per output file." + , + "default": "$id.$key.output_types.csv" + } + + + , + "compression": { + "type": + "string", + "description": "Type: `string`, default: `gzip`. The compression format to be used on the final h5mu object", + "help_text": "Type: `string`, default: `gzip`. The compression format to be used on the final h5mu object." + , + "default": "gzip" + } + + +} +}, + + + "nextflow input-output arguments" : { + "title": "Nextflow input-output arguments", + "type": "object", + "description": "Input/output parameters for Nextflow itself. Please note that both publishDir and publish_dir are supported but at least one has to be configured.", + "properties": { + + + "publish_dir": { + "type": + "string", + "description": "Type: `string`, required, example: `output/`. Path to an output directory", + "help_text": "Type: `string`, required, example: `output/`. Path to an output directory." + + } + + + , + "param_list": { + "type": + "string", + "description": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel", + "help_text": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel. A `param_list` can either be a list of maps, a csv file, a json file, a yaml file, or simply a yaml blob.\n\n* A list of maps (as-is) where the keys of each map corresponds to the arguments of the pipeline. Example: in a `nextflow.config` file: `param_list: [ [\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027], [\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027] ]`.\n* A csv file should have column names which correspond to the different arguments of this pipeline. Example: `--param_list data.csv` with columns `id,input`.\n* A json or a yaml file should be a list of maps, each of which has keys corresponding to the arguments of the pipeline. Example: `--param_list data.json` with contents `[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]`.\n* A yaml blob can also be passed directly as a string. Example: `--param_list \"[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]\"`.\n\nWhen passing a csv, json or yaml file, relative path names are relativized to the location of the parameter file. 
No relativation is performed when `param_list` is a list of maps (as-is) or a yaml blob.", + "hidden": true + + } + + +} +} +}, +"allOf": [ + + { + "$ref": "#/definitions/arguments" }, - "allOf": [ - { - "$ref": "#/definitions/arguments" - }, - { - "$ref": "#/definitions/nextflow input-output arguments" - } - ] + + { + "$ref": "#/definitions/nextflow input-output arguments" + } +] } diff --git a/target/nextflow/demux/bcl2fastq/.config.vsh.yaml b/target/nextflow/demux/bcl2fastq/.config.vsh.yaml index 4b57441e50f..4e07a975581 100644 --- a/target/nextflow/demux/bcl2fastq/.config.vsh.yaml +++ b/target/nextflow/demux/bcl2fastq/.config.vsh.yaml @@ -1,7 +1,7 @@ functionality: name: "bcl2fastq" namespace: "demux" - version: "0.12.3" + version: "0.12.4" authors: - name: "Toni Verbeiren" roles: @@ -164,6 +164,6 @@ info: output: "/home/runner/work/openpipeline/openpipeline/target/nextflow/demux/bcl2fastq" executable: "/home/runner/work/openpipeline/openpipeline/target/nextflow/demux/bcl2fastq/bcl2fastq" viash_version: "0.7.5" - git_commit: "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" + git_commit: "a075b9f384e200b357c4c85801062a980ddb3383" git_remote: "https://github.com/openpipelines-bio/openpipeline" - git_tag: "0.12.2-3-g827d483cf7" + git_tag: "0.12.3-3-ga075b9f384" diff --git a/target/nextflow/demux/bcl2fastq/main.nf b/target/nextflow/demux/bcl2fastq/main.nf index 99d76399634..7463c4f79fc 100644 --- a/target/nextflow/demux/bcl2fastq/main.nf +++ b/target/nextflow/demux/bcl2fastq/main.nf @@ -1,4 +1,4 @@ -// bcl2fastq 0.12.3 +// bcl2fastq 0.12.4 // // This wrapper script is auto-generated by viash 0.7.5 and is thus a derivative // work thereof. This software comes with ABSOLUTELY NO WARRANTY from Data @@ -27,7 +27,7 @@ thisConfig = processConfig(jsonSlurper.parseText('''{ "functionality" : { "name" : "bcl2fastq", "namespace" : "demux", - "version" : "0.12.3", + "version" : "0.12.4", "authors" : [ { "name" : "Toni Verbeiren", @@ -232,9 +232,9 @@ thisConfig = processConfig(jsonSlurper.parseText('''{ "platform" : "nextflow", "output" : "/home/runner/work/openpipeline/openpipeline/target/nextflow/demux/bcl2fastq", "viash_version" : "0.7.5", - "git_commit" : "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f", + "git_commit" : "a075b9f384e200b357c4c85801062a980ddb3383", "git_remote" : "https://github.com/openpipelines-bio/openpipeline", - "git_tag" : "0.12.2-3-g827d483cf7" + "git_tag" : "0.12.3-3-ga075b9f384" } }''')) diff --git a/target/nextflow/demux/bcl2fastq/nextflow.config b/target/nextflow/demux/bcl2fastq/nextflow.config index 8cff2fe7b22..2a062a1ab06 100644 --- a/target/nextflow/demux/bcl2fastq/nextflow.config +++ b/target/nextflow/demux/bcl2fastq/nextflow.config @@ -2,7 +2,7 @@ manifest { name = 'bcl2fastq' mainScript = 'main.nf' nextflowVersion = '!>=20.12.1-edge' - version = '0.12.3' + version = '0.12.4' description = 'Convert bcl files to fastq files using bcl2fastq.\n' author = 'Toni Verbeiren' } diff --git a/target/nextflow/demux/bcl2fastq/nextflow_schema.json b/target/nextflow/demux/bcl2fastq/nextflow_schema.json index f784feb634b..efa834ff8b3 100644 --- a/target/nextflow/demux/bcl2fastq/nextflow_schema.json +++ b/target/nextflow/demux/bcl2fastq/nextflow_schema.json @@ -1,78 +1,113 @@ { - "$schema": "http://json-schema.org/draft-07/schema", - "title": "bcl2fastq", - "description": "Convert bcl files to fastq files using bcl2fastq.\n", +"$schema": "http://json-schema.org/draft-07/schema", +"title": "bcl2fastq", +"description": "Convert bcl files to fastq files using bcl2fastq.\n", +"type": "object", 
+"definitions": { + + + + "arguments" : { + "title": "Arguments", "type": "object", - "definitions": { - "arguments" : { - "title": "Arguments", - "type": "object", - "description": "No description", - "properties": { - - "input": { - "type": "string", - "description": "Type: `file`, required, example: `bcl_dir`. Input run directory", - "help_text": "Type: `file`, required, example: `bcl_dir`. Input run directory" - }, - - "sample_sheet": { - "type": "string", - "description": "Type: `file`, required, example: `SampleSheet.csv`. Pointer to the sample sheet", - "help_text": "Type: `file`, required, example: `SampleSheet.csv`. Pointer to the sample sheet" - }, - - "output": { - "type": "string", - "description": "Type: `file`, required, default: `$id.$key.output.output`, example: `fastq_dir`. Output directory containig fastq files", - "help_text": "Type: `file`, required, default: `$id.$key.output.output`, example: `fastq_dir`. Output directory containig fastq files", - "default": "$id.$key.output.output" - }, - - "reports": { - "type": "string", - "description": "Type: `file`, default: `$id.$key.reports.reports`, example: `reports_dir`. Reports directory", - "help_text": "Type: `file`, default: `$id.$key.reports.reports`, example: `reports_dir`. Reports directory", - "default": "$id.$key.reports.reports" - }, - - "ignore_missing": { - "type": "boolean", - "description": "Type: `boolean_true`, default: `false`. ", - "help_text": "Type: `boolean_true`, default: `false`. ", - "default": "False" - } - - } - }, - "nextflow input-output arguments" : { - "title": "Nextflow input-output arguments", - "type": "object", - "description": "Input/output parameters for Nextflow itself. Please note that both publishDir and publish_dir are supported but at least one has to be configured.", - "properties": { - - "publish_dir": { - "type": "string", - "description": "Type: `string`, required, example: `output/`. Path to an output directory", - "help_text": "Type: `string`, required, example: `output/`. Path to an output directory." - }, - - "param_list": { - "type": "string", - "description": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel", - "help_text": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel. A `param_list` can either be a list of maps, a csv file, a json file, a yaml file, or simply a yaml blob.\n\n* A list of maps (as-is) where the keys of each map corresponds to the arguments of the pipeline. Example: in a `nextflow.config` file: `param_list: [ [\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027], [\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027] ]`.\n* A csv file should have column names which correspond to the different arguments of this pipeline. Example: `--param_list data.csv` with columns `id,input`.\n* A json or a yaml file should be a list of maps, each of which has keys corresponding to the arguments of the pipeline. Example: `--param_list data.json` with contents `[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]`.\n* A yaml blob can also be passed directly as a string. 
Example: `--param_list \"[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]\"`.\n\nWhen passing a csv, json or yaml file, relative path names are relativized to the location of the parameter file. No relativation is performed when `param_list` is a list of maps (as-is) or a yaml blob.", - "hidden": true - } - - } - } + "description": "No description", + "properties": { + + + "input": { + "type": + "string", + "description": "Type: `file`, required, example: `bcl_dir`. Input run directory", + "help_text": "Type: `file`, required, example: `bcl_dir`. Input run directory" + + } + + + , + "sample_sheet": { + "type": + "string", + "description": "Type: `file`, required, example: `SampleSheet.csv`. Pointer to the sample sheet", + "help_text": "Type: `file`, required, example: `SampleSheet.csv`. Pointer to the sample sheet" + + } + + + , + "output": { + "type": + "string", + "description": "Type: `file`, required, default: `$id.$key.output.output`, example: `fastq_dir`. Output directory containig fastq files", + "help_text": "Type: `file`, required, default: `$id.$key.output.output`, example: `fastq_dir`. Output directory containig fastq files" + , + "default": "$id.$key.output.output" + } + + + , + "reports": { + "type": + "string", + "description": "Type: `file`, default: `$id.$key.reports.reports`, example: `reports_dir`. Reports directory", + "help_text": "Type: `file`, default: `$id.$key.reports.reports`, example: `reports_dir`. Reports directory" + , + "default": "$id.$key.reports.reports" + } + + + , + "ignore_missing": { + "type": + "boolean", + "description": "Type: `boolean_true`, default: `false`. ", + "help_text": "Type: `boolean_true`, default: `false`. " + , + "default": "False" + } + + +} +}, + + + "nextflow input-output arguments" : { + "title": "Nextflow input-output arguments", + "type": "object", + "description": "Input/output parameters for Nextflow itself. Please note that both publishDir and publish_dir are supported but at least one has to be configured.", + "properties": { + + + "publish_dir": { + "type": + "string", + "description": "Type: `string`, required, example: `output/`. Path to an output directory", + "help_text": "Type: `string`, required, example: `output/`. Path to an output directory." + + } + + + , + "param_list": { + "type": + "string", + "description": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel", + "help_text": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel. A `param_list` can either be a list of maps, a csv file, a json file, a yaml file, or simply a yaml blob.\n\n* A list of maps (as-is) where the keys of each map corresponds to the arguments of the pipeline. Example: in a `nextflow.config` file: `param_list: [ [\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027], [\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027] ]`.\n* A csv file should have column names which correspond to the different arguments of this pipeline. Example: `--param_list data.csv` with columns `id,input`.\n* A json or a yaml file should be a list of maps, each of which has keys corresponding to the arguments of the pipeline. 
Example: `--param_list data.json` with contents `[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]`.\n* A yaml blob can also be passed directly as a string. Example: `--param_list \"[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]\"`.\n\nWhen passing a csv, json or yaml file, relative path names are relativized to the location of the parameter file. No relativation is performed when `param_list` is a list of maps (as-is) or a yaml blob.", + "hidden": true + + } + + +} +} +}, +"allOf": [ + + { + "$ref": "#/definitions/arguments" }, - "allOf": [ - { - "$ref": "#/definitions/arguments" - }, - { - "$ref": "#/definitions/nextflow input-output arguments" - } - ] + + { + "$ref": "#/definitions/nextflow input-output arguments" + } +] } diff --git a/target/nextflow/demux/bcl_convert/.config.vsh.yaml b/target/nextflow/demux/bcl_convert/.config.vsh.yaml index 976471a7e63..0277c759099 100644 --- a/target/nextflow/demux/bcl_convert/.config.vsh.yaml +++ b/target/nextflow/demux/bcl_convert/.config.vsh.yaml @@ -1,7 +1,7 @@ functionality: name: "bcl_convert" namespace: "demux" - version: "0.12.3" + version: "0.12.4" authors: - name: "Toni Verbeiren" roles: @@ -184,6 +184,6 @@ info: output: "/home/runner/work/openpipeline/openpipeline/target/nextflow/demux/bcl_convert" executable: "/home/runner/work/openpipeline/openpipeline/target/nextflow/demux/bcl_convert/bcl_convert" viash_version: "0.7.5" - git_commit: "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" + git_commit: "a075b9f384e200b357c4c85801062a980ddb3383" git_remote: "https://github.com/openpipelines-bio/openpipeline" - git_tag: "0.12.2-3-g827d483cf7" + git_tag: "0.12.3-3-ga075b9f384" diff --git a/target/nextflow/demux/bcl_convert/main.nf b/target/nextflow/demux/bcl_convert/main.nf index 423ef5522a9..9b96db5bb02 100644 --- a/target/nextflow/demux/bcl_convert/main.nf +++ b/target/nextflow/demux/bcl_convert/main.nf @@ -1,4 +1,4 @@ -// bcl_convert 0.12.3 +// bcl_convert 0.12.4 // // This wrapper script is auto-generated by viash 0.7.5 and is thus a derivative // work thereof. 
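The demux modules regenerated below (`bcl_convert` shown here) take a run directory and a sample sheet rather than an `.h5mu` file. A sketch of a `param_list` entry for `demux/bcl_convert`, with purely illustrative ids and paths, could be:

# params.yaml -- hypothetical parameter file for demux/bcl_convert
- id: run_2023_01
  input: /data/runs/run_2023_01   # untarred BCL run directory
  sample_sheet: SampleSheet.csv
  test_mode: true                 # runs bcl-convert with --first-tile-only, per the help text

The same keys can also be passed directly on the command line (`--input`, `--sample_sheet`, `--test_mode`), with `--publish_dir` pointing at the desired output location.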
This software comes with ABSOLUTELY NO WARRANTY from Data @@ -28,7 +28,7 @@ thisConfig = processConfig(jsonSlurper.parseText('''{ "functionality" : { "name" : "bcl_convert", "namespace" : "demux", - "version" : "0.12.3", + "version" : "0.12.4", "authors" : [ { "name" : "Toni Verbeiren", @@ -260,9 +260,9 @@ thisConfig = processConfig(jsonSlurper.parseText('''{ "platform" : "nextflow", "output" : "/home/runner/work/openpipeline/openpipeline/target/nextflow/demux/bcl_convert", "viash_version" : "0.7.5", - "git_commit" : "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f", + "git_commit" : "a075b9f384e200b357c4c85801062a980ddb3383", "git_remote" : "https://github.com/openpipelines-bio/openpipeline", - "git_tag" : "0.12.2-3-g827d483cf7" + "git_tag" : "0.12.3-3-ga075b9f384" } }''')) diff --git a/target/nextflow/demux/bcl_convert/nextflow.config b/target/nextflow/demux/bcl_convert/nextflow.config index 86e51326225..9d528464a72 100644 --- a/target/nextflow/demux/bcl_convert/nextflow.config +++ b/target/nextflow/demux/bcl_convert/nextflow.config @@ -2,7 +2,7 @@ manifest { name = 'bcl_convert' mainScript = 'main.nf' nextflowVersion = '!>=20.12.1-edge' - version = '0.12.3' + version = '0.12.4' description = 'Convert bcl files to fastq files using bcl-convert.\nInformation about upgrading from bcl2fastq via\nhttps://emea.support.illumina.com/bulletins/2020/10/upgrading-from-bcl2fastq-to-bcl-convert.html\nand https://support.illumina.com/sequencing/sequencing_software/bcl-convert/compatibility.html\n' author = 'Toni Verbeiren, Marijke Van Moerbeke' } diff --git a/target/nextflow/demux/bcl_convert/nextflow_schema.json b/target/nextflow/demux/bcl_convert/nextflow_schema.json index f6306799e44..7129d37a8fe 100644 --- a/target/nextflow/demux/bcl_convert/nextflow_schema.json +++ b/target/nextflow/demux/bcl_convert/nextflow_schema.json @@ -1,78 +1,113 @@ { - "$schema": "http://json-schema.org/draft-07/schema", - "title": "bcl_convert", - "description": "Convert bcl files to fastq files using bcl-convert.\nInformation about upgrading from bcl2fastq via\nhttps://emea.support.illumina.com/bulletins/2020/10/upgrading-from-bcl2fastq-to-bcl-convert.html\nand https://support.illumina.com/sequencing/sequencing_software/bcl-convert/compatibility.html\n", +"$schema": "http://json-schema.org/draft-07/schema", +"title": "bcl_convert", +"description": "Convert bcl files to fastq files using bcl-convert.\nInformation about upgrading from bcl2fastq via\nhttps://emea.support.illumina.com/bulletins/2020/10/upgrading-from-bcl2fastq-to-bcl-convert.html\nand https://support.illumina.com/sequencing/sequencing_software/bcl-convert/compatibility.html\n", +"type": "object", +"definitions": { + + + + "arguments" : { + "title": "Arguments", "type": "object", - "definitions": { - "arguments" : { - "title": "Arguments", - "type": "object", - "description": "No description", - "properties": { - - "input": { - "type": "string", - "description": "Type: `file`, required, example: `bcl_dir`. Input run directory", - "help_text": "Type: `file`, required, example: `bcl_dir`. Input run directory" - }, - - "sample_sheet": { - "type": "string", - "description": "Type: `file`, required, example: `bcl_dir`. Pointer to the sample sheet", - "help_text": "Type: `file`, required, example: `bcl_dir`. Pointer to the sample sheet" - }, - - "output": { - "type": "string", - "description": "Type: `file`, required, default: `$id.$key.output.output`, example: `fastq_dir`. 
Output directory containig fastq files", - "help_text": "Type: `file`, required, default: `$id.$key.output.output`, example: `fastq_dir`. Output directory containig fastq files", - "default": "$id.$key.output.output" - }, - - "reports": { - "type": "string", - "description": "Type: `file`, default: `$id.$key.reports.reports`, example: `reports_dir`. Reports directory", - "help_text": "Type: `file`, default: `$id.$key.reports.reports`, example: `reports_dir`. Reports directory", - "default": "$id.$key.reports.reports" - }, - - "test_mode": { - "type": "boolean", - "description": "Type: `boolean`, default: `false`. Should bcl-convert be run in test mode (using --first-tile-only)?", - "help_text": "Type: `boolean`, default: `false`. Should bcl-convert be run in test mode (using --first-tile-only)?", - "default": "False" - } - - } - }, - "nextflow input-output arguments" : { - "title": "Nextflow input-output arguments", - "type": "object", - "description": "Input/output parameters for Nextflow itself. Please note that both publishDir and publish_dir are supported but at least one has to be configured.", - "properties": { - - "publish_dir": { - "type": "string", - "description": "Type: `string`, required, example: `output/`. Path to an output directory", - "help_text": "Type: `string`, required, example: `output/`. Path to an output directory." - }, - - "param_list": { - "type": "string", - "description": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel", - "help_text": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel. A `param_list` can either be a list of maps, a csv file, a json file, a yaml file, or simply a yaml blob.\n\n* A list of maps (as-is) where the keys of each map corresponds to the arguments of the pipeline. Example: in a `nextflow.config` file: `param_list: [ [\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027], [\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027] ]`.\n* A csv file should have column names which correspond to the different arguments of this pipeline. Example: `--param_list data.csv` with columns `id,input`.\n* A json or a yaml file should be a list of maps, each of which has keys corresponding to the arguments of the pipeline. Example: `--param_list data.json` with contents `[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]`.\n* A yaml blob can also be passed directly as a string. Example: `--param_list \"[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]\"`.\n\nWhen passing a csv, json or yaml file, relative path names are relativized to the location of the parameter file. No relativation is performed when `param_list` is a list of maps (as-is) or a yaml blob.", - "hidden": true - } - - } - } + "description": "No description", + "properties": { + + + "input": { + "type": + "string", + "description": "Type: `file`, required, example: `bcl_dir`. Input run directory", + "help_text": "Type: `file`, required, example: `bcl_dir`. Input run directory" + + } + + + , + "sample_sheet": { + "type": + "string", + "description": "Type: `file`, required, example: `bcl_dir`. Pointer to the sample sheet", + "help_text": "Type: `file`, required, example: `bcl_dir`. 
Pointer to the sample sheet" + + } + + + , + "output": { + "type": + "string", + "description": "Type: `file`, required, default: `$id.$key.output.output`, example: `fastq_dir`. Output directory containig fastq files", + "help_text": "Type: `file`, required, default: `$id.$key.output.output`, example: `fastq_dir`. Output directory containig fastq files" + , + "default": "$id.$key.output.output" + } + + + , + "reports": { + "type": + "string", + "description": "Type: `file`, default: `$id.$key.reports.reports`, example: `reports_dir`. Reports directory", + "help_text": "Type: `file`, default: `$id.$key.reports.reports`, example: `reports_dir`. Reports directory" + , + "default": "$id.$key.reports.reports" + } + + + , + "test_mode": { + "type": + "boolean", + "description": "Type: `boolean`, default: `false`. Should bcl-convert be run in test mode (using --first-tile-only)?", + "help_text": "Type: `boolean`, default: `false`. Should bcl-convert be run in test mode (using --first-tile-only)?" + , + "default": "False" + } + + +} +}, + + + "nextflow input-output arguments" : { + "title": "Nextflow input-output arguments", + "type": "object", + "description": "Input/output parameters for Nextflow itself. Please note that both publishDir and publish_dir are supported but at least one has to be configured.", + "properties": { + + + "publish_dir": { + "type": + "string", + "description": "Type: `string`, required, example: `output/`. Path to an output directory", + "help_text": "Type: `string`, required, example: `output/`. Path to an output directory." + + } + + + , + "param_list": { + "type": + "string", + "description": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel", + "help_text": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel. A `param_list` can either be a list of maps, a csv file, a json file, a yaml file, or simply a yaml blob.\n\n* A list of maps (as-is) where the keys of each map corresponds to the arguments of the pipeline. Example: in a `nextflow.config` file: `param_list: [ [\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027], [\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027] ]`.\n* A csv file should have column names which correspond to the different arguments of this pipeline. Example: `--param_list data.csv` with columns `id,input`.\n* A json or a yaml file should be a list of maps, each of which has keys corresponding to the arguments of the pipeline. Example: `--param_list data.json` with contents `[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]`.\n* A yaml blob can also be passed directly as a string. Example: `--param_list \"[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]\"`.\n\nWhen passing a csv, json or yaml file, relative path names are relativized to the location of the parameter file. 
No relativation is performed when `param_list` is a list of maps (as-is) or a yaml blob.", + "hidden": true + + } + + +} +} +}, +"allOf": [ + + { + "$ref": "#/definitions/arguments" }, - "allOf": [ - { - "$ref": "#/definitions/arguments" - }, - { - "$ref": "#/definitions/nextflow input-output arguments" - } - ] + + { + "$ref": "#/definitions/nextflow input-output arguments" + } +] } diff --git a/target/nextflow/demux/cellranger_mkfastq/.config.vsh.yaml b/target/nextflow/demux/cellranger_mkfastq/.config.vsh.yaml index fb11e2898b1..411766de74d 100644 --- a/target/nextflow/demux/cellranger_mkfastq/.config.vsh.yaml +++ b/target/nextflow/demux/cellranger_mkfastq/.config.vsh.yaml @@ -1,7 +1,7 @@ functionality: name: "cellranger_mkfastq" namespace: "demux" - version: "0.12.3" + version: "0.12.4" authors: - name: "Angela Oliveira Pisco" roles: @@ -202,6 +202,6 @@ info: output: "/home/runner/work/openpipeline/openpipeline/target/nextflow/demux/cellranger_mkfastq" executable: "/home/runner/work/openpipeline/openpipeline/target/nextflow/demux/cellranger_mkfastq/cellranger_mkfastq" viash_version: "0.7.5" - git_commit: "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" + git_commit: "a075b9f384e200b357c4c85801062a980ddb3383" git_remote: "https://github.com/openpipelines-bio/openpipeline" - git_tag: "0.12.2-3-g827d483cf7" + git_tag: "0.12.3-3-ga075b9f384" diff --git a/target/nextflow/demux/cellranger_mkfastq/main.nf b/target/nextflow/demux/cellranger_mkfastq/main.nf index e1de17d2434..d82b2d2642a 100644 --- a/target/nextflow/demux/cellranger_mkfastq/main.nf +++ b/target/nextflow/demux/cellranger_mkfastq/main.nf @@ -1,4 +1,4 @@ -// cellranger_mkfastq 0.12.3 +// cellranger_mkfastq 0.12.4 // // This wrapper script is auto-generated by viash 0.7.5 and is thus a derivative // work thereof. 
This software comes with ABSOLUTELY NO WARRANTY from Data @@ -29,7 +29,7 @@ thisConfig = processConfig(jsonSlurper.parseText('''{ "functionality" : { "name" : "cellranger_mkfastq", "namespace" : "demux", - "version" : "0.12.3", + "version" : "0.12.4", "authors" : [ { "name" : "Angela Oliveira Pisco", @@ -298,9 +298,9 @@ thisConfig = processConfig(jsonSlurper.parseText('''{ "platform" : "nextflow", "output" : "/home/runner/work/openpipeline/openpipeline/target/nextflow/demux/cellranger_mkfastq", "viash_version" : "0.7.5", - "git_commit" : "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f", + "git_commit" : "a075b9f384e200b357c4c85801062a980ddb3383", "git_remote" : "https://github.com/openpipelines-bio/openpipeline", - "git_tag" : "0.12.2-3-g827d483cf7" + "git_tag" : "0.12.3-3-ga075b9f384" } }''')) diff --git a/target/nextflow/demux/cellranger_mkfastq/nextflow.config b/target/nextflow/demux/cellranger_mkfastq/nextflow.config index 8b66f628cc6..1d752ebdeeb 100644 --- a/target/nextflow/demux/cellranger_mkfastq/nextflow.config +++ b/target/nextflow/demux/cellranger_mkfastq/nextflow.config @@ -2,7 +2,7 @@ manifest { name = 'cellranger_mkfastq' mainScript = 'main.nf' nextflowVersion = '!>=20.12.1-edge' - version = '0.12.3' + version = '0.12.4' description = 'Demultiplex raw sequencing data' author = 'Angela Oliveira Pisco, Samuel D\'Souza, Robrecht Cannoodt' } diff --git a/target/nextflow/demux/cellranger_mkfastq/nextflow_schema.json b/target/nextflow/demux/cellranger_mkfastq/nextflow_schema.json index 3f9dc4ee76d..9da67b57341 100644 --- a/target/nextflow/demux/cellranger_mkfastq/nextflow_schema.json +++ b/target/nextflow/demux/cellranger_mkfastq/nextflow_schema.json @@ -1,71 +1,102 @@ { - "$schema": "http://json-schema.org/draft-07/schema", - "title": "cellranger_mkfastq", - "description": "Demultiplex raw sequencing data", +"$schema": "http://json-schema.org/draft-07/schema", +"title": "cellranger_mkfastq", +"description": "Demultiplex raw sequencing data", +"type": "object", +"definitions": { + + + + "arguments" : { + "title": "Arguments", "type": "object", - "definitions": { - "arguments" : { - "title": "Arguments", - "type": "object", - "description": "No description", - "properties": { - - "input": { - "type": "string", - "description": "Type: `file`, required, example: `/path/to/bcl`. Path to the (untarred) BCL files", - "help_text": "Type: `file`, required, example: `/path/to/bcl`. Path to the (untarred) BCL files. Expects \u0027RunParameters.xml\u0027 at \u0027./\u0027." - }, - - "sample_sheet": { - "type": "string", - "description": "Type: `file`, required, example: `SampleSheet.csv`. The path to the sample sheet", - "help_text": "Type: `file`, required, example: `SampleSheet.csv`. The path to the sample sheet." - }, - - "output": { - "type": "string", - "description": "Type: `file`, required, default: `$id.$key.output.output`, example: `/path/to/output`. The folder to store the demux results", - "help_text": "Type: `file`, required, default: `$id.$key.output.output`, example: `/path/to/output`. The folder to store the demux results", - "default": "$id.$key.output.output" - }, - - "reports": { - "type": "string", - "description": "Type: `file`, default: `$id.$key.reports.reports`, example: `reports_dir`. Reports directory", - "help_text": "Type: `file`, default: `$id.$key.reports.reports`, example: `reports_dir`. 
Reports directory", - "default": "$id.$key.reports.reports" - } - - } - }, - "nextflow input-output arguments" : { - "title": "Nextflow input-output arguments", - "type": "object", - "description": "Input/output parameters for Nextflow itself. Please note that both publishDir and publish_dir are supported but at least one has to be configured.", - "properties": { - - "publish_dir": { - "type": "string", - "description": "Type: `string`, required, example: `output/`. Path to an output directory", - "help_text": "Type: `string`, required, example: `output/`. Path to an output directory." - }, - - "param_list": { - "type": "string", - "description": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel", - "help_text": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel. A `param_list` can either be a list of maps, a csv file, a json file, a yaml file, or simply a yaml blob.\n\n* A list of maps (as-is) where the keys of each map corresponds to the arguments of the pipeline. Example: in a `nextflow.config` file: `param_list: [ [\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027], [\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027] ]`.\n* A csv file should have column names which correspond to the different arguments of this pipeline. Example: `--param_list data.csv` with columns `id,input`.\n* A json or a yaml file should be a list of maps, each of which has keys corresponding to the arguments of the pipeline. Example: `--param_list data.json` with contents `[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]`.\n* A yaml blob can also be passed directly as a string. Example: `--param_list \"[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]\"`.\n\nWhen passing a csv, json or yaml file, relative path names are relativized to the location of the parameter file. No relativation is performed when `param_list` is a list of maps (as-is) or a yaml blob.", - "hidden": true - } - - } - } + "description": "No description", + "properties": { + + + "input": { + "type": + "string", + "description": "Type: `file`, required, example: `/path/to/bcl`. Path to the (untarred) BCL files", + "help_text": "Type: `file`, required, example: `/path/to/bcl`. Path to the (untarred) BCL files. Expects \u0027RunParameters.xml\u0027 at \u0027./\u0027." + + } + + + , + "sample_sheet": { + "type": + "string", + "description": "Type: `file`, required, example: `SampleSheet.csv`. The path to the sample sheet", + "help_text": "Type: `file`, required, example: `SampleSheet.csv`. The path to the sample sheet." + + } + + + , + "output": { + "type": + "string", + "description": "Type: `file`, required, default: `$id.$key.output.output`, example: `/path/to/output`. The folder to store the demux results", + "help_text": "Type: `file`, required, default: `$id.$key.output.output`, example: `/path/to/output`. The folder to store the demux results" + , + "default": "$id.$key.output.output" + } + + + , + "reports": { + "type": + "string", + "description": "Type: `file`, default: `$id.$key.reports.reports`, example: `reports_dir`. Reports directory", + "help_text": "Type: `file`, default: `$id.$key.reports.reports`, example: `reports_dir`. 
Reports directory" + , + "default": "$id.$key.reports.reports" + } + + +} +}, + + + "nextflow input-output arguments" : { + "title": "Nextflow input-output arguments", + "type": "object", + "description": "Input/output parameters for Nextflow itself. Please note that both publishDir and publish_dir are supported but at least one has to be configured.", + "properties": { + + + "publish_dir": { + "type": + "string", + "description": "Type: `string`, required, example: `output/`. Path to an output directory", + "help_text": "Type: `string`, required, example: `output/`. Path to an output directory." + + } + + + , + "param_list": { + "type": + "string", + "description": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel", + "help_text": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel. A `param_list` can either be a list of maps, a csv file, a json file, a yaml file, or simply a yaml blob.\n\n* A list of maps (as-is) where the keys of each map corresponds to the arguments of the pipeline. Example: in a `nextflow.config` file: `param_list: [ [\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027], [\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027] ]`.\n* A csv file should have column names which correspond to the different arguments of this pipeline. Example: `--param_list data.csv` with columns `id,input`.\n* A json or a yaml file should be a list of maps, each of which has keys corresponding to the arguments of the pipeline. Example: `--param_list data.json` with contents `[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]`.\n* A yaml blob can also be passed directly as a string. Example: `--param_list \"[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]\"`.\n\nWhen passing a csv, json or yaml file, relative path names are relativized to the location of the parameter file. 
No relativation is performed when `param_list` is a list of maps (as-is) or a yaml blob.", + "hidden": true + + } + + +} +} +}, +"allOf": [ + + { + "$ref": "#/definitions/arguments" }, - "allOf": [ - { - "$ref": "#/definitions/arguments" - }, - { - "$ref": "#/definitions/nextflow input-output arguments" - } - ] + + { + "$ref": "#/definitions/nextflow input-output arguments" + } +] } diff --git a/target/nextflow/dimred/pca/.config.vsh.yaml b/target/nextflow/dimred/pca/.config.vsh.yaml index 37ef15ac31d..f37a549b4db 100644 --- a/target/nextflow/dimred/pca/.config.vsh.yaml +++ b/target/nextflow/dimred/pca/.config.vsh.yaml @@ -1,7 +1,7 @@ functionality: name: "pca" namespace: "dimred" - version: "0.12.3" + version: "0.12.4" authors: - name: "Dries De Maeyer" roles: @@ -248,6 +248,6 @@ info: output: "/home/runner/work/openpipeline/openpipeline/target/nextflow/dimred/pca" executable: "/home/runner/work/openpipeline/openpipeline/target/nextflow/dimred/pca/pca" viash_version: "0.7.5" - git_commit: "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" + git_commit: "a075b9f384e200b357c4c85801062a980ddb3383" git_remote: "https://github.com/openpipelines-bio/openpipeline" - git_tag: "0.12.2-3-g827d483cf7" + git_tag: "0.12.3-3-ga075b9f384" diff --git a/target/nextflow/dimred/pca/main.nf b/target/nextflow/dimred/pca/main.nf index 4a6f2229c27..843c510c5c8 100644 --- a/target/nextflow/dimred/pca/main.nf +++ b/target/nextflow/dimred/pca/main.nf @@ -1,4 +1,4 @@ -// pca 0.12.3 +// pca 0.12.4 // // This wrapper script is auto-generated by viash 0.7.5 and is thus a derivative // work thereof. This software comes with ABSOLUTELY NO WARRANTY from Data @@ -27,7 +27,7 @@ thisConfig = processConfig(jsonSlurper.parseText('''{ "functionality" : { "name" : "pca", "namespace" : "dimred", - "version" : "0.12.3", + "version" : "0.12.4", "authors" : [ { "name" : "Dries De Maeyer", @@ -337,9 +337,9 @@ thisConfig = processConfig(jsonSlurper.parseText('''{ "platform" : "nextflow", "output" : "/home/runner/work/openpipeline/openpipeline/target/nextflow/dimred/pca", "viash_version" : "0.7.5", - "git_commit" : "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f", + "git_commit" : "a075b9f384e200b357c4c85801062a980ddb3383", "git_remote" : "https://github.com/openpipelines-bio/openpipeline", - "git_tag" : "0.12.2-3-g827d483cf7" + "git_tag" : "0.12.3-3-ga075b9f384" } }''')) diff --git a/target/nextflow/dimred/pca/nextflow.config b/target/nextflow/dimred/pca/nextflow.config index a5da02e0032..4b6d55c057f 100644 --- a/target/nextflow/dimred/pca/nextflow.config +++ b/target/nextflow/dimred/pca/nextflow.config @@ -2,7 +2,7 @@ manifest { name = 'pca' mainScript = 'main.nf' nextflowVersion = '!>=20.12.1-edge' - version = '0.12.3' + version = '0.12.4' description = 'Computes PCA coordinates, loadings and variance decomposition. Uses the implementation of scikit-learn [Pedregosa11].\n' author = 'Dries De Maeyer' } diff --git a/target/nextflow/dimred/pca/nextflow_schema.json b/target/nextflow/dimred/pca/nextflow_schema.json index 9b38ace3170..b21447962ed 100644 --- a/target/nextflow/dimred/pca/nextflow_schema.json +++ b/target/nextflow/dimred/pca/nextflow_schema.json @@ -1,119 +1,178 @@ { - "$schema": "http://json-schema.org/draft-07/schema", - "title": "pca", - "description": "Computes PCA coordinates, loadings and variance decomposition. Uses the implementation of scikit-learn [Pedregosa11].\n", +"$schema": "http://json-schema.org/draft-07/schema", +"title": "pca", +"description": "Computes PCA coordinates, loadings and variance decomposition. 
Uses the implementation of scikit-learn [Pedregosa11].\n", +"type": "object", +"definitions": { + + + + "arguments" : { + "title": "Arguments", "type": "object", - "definitions": { - "arguments" : { - "title": "Arguments", - "type": "object", - "description": "No description", - "properties": { - - "input": { - "type": "string", - "description": "Type: `file`, required, example: `input.h5mu`. Input h5mu file", - "help_text": "Type: `file`, required, example: `input.h5mu`. Input h5mu file" - }, - - "modality": { - "type": "string", - "description": "Type: `string`, default: `rna`. ", - "help_text": "Type: `string`, default: `rna`. ", - "default": "rna" - }, - - "layer": { - "type": "string", - "description": "Type: `string`. Use specified layer for expression values instead of the ", - "help_text": "Type: `string`. Use specified layer for expression values instead of the .X object from the modality." - }, - - "var_input": { - "type": "string", - "description": "Type: `string`, example: `filter_with_hvg`. Column name in ", - "help_text": "Type: `string`, example: `filter_with_hvg`. Column name in .var matrix that will be used to select which genes to run the PCA on." - }, - - "output": { - "type": "string", - "description": "Type: `file`, required, default: `$id.$key.output.h5mu`, example: `output.h5mu`. Output h5mu file", - "help_text": "Type: `file`, required, default: `$id.$key.output.h5mu`, example: `output.h5mu`. Output h5mu file.", - "default": "$id.$key.output.h5mu" - }, - - "output_compression": { - "type": "string", - "description": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. The compression format to be used on the output h5mu object", - "help_text": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. The compression format to be used on the output h5mu object.", - "enum": ["gzip", "lzf"] + "description": "No description", + "properties": { + + + "input": { + "type": + "string", + "description": "Type: `file`, required, example: `input.h5mu`. Input h5mu file", + "help_text": "Type: `file`, required, example: `input.h5mu`. Input h5mu file" - }, - - "obsm_output": { - "type": "string", - "description": "Type: `string`, default: `X_pca`. In which ", - "help_text": "Type: `string`, default: `X_pca`. In which .obsm slot to store the resulting embedding.", - "default": "X_pca" - }, - - "varm_output": { - "type": "string", - "description": "Type: `string`, default: `pca_loadings`. In which ", - "help_text": "Type: `string`, default: `pca_loadings`. In which .varm slot to store the resulting loadings matrix.", - "default": "pca_loadings" - }, - - "uns_output": { - "type": "string", - "description": "Type: `string`, default: `pca_variance`. In which ", - "help_text": "Type: `string`, default: `pca_variance`. In which .uns slot to store the resulting variance objects.", - "default": "pca_variance" - }, - - "num_components": { - "type": "integer", - "description": "Type: `integer`, example: `25`. Number of principal components to compute", - "help_text": "Type: `integer`, example: `25`. Number of principal components to compute. Defaults to 50, or 1 - minimum dimension size of selected representation." - }, - - "overwrite": { - "type": "boolean", - "description": "Type: `boolean_true`, default: `false`. Allow overwriting ", - "help_text": "Type: `boolean_true`, default: `false`. 
Allow overwriting .obsm, .varm and .uns slots.", - "default": "False" - } - - } - }, - "nextflow input-output arguments" : { - "title": "Nextflow input-output arguments", - "type": "object", - "description": "Input/output parameters for Nextflow itself. Please note that both publishDir and publish_dir are supported but at least one has to be configured.", - "properties": { - - "publish_dir": { - "type": "string", - "description": "Type: `string`, required, example: `output/`. Path to an output directory", - "help_text": "Type: `string`, required, example: `output/`. Path to an output directory." - }, - - "param_list": { - "type": "string", - "description": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel", - "help_text": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel. A `param_list` can either be a list of maps, a csv file, a json file, a yaml file, or simply a yaml blob.\n\n* A list of maps (as-is) where the keys of each map corresponds to the arguments of the pipeline. Example: in a `nextflow.config` file: `param_list: [ [\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027], [\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027] ]`.\n* A csv file should have column names which correspond to the different arguments of this pipeline. Example: `--param_list data.csv` with columns `id,input`.\n* A json or a yaml file should be a list of maps, each of which has keys corresponding to the arguments of the pipeline. Example: `--param_list data.json` with contents `[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]`.\n* A yaml blob can also be passed directly as a string. Example: `--param_list \"[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]\"`.\n\nWhen passing a csv, json or yaml file, relative path names are relativized to the location of the parameter file. No relativation is performed when `param_list` is a list of maps (as-is) or a yaml blob.", - "hidden": true - } - - } - } + } + + + , + "modality": { + "type": + "string", + "description": "Type: `string`, default: `rna`. ", + "help_text": "Type: `string`, default: `rna`. " + , + "default": "rna" + } + + + , + "layer": { + "type": + "string", + "description": "Type: `string`. Use specified layer for expression values instead of the ", + "help_text": "Type: `string`. Use specified layer for expression values instead of the .X object from the modality." + + } + + + , + "var_input": { + "type": + "string", + "description": "Type: `string`, example: `filter_with_hvg`. Column name in ", + "help_text": "Type: `string`, example: `filter_with_hvg`. Column name in .var matrix that will be used to select which genes to run the PCA on." + + } + + + , + "output": { + "type": + "string", + "description": "Type: `file`, required, default: `$id.$key.output.h5mu`, example: `output.h5mu`. Output h5mu file", + "help_text": "Type: `file`, required, default: `$id.$key.output.h5mu`, example: `output.h5mu`. Output h5mu file." + , + "default": "$id.$key.output.h5mu" + } + + + , + "output_compression": { + "type": + "string", + "description": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. 
The compression format to be used on the output h5mu object", + "help_text": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. The compression format to be used on the output h5mu object.", + "enum": ["gzip", "lzf"] + + + } + + + , + "obsm_output": { + "type": + "string", + "description": "Type: `string`, default: `X_pca`. In which ", + "help_text": "Type: `string`, default: `X_pca`. In which .obsm slot to store the resulting embedding." + , + "default": "X_pca" + } + + + , + "varm_output": { + "type": + "string", + "description": "Type: `string`, default: `pca_loadings`. In which ", + "help_text": "Type: `string`, default: `pca_loadings`. In which .varm slot to store the resulting loadings matrix." + , + "default": "pca_loadings" + } + + + , + "uns_output": { + "type": + "string", + "description": "Type: `string`, default: `pca_variance`. In which ", + "help_text": "Type: `string`, default: `pca_variance`. In which .uns slot to store the resulting variance objects." + , + "default": "pca_variance" + } + + + , + "num_components": { + "type": + "integer", + "description": "Type: `integer`, example: `25`. Number of principal components to compute", + "help_text": "Type: `integer`, example: `25`. Number of principal components to compute. Defaults to 50, or 1 - minimum dimension size of selected representation." + + } + + + , + "overwrite": { + "type": + "boolean", + "description": "Type: `boolean_true`, default: `false`. Allow overwriting ", + "help_text": "Type: `boolean_true`, default: `false`. Allow overwriting .obsm, .varm and .uns slots." + , + "default": "False" + } + + +} +}, + + + "nextflow input-output arguments" : { + "title": "Nextflow input-output arguments", + "type": "object", + "description": "Input/output parameters for Nextflow itself. Please note that both publishDir and publish_dir are supported but at least one has to be configured.", + "properties": { + + + "publish_dir": { + "type": + "string", + "description": "Type: `string`, required, example: `output/`. Path to an output directory", + "help_text": "Type: `string`, required, example: `output/`. Path to an output directory." + + } + + + , + "param_list": { + "type": + "string", + "description": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel", + "help_text": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel. A `param_list` can either be a list of maps, a csv file, a json file, a yaml file, or simply a yaml blob.\n\n* A list of maps (as-is) where the keys of each map corresponds to the arguments of the pipeline. Example: in a `nextflow.config` file: `param_list: [ [\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027], [\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027] ]`.\n* A csv file should have column names which correspond to the different arguments of this pipeline. Example: `--param_list data.csv` with columns `id,input`.\n* A json or a yaml file should be a list of maps, each of which has keys corresponding to the arguments of the pipeline. Example: `--param_list data.json` with contents `[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]`.\n* A yaml blob can also be passed directly as a string. 
Example: `--param_list \"[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]\"`.\n\nWhen passing a csv, json or yaml file, relative path names are relativized to the location of the parameter file. No relativation is performed when `param_list` is a list of maps (as-is) or a yaml blob.", + "hidden": true + + } + + +} +} +}, +"allOf": [ + + { + "$ref": "#/definitions/arguments" }, - "allOf": [ - { - "$ref": "#/definitions/arguments" - }, - { - "$ref": "#/definitions/nextflow input-output arguments" - } - ] + + { + "$ref": "#/definitions/nextflow input-output arguments" + } +] } diff --git a/target/nextflow/dimred/umap/.config.vsh.yaml b/target/nextflow/dimred/umap/.config.vsh.yaml index ea4349c006f..f31d298d501 100644 --- a/target/nextflow/dimred/umap/.config.vsh.yaml +++ b/target/nextflow/dimred/umap/.config.vsh.yaml @@ -1,7 +1,7 @@ functionality: name: "umap" namespace: "dimred" - version: "0.12.3" + version: "0.12.4" authors: - name: "Dries De Maeyer" roles: @@ -307,6 +307,6 @@ info: output: "/home/runner/work/openpipeline/openpipeline/target/nextflow/dimred/umap" executable: "/home/runner/work/openpipeline/openpipeline/target/nextflow/dimred/umap/umap" viash_version: "0.7.5" - git_commit: "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" + git_commit: "a075b9f384e200b357c4c85801062a980ddb3383" git_remote: "https://github.com/openpipelines-bio/openpipeline" - git_tag: "0.12.2-3-g827d483cf7" + git_tag: "0.12.3-3-ga075b9f384" diff --git a/target/nextflow/dimred/umap/main.nf b/target/nextflow/dimred/umap/main.nf index 6938628bc36..5d814fd2281 100644 --- a/target/nextflow/dimred/umap/main.nf +++ b/target/nextflow/dimred/umap/main.nf @@ -1,4 +1,4 @@ -// umap 0.12.3 +// umap 0.12.4 // // This wrapper script is auto-generated by viash 0.7.5 and is thus a derivative // work thereof. This software comes with ABSOLUTELY NO WARRANTY from Data @@ -27,7 +27,7 @@ thisConfig = processConfig(jsonSlurper.parseText('''{ "functionality" : { "name" : "umap", "namespace" : "dimred", - "version" : "0.12.3", + "version" : "0.12.4", "authors" : [ { "name" : "Dries De Maeyer", @@ -394,9 +394,9 @@ thisConfig = processConfig(jsonSlurper.parseText('''{ "platform" : "nextflow", "output" : "/home/runner/work/openpipeline/openpipeline/target/nextflow/dimred/umap", "viash_version" : "0.7.5", - "git_commit" : "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f", + "git_commit" : "a075b9f384e200b357c4c85801062a980ddb3383", "git_remote" : "https://github.com/openpipelines-bio/openpipeline", - "git_tag" : "0.12.2-3-g827d483cf7" + "git_tag" : "0.12.3-3-ga075b9f384" } }''')) diff --git a/target/nextflow/dimred/umap/nextflow.config b/target/nextflow/dimred/umap/nextflow.config index 413c403a89a..01da52cb467 100644 --- a/target/nextflow/dimred/umap/nextflow.config +++ b/target/nextflow/dimred/umap/nextflow.config @@ -2,7 +2,7 @@ manifest { name = 'umap' mainScript = 'main.nf' nextflowVersion = '!>=20.12.1-edge' - version = '0.12.3' + version = '0.12.4' description = 'UMAP (Uniform Manifold Approximation and Projection) is a manifold learning technique suitable for visualizing high-dimensional data. Besides tending to be faster than tSNE, it optimizes the embedding such that it best reflects the topology of the data, which we represent throughout Scanpy using a neighborhood graph. 
tSNE, by contrast, optimizes the distribution of nearest-neighbor distances in the embedding such that these best match the distribution of distances in the high-dimensional space. We use the implementation of umap-learn [McInnes18]. For a few comparisons of UMAP with tSNE, see this preprint.\n' author = 'Dries De Maeyer' } diff --git a/target/nextflow/dimred/umap/nextflow_schema.json b/target/nextflow/dimred/umap/nextflow_schema.json index 452fd29a65d..b333a0fe17d 100644 --- a/target/nextflow/dimred/umap/nextflow_schema.json +++ b/target/nextflow/dimred/umap/nextflow_schema.json @@ -1,164 +1,241 @@ { - "$schema": "http://json-schema.org/draft-07/schema", - "title": "umap", - "description": "UMAP (Uniform Manifold Approximation and Projection) is a manifold learning technique suitable for visualizing high-dimensional data. Besides tending to be faster than tSNE, it optimizes the embedding such that it best reflects the topology of the data, which we represent throughout Scanpy using a neighborhood graph. tSNE, by contrast, optimizes the distribution of nearest-neighbor distances in the embedding such that these best match the distribution of distances in the high-dimensional space. We use the implementation of umap-learn [McInnes18]. For a few comparisons of UMAP with tSNE, see this preprint.\n", +"$schema": "http://json-schema.org/draft-07/schema", +"title": "umap", +"description": "UMAP (Uniform Manifold Approximation and Projection) is a manifold learning technique suitable for visualizing high-dimensional data. Besides tending to be faster than tSNE, it optimizes the embedding such that it best reflects the topology of the data, which we represent throughout Scanpy using a neighborhood graph. tSNE, by contrast, optimizes the distribution of nearest-neighbor distances in the embedding such that these best match the distribution of distances in the high-dimensional space. We use the implementation of umap-learn [McInnes18]. For a few comparisons of UMAP with tSNE, see this preprint.\n", +"type": "object", +"definitions": { + + + + "inputs" : { + "title": "Inputs", "type": "object", - "definitions": { - "inputs" : { - "title": "Inputs", - "type": "object", - "description": "No description", - "properties": { - - "input": { - "type": "string", - "description": "Type: `file`, required, example: `input.h5mu`. Input h5mu file", - "help_text": "Type: `file`, required, example: `input.h5mu`. Input h5mu file" - }, - - "modality": { - "type": "string", - "description": "Type: `string`, default: `rna`. ", - "help_text": "Type: `string`, default: `rna`. ", - "default": "rna" - }, - - "uns_neighbors": { - "type": "string", - "description": "Type: `string`, default: `neighbors`. The `", - "help_text": "Type: `string`, default: `neighbors`. The `.uns` neighbors slot as output by the `find_neighbors` component.", - "default": "neighbors" - } - - } - }, - "outputs" : { - "title": "Outputs", - "type": "object", - "description": "No description", - "properties": { - - "output": { - "type": "string", - "description": "Type: `file`, required, default: `$id.$key.output.h5mu`, example: `output.h5mu`. Output h5mu file", - "help_text": "Type: `file`, required, default: `$id.$key.output.h5mu`, example: `output.h5mu`. Output h5mu file.", - "default": "$id.$key.output.h5mu" - }, - - "output_compression": { - "type": "string", - "description": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. 
The compression format to be used on the output h5mu object", - "help_text": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. The compression format to be used on the output h5mu object.", - "enum": ["gzip", "lzf"] + "description": "No description", + "properties": { + + + "input": { + "type": + "string", + "description": "Type: `file`, required, example: `input.h5mu`. Input h5mu file", + "help_text": "Type: `file`, required, example: `input.h5mu`. Input h5mu file" - }, - - "obsm_output": { - "type": "string", - "description": "Type: `string`, default: `umap`. The pre/postfix under which to store the UMAP results", - "help_text": "Type: `string`, default: `umap`. The pre/postfix under which to store the UMAP results.", - "default": "umap" - } - - } - }, - "arguments" : { - "title": "Arguments", - "type": "object", - "description": "No description", - "properties": { - - "min_dist": { - "type": "number", - "description": "Type: `double`, default: `0.5`. The effective minimum distance between embedded points", - "help_text": "Type: `double`, default: `0.5`. The effective minimum distance between embedded points. Smaller values will result in a more clustered/clumped embedding where nearby points on the manifold are drawn closer together, while larger values will result on a more even dispersal of points. The value should be set relative to the spread value, which determines the scale at which embedded points will be spread out.", - "default": "0.5" - }, - - "spread": { - "type": "number", - "description": "Type: `double`, default: `1.0`. The effective scale of embedded points", - "help_text": "Type: `double`, default: `1.0`. The effective scale of embedded points. In combination with `min_dist` this determines how clustered/clumped the embedded points are.", - "default": "1.0" - }, - - "num_components": { - "type": "integer", - "description": "Type: `integer`, default: `2`. The number of dimensions of the embedding", - "help_text": "Type: `integer`, default: `2`. The number of dimensions of the embedding.", - "default": "2" - }, - - "max_iter": { - "type": "integer", - "description": "Type: `integer`. The number of iterations (epochs) of the optimization", - "help_text": "Type: `integer`. The number of iterations (epochs) of the optimization. Called `n_epochs` in the original UMAP. Default is set to 500 if neighbors[\u0027connectivities\u0027].shape[0] \u003c= 10000, else 200." - }, - - "alpha": { - "type": "number", - "description": "Type: `double`, default: `1.0`. The initial learning rate for the embedding optimization", - "help_text": "Type: `double`, default: `1.0`. The initial learning rate for the embedding optimization.", - "default": "1.0" - }, - - "gamma": { - "type": "number", - "description": "Type: `double`, default: `1.0`. Weighting applied to negative samples in low dimensional embedding optimization", - "help_text": "Type: `double`, default: `1.0`. Weighting applied to negative samples in low dimensional embedding optimization. Values higher than one will result in greater weight being given to negative samples.", - "default": "1.0" - }, - - "negative_sample_rate": { - "type": "integer", - "description": "Type: `integer`, default: `5`. The number of negative edge/1-simplex samples to use per positive edge/1-simplex sample in optimizing the low dimensional embedding", - "help_text": "Type: `integer`, default: `5`. 
The number of negative edge/1-simplex samples to use per positive edge/1-simplex sample in optimizing the low dimensional embedding.", - "default": "5" - }, - - "init_pos": { - "type": "string", - "description": "Type: `string`, default: `spectral`. How to initialize the low dimensional embedding", - "help_text": "Type: `string`, default: `spectral`. How to initialize the low dimensional embedding. Called `init` in the original UMAP. Options are:\n\n* Any key from `.obsm`\n* `\u0027paga\u0027`: positions from `paga()`\n* `\u0027spectral\u0027`: use a spectral embedding of the graph\n* `\u0027random\u0027`: assign initial embedding positions at random.\n", - "default": "spectral" - } - - } - }, - "nextflow input-output arguments" : { - "title": "Nextflow input-output arguments", - "type": "object", - "description": "Input/output parameters for Nextflow itself. Please note that both publishDir and publish_dir are supported but at least one has to be configured.", - "properties": { - - "publish_dir": { - "type": "string", - "description": "Type: `string`, required, example: `output/`. Path to an output directory", - "help_text": "Type: `string`, required, example: `output/`. Path to an output directory." - }, - - "param_list": { - "type": "string", - "description": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel", - "help_text": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel. A `param_list` can either be a list of maps, a csv file, a json file, a yaml file, or simply a yaml blob.\n\n* A list of maps (as-is) where the keys of each map corresponds to the arguments of the pipeline. Example: in a `nextflow.config` file: `param_list: [ [\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027], [\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027] ]`.\n* A csv file should have column names which correspond to the different arguments of this pipeline. Example: `--param_list data.csv` with columns `id,input`.\n* A json or a yaml file should be a list of maps, each of which has keys corresponding to the arguments of the pipeline. Example: `--param_list data.json` with contents `[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]`.\n* A yaml blob can also be passed directly as a string. Example: `--param_list \"[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]\"`.\n\nWhen passing a csv, json or yaml file, relative path names are relativized to the location of the parameter file. No relativation is performed when `param_list` is a list of maps (as-is) or a yaml blob.", - "hidden": true - } - - } - } + } + + + , + "modality": { + "type": + "string", + "description": "Type: `string`, default: `rna`. ", + "help_text": "Type: `string`, default: `rna`. " + , + "default": "rna" + } + + + , + "uns_neighbors": { + "type": + "string", + "description": "Type: `string`, default: `neighbors`. The `", + "help_text": "Type: `string`, default: `neighbors`. The `.uns` neighbors slot as output by the `find_neighbors` component." 
+ , + "default": "neighbors" + } + + +} +}, + + + "outputs" : { + "title": "Outputs", + "type": "object", + "description": "No description", + "properties": { + + + "output": { + "type": + "string", + "description": "Type: `file`, required, default: `$id.$key.output.h5mu`, example: `output.h5mu`. Output h5mu file", + "help_text": "Type: `file`, required, default: `$id.$key.output.h5mu`, example: `output.h5mu`. Output h5mu file." + , + "default": "$id.$key.output.h5mu" + } + + + , + "output_compression": { + "type": + "string", + "description": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. The compression format to be used on the output h5mu object", + "help_text": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. The compression format to be used on the output h5mu object.", + "enum": ["gzip", "lzf"] + + + } + + + , + "obsm_output": { + "type": + "string", + "description": "Type: `string`, default: `umap`. The pre/postfix under which to store the UMAP results", + "help_text": "Type: `string`, default: `umap`. The pre/postfix under which to store the UMAP results." + , + "default": "umap" + } + + +} +}, + + + "arguments" : { + "title": "Arguments", + "type": "object", + "description": "No description", + "properties": { + + + "min_dist": { + "type": + "number", + "description": "Type: `double`, default: `0.5`. The effective minimum distance between embedded points", + "help_text": "Type: `double`, default: `0.5`. The effective minimum distance between embedded points. Smaller values will result in a more clustered/clumped embedding where nearby points on the manifold are drawn closer together, while larger values will result on a more even dispersal of points. The value should be set relative to the spread value, which determines the scale at which embedded points will be spread out." + , + "default": "0.5" + } + + + , + "spread": { + "type": + "number", + "description": "Type: `double`, default: `1.0`. The effective scale of embedded points", + "help_text": "Type: `double`, default: `1.0`. The effective scale of embedded points. In combination with `min_dist` this determines how clustered/clumped the embedded points are." + , + "default": "1.0" + } + + + , + "num_components": { + "type": + "integer", + "description": "Type: `integer`, default: `2`. The number of dimensions of the embedding", + "help_text": "Type: `integer`, default: `2`. The number of dimensions of the embedding." + , + "default": "2" + } + + + , + "max_iter": { + "type": + "integer", + "description": "Type: `integer`. The number of iterations (epochs) of the optimization", + "help_text": "Type: `integer`. The number of iterations (epochs) of the optimization. Called `n_epochs` in the original UMAP. Default is set to 500 if neighbors[\u0027connectivities\u0027].shape[0] \u003c= 10000, else 200." + + } + + + , + "alpha": { + "type": + "number", + "description": "Type: `double`, default: `1.0`. The initial learning rate for the embedding optimization", + "help_text": "Type: `double`, default: `1.0`. The initial learning rate for the embedding optimization." + , + "default": "1.0" + } + + + , + "gamma": { + "type": + "number", + "description": "Type: `double`, default: `1.0`. Weighting applied to negative samples in low dimensional embedding optimization", + "help_text": "Type: `double`, default: `1.0`. Weighting applied to negative samples in low dimensional embedding optimization. Values higher than one will result in greater weight being given to negative samples." 
+ , + "default": "1.0" + } + + + , + "negative_sample_rate": { + "type": + "integer", + "description": "Type: `integer`, default: `5`. The number of negative edge/1-simplex samples to use per positive edge/1-simplex sample in optimizing the low dimensional embedding", + "help_text": "Type: `integer`, default: `5`. The number of negative edge/1-simplex samples to use per positive edge/1-simplex sample in optimizing the low dimensional embedding." + , + "default": "5" + } + + + , + "init_pos": { + "type": + "string", + "description": "Type: `string`, default: `spectral`. How to initialize the low dimensional embedding", + "help_text": "Type: `string`, default: `spectral`. How to initialize the low dimensional embedding. Called `init` in the original UMAP. Options are:\n\n* Any key from `.obsm`\n* `\u0027paga\u0027`: positions from `paga()`\n* `\u0027spectral\u0027`: use a spectral embedding of the graph\n* `\u0027random\u0027`: assign initial embedding positions at random.\n" + , + "default": "spectral" + } + + +} +}, + + + "nextflow input-output arguments" : { + "title": "Nextflow input-output arguments", + "type": "object", + "description": "Input/output parameters for Nextflow itself. Please note that both publishDir and publish_dir are supported but at least one has to be configured.", + "properties": { + + + "publish_dir": { + "type": + "string", + "description": "Type: `string`, required, example: `output/`. Path to an output directory", + "help_text": "Type: `string`, required, example: `output/`. Path to an output directory." + + } + + + , + "param_list": { + "type": + "string", + "description": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel", + "help_text": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel. A `param_list` can either be a list of maps, a csv file, a json file, a yaml file, or simply a yaml blob.\n\n* A list of maps (as-is) where the keys of each map corresponds to the arguments of the pipeline. Example: in a `nextflow.config` file: `param_list: [ [\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027], [\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027] ]`.\n* A csv file should have column names which correspond to the different arguments of this pipeline. Example: `--param_list data.csv` with columns `id,input`.\n* A json or a yaml file should be a list of maps, each of which has keys corresponding to the arguments of the pipeline. Example: `--param_list data.json` with contents `[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]`.\n* A yaml blob can also be passed directly as a string. Example: `--param_list \"[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]\"`.\n\nWhen passing a csv, json or yaml file, relative path names are relativized to the location of the parameter file. 
No relativation is performed when `param_list` is a list of maps (as-is) or a yaml blob.", + "hidden": true + + } + + +} +} +}, +"allOf": [ + + { + "$ref": "#/definitions/inputs" + }, + + { + "$ref": "#/definitions/outputs" + }, + + { + "$ref": "#/definitions/arguments" }, - "allOf": [ - { - "$ref": "#/definitions/inputs" - }, - { - "$ref": "#/definitions/outputs" - }, - { - "$ref": "#/definitions/arguments" - }, - { - "$ref": "#/definitions/nextflow input-output arguments" - } - ] + + { + "$ref": "#/definitions/nextflow input-output arguments" + } +] } diff --git a/target/nextflow/download/download_file/.config.vsh.yaml b/target/nextflow/download/download_file/.config.vsh.yaml index da764f92807..8516dd1bfda 100644 --- a/target/nextflow/download/download_file/.config.vsh.yaml +++ b/target/nextflow/download/download_file/.config.vsh.yaml @@ -1,7 +1,7 @@ functionality: name: "download_file" namespace: "download" - version: "0.12.3" + version: "0.12.4" authors: - name: "Robrecht Cannoodt" roles: @@ -133,6 +133,6 @@ info: output: "/home/runner/work/openpipeline/openpipeline/target/nextflow/download/download_file" executable: "/home/runner/work/openpipeline/openpipeline/target/nextflow/download/download_file/download_file" viash_version: "0.7.5" - git_commit: "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" + git_commit: "a075b9f384e200b357c4c85801062a980ddb3383" git_remote: "https://github.com/openpipelines-bio/openpipeline" - git_tag: "0.12.2-3-g827d483cf7" + git_tag: "0.12.3-3-ga075b9f384" diff --git a/target/nextflow/download/download_file/main.nf b/target/nextflow/download/download_file/main.nf index 4bb1fd5ef59..eadbf2f921e 100644 --- a/target/nextflow/download/download_file/main.nf +++ b/target/nextflow/download/download_file/main.nf @@ -1,4 +1,4 @@ -// download_file 0.12.3 +// download_file 0.12.4 // // This wrapper script is auto-generated by viash 0.7.5 and is thus a derivative // work thereof. 
This software comes with ABSOLUTELY NO WARRANTY from Data @@ -27,7 +27,7 @@ thisConfig = processConfig(jsonSlurper.parseText('''{ "functionality" : { "name" : "download_file", "namespace" : "download", - "version" : "0.12.3", + "version" : "0.12.4", "authors" : [ { "name" : "Robrecht Cannoodt", @@ -192,9 +192,9 @@ thisConfig = processConfig(jsonSlurper.parseText('''{ "platform" : "nextflow", "output" : "/home/runner/work/openpipeline/openpipeline/target/nextflow/download/download_file", "viash_version" : "0.7.5", - "git_commit" : "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f", + "git_commit" : "a075b9f384e200b357c4c85801062a980ddb3383", "git_remote" : "https://github.com/openpipelines-bio/openpipeline", - "git_tag" : "0.12.2-3-g827d483cf7" + "git_tag" : "0.12.3-3-ga075b9f384" } }''')) diff --git a/target/nextflow/download/download_file/nextflow.config b/target/nextflow/download/download_file/nextflow.config index 39bd7596806..440340e4727 100644 --- a/target/nextflow/download/download_file/nextflow.config +++ b/target/nextflow/download/download_file/nextflow.config @@ -2,7 +2,7 @@ manifest { name = 'download_file' mainScript = 'main.nf' nextflowVersion = '!>=20.12.1-edge' - version = '0.12.3' + version = '0.12.4' description = 'Download a file.\n' author = 'Robrecht Cannoodt' } diff --git a/target/nextflow/download/download_file/nextflow_schema.json b/target/nextflow/download/download_file/nextflow_schema.json index e7510ef7c3b..b4a3b7c48a3 100644 --- a/target/nextflow/download/download_file/nextflow_schema.json +++ b/target/nextflow/download/download_file/nextflow_schema.json @@ -1,65 +1,92 @@ { - "$schema": "http://json-schema.org/draft-07/schema", - "title": "download_file", - "description": "Download a file.\n", +"$schema": "http://json-schema.org/draft-07/schema", +"title": "download_file", +"description": "Download a file.\n", +"type": "object", +"definitions": { + + + + "arguments" : { + "title": "Arguments", "type": "object", - "definitions": { - "arguments" : { - "title": "Arguments", - "type": "object", - "description": "No description", - "properties": { - - "input": { - "type": "string", - "description": "Type: `string`, required, example: `https://cf.10xgenomics.com/samples/cell-exp/3.0.0/pbmc_1k_protein_v3/pbmc_1k_protein_v3_raw_feature_bc_matrix.h5`. URL to a file to download", - "help_text": "Type: `string`, required, example: `https://cf.10xgenomics.com/samples/cell-exp/3.0.0/pbmc_1k_protein_v3/pbmc_1k_protein_v3_raw_feature_bc_matrix.h5`. URL to a file to download." - }, - - "output": { - "type": "string", - "description": "Type: `file`, required, default: `$id.$key.output.h5`, example: `pbmc_1k_protein_v3_raw_feature_bc_matrix.h5`. Path where to store output", - "help_text": "Type: `file`, required, default: `$id.$key.output.h5`, example: `pbmc_1k_protein_v3_raw_feature_bc_matrix.h5`. Path where to store output.", - "default": "$id.$key.output.h5" - }, - - "verbose": { - "type": "boolean", - "description": "Type: `boolean_true`, default: `false`. Increase verbosity", - "help_text": "Type: `boolean_true`, default: `false`. Increase verbosity", - "default": "False" - } - - } - }, - "nextflow input-output arguments" : { - "title": "Nextflow input-output arguments", - "type": "object", - "description": "Input/output parameters for Nextflow itself. Please note that both publishDir and publish_dir are supported but at least one has to be configured.", - "properties": { - - "publish_dir": { - "type": "string", - "description": "Type: `string`, required, example: `output/`. 
Path to an output directory", - "help_text": "Type: `string`, required, example: `output/`. Path to an output directory." - }, - - "param_list": { - "type": "string", - "description": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel", - "help_text": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel. A `param_list` can either be a list of maps, a csv file, a json file, a yaml file, or simply a yaml blob.\n\n* A list of maps (as-is) where the keys of each map corresponds to the arguments of the pipeline. Example: in a `nextflow.config` file: `param_list: [ [\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027], [\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027] ]`.\n* A csv file should have column names which correspond to the different arguments of this pipeline. Example: `--param_list data.csv` with columns `id,input`.\n* A json or a yaml file should be a list of maps, each of which has keys corresponding to the arguments of the pipeline. Example: `--param_list data.json` with contents `[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]`.\n* A yaml blob can also be passed directly as a string. Example: `--param_list \"[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]\"`.\n\nWhen passing a csv, json or yaml file, relative path names are relativized to the location of the parameter file. No relativation is performed when `param_list` is a list of maps (as-is) or a yaml blob.", - "hidden": true - } - - } - } + "description": "No description", + "properties": { + + + "input": { + "type": + "string", + "description": "Type: `string`, required, example: `https://cf.10xgenomics.com/samples/cell-exp/3.0.0/pbmc_1k_protein_v3/pbmc_1k_protein_v3_raw_feature_bc_matrix.h5`. URL to a file to download", + "help_text": "Type: `string`, required, example: `https://cf.10xgenomics.com/samples/cell-exp/3.0.0/pbmc_1k_protein_v3/pbmc_1k_protein_v3_raw_feature_bc_matrix.h5`. URL to a file to download." + + } + + + , + "output": { + "type": + "string", + "description": "Type: `file`, required, default: `$id.$key.output.h5`, example: `pbmc_1k_protein_v3_raw_feature_bc_matrix.h5`. Path where to store output", + "help_text": "Type: `file`, required, default: `$id.$key.output.h5`, example: `pbmc_1k_protein_v3_raw_feature_bc_matrix.h5`. Path where to store output." + , + "default": "$id.$key.output.h5" + } + + + , + "verbose": { + "type": + "boolean", + "description": "Type: `boolean_true`, default: `false`. Increase verbosity", + "help_text": "Type: `boolean_true`, default: `false`. Increase verbosity" + , + "default": "False" + } + + +} +}, + + + "nextflow input-output arguments" : { + "title": "Nextflow input-output arguments", + "type": "object", + "description": "Input/output parameters for Nextflow itself. Please note that both publishDir and publish_dir are supported but at least one has to be configured.", + "properties": { + + + "publish_dir": { + "type": + "string", + "description": "Type: `string`, required, example: `output/`. Path to an output directory", + "help_text": "Type: `string`, required, example: `output/`. Path to an output directory." 
+ + } + + + , + "param_list": { + "type": + "string", + "description": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel", + "help_text": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel. A `param_list` can either be a list of maps, a csv file, a json file, a yaml file, or simply a yaml blob.\n\n* A list of maps (as-is) where the keys of each map corresponds to the arguments of the pipeline. Example: in a `nextflow.config` file: `param_list: [ [\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027], [\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027] ]`.\n* A csv file should have column names which correspond to the different arguments of this pipeline. Example: `--param_list data.csv` with columns `id,input`.\n* A json or a yaml file should be a list of maps, each of which has keys corresponding to the arguments of the pipeline. Example: `--param_list data.json` with contents `[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]`.\n* A yaml blob can also be passed directly as a string. Example: `--param_list \"[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]\"`.\n\nWhen passing a csv, json or yaml file, relative path names are relativized to the location of the parameter file. No relativation is performed when `param_list` is a list of maps (as-is) or a yaml blob.", + "hidden": true + + } + + +} +} +}, +"allOf": [ + + { + "$ref": "#/definitions/arguments" }, - "allOf": [ - { - "$ref": "#/definitions/arguments" - }, - { - "$ref": "#/definitions/nextflow input-output arguments" - } - ] + + { + "$ref": "#/definitions/nextflow input-output arguments" + } +] } diff --git a/target/nextflow/download/sync_test_resources/.config.vsh.yaml b/target/nextflow/download/sync_test_resources/.config.vsh.yaml index 059e4e8040c..a022b436753 100644 --- a/target/nextflow/download/sync_test_resources/.config.vsh.yaml +++ b/target/nextflow/download/sync_test_resources/.config.vsh.yaml @@ -1,7 +1,7 @@ functionality: name: "sync_test_resources" namespace: "download" - version: "0.12.3" + version: "0.12.4" authors: - name: "Robrecht Cannoodt" roles: @@ -165,6 +165,6 @@ info: output: "/home/runner/work/openpipeline/openpipeline/target/nextflow/download/sync_test_resources" executable: "/home/runner/work/openpipeline/openpipeline/target/nextflow/download/sync_test_resources/sync_test_resources" viash_version: "0.7.5" - git_commit: "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" + git_commit: "a075b9f384e200b357c4c85801062a980ddb3383" git_remote: "https://github.com/openpipelines-bio/openpipeline" - git_tag: "0.12.2-3-g827d483cf7" + git_tag: "0.12.3-3-ga075b9f384" diff --git a/target/nextflow/download/sync_test_resources/main.nf b/target/nextflow/download/sync_test_resources/main.nf index 11ec7f69c86..6caa3670d57 100644 --- a/target/nextflow/download/sync_test_resources/main.nf +++ b/target/nextflow/download/sync_test_resources/main.nf @@ -1,4 +1,4 @@ -// sync_test_resources 0.12.3 +// sync_test_resources 0.12.4 // // This wrapper script is auto-generated by viash 0.7.5 and is thus a derivative // work thereof. 
This software comes with ABSOLUTELY NO WARRANTY from Data @@ -27,7 +27,7 @@ thisConfig = processConfig(jsonSlurper.parseText('''{ "functionality" : { "name" : "sync_test_resources", "namespace" : "download", - "version" : "0.12.3", + "version" : "0.12.4", "authors" : [ { "name" : "Robrecht Cannoodt", @@ -231,9 +231,9 @@ thisConfig = processConfig(jsonSlurper.parseText('''{ "platform" : "nextflow", "output" : "/home/runner/work/openpipeline/openpipeline/target/nextflow/download/sync_test_resources", "viash_version" : "0.7.5", - "git_commit" : "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f", + "git_commit" : "a075b9f384e200b357c4c85801062a980ddb3383", "git_remote" : "https://github.com/openpipelines-bio/openpipeline", - "git_tag" : "0.12.2-3-g827d483cf7" + "git_tag" : "0.12.3-3-ga075b9f384" } }''')) diff --git a/target/nextflow/download/sync_test_resources/nextflow.config b/target/nextflow/download/sync_test_resources/nextflow.config index bea4d45e6e1..b1d940b3fc6 100644 --- a/target/nextflow/download/sync_test_resources/nextflow.config +++ b/target/nextflow/download/sync_test_resources/nextflow.config @@ -2,7 +2,7 @@ manifest { name = 'sync_test_resources' mainScript = 'main.nf' nextflowVersion = '!>=20.12.1-edge' - version = '0.12.3' + version = '0.12.4' description = 'Synchronise the test resources from s3://openpipelines-data to resources_test' author = 'Robrecht Cannoodt' } diff --git a/target/nextflow/download/sync_test_resources/nextflow_schema.json b/target/nextflow/download/sync_test_resources/nextflow_schema.json index fba797e3653..8c753a3650a 100644 --- a/target/nextflow/download/sync_test_resources/nextflow_schema.json +++ b/target/nextflow/download/sync_test_resources/nextflow_schema.json @@ -1,86 +1,125 @@ { - "$schema": "http://json-schema.org/draft-07/schema", - "title": "sync_test_resources", - "description": "Synchronise the test resources from s3://openpipelines-data to resources_test", +"$schema": "http://json-schema.org/draft-07/schema", +"title": "sync_test_resources", +"description": "Synchronise the test resources from s3://openpipelines-data to resources_test", +"type": "object", +"definitions": { + + + + "arguments" : { + "title": "Arguments", "type": "object", - "definitions": { - "arguments" : { - "title": "Arguments", - "type": "object", - "description": "No description", - "properties": { - - "input": { - "type": "string", - "description": "Type: `string`, default: `s3://openpipelines-data`. Path to the S3 bucket to sync from", - "help_text": "Type: `string`, default: `s3://openpipelines-data`. Path to the S3 bucket to sync from.", - "default": "s3://openpipelines-data" - }, - - "output": { - "type": "string", - "description": "Type: `file`, default: `$id.$key.output.output`. Path to the test resource directory", - "help_text": "Type: `file`, default: `$id.$key.output.output`. Path to the test resource directory.", - "default": "$id.$key.output.output" - }, - - "quiet": { - "type": "boolean", - "description": "Type: `boolean_true`, default: `false`. Displays the operations that would be performed using the specified command without actually running them", - "help_text": "Type: `boolean_true`, default: `false`. Displays the operations that would be performed using the specified command without actually running them.", - "default": "False" - }, - - "dryrun": { - "type": "boolean", - "description": "Type: `boolean_true`, default: `false`. Does not display the operations performed from the specified command", - "help_text": "Type: `boolean_true`, default: `false`. 
Does not display the operations performed from the specified command.", - "default": "False" - }, - - "delete": { - "type": "boolean", - "description": "Type: `boolean_true`, default: `false`. Files that exist in the destination but not in the source are deleted during sync", - "help_text": "Type: `boolean_true`, default: `false`. Files that exist in the destination but not in the source are deleted during sync.", - "default": "False" - }, - - "exclude": { - "type": "string", - "description": "Type: List of `string`, multiple_sep: `\":\"`. Exclude all files or objects from the command that matches the specified pattern", - "help_text": "Type: List of `string`, multiple_sep: `\":\"`. Exclude all files or objects from the command that matches the specified pattern." - } - - } - }, - "nextflow input-output arguments" : { - "title": "Nextflow input-output arguments", - "type": "object", - "description": "Input/output parameters for Nextflow itself. Please note that both publishDir and publish_dir are supported but at least one has to be configured.", - "properties": { - - "publish_dir": { - "type": "string", - "description": "Type: `string`, required, example: `output/`. Path to an output directory", - "help_text": "Type: `string`, required, example: `output/`. Path to an output directory." - }, - - "param_list": { - "type": "string", - "description": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel", - "help_text": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel. A `param_list` can either be a list of maps, a csv file, a json file, a yaml file, or simply a yaml blob.\n\n* A list of maps (as-is) where the keys of each map corresponds to the arguments of the pipeline. Example: in a `nextflow.config` file: `param_list: [ [\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027], [\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027] ]`.\n* A csv file should have column names which correspond to the different arguments of this pipeline. Example: `--param_list data.csv` with columns `id,input`.\n* A json or a yaml file should be a list of maps, each of which has keys corresponding to the arguments of the pipeline. Example: `--param_list data.json` with contents `[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]`.\n* A yaml blob can also be passed directly as a string. Example: `--param_list \"[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]\"`.\n\nWhen passing a csv, json or yaml file, relative path names are relativized to the location of the parameter file. No relativation is performed when `param_list` is a list of maps (as-is) or a yaml blob.", - "hidden": true - } - - } - } + "description": "No description", + "properties": { + + + "input": { + "type": + "string", + "description": "Type: `string`, default: `s3://openpipelines-data`. Path to the S3 bucket to sync from", + "help_text": "Type: `string`, default: `s3://openpipelines-data`. Path to the S3 bucket to sync from." + , + "default": "s3://openpipelines-data" + } + + + , + "output": { + "type": + "string", + "description": "Type: `file`, default: `$id.$key.output.output`. 
Path to the test resource directory", + "help_text": "Type: `file`, default: `$id.$key.output.output`. Path to the test resource directory." + , + "default": "$id.$key.output.output" + } + + + , + "quiet": { + "type": + "boolean", + "description": "Type: `boolean_true`, default: `false`. Displays the operations that would be performed using the specified command without actually running them", + "help_text": "Type: `boolean_true`, default: `false`. Displays the operations that would be performed using the specified command without actually running them." + , + "default": "False" + } + + + , + "dryrun": { + "type": + "boolean", + "description": "Type: `boolean_true`, default: `false`. Does not display the operations performed from the specified command", + "help_text": "Type: `boolean_true`, default: `false`. Does not display the operations performed from the specified command." + , + "default": "False" + } + + + , + "delete": { + "type": + "boolean", + "description": "Type: `boolean_true`, default: `false`. Files that exist in the destination but not in the source are deleted during sync", + "help_text": "Type: `boolean_true`, default: `false`. Files that exist in the destination but not in the source are deleted during sync." + , + "default": "False" + } + + + , + "exclude": { + "type": + "string", + "description": "Type: List of `string`, multiple_sep: `\":\"`. Exclude all files or objects from the command that matches the specified pattern", + "help_text": "Type: List of `string`, multiple_sep: `\":\"`. Exclude all files or objects from the command that matches the specified pattern." + + } + + +} +}, + + + "nextflow input-output arguments" : { + "title": "Nextflow input-output arguments", + "type": "object", + "description": "Input/output parameters for Nextflow itself. Please note that both publishDir and publish_dir are supported but at least one has to be configured.", + "properties": { + + + "publish_dir": { + "type": + "string", + "description": "Type: `string`, required, example: `output/`. Path to an output directory", + "help_text": "Type: `string`, required, example: `output/`. Path to an output directory." + + } + + + , + "param_list": { + "type": + "string", + "description": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel", + "help_text": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel. A `param_list` can either be a list of maps, a csv file, a json file, a yaml file, or simply a yaml blob.\n\n* A list of maps (as-is) where the keys of each map corresponds to the arguments of the pipeline. Example: in a `nextflow.config` file: `param_list: [ [\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027], [\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027] ]`.\n* A csv file should have column names which correspond to the different arguments of this pipeline. Example: `--param_list data.csv` with columns `id,input`.\n* A json or a yaml file should be a list of maps, each of which has keys corresponding to the arguments of the pipeline. Example: `--param_list data.json` with contents `[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]`.\n* A yaml blob can also be passed directly as a string. 
Example: `--param_list \"[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]\"`.\n\nWhen passing a csv, json or yaml file, relative path names are relativized to the location of the parameter file. No relativation is performed when `param_list` is a list of maps (as-is) or a yaml blob.", + "hidden": true + + } + + +} +} +}, +"allOf": [ + + { + "$ref": "#/definitions/arguments" }, - "allOf": [ - { - "$ref": "#/definitions/arguments" - }, - { - "$ref": "#/definitions/nextflow input-output arguments" - } - ] + + { + "$ref": "#/definitions/nextflow input-output arguments" + } +] } diff --git a/target/nextflow/files/make_params/.config.vsh.yaml b/target/nextflow/files/make_params/.config.vsh.yaml index dd4d69a25d9..f521966803b 100644 --- a/target/nextflow/files/make_params/.config.vsh.yaml +++ b/target/nextflow/files/make_params/.config.vsh.yaml @@ -1,7 +1,7 @@ functionality: name: "make_params" namespace: "files" - version: "0.12.3" + version: "0.12.4" authors: - name: "Angela Oliveira Pisco" roles: @@ -215,6 +215,6 @@ info: output: "/home/runner/work/openpipeline/openpipeline/target/nextflow/files/make_params" executable: "/home/runner/work/openpipeline/openpipeline/target/nextflow/files/make_params/make_params" viash_version: "0.7.5" - git_commit: "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" + git_commit: "a075b9f384e200b357c4c85801062a980ddb3383" git_remote: "https://github.com/openpipelines-bio/openpipeline" - git_tag: "0.12.2-3-g827d483cf7" + git_tag: "0.12.3-3-ga075b9f384" diff --git a/target/nextflow/files/make_params/main.nf b/target/nextflow/files/make_params/main.nf index a7bb9012e26..64e71970b84 100644 --- a/target/nextflow/files/make_params/main.nf +++ b/target/nextflow/files/make_params/main.nf @@ -1,4 +1,4 @@ -// make_params 0.12.3 +// make_params 0.12.4 // // This wrapper script is auto-generated by viash 0.7.5 and is thus a derivative // work thereof. This software comes with ABSOLUTELY NO WARRANTY from Data @@ -28,7 +28,7 @@ thisConfig = processConfig(jsonSlurper.parseText('''{ "functionality" : { "name" : "make_params", "namespace" : "files", - "version" : "0.12.3", + "version" : "0.12.4", "authors" : [ { "name" : "Angela Oliveira Pisco", @@ -298,9 +298,9 @@ thisConfig = processConfig(jsonSlurper.parseText('''{ "platform" : "nextflow", "output" : "/home/runner/work/openpipeline/openpipeline/target/nextflow/files/make_params", "viash_version" : "0.7.5", - "git_commit" : "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f", + "git_commit" : "a075b9f384e200b357c4c85801062a980ddb3383", "git_remote" : "https://github.com/openpipelines-bio/openpipeline", - "git_tag" : "0.12.2-3-g827d483cf7" + "git_tag" : "0.12.3-3-ga075b9f384" } }''')) diff --git a/target/nextflow/files/make_params/nextflow.config b/target/nextflow/files/make_params/nextflow.config index 5a1cb159823..226046dce1c 100644 --- a/target/nextflow/files/make_params/nextflow.config +++ b/target/nextflow/files/make_params/nextflow.config @@ -2,7 +2,7 @@ manifest { name = 'make_params' mainScript = 'main.nf' nextflowVersion = '!>=20.12.1-edge' - version = '0.12.3' + version = '0.12.4' description = 'Looks for files in a directory and turn it in a params file.' 
author = 'Angela Oliveira Pisco, Robrecht Cannoodt' } diff --git a/target/nextflow/files/make_params/nextflow_schema.json b/target/nextflow/files/make_params/nextflow_schema.json index 677ee7397c4..2566c76fe8c 100644 --- a/target/nextflow/files/make_params/nextflow_schema.json +++ b/target/nextflow/files/make_params/nextflow_schema.json @@ -1,98 +1,145 @@ { - "$schema": "http://json-schema.org/draft-07/schema", - "title": "make_params", - "description": "Looks for files in a directory and turn it in a params file.", +"$schema": "http://json-schema.org/draft-07/schema", +"title": "make_params", +"description": "Looks for files in a directory and turn it in a params file.", +"type": "object", +"definitions": { + + + + "arguments" : { + "title": "Arguments", "type": "object", - "definitions": { - "arguments" : { - "title": "Arguments", - "type": "object", - "description": "No description", - "properties": { - - "base_dir": { - "type": "string", - "description": "Type: `file`, required, example: `/path/to/dir`. Base directory to search recursively", - "help_text": "Type: `file`, required, example: `/path/to/dir`. Base directory to search recursively" - }, - - "pattern": { - "type": "string", - "description": "Type: `string`, required, example: `*.fastq.gz`. An optional regular expression", - "help_text": "Type: `string`, required, example: `*.fastq.gz`. An optional regular expression. Only file names which match the regular expression will be matched." - }, - - "n_dirname_drop": { - "type": "integer", - "description": "Type: `integer`, default: `0`. For every matched file, the parent directory will be traversed N times", - "help_text": "Type: `integer`, default: `0`. For every matched file, the parent directory will be traversed N times.", - "default": "0" - }, - - "n_basename_id": { - "type": "integer", - "description": "Type: `integer`, default: `0`. The unique identifiers will consist of at least N dirnames", - "help_text": "Type: `integer`, default: `0`. The unique identifiers will consist of at least N dirnames.", - "default": "0" - }, - - "id_name": { - "type": "string", - "description": "Type: `string`, default: `id`. The name for storing the identifier field in the yaml", - "help_text": "Type: `string`, default: `id`. The name for storing the identifier field in the yaml.", - "default": "id" - }, - - "path_name": { - "type": "string", - "description": "Type: `string`, default: `path`. The name for storing the path field in the yaml", - "help_text": "Type: `string`, default: `path`. The name for storing the path field in the yaml.", - "default": "path" - }, - - "group_name": { - "type": "string", - "description": "Type: `string`, example: `param_list`. Top level name for the group of entries", - "help_text": "Type: `string`, example: `param_list`. Top level name for the group of entries." - }, - - "output": { - "type": "string", - "description": "Type: `file`, required, default: `$id.$key.output.yaml`, example: `params.yaml`. Output YAML file", - "help_text": "Type: `file`, required, default: `$id.$key.output.yaml`, example: `params.yaml`. Output YAML file.", - "default": "$id.$key.output.yaml" - } - - } - }, - "nextflow input-output arguments" : { - "title": "Nextflow input-output arguments", - "type": "object", - "description": "Input/output parameters for Nextflow itself. 
Please note that both publishDir and publish_dir are supported but at least one has to be configured.", - "properties": { - - "publish_dir": { - "type": "string", - "description": "Type: `string`, required, example: `output/`. Path to an output directory", - "help_text": "Type: `string`, required, example: `output/`. Path to an output directory." - }, - - "param_list": { - "type": "string", - "description": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel", - "help_text": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel. A `param_list` can either be a list of maps, a csv file, a json file, a yaml file, or simply a yaml blob.\n\n* A list of maps (as-is) where the keys of each map corresponds to the arguments of the pipeline. Example: in a `nextflow.config` file: `param_list: [ [\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027], [\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027] ]`.\n* A csv file should have column names which correspond to the different arguments of this pipeline. Example: `--param_list data.csv` with columns `id,input`.\n* A json or a yaml file should be a list of maps, each of which has keys corresponding to the arguments of the pipeline. Example: `--param_list data.json` with contents `[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]`.\n* A yaml blob can also be passed directly as a string. Example: `--param_list \"[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]\"`.\n\nWhen passing a csv, json or yaml file, relative path names are relativized to the location of the parameter file. No relativation is performed when `param_list` is a list of maps (as-is) or a yaml blob.", - "hidden": true - } - - } - } + "description": "No description", + "properties": { + + + "base_dir": { + "type": + "string", + "description": "Type: `file`, required, example: `/path/to/dir`. Base directory to search recursively", + "help_text": "Type: `file`, required, example: `/path/to/dir`. Base directory to search recursively" + + } + + + , + "pattern": { + "type": + "string", + "description": "Type: `string`, required, example: `*.fastq.gz`. An optional regular expression", + "help_text": "Type: `string`, required, example: `*.fastq.gz`. An optional regular expression. Only file names which match the regular expression will be matched." + + } + + + , + "n_dirname_drop": { + "type": + "integer", + "description": "Type: `integer`, default: `0`. For every matched file, the parent directory will be traversed N times", + "help_text": "Type: `integer`, default: `0`. For every matched file, the parent directory will be traversed N times." + , + "default": "0" + } + + + , + "n_basename_id": { + "type": + "integer", + "description": "Type: `integer`, default: `0`. The unique identifiers will consist of at least N dirnames", + "help_text": "Type: `integer`, default: `0`. The unique identifiers will consist of at least N dirnames." + , + "default": "0" + } + + + , + "id_name": { + "type": + "string", + "description": "Type: `string`, default: `id`. The name for storing the identifier field in the yaml", + "help_text": "Type: `string`, default: `id`. The name for storing the identifier field in the yaml." 
+ , + "default": "id" + } + + + , + "path_name": { + "type": + "string", + "description": "Type: `string`, default: `path`. The name for storing the path field in the yaml", + "help_text": "Type: `string`, default: `path`. The name for storing the path field in the yaml." + , + "default": "path" + } + + + , + "group_name": { + "type": + "string", + "description": "Type: `string`, example: `param_list`. Top level name for the group of entries", + "help_text": "Type: `string`, example: `param_list`. Top level name for the group of entries." + + } + + + , + "output": { + "type": + "string", + "description": "Type: `file`, required, default: `$id.$key.output.yaml`, example: `params.yaml`. Output YAML file", + "help_text": "Type: `file`, required, default: `$id.$key.output.yaml`, example: `params.yaml`. Output YAML file." + , + "default": "$id.$key.output.yaml" + } + + +} +}, + + + "nextflow input-output arguments" : { + "title": "Nextflow input-output arguments", + "type": "object", + "description": "Input/output parameters for Nextflow itself. Please note that both publishDir and publish_dir are supported but at least one has to be configured.", + "properties": { + + + "publish_dir": { + "type": + "string", + "description": "Type: `string`, required, example: `output/`. Path to an output directory", + "help_text": "Type: `string`, required, example: `output/`. Path to an output directory." + + } + + + , + "param_list": { + "type": + "string", + "description": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel", + "help_text": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel. A `param_list` can either be a list of maps, a csv file, a json file, a yaml file, or simply a yaml blob.\n\n* A list of maps (as-is) where the keys of each map corresponds to the arguments of the pipeline. Example: in a `nextflow.config` file: `param_list: [ [\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027], [\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027] ]`.\n* A csv file should have column names which correspond to the different arguments of this pipeline. Example: `--param_list data.csv` with columns `id,input`.\n* A json or a yaml file should be a list of maps, each of which has keys corresponding to the arguments of the pipeline. Example: `--param_list data.json` with contents `[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]`.\n* A yaml blob can also be passed directly as a string. Example: `--param_list \"[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]\"`.\n\nWhen passing a csv, json or yaml file, relative path names are relativized to the location of the parameter file. 
No relativation is performed when `param_list` is a list of maps (as-is) or a yaml blob.", + "hidden": true + + } + + +} +} +}, +"allOf": [ + + { + "$ref": "#/definitions/arguments" }, - "allOf": [ - { - "$ref": "#/definitions/arguments" - }, - { - "$ref": "#/definitions/nextflow input-output arguments" - } - ] + + { + "$ref": "#/definitions/nextflow input-output arguments" + } +] } diff --git a/target/nextflow/filter/delimit_fraction/.config.vsh.yaml b/target/nextflow/filter/delimit_fraction/.config.vsh.yaml index 2e6fea8bf10..e834b981402 100644 --- a/target/nextflow/filter/delimit_fraction/.config.vsh.yaml +++ b/target/nextflow/filter/delimit_fraction/.config.vsh.yaml @@ -1,7 +1,7 @@ functionality: name: "delimit_fraction" namespace: "filter" - version: "0.12.3" + version: "0.12.4" authors: - name: "Dries Schaumont" roles: @@ -236,6 +236,6 @@ info: output: "/home/runner/work/openpipeline/openpipeline/target/nextflow/filter/delimit_fraction" executable: "/home/runner/work/openpipeline/openpipeline/target/nextflow/filter/delimit_fraction/delimit_fraction" viash_version: "0.7.5" - git_commit: "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" + git_commit: "a075b9f384e200b357c4c85801062a980ddb3383" git_remote: "https://github.com/openpipelines-bio/openpipeline" - git_tag: "0.12.2-3-g827d483cf7" + git_tag: "0.12.3-3-ga075b9f384" diff --git a/target/nextflow/filter/delimit_fraction/main.nf b/target/nextflow/filter/delimit_fraction/main.nf index 6d98952a370..5dbafc40189 100644 --- a/target/nextflow/filter/delimit_fraction/main.nf +++ b/target/nextflow/filter/delimit_fraction/main.nf @@ -1,4 +1,4 @@ -// delimit_fraction 0.12.3 +// delimit_fraction 0.12.4 // // This wrapper script is auto-generated by viash 0.7.5 and is thus a derivative // work thereof. This software comes with ABSOLUTELY NO WARRANTY from Data @@ -27,7 +27,7 @@ thisConfig = processConfig(jsonSlurper.parseText('''{ "functionality" : { "name" : "delimit_fraction", "namespace" : "filter", - "version" : "0.12.3", + "version" : "0.12.4", "authors" : [ { "name" : "Dries Schaumont", @@ -330,9 +330,9 @@ thisConfig = processConfig(jsonSlurper.parseText('''{ "platform" : "nextflow", "output" : "/home/runner/work/openpipeline/openpipeline/target/nextflow/filter/delimit_fraction", "viash_version" : "0.7.5", - "git_commit" : "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f", + "git_commit" : "a075b9f384e200b357c4c85801062a980ddb3383", "git_remote" : "https://github.com/openpipelines-bio/openpipeline", - "git_tag" : "0.12.2-3-g827d483cf7" + "git_tag" : "0.12.3-3-ga075b9f384" } }''')) diff --git a/target/nextflow/filter/delimit_fraction/nextflow.config b/target/nextflow/filter/delimit_fraction/nextflow.config index 4305d46b190..f405577eb47 100644 --- a/target/nextflow/filter/delimit_fraction/nextflow.config +++ b/target/nextflow/filter/delimit_fraction/nextflow.config @@ -2,7 +2,7 @@ manifest { name = 'delimit_fraction' mainScript = 'main.nf' nextflowVersion = '!>=20.12.1-edge' - version = '0.12.3' + version = '0.12.4' description = 'Turns a column containing values between 0 and 1 into a boolean column based on thresholds.\n' author = 'Dries Schaumont' } diff --git a/target/nextflow/filter/delimit_fraction/nextflow_schema.json b/target/nextflow/filter/delimit_fraction/nextflow_schema.json index 6f5bb0e68d4..c22316618d0 100644 --- a/target/nextflow/filter/delimit_fraction/nextflow_schema.json +++ b/target/nextflow/filter/delimit_fraction/nextflow_schema.json @@ -1,127 +1,184 @@ { - "$schema": "http://json-schema.org/draft-07/schema", - "title": 
"delimit_fraction", - "description": "Turns a column containing values between 0 and 1 into a boolean column based on thresholds.\n", +"$schema": "http://json-schema.org/draft-07/schema", +"title": "delimit_fraction", +"description": "Turns a column containing values between 0 and 1 into a boolean column based on thresholds.\n", +"type": "object", +"definitions": { + + + + "inputs" : { + "title": "Inputs", "type": "object", - "definitions": { - "inputs" : { - "title": "Inputs", - "type": "object", - "description": "No description", - "properties": { - - "input": { - "type": "string", - "description": "Type: `file`, required, example: `input.h5mu`. Input h5mu file", - "help_text": "Type: `file`, required, example: `input.h5mu`. Input h5mu file" - }, - - "modality": { - "type": "string", - "description": "Type: `string`, default: `rna`. ", - "help_text": "Type: `string`, default: `rna`. ", - "default": "rna" - }, - - "layer": { - "type": "string", - "description": "Type: `string`, example: `raw_counts`. ", - "help_text": "Type: `string`, example: `raw_counts`. " - }, - - "obs_fraction_column": { - "type": "string", - "description": "Type: `string`, required, example: `fraction_mitochondrial`. Name of column from ", - "help_text": "Type: `string`, required, example: `fraction_mitochondrial`. Name of column from .var dataframe selecting\na column that contains floating point values between 0 and 1.\n" - } - - } - }, - "outputs" : { - "title": "Outputs", - "type": "object", - "description": "No description", - "properties": { - - "output": { - "type": "string", - "description": "Type: `file`, default: `$id.$key.output.h5mu`, example: `output.h5mu`. Output h5mu file", - "help_text": "Type: `file`, default: `$id.$key.output.h5mu`, example: `output.h5mu`. Output h5mu file.", - "default": "$id.$key.output.h5mu" - }, - - "output_compression": { - "type": "string", - "description": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. The compression format to be used on the output h5mu object", - "help_text": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. The compression format to be used on the output h5mu object.", - "enum": ["gzip", "lzf"] + "description": "No description", + "properties": { + + + "input": { + "type": + "string", + "description": "Type: `file`, required, example: `input.h5mu`. Input h5mu file", + "help_text": "Type: `file`, required, example: `input.h5mu`. Input h5mu file" - }, - - "obs_name_filter": { - "type": "string", - "description": "Type: `string`, required. In which ", - "help_text": "Type: `string`, required. In which .obs slot to store a boolean array corresponding to which observations should be removed." - } - - } - }, - "arguments" : { - "title": "Arguments", - "type": "object", - "description": "No description", - "properties": { - - "min_fraction": { - "type": "number", - "description": "Type: `double`, default: `0`. Min fraction for an observation to be retained (True in output)", - "help_text": "Type: `double`, default: `0`. Min fraction for an observation to be retained (True in output).", - "default": "0" - }, - - "max_fraction": { - "type": "number", - "description": "Type: `double`, default: `1`. Max fraction for an observation to be retained (True in output)", - "help_text": "Type: `double`, default: `1`. 
Max fraction for an observation to be retained (True in output).", - "default": "1" - } - - } - }, - "nextflow input-output arguments" : { - "title": "Nextflow input-output arguments", - "type": "object", - "description": "Input/output parameters for Nextflow itself. Please note that both publishDir and publish_dir are supported but at least one has to be configured.", - "properties": { - - "publish_dir": { - "type": "string", - "description": "Type: `string`, required, example: `output/`. Path to an output directory", - "help_text": "Type: `string`, required, example: `output/`. Path to an output directory." - }, - - "param_list": { - "type": "string", - "description": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel", - "help_text": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel. A `param_list` can either be a list of maps, a csv file, a json file, a yaml file, or simply a yaml blob.\n\n* A list of maps (as-is) where the keys of each map corresponds to the arguments of the pipeline. Example: in a `nextflow.config` file: `param_list: [ [\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027], [\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027] ]`.\n* A csv file should have column names which correspond to the different arguments of this pipeline. Example: `--param_list data.csv` with columns `id,input`.\n* A json or a yaml file should be a list of maps, each of which has keys corresponding to the arguments of the pipeline. Example: `--param_list data.json` with contents `[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]`.\n* A yaml blob can also be passed directly as a string. Example: `--param_list \"[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]\"`.\n\nWhen passing a csv, json or yaml file, relative path names are relativized to the location of the parameter file. No relativation is performed when `param_list` is a list of maps (as-is) or a yaml blob.", - "hidden": true - } - - } - } + } + + + , + "modality": { + "type": + "string", + "description": "Type: `string`, default: `rna`. ", + "help_text": "Type: `string`, default: `rna`. " + , + "default": "rna" + } + + + , + "layer": { + "type": + "string", + "description": "Type: `string`, example: `raw_counts`. ", + "help_text": "Type: `string`, example: `raw_counts`. " + + } + + + , + "obs_fraction_column": { + "type": + "string", + "description": "Type: `string`, required, example: `fraction_mitochondrial`. Name of column from ", + "help_text": "Type: `string`, required, example: `fraction_mitochondrial`. Name of column from .var dataframe selecting\na column that contains floating point values between 0 and 1.\n" + + } + + +} +}, + + + "outputs" : { + "title": "Outputs", + "type": "object", + "description": "No description", + "properties": { + + + "output": { + "type": + "string", + "description": "Type: `file`, default: `$id.$key.output.h5mu`, example: `output.h5mu`. Output h5mu file", + "help_text": "Type: `file`, default: `$id.$key.output.h5mu`, example: `output.h5mu`. Output h5mu file." 
+ , + "default": "$id.$key.output.h5mu" + } + + + , + "output_compression": { + "type": + "string", + "description": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. The compression format to be used on the output h5mu object", + "help_text": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. The compression format to be used on the output h5mu object.", + "enum": ["gzip", "lzf"] + + + } + + + , + "obs_name_filter": { + "type": + "string", + "description": "Type: `string`, required. In which ", + "help_text": "Type: `string`, required. In which .obs slot to store a boolean array corresponding to which observations should be removed." + + } + + +} +}, + + + "arguments" : { + "title": "Arguments", + "type": "object", + "description": "No description", + "properties": { + + + "min_fraction": { + "type": + "number", + "description": "Type: `double`, default: `0`. Min fraction for an observation to be retained (True in output)", + "help_text": "Type: `double`, default: `0`. Min fraction for an observation to be retained (True in output)." + , + "default": "0" + } + + + , + "max_fraction": { + "type": + "number", + "description": "Type: `double`, default: `1`. Max fraction for an observation to be retained (True in output)", + "help_text": "Type: `double`, default: `1`. Max fraction for an observation to be retained (True in output)." + , + "default": "1" + } + + +} +}, + + + "nextflow input-output arguments" : { + "title": "Nextflow input-output arguments", + "type": "object", + "description": "Input/output parameters for Nextflow itself. Please note that both publishDir and publish_dir are supported but at least one has to be configured.", + "properties": { + + + "publish_dir": { + "type": + "string", + "description": "Type: `string`, required, example: `output/`. Path to an output directory", + "help_text": "Type: `string`, required, example: `output/`. Path to an output directory." + + } + + + , + "param_list": { + "type": + "string", + "description": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel", + "help_text": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel. A `param_list` can either be a list of maps, a csv file, a json file, a yaml file, or simply a yaml blob.\n\n* A list of maps (as-is) where the keys of each map corresponds to the arguments of the pipeline. Example: in a `nextflow.config` file: `param_list: [ [\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027], [\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027] ]`.\n* A csv file should have column names which correspond to the different arguments of this pipeline. Example: `--param_list data.csv` with columns `id,input`.\n* A json or a yaml file should be a list of maps, each of which has keys corresponding to the arguments of the pipeline. Example: `--param_list data.json` with contents `[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]`.\n* A yaml blob can also be passed directly as a string. Example: `--param_list \"[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]\"`.\n\nWhen passing a csv, json or yaml file, relative path names are relativized to the location of the parameter file. 
No relativation is performed when `param_list` is a list of maps (as-is) or a yaml blob.", + "hidden": true + + } + + +} +} +}, +"allOf": [ + + { + "$ref": "#/definitions/inputs" + }, + + { + "$ref": "#/definitions/outputs" + }, + + { + "$ref": "#/definitions/arguments" }, - "allOf": [ - { - "$ref": "#/definitions/inputs" - }, - { - "$ref": "#/definitions/outputs" - }, - { - "$ref": "#/definitions/arguments" - }, - { - "$ref": "#/definitions/nextflow input-output arguments" - } - ] + + { + "$ref": "#/definitions/nextflow input-output arguments" + } +] } diff --git a/target/nextflow/filter/do_filter/.config.vsh.yaml b/target/nextflow/filter/do_filter/.config.vsh.yaml index c8212d38c48..49713c6b68d 100644 --- a/target/nextflow/filter/do_filter/.config.vsh.yaml +++ b/target/nextflow/filter/do_filter/.config.vsh.yaml @@ -1,7 +1,7 @@ functionality: name: "do_filter" namespace: "filter" - version: "0.12.3" + version: "0.12.4" authors: - name: "Robrecht Cannoodt" roles: @@ -197,6 +197,6 @@ info: output: "/home/runner/work/openpipeline/openpipeline/target/nextflow/filter/do_filter" executable: "/home/runner/work/openpipeline/openpipeline/target/nextflow/filter/do_filter/do_filter" viash_version: "0.7.5" - git_commit: "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" + git_commit: "a075b9f384e200b357c4c85801062a980ddb3383" git_remote: "https://github.com/openpipelines-bio/openpipeline" - git_tag: "0.12.2-3-g827d483cf7" + git_tag: "0.12.3-3-ga075b9f384" diff --git a/target/nextflow/filter/do_filter/main.nf b/target/nextflow/filter/do_filter/main.nf index 74c3d36b1a4..11ff10b4e31 100644 --- a/target/nextflow/filter/do_filter/main.nf +++ b/target/nextflow/filter/do_filter/main.nf @@ -1,4 +1,4 @@ -// do_filter 0.12.3 +// do_filter 0.12.4 // // This wrapper script is auto-generated by viash 0.7.5 and is thus a derivative // work thereof. 
This software comes with ABSOLUTELY NO WARRANTY from Data @@ -27,7 +27,7 @@ thisConfig = processConfig(jsonSlurper.parseText('''{ "functionality" : { "name" : "do_filter", "namespace" : "filter", - "version" : "0.12.3", + "version" : "0.12.4", "authors" : [ { "name" : "Robrecht Cannoodt", @@ -281,9 +281,9 @@ thisConfig = processConfig(jsonSlurper.parseText('''{ "platform" : "nextflow", "output" : "/home/runner/work/openpipeline/openpipeline/target/nextflow/filter/do_filter", "viash_version" : "0.7.5", - "git_commit" : "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f", + "git_commit" : "a075b9f384e200b357c4c85801062a980ddb3383", "git_remote" : "https://github.com/openpipelines-bio/openpipeline", - "git_tag" : "0.12.2-3-g827d483cf7" + "git_tag" : "0.12.3-3-ga075b9f384" } }''')) diff --git a/target/nextflow/filter/do_filter/nextflow.config b/target/nextflow/filter/do_filter/nextflow.config index 39263b51694..959c42463c8 100644 --- a/target/nextflow/filter/do_filter/nextflow.config +++ b/target/nextflow/filter/do_filter/nextflow.config @@ -2,7 +2,7 @@ manifest { name = 'do_filter' mainScript = 'main.nf' nextflowVersion = '!>=20.12.1-edge' - version = '0.12.3' + version = '0.12.4' description = 'Remove observations and variables based on specified .obs and .var columns.\n' author = 'Robrecht Cannoodt' } diff --git a/target/nextflow/filter/do_filter/nextflow_schema.json b/target/nextflow/filter/do_filter/nextflow_schema.json index 9c0e896ebea..2e8655cd805 100644 --- a/target/nextflow/filter/do_filter/nextflow_schema.json +++ b/target/nextflow/filter/do_filter/nextflow_schema.json @@ -1,85 +1,124 @@ { - "$schema": "http://json-schema.org/draft-07/schema", - "title": "do_filter", - "description": "Remove observations and variables based on specified .obs and .var columns.\n", +"$schema": "http://json-schema.org/draft-07/schema", +"title": "do_filter", +"description": "Remove observations and variables based on specified .obs and .var columns.\n", +"type": "object", +"definitions": { + + + + "arguments" : { + "title": "Arguments", "type": "object", - "definitions": { - "arguments" : { - "title": "Arguments", - "type": "object", - "description": "No description", - "properties": { - - "input": { - "type": "string", - "description": "Type: `file`, required, example: `input.h5mu`. Input h5mu file", - "help_text": "Type: `file`, required, example: `input.h5mu`. Input h5mu file" - }, - - "modality": { - "type": "string", - "description": "Type: `string`, default: `rna`. ", - "help_text": "Type: `string`, default: `rna`. ", - "default": "rna" - }, - - "obs_filter": { - "type": "string", - "description": "Type: List of `string`, example: `filter_with_x`, multiple_sep: `\":\"`. Which ", - "help_text": "Type: List of `string`, example: `filter_with_x`, multiple_sep: `\":\"`. Which .obs columns to use to filter the observations by." - }, - - "var_filter": { - "type": "string", - "description": "Type: List of `string`, example: `filter_with_x`, multiple_sep: `\":\"`. Which ", - "help_text": "Type: List of `string`, example: `filter_with_x`, multiple_sep: `\":\"`. Which .var columns to use to filter the observations by." - }, - - "output": { - "type": "string", - "description": "Type: `file`, default: `$id.$key.output.h5mu`, example: `output.h5mu`. Output h5mu file", - "help_text": "Type: `file`, default: `$id.$key.output.h5mu`, example: `output.h5mu`. 
Output h5mu file.", - "default": "$id.$key.output.h5mu" - }, - - "output_compression": { - "type": "string", - "description": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. The compression format to be used on the output h5mu object", - "help_text": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. The compression format to be used on the output h5mu object.", - "enum": ["gzip", "lzf"] + "description": "No description", + "properties": { + + + "input": { + "type": + "string", + "description": "Type: `file`, required, example: `input.h5mu`. Input h5mu file", + "help_text": "Type: `file`, required, example: `input.h5mu`. Input h5mu file" - } - - } - }, - "nextflow input-output arguments" : { - "title": "Nextflow input-output arguments", - "type": "object", - "description": "Input/output parameters for Nextflow itself. Please note that both publishDir and publish_dir are supported but at least one has to be configured.", - "properties": { - - "publish_dir": { - "type": "string", - "description": "Type: `string`, required, example: `output/`. Path to an output directory", - "help_text": "Type: `string`, required, example: `output/`. Path to an output directory." - }, - - "param_list": { - "type": "string", - "description": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel", - "help_text": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel. A `param_list` can either be a list of maps, a csv file, a json file, a yaml file, or simply a yaml blob.\n\n* A list of maps (as-is) where the keys of each map corresponds to the arguments of the pipeline. Example: in a `nextflow.config` file: `param_list: [ [\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027], [\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027] ]`.\n* A csv file should have column names which correspond to the different arguments of this pipeline. Example: `--param_list data.csv` with columns `id,input`.\n* A json or a yaml file should be a list of maps, each of which has keys corresponding to the arguments of the pipeline. Example: `--param_list data.json` with contents `[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]`.\n* A yaml blob can also be passed directly as a string. Example: `--param_list \"[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]\"`.\n\nWhen passing a csv, json or yaml file, relative path names are relativized to the location of the parameter file. No relativation is performed when `param_list` is a list of maps (as-is) or a yaml blob.", - "hidden": true - } - - } - } + } + + + , + "modality": { + "type": + "string", + "description": "Type: `string`, default: `rna`. ", + "help_text": "Type: `string`, default: `rna`. " + , + "default": "rna" + } + + + , + "obs_filter": { + "type": + "string", + "description": "Type: List of `string`, example: `filter_with_x`, multiple_sep: `\":\"`. Which ", + "help_text": "Type: List of `string`, example: `filter_with_x`, multiple_sep: `\":\"`. Which .obs columns to use to filter the observations by." + + } + + + , + "var_filter": { + "type": + "string", + "description": "Type: List of `string`, example: `filter_with_x`, multiple_sep: `\":\"`. 
Which ", + "help_text": "Type: List of `string`, example: `filter_with_x`, multiple_sep: `\":\"`. Which .var columns to use to filter the observations by." + + } + + + , + "output": { + "type": + "string", + "description": "Type: `file`, default: `$id.$key.output.h5mu`, example: `output.h5mu`. Output h5mu file", + "help_text": "Type: `file`, default: `$id.$key.output.h5mu`, example: `output.h5mu`. Output h5mu file." + , + "default": "$id.$key.output.h5mu" + } + + + , + "output_compression": { + "type": + "string", + "description": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. The compression format to be used on the output h5mu object", + "help_text": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. The compression format to be used on the output h5mu object.", + "enum": ["gzip", "lzf"] + + + } + + +} +}, + + + "nextflow input-output arguments" : { + "title": "Nextflow input-output arguments", + "type": "object", + "description": "Input/output parameters for Nextflow itself. Please note that both publishDir and publish_dir are supported but at least one has to be configured.", + "properties": { + + + "publish_dir": { + "type": + "string", + "description": "Type: `string`, required, example: `output/`. Path to an output directory", + "help_text": "Type: `string`, required, example: `output/`. Path to an output directory." + + } + + + , + "param_list": { + "type": + "string", + "description": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel", + "help_text": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel. A `param_list` can either be a list of maps, a csv file, a json file, a yaml file, or simply a yaml blob.\n\n* A list of maps (as-is) where the keys of each map corresponds to the arguments of the pipeline. Example: in a `nextflow.config` file: `param_list: [ [\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027], [\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027] ]`.\n* A csv file should have column names which correspond to the different arguments of this pipeline. Example: `--param_list data.csv` with columns `id,input`.\n* A json or a yaml file should be a list of maps, each of which has keys corresponding to the arguments of the pipeline. Example: `--param_list data.json` with contents `[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]`.\n* A yaml blob can also be passed directly as a string. Example: `--param_list \"[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]\"`.\n\nWhen passing a csv, json or yaml file, relative path names are relativized to the location of the parameter file. 
No relativation is performed when `param_list` is a list of maps (as-is) or a yaml blob.", + "hidden": true + + } + + +} +} +}, +"allOf": [ + + { + "$ref": "#/definitions/arguments" }, - "allOf": [ - { - "$ref": "#/definitions/arguments" - }, - { - "$ref": "#/definitions/nextflow input-output arguments" - } - ] + + { + "$ref": "#/definitions/nextflow input-output arguments" + } +] } diff --git a/target/nextflow/filter/filter_with_counts/.config.vsh.yaml b/target/nextflow/filter/filter_with_counts/.config.vsh.yaml index 55373a7f6b4..4eb3f043508 100644 --- a/target/nextflow/filter/filter_with_counts/.config.vsh.yaml +++ b/target/nextflow/filter/filter_with_counts/.config.vsh.yaml @@ -1,7 +1,7 @@ functionality: name: "filter_with_counts" namespace: "filter" - version: "0.12.3" + version: "0.12.4" authors: - name: "Dries De Maeyer" roles: @@ -290,6 +290,6 @@ info: output: "/home/runner/work/openpipeline/openpipeline/target/nextflow/filter/filter_with_counts" executable: "/home/runner/work/openpipeline/openpipeline/target/nextflow/filter/filter_with_counts/filter_with_counts" viash_version: "0.7.5" - git_commit: "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" + git_commit: "a075b9f384e200b357c4c85801062a980ddb3383" git_remote: "https://github.com/openpipelines-bio/openpipeline" - git_tag: "0.12.2-3-g827d483cf7" + git_tag: "0.12.3-3-ga075b9f384" diff --git a/target/nextflow/filter/filter_with_counts/main.nf b/target/nextflow/filter/filter_with_counts/main.nf index b4caa05f754..030d115fc25 100644 --- a/target/nextflow/filter/filter_with_counts/main.nf +++ b/target/nextflow/filter/filter_with_counts/main.nf @@ -1,4 +1,4 @@ -// filter_with_counts 0.12.3 +// filter_with_counts 0.12.4 // // This wrapper script is auto-generated by viash 0.7.5 and is thus a derivative // work thereof. This software comes with ABSOLUTELY NO WARRANTY from Data @@ -28,7 +28,7 @@ thisConfig = processConfig(jsonSlurper.parseText('''{ "functionality" : { "name" : "filter_with_counts", "namespace" : "filter", - "version" : "0.12.3", + "version" : "0.12.4", "authors" : [ { "name" : "Dries De Maeyer", @@ -402,9 +402,9 @@ thisConfig = processConfig(jsonSlurper.parseText('''{ "platform" : "nextflow", "output" : "/home/runner/work/openpipeline/openpipeline/target/nextflow/filter/filter_with_counts", "viash_version" : "0.7.5", - "git_commit" : "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f", + "git_commit" : "a075b9f384e200b357c4c85801062a980ddb3383", "git_remote" : "https://github.com/openpipelines-bio/openpipeline", - "git_tag" : "0.12.2-3-g827d483cf7" + "git_tag" : "0.12.3-3-ga075b9f384" } }''')) diff --git a/target/nextflow/filter/filter_with_counts/nextflow.config b/target/nextflow/filter/filter_with_counts/nextflow.config index d49049b4e41..5a7a91e49e3 100644 --- a/target/nextflow/filter/filter_with_counts/nextflow.config +++ b/target/nextflow/filter/filter_with_counts/nextflow.config @@ -2,7 +2,7 @@ manifest { name = 'filter_with_counts' mainScript = 'main.nf' nextflowVersion = '!>=20.12.1-edge' - version = '0.12.3' + version = '0.12.4' description = 'Filter scRNA-seq data based on the primary QC metrics. 
\nThis is based on both the UMI counts, the gene counts \nand the mitochondrial genes (genes starting with mt/MT).\n' author = 'Dries De Maeyer, Robrecht Cannoodt' } diff --git a/target/nextflow/filter/filter_with_counts/nextflow_schema.json b/target/nextflow/filter/filter_with_counts/nextflow_schema.json index 32090b85a08..16f2f7e2b32 100644 --- a/target/nextflow/filter/filter_with_counts/nextflow_schema.json +++ b/target/nextflow/filter/filter_with_counts/nextflow_schema.json @@ -1,152 +1,225 @@ { - "$schema": "http://json-schema.org/draft-07/schema", - "title": "filter_with_counts", - "description": "Filter scRNA-seq data based on the primary QC metrics. \nThis is based on both the UMI counts, the gene counts \nand the mitochondrial genes (genes starting with mt/MT).\n", +"$schema": "http://json-schema.org/draft-07/schema", +"title": "filter_with_counts", +"description": "Filter scRNA-seq data based on the primary QC metrics. \nThis is based on both the UMI counts, the gene counts \nand the mitochondrial genes (genes starting with mt/MT).\n", +"type": "object", +"definitions": { + + + + "inputs" : { + "title": "Inputs", "type": "object", - "definitions": { - "inputs" : { - "title": "Inputs", - "type": "object", - "description": "No description", - "properties": { - - "input": { - "type": "string", - "description": "Type: `file`, required, example: `input.h5mu`. Input h5mu file", - "help_text": "Type: `file`, required, example: `input.h5mu`. Input h5mu file" - }, - - "modality": { - "type": "string", - "description": "Type: `string`, default: `rna`. ", - "help_text": "Type: `string`, default: `rna`. ", - "default": "rna" - }, - - "layer": { - "type": "string", - "description": "Type: `string`, example: `raw_counts`. ", - "help_text": "Type: `string`, example: `raw_counts`. " - } - - } - }, - "outputs" : { - "title": "Outputs", - "type": "object", - "description": "No description", - "properties": { - - "output": { - "type": "string", - "description": "Type: `file`, default: `$id.$key.output.h5mu`, example: `output.h5mu`. Output h5mu file", - "help_text": "Type: `file`, default: `$id.$key.output.h5mu`, example: `output.h5mu`. Output h5mu file.", - "default": "$id.$key.output.h5mu" - }, - - "output_compression": { - "type": "string", - "description": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. The compression format to be used on the output h5mu object", - "help_text": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. The compression format to be used on the output h5mu object.", - "enum": ["gzip", "lzf"] + "description": "No description", + "properties": { + + + "input": { + "type": + "string", + "description": "Type: `file`, required, example: `input.h5mu`. Input h5mu file", + "help_text": "Type: `file`, required, example: `input.h5mu`. Input h5mu file" - }, - - "do_subset": { - "type": "boolean", - "description": "Type: `boolean_true`, default: `false`. Whether to subset before storing the output", - "help_text": "Type: `boolean_true`, default: `false`. Whether to subset before storing the output.", - "default": "False" - }, - - "obs_name_filter": { - "type": "string", - "description": "Type: `string`, default: `filter_with_counts`. In which ", - "help_text": "Type: `string`, default: `filter_with_counts`. In which .obs slot to store a boolean array corresponding to which observations should be removed.", - "default": "filter_with_counts" - }, - - "var_name_filter": { - "type": "string", - "description": "Type: `string`, default: `filter_with_counts`. 
In which ", - "help_text": "Type: `string`, default: `filter_with_counts`. In which .var slot to store a boolean array corresponding to which variables should be removed.", - "default": "filter_with_counts" - } - - } - }, - "arguments" : { - "title": "Arguments", - "type": "object", - "description": "No description", - "properties": { - - "min_counts": { - "type": "integer", - "description": "Type: `integer`, example: `200`. Minimum number of counts captured per cell", - "help_text": "Type: `integer`, example: `200`. Minimum number of counts captured per cell." - }, - - "max_counts": { - "type": "integer", - "description": "Type: `integer`, example: `5000000`. Maximum number of counts captured per cell", - "help_text": "Type: `integer`, example: `5000000`. Maximum number of counts captured per cell." - }, - - "min_genes_per_cell": { - "type": "integer", - "description": "Type: `integer`, example: `200`. Minimum of non-zero values per cell", - "help_text": "Type: `integer`, example: `200`. Minimum of non-zero values per cell." - }, - - "max_genes_per_cell": { - "type": "integer", - "description": "Type: `integer`, example: `1500000`. Maximum of non-zero values per cell", - "help_text": "Type: `integer`, example: `1500000`. Maximum of non-zero values per cell." - }, - - "min_cells_per_gene": { - "type": "integer", - "description": "Type: `integer`, example: `3`. Minimum of non-zero values per gene", - "help_text": "Type: `integer`, example: `3`. Minimum of non-zero values per gene." - } - - } - }, - "nextflow input-output arguments" : { - "title": "Nextflow input-output arguments", - "type": "object", - "description": "Input/output parameters for Nextflow itself. Please note that both publishDir and publish_dir are supported but at least one has to be configured.", - "properties": { - - "publish_dir": { - "type": "string", - "description": "Type: `string`, required, example: `output/`. Path to an output directory", - "help_text": "Type: `string`, required, example: `output/`. Path to an output directory." - }, - - "param_list": { - "type": "string", - "description": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel", - "help_text": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel. A `param_list` can either be a list of maps, a csv file, a json file, a yaml file, or simply a yaml blob.\n\n* A list of maps (as-is) where the keys of each map corresponds to the arguments of the pipeline. Example: in a `nextflow.config` file: `param_list: [ [\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027], [\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027] ]`.\n* A csv file should have column names which correspond to the different arguments of this pipeline. Example: `--param_list data.csv` with columns `id,input`.\n* A json or a yaml file should be a list of maps, each of which has keys corresponding to the arguments of the pipeline. Example: `--param_list data.json` with contents `[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]`.\n* A yaml blob can also be passed directly as a string. 
Example: `--param_list \"[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]\"`.\n\nWhen passing a csv, json or yaml file, relative path names are relativized to the location of the parameter file. No relativation is performed when `param_list` is a list of maps (as-is) or a yaml blob.", - "hidden": true - } - - } - } + } + + + , + "modality": { + "type": + "string", + "description": "Type: `string`, default: `rna`. ", + "help_text": "Type: `string`, default: `rna`. " + , + "default": "rna" + } + + + , + "layer": { + "type": + "string", + "description": "Type: `string`, example: `raw_counts`. ", + "help_text": "Type: `string`, example: `raw_counts`. " + + } + + +} +}, + + + "outputs" : { + "title": "Outputs", + "type": "object", + "description": "No description", + "properties": { + + + "output": { + "type": + "string", + "description": "Type: `file`, default: `$id.$key.output.h5mu`, example: `output.h5mu`. Output h5mu file", + "help_text": "Type: `file`, default: `$id.$key.output.h5mu`, example: `output.h5mu`. Output h5mu file." + , + "default": "$id.$key.output.h5mu" + } + + + , + "output_compression": { + "type": + "string", + "description": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. The compression format to be used on the output h5mu object", + "help_text": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. The compression format to be used on the output h5mu object.", + "enum": ["gzip", "lzf"] + + + } + + + , + "do_subset": { + "type": + "boolean", + "description": "Type: `boolean_true`, default: `false`. Whether to subset before storing the output", + "help_text": "Type: `boolean_true`, default: `false`. Whether to subset before storing the output." + , + "default": "False" + } + + + , + "obs_name_filter": { + "type": + "string", + "description": "Type: `string`, default: `filter_with_counts`. In which ", + "help_text": "Type: `string`, default: `filter_with_counts`. In which .obs slot to store a boolean array corresponding to which observations should be removed." + , + "default": "filter_with_counts" + } + + + , + "var_name_filter": { + "type": + "string", + "description": "Type: `string`, default: `filter_with_counts`. In which ", + "help_text": "Type: `string`, default: `filter_with_counts`. In which .var slot to store a boolean array corresponding to which variables should be removed." + , + "default": "filter_with_counts" + } + + +} +}, + + + "arguments" : { + "title": "Arguments", + "type": "object", + "description": "No description", + "properties": { + + + "min_counts": { + "type": + "integer", + "description": "Type: `integer`, example: `200`. Minimum number of counts captured per cell", + "help_text": "Type: `integer`, example: `200`. Minimum number of counts captured per cell." + + } + + + , + "max_counts": { + "type": + "integer", + "description": "Type: `integer`, example: `5000000`. Maximum number of counts captured per cell", + "help_text": "Type: `integer`, example: `5000000`. Maximum number of counts captured per cell." + + } + + + , + "min_genes_per_cell": { + "type": + "integer", + "description": "Type: `integer`, example: `200`. Minimum of non-zero values per cell", + "help_text": "Type: `integer`, example: `200`. Minimum of non-zero values per cell." + + } + + + , + "max_genes_per_cell": { + "type": + "integer", + "description": "Type: `integer`, example: `1500000`. 
Maximum of non-zero values per cell", + "help_text": "Type: `integer`, example: `1500000`. Maximum of non-zero values per cell." + + } + + + , + "min_cells_per_gene": { + "type": + "integer", + "description": "Type: `integer`, example: `3`. Minimum of non-zero values per gene", + "help_text": "Type: `integer`, example: `3`. Minimum of non-zero values per gene." + + } + + +} +}, + + + "nextflow input-output arguments" : { + "title": "Nextflow input-output arguments", + "type": "object", + "description": "Input/output parameters for Nextflow itself. Please note that both publishDir and publish_dir are supported but at least one has to be configured.", + "properties": { + + + "publish_dir": { + "type": + "string", + "description": "Type: `string`, required, example: `output/`. Path to an output directory", + "help_text": "Type: `string`, required, example: `output/`. Path to an output directory." + + } + + + , + "param_list": { + "type": + "string", + "description": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel", + "help_text": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel. A `param_list` can either be a list of maps, a csv file, a json file, a yaml file, or simply a yaml blob.\n\n* A list of maps (as-is) where the keys of each map corresponds to the arguments of the pipeline. Example: in a `nextflow.config` file: `param_list: [ [\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027], [\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027] ]`.\n* A csv file should have column names which correspond to the different arguments of this pipeline. Example: `--param_list data.csv` with columns `id,input`.\n* A json or a yaml file should be a list of maps, each of which has keys corresponding to the arguments of the pipeline. Example: `--param_list data.json` with contents `[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]`.\n* A yaml blob can also be passed directly as a string. Example: `--param_list \"[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]\"`.\n\nWhen passing a csv, json or yaml file, relative path names are relativized to the location of the parameter file. 
No relativation is performed when `param_list` is a list of maps (as-is) or a yaml blob.", + "hidden": true + + } + + +} +} +}, +"allOf": [ + + { + "$ref": "#/definitions/inputs" + }, + + { + "$ref": "#/definitions/outputs" + }, + + { + "$ref": "#/definitions/arguments" }, - "allOf": [ - { - "$ref": "#/definitions/inputs" - }, - { - "$ref": "#/definitions/outputs" - }, - { - "$ref": "#/definitions/arguments" - }, - { - "$ref": "#/definitions/nextflow input-output arguments" - } - ] + + { + "$ref": "#/definitions/nextflow input-output arguments" + } +] } diff --git a/target/nextflow/filter/filter_with_hvg/.config.vsh.yaml b/target/nextflow/filter/filter_with_hvg/.config.vsh.yaml index b0b39549609..68341ad1e35 100644 --- a/target/nextflow/filter/filter_with_hvg/.config.vsh.yaml +++ b/target/nextflow/filter/filter_with_hvg/.config.vsh.yaml @@ -1,7 +1,7 @@ functionality: name: "filter_with_hvg" namespace: "filter" - version: "0.12.3" + version: "0.12.4" authors: - name: "Dries De Maeyer" roles: @@ -347,6 +347,6 @@ info: output: "/home/runner/work/openpipeline/openpipeline/target/nextflow/filter/filter_with_hvg" executable: "/home/runner/work/openpipeline/openpipeline/target/nextflow/filter/filter_with_hvg/filter_with_hvg" viash_version: "0.7.5" - git_commit: "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" + git_commit: "a075b9f384e200b357c4c85801062a980ddb3383" git_remote: "https://github.com/openpipelines-bio/openpipeline" - git_tag: "0.12.2-3-g827d483cf7" + git_tag: "0.12.3-3-ga075b9f384" diff --git a/target/nextflow/filter/filter_with_hvg/main.nf b/target/nextflow/filter/filter_with_hvg/main.nf index feafe9151b2..053dd28099c 100644 --- a/target/nextflow/filter/filter_with_hvg/main.nf +++ b/target/nextflow/filter/filter_with_hvg/main.nf @@ -1,4 +1,4 @@ -// filter_with_hvg 0.12.3 +// filter_with_hvg 0.12.4 // // This wrapper script is auto-generated by viash 0.7.5 and is thus a derivative // work thereof. 
This software comes with ABSOLUTELY NO WARRANTY from Data @@ -28,7 +28,7 @@ thisConfig = processConfig(jsonSlurper.parseText('''{ "functionality" : { "name" : "filter_with_hvg", "namespace" : "filter", - "version" : "0.12.3", + "version" : "0.12.4", "authors" : [ { "name" : "Dries De Maeyer", @@ -428,9 +428,9 @@ thisConfig = processConfig(jsonSlurper.parseText('''{ "platform" : "nextflow", "output" : "/home/runner/work/openpipeline/openpipeline/target/nextflow/filter/filter_with_hvg", "viash_version" : "0.7.5", - "git_commit" : "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f", + "git_commit" : "a075b9f384e200b357c4c85801062a980ddb3383", "git_remote" : "https://github.com/openpipelines-bio/openpipeline", - "git_tag" : "0.12.2-3-g827d483cf7" + "git_tag" : "0.12.3-3-ga075b9f384" } }''')) diff --git a/target/nextflow/filter/filter_with_hvg/nextflow.config b/target/nextflow/filter/filter_with_hvg/nextflow.config index 15803ba13b5..c11fe73ba27 100644 --- a/target/nextflow/filter/filter_with_hvg/nextflow.config +++ b/target/nextflow/filter/filter_with_hvg/nextflow.config @@ -2,7 +2,7 @@ manifest { name = 'filter_with_hvg' mainScript = 'main.nf' nextflowVersion = '!>=20.12.1-edge' - version = '0.12.3' + version = '0.12.4' description = 'Annotate highly variable genes [Satija15] [Zheng17] [Stuart19].\n\nExpects logarithmized data, except when flavor=\'seurat_v3\' in which count data is expected.\n\nDepending on flavor, this reproduces the R-implementations of Seurat [Satija15], Cell Ranger [Zheng17], and Seurat v3 [Stuart19].\n\nFor the dispersion-based methods ([Satija15] and [Zheng17]), the normalized dispersion is obtained by scaling with the mean and standard deviation of the dispersions for genes falling into a given bin for mean expression of genes. This means that for each bin of mean expression, highly variable genes are selected.\n\nFor [Stuart19], a normalized variance for each gene is computed. First, the data are standardized (i.e., z-score normalization per feature) with a regularized standard deviation. Next, the normalized variance is computed as the variance of each gene after the transformation. Genes are ranked by the normalized variance.\n' author = 'Dries De Maeyer, Robrecht Cannoodt' } diff --git a/target/nextflow/filter/filter_with_hvg/nextflow_schema.json b/target/nextflow/filter/filter_with_hvg/nextflow_schema.json index a7b6156f7ca..3f6658be745 100644 --- a/target/nextflow/filter/filter_with_hvg/nextflow_schema.json +++ b/target/nextflow/filter/filter_with_hvg/nextflow_schema.json @@ -1,162 +1,245 @@ { - "$schema": "http://json-schema.org/draft-07/schema", - "title": "filter_with_hvg", - "description": "Annotate highly variable genes [Satija15] [Zheng17] [Stuart19].\n\nExpects logarithmized data, except when flavor=\u0027seurat_v3\u0027 in which count data is expected.\n\nDepending on flavor, this reproduces the R-implementations of Seurat [Satija15], Cell Ranger [Zheng17], and Seurat v3 [Stuart19].\n\nFor the dispersion-based methods ([Satija15] and [Zheng17]), the normalized dispersion is obtained by scaling with the mean and standard deviation of the dispersions for genes falling into a given bin for mean expression of genes. This means that for each bin of mean expression, highly variable genes are selected.\n\nFor [Stuart19], a normalized variance for each gene is computed. First, the data are standardized (i.e., z-score normalization per feature) with a regularized standard deviation. 
Next, the normalized variance is computed as the variance of each gene after the transformation. Genes are ranked by the normalized variance.\n", +"$schema": "http://json-schema.org/draft-07/schema", +"title": "filter_with_hvg", +"description": "Annotate highly variable genes [Satija15] [Zheng17] [Stuart19].\n\nExpects logarithmized data, except when flavor=\u0027seurat_v3\u0027 in which count data is expected.\n\nDepending on flavor, this reproduces the R-implementations of Seurat [Satija15], Cell Ranger [Zheng17], and Seurat v3 [Stuart19].\n\nFor the dispersion-based methods ([Satija15] and [Zheng17]), the normalized dispersion is obtained by scaling with the mean and standard deviation of the dispersions for genes falling into a given bin for mean expression of genes. This means that for each bin of mean expression, highly variable genes are selected.\n\nFor [Stuart19], a normalized variance for each gene is computed. First, the data are standardized (i.e., z-score normalization per feature) with a regularized standard deviation. Next, the normalized variance is computed as the variance of each gene after the transformation. Genes are ranked by the normalized variance.\n", +"type": "object", +"definitions": { + + + + "arguments" : { + "title": "Arguments", "type": "object", - "definitions": { - "arguments" : { - "title": "Arguments", - "type": "object", - "description": "No description", - "properties": { - - "input": { - "type": "string", - "description": "Type: `file`, required, example: `input.h5mu`. Input h5mu file", - "help_text": "Type: `file`, required, example: `input.h5mu`. Input h5mu file" - }, - - "modality": { - "type": "string", - "description": "Type: `string`, default: `rna`. ", - "help_text": "Type: `string`, default: `rna`. ", - "default": "rna" - }, - - "layer": { - "type": "string", - "description": "Type: `string`. use adata", - "help_text": "Type: `string`. use adata.layers[layer] for expression values instead of adata.X." - }, - - "output": { - "type": "string", - "description": "Type: `file`, default: `$id.$key.output.h5mu`, example: `output.h5mu`. Output h5mu file", - "help_text": "Type: `file`, default: `$id.$key.output.h5mu`, example: `output.h5mu`. Output h5mu file.", - "default": "$id.$key.output.h5mu" - }, - - "output_compression": { - "type": "string", - "description": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. The compression format to be used on the output h5mu object", - "help_text": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. The compression format to be used on the output h5mu object.", - "enum": ["gzip", "lzf"] + "description": "No description", + "properties": { + + + "input": { + "type": + "string", + "description": "Type: `file`, required, example: `input.h5mu`. Input h5mu file", + "help_text": "Type: `file`, required, example: `input.h5mu`. Input h5mu file" - }, - - "var_name_filter": { - "type": "string", - "description": "Type: `string`, default: `filter_with_hvg`. In which ", - "help_text": "Type: `string`, default: `filter_with_hvg`. In which .var slot to store a boolean array corresponding to which observations should be filtered out.", - "default": "filter_with_hvg" - }, - - "varm_name": { - "type": "string", - "description": "Type: `string`, default: `hvg`. In which ", - "help_text": "Type: `string`, default: `hvg`. In which .varm slot to store additional metadata.", - "default": "hvg" - }, - - "do_subset": { - "type": "boolean", - "description": "Type: `boolean_true`, default: `false`. 
Whether to subset before storing the output", - "help_text": "Type: `boolean_true`, default: `false`. Whether to subset before storing the output.", - "default": "False" - }, - - "flavor": { - "type": "string", - "description": "Type: `string`, default: `seurat`, choices: ``seurat`, `cell_ranger`, `seurat_v3``. Choose the flavor for identifying highly variable genes", - "help_text": "Type: `string`, default: `seurat`, choices: ``seurat`, `cell_ranger`, `seurat_v3``. Choose the flavor for identifying highly variable genes. For the dispersion based methods\nin their default workflows, Seurat passes the cutoffs whereas Cell Ranger passes n_top_genes.\n", - "enum": ["seurat", "cell_ranger", "seurat_v3"] + } + + + , + "modality": { + "type": + "string", + "description": "Type: `string`, default: `rna`. ", + "help_text": "Type: `string`, default: `rna`. " , - "default": "seurat" - }, - - "n_top_genes": { - "type": "integer", - "description": "Type: `integer`. Number of highly-variable genes to keep", - "help_text": "Type: `integer`. Number of highly-variable genes to keep. Mandatory if flavor=\u0027seurat_v3\u0027." - }, - - "min_mean": { - "type": "number", - "description": "Type: `double`, default: `0.0125`. If n_top_genes is defined, this and all other cutoffs for the means and the normalized dispersions are ignored", - "help_text": "Type: `double`, default: `0.0125`. If n_top_genes is defined, this and all other cutoffs for the means and the normalized dispersions are ignored. Ignored if flavor=\u0027seurat_v3\u0027.", - "default": "0.0125" - }, - - "max_mean": { - "type": "number", - "description": "Type: `double`, default: `3`. If n_top_genes is defined, this and all other cutoffs for the means and the normalized dispersions are ignored", - "help_text": "Type: `double`, default: `3`. If n_top_genes is defined, this and all other cutoffs for the means and the normalized dispersions are ignored. Ignored if flavor=\u0027seurat_v3\u0027.", - "default": "3" - }, - - "min_disp": { - "type": "number", - "description": "Type: `double`, default: `0.5`. If n_top_genes is defined, this and all other cutoffs for the means and the normalized dispersions are ignored", - "help_text": "Type: `double`, default: `0.5`. If n_top_genes is defined, this and all other cutoffs for the means and the normalized dispersions are ignored. Ignored if flavor=\u0027seurat_v3\u0027.", - "default": "0.5" - }, - - "max_disp": { - "type": "number", - "description": "Type: `double`. If n_top_genes is defined, this and all other cutoffs for the means and the normalized dispersions are ignored", - "help_text": "Type: `double`. If n_top_genes is defined, this and all other cutoffs for the means and the normalized dispersions are ignored. Ignored if flavor=\u0027seurat_v3\u0027. Default is +inf." - }, - - "span": { - "type": "number", - "description": "Type: `double`, default: `0.3`. The fraction of the data (cells) used when estimating the variance in the loess model fit if flavor=\u0027seurat_v3\u0027", - "help_text": "Type: `double`, default: `0.3`. The fraction of the data (cells) used when estimating the variance in the loess model fit if flavor=\u0027seurat_v3\u0027.", - "default": "0.3" - }, - - "n_bins": { - "type": "integer", - "description": "Type: `integer`, default: `20`. Number of bins for binning the mean gene expression", - "help_text": "Type: `integer`, default: `20`. Number of bins for binning the mean gene expression. Normalization is done with respect to each bin. 
If just a single gene falls into a bin, the normalized dispersion is artificially set to 1.", - "default": "20" - }, - - "obs_batch_key": { - "type": "string", - "description": "Type: `string`. If specified, highly-variable genes are selected within each batch separately and merged", - "help_text": "Type: `string`. If specified, highly-variable genes are selected within each batch separately and merged. This simple \nprocess avoids the selection of batch-specific genes and acts as a lightweight batch correction method. \nFor all flavors, genes are first sorted by how many batches they are a HVG. For dispersion-based flavors \nties are broken by normalized dispersion. If flavor = \u0027seurat_v3\u0027, ties are broken by the median (across\nbatches) rank based on within-batch normalized variance.\n" - } - - } - }, - "nextflow input-output arguments" : { - "title": "Nextflow input-output arguments", - "type": "object", - "description": "Input/output parameters for Nextflow itself. Please note that both publishDir and publish_dir are supported but at least one has to be configured.", - "properties": { - - "publish_dir": { - "type": "string", - "description": "Type: `string`, required, example: `output/`. Path to an output directory", - "help_text": "Type: `string`, required, example: `output/`. Path to an output directory." - }, - - "param_list": { - "type": "string", - "description": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel", - "help_text": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel. A `param_list` can either be a list of maps, a csv file, a json file, a yaml file, or simply a yaml blob.\n\n* A list of maps (as-is) where the keys of each map corresponds to the arguments of the pipeline. Example: in a `nextflow.config` file: `param_list: [ [\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027], [\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027] ]`.\n* A csv file should have column names which correspond to the different arguments of this pipeline. Example: `--param_list data.csv` with columns `id,input`.\n* A json or a yaml file should be a list of maps, each of which has keys corresponding to the arguments of the pipeline. Example: `--param_list data.json` with contents `[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]`.\n* A yaml blob can also be passed directly as a string. Example: `--param_list \"[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]\"`.\n\nWhen passing a csv, json or yaml file, relative path names are relativized to the location of the parameter file. No relativation is performed when `param_list` is a list of maps (as-is) or a yaml blob.", - "hidden": true - } - - } - } + "default": "rna" + } + + + , + "layer": { + "type": + "string", + "description": "Type: `string`. use adata", + "help_text": "Type: `string`. use adata.layers[layer] for expression values instead of adata.X." + + } + + + , + "output": { + "type": + "string", + "description": "Type: `file`, default: `$id.$key.output.h5mu`, example: `output.h5mu`. Output h5mu file", + "help_text": "Type: `file`, default: `$id.$key.output.h5mu`, example: `output.h5mu`. Output h5mu file." 
+ , + "default": "$id.$key.output.h5mu" + } + + + , + "output_compression": { + "type": + "string", + "description": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. The compression format to be used on the output h5mu object", + "help_text": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. The compression format to be used on the output h5mu object.", + "enum": ["gzip", "lzf"] + + + } + + + , + "var_name_filter": { + "type": + "string", + "description": "Type: `string`, default: `filter_with_hvg`. In which ", + "help_text": "Type: `string`, default: `filter_with_hvg`. In which .var slot to store a boolean array corresponding to which observations should be filtered out." + , + "default": "filter_with_hvg" + } + + + , + "varm_name": { + "type": + "string", + "description": "Type: `string`, default: `hvg`. In which ", + "help_text": "Type: `string`, default: `hvg`. In which .varm slot to store additional metadata." + , + "default": "hvg" + } + + + , + "do_subset": { + "type": + "boolean", + "description": "Type: `boolean_true`, default: `false`. Whether to subset before storing the output", + "help_text": "Type: `boolean_true`, default: `false`. Whether to subset before storing the output." + , + "default": "False" + } + + + , + "flavor": { + "type": + "string", + "description": "Type: `string`, default: `seurat`, choices: ``seurat`, `cell_ranger`, `seurat_v3``. Choose the flavor for identifying highly variable genes", + "help_text": "Type: `string`, default: `seurat`, choices: ``seurat`, `cell_ranger`, `seurat_v3``. Choose the flavor for identifying highly variable genes. For the dispersion based methods\nin their default workflows, Seurat passes the cutoffs whereas Cell Ranger passes n_top_genes.\n", + "enum": ["seurat", "cell_ranger", "seurat_v3"] + + , + "default": "seurat" + } + + + , + "n_top_genes": { + "type": + "integer", + "description": "Type: `integer`. Number of highly-variable genes to keep", + "help_text": "Type: `integer`. Number of highly-variable genes to keep. Mandatory if flavor=\u0027seurat_v3\u0027." + + } + + + , + "min_mean": { + "type": + "number", + "description": "Type: `double`, default: `0.0125`. If n_top_genes is defined, this and all other cutoffs for the means and the normalized dispersions are ignored", + "help_text": "Type: `double`, default: `0.0125`. If n_top_genes is defined, this and all other cutoffs for the means and the normalized dispersions are ignored. Ignored if flavor=\u0027seurat_v3\u0027." + , + "default": "0.0125" + } + + + , + "max_mean": { + "type": + "number", + "description": "Type: `double`, default: `3`. If n_top_genes is defined, this and all other cutoffs for the means and the normalized dispersions are ignored", + "help_text": "Type: `double`, default: `3`. If n_top_genes is defined, this and all other cutoffs for the means and the normalized dispersions are ignored. Ignored if flavor=\u0027seurat_v3\u0027." + , + "default": "3" + } + + + , + "min_disp": { + "type": + "number", + "description": "Type: `double`, default: `0.5`. If n_top_genes is defined, this and all other cutoffs for the means and the normalized dispersions are ignored", + "help_text": "Type: `double`, default: `0.5`. If n_top_genes is defined, this and all other cutoffs for the means and the normalized dispersions are ignored. Ignored if flavor=\u0027seurat_v3\u0027." + , + "default": "0.5" + } + + + , + "max_disp": { + "type": + "number", + "description": "Type: `double`. 
If n_top_genes is defined, this and all other cutoffs for the means and the normalized dispersions are ignored", + "help_text": "Type: `double`. If n_top_genes is defined, this and all other cutoffs for the means and the normalized dispersions are ignored. Ignored if flavor=\u0027seurat_v3\u0027. Default is +inf." + + } + + + , + "span": { + "type": + "number", + "description": "Type: `double`, default: `0.3`. The fraction of the data (cells) used when estimating the variance in the loess model fit if flavor=\u0027seurat_v3\u0027", + "help_text": "Type: `double`, default: `0.3`. The fraction of the data (cells) used when estimating the variance in the loess model fit if flavor=\u0027seurat_v3\u0027." + , + "default": "0.3" + } + + + , + "n_bins": { + "type": + "integer", + "description": "Type: `integer`, default: `20`. Number of bins for binning the mean gene expression", + "help_text": "Type: `integer`, default: `20`. Number of bins for binning the mean gene expression. Normalization is done with respect to each bin. If just a single gene falls into a bin, the normalized dispersion is artificially set to 1." + , + "default": "20" + } + + + , + "obs_batch_key": { + "type": + "string", + "description": "Type: `string`. If specified, highly-variable genes are selected within each batch separately and merged", + "help_text": "Type: `string`. If specified, highly-variable genes are selected within each batch separately and merged. This simple \nprocess avoids the selection of batch-specific genes and acts as a lightweight batch correction method. \nFor all flavors, genes are first sorted by how many batches they are a HVG. For dispersion-based flavors \nties are broken by normalized dispersion. If flavor = \u0027seurat_v3\u0027, ties are broken by the median (across\nbatches) rank based on within-batch normalized variance.\n" + + } + + +} +}, + + + "nextflow input-output arguments" : { + "title": "Nextflow input-output arguments", + "type": "object", + "description": "Input/output parameters for Nextflow itself. Please note that both publishDir and publish_dir are supported but at least one has to be configured.", + "properties": { + + + "publish_dir": { + "type": + "string", + "description": "Type: `string`, required, example: `output/`. Path to an output directory", + "help_text": "Type: `string`, required, example: `output/`. Path to an output directory." + + } + + + , + "param_list": { + "type": + "string", + "description": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel", + "help_text": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel. A `param_list` can either be a list of maps, a csv file, a json file, a yaml file, or simply a yaml blob.\n\n* A list of maps (as-is) where the keys of each map corresponds to the arguments of the pipeline. Example: in a `nextflow.config` file: `param_list: [ [\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027], [\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027] ]`.\n* A csv file should have column names which correspond to the different arguments of this pipeline. Example: `--param_list data.csv` with columns `id,input`.\n* A json or a yaml file should be a list of maps, each of which has keys corresponding to the arguments of the pipeline. 
Example: `--param_list data.json` with contents `[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]`.\n* A yaml blob can also be passed directly as a string. Example: `--param_list \"[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]\"`.\n\nWhen passing a csv, json or yaml file, relative path names are relativized to the location of the parameter file. No relativation is performed when `param_list` is a list of maps (as-is) or a yaml blob.", + "hidden": true + + } + + +} +} +}, +"allOf": [ + + { + "$ref": "#/definitions/arguments" }, - "allOf": [ - { - "$ref": "#/definitions/arguments" - }, - { - "$ref": "#/definitions/nextflow input-output arguments" - } - ] + + { + "$ref": "#/definitions/nextflow input-output arguments" + } +] } diff --git a/target/nextflow/filter/filter_with_scrublet/.config.vsh.yaml b/target/nextflow/filter/filter_with_scrublet/.config.vsh.yaml index 1d6bb387537..dc82bfada95 100644 --- a/target/nextflow/filter/filter_with_scrublet/.config.vsh.yaml +++ b/target/nextflow/filter/filter_with_scrublet/.config.vsh.yaml @@ -1,7 +1,7 @@ functionality: name: "filter_with_scrublet" namespace: "filter" - version: "0.12.3" + version: "0.12.4" authors: - name: "Dries De Maeyer" roles: @@ -299,6 +299,6 @@ info: output: "/home/runner/work/openpipeline/openpipeline/target/nextflow/filter/filter_with_scrublet" executable: "/home/runner/work/openpipeline/openpipeline/target/nextflow/filter/filter_with_scrublet/filter_with_scrublet" viash_version: "0.7.5" - git_commit: "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" + git_commit: "a075b9f384e200b357c4c85801062a980ddb3383" git_remote: "https://github.com/openpipelines-bio/openpipeline" - git_tag: "0.12.2-3-g827d483cf7" + git_tag: "0.12.3-3-ga075b9f384" diff --git a/target/nextflow/filter/filter_with_scrublet/main.nf b/target/nextflow/filter/filter_with_scrublet/main.nf index 99f3b551e8d..7da8d52ef4e 100644 --- a/target/nextflow/filter/filter_with_scrublet/main.nf +++ b/target/nextflow/filter/filter_with_scrublet/main.nf @@ -1,4 +1,4 @@ -// filter_with_scrublet 0.12.3 +// filter_with_scrublet 0.12.4 // // This wrapper script is auto-generated by viash 0.7.5 and is thus a derivative // work thereof. 
This software comes with ABSOLUTELY NO WARRANTY from Data @@ -28,7 +28,7 @@ thisConfig = processConfig(jsonSlurper.parseText('''{ "functionality" : { "name" : "filter_with_scrublet", "namespace" : "filter", - "version" : "0.12.3", + "version" : "0.12.4", "authors" : [ { "name" : "Dries De Maeyer", @@ -389,9 +389,9 @@ thisConfig = processConfig(jsonSlurper.parseText('''{ "platform" : "nextflow", "output" : "/home/runner/work/openpipeline/openpipeline/target/nextflow/filter/filter_with_scrublet", "viash_version" : "0.7.5", - "git_commit" : "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f", + "git_commit" : "a075b9f384e200b357c4c85801062a980ddb3383", "git_remote" : "https://github.com/openpipelines-bio/openpipeline", - "git_tag" : "0.12.2-3-g827d483cf7" + "git_tag" : "0.12.3-3-ga075b9f384" } }''')) diff --git a/target/nextflow/filter/filter_with_scrublet/nextflow.config b/target/nextflow/filter/filter_with_scrublet/nextflow.config index eaaa6adac90..9975a611561 100644 --- a/target/nextflow/filter/filter_with_scrublet/nextflow.config +++ b/target/nextflow/filter/filter_with_scrublet/nextflow.config @@ -2,7 +2,7 @@ manifest { name = 'filter_with_scrublet' mainScript = 'main.nf' nextflowVersion = '!>=20.12.1-edge' - version = '0.12.3' + version = '0.12.4' description = 'Doublet detection using the Scrublet method (Wolock, Lopez and Klein, 2019).\nThe method tests for potential doublets by using the expression profiles of\ncells to generate synthetic potential doubles which are tested against cells. \nThe method returns a "doublet score" on which it calls for potential doublets.\n\nFor the source code please visit https://github.com/AllonKleinLab/scrublet.\n\nFor 10x we expect the doublet rates to be:\n Multiplet Rate (%) - # of Cells Loaded - # of Cells Recovered\n ~0.4% ~800 ~500\n ~0.8% ~1,600 ~1,000\n ~1.6% ~3,200 ~2,000\n ~2.3% ~4,800 ~3,000\n ~3.1% ~6,400 ~4,000\n ~3.9% ~8,000 ~5,000\n ~4.6% ~9,600 ~6,000\n ~5.4% ~11,200 ~7,000\n ~6.1% ~12,800 ~8,000\n ~6.9% ~14,400 ~9,000\n ~7.6% ~16,000 ~10,000\n' author = 'Dries De Maeyer, Robrecht Cannoodt' } diff --git a/target/nextflow/filter/filter_with_scrublet/nextflow_schema.json b/target/nextflow/filter/filter_with_scrublet/nextflow_schema.json index ed54b2ce403..5350ded357f 100644 --- a/target/nextflow/filter/filter_with_scrublet/nextflow_schema.json +++ b/target/nextflow/filter/filter_with_scrublet/nextflow_schema.json @@ -1,136 +1,203 @@ { - "$schema": "http://json-schema.org/draft-07/schema", - "title": "filter_with_scrublet", - "description": "Doublet detection using the Scrublet method (Wolock, Lopez and Klein, 2019).\nThe method tests for potential doublets by using the expression profiles of\ncells to generate synthetic potential doubles which are tested against cells. 
\nThe method returns a \"doublet score\" on which it calls for potential doublets.\n\nFor the source code please visit https://github.com/AllonKleinLab/scrublet.\n\nFor 10x we expect the doublet rates to be:\n Multiplet Rate (%) - # of Cells Loaded - # of Cells Recovered\n ~0.4% ~800 ~500\n ~0.8% ~1,600 ~1,000\n ~1.6% ~3,200 ~2,000\n ~2.3% ~4,800 ~3,000\n ~3.1% ~6,400 ~4,000\n ~3.9% ~8,000 ~5,000\n ~4.6% ~9,600 ~6,000\n ~5.4% ~11,200 ~7,000\n ~6.1% ~12,800 ~8,000\n ~6.9% ~14,400 ~9,000\n ~7.6% ~16,000 ~10,000\n", +"$schema": "http://json-schema.org/draft-07/schema", +"title": "filter_with_scrublet", +"description": "Doublet detection using the Scrublet method (Wolock, Lopez and Klein, 2019).\nThe method tests for potential doublets by using the expression profiles of\ncells to generate synthetic potential doubles which are tested against cells. \nThe method returns a \"doublet score\" on which it calls for potential doublets.\n\nFor the source code please visit https://github.com/AllonKleinLab/scrublet.\n\nFor 10x we expect the doublet rates to be:\n Multiplet Rate (%) - # of Cells Loaded - # of Cells Recovered\n ~0.4% ~800 ~500\n ~0.8% ~1,600 ~1,000\n ~1.6% ~3,200 ~2,000\n ~2.3% ~4,800 ~3,000\n ~3.1% ~6,400 ~4,000\n ~3.9% ~8,000 ~5,000\n ~4.6% ~9,600 ~6,000\n ~5.4% ~11,200 ~7,000\n ~6.1% ~12,800 ~8,000\n ~6.9% ~14,400 ~9,000\n ~7.6% ~16,000 ~10,000\n", +"type": "object", +"definitions": { + + + + "arguments" : { + "title": "Arguments", "type": "object", - "definitions": { - "arguments" : { - "title": "Arguments", - "type": "object", - "description": "No description", - "properties": { - - "input": { - "type": "string", - "description": "Type: `file`, required, example: `input.h5mu`. Input h5mu file", - "help_text": "Type: `file`, required, example: `input.h5mu`. Input h5mu file" - }, - - "modality": { - "type": "string", - "description": "Type: `string`, default: `rna`. ", - "help_text": "Type: `string`, default: `rna`. ", - "default": "rna" - }, - - "output": { - "type": "string", - "description": "Type: `file`, default: `$id.$key.output.h5mu`, example: `output.h5mu`. Output h5mu file", - "help_text": "Type: `file`, default: `$id.$key.output.h5mu`, example: `output.h5mu`. Output h5mu file.", - "default": "$id.$key.output.h5mu" - }, - - "output_compression": { - "type": "string", - "description": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. The compression format to be used on the output h5mu object", - "help_text": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. The compression format to be used on the output h5mu object.", - "enum": ["gzip", "lzf"] + "description": "No description", + "properties": { + + + "input": { + "type": + "string", + "description": "Type: `file`, required, example: `input.h5mu`. Input h5mu file", + "help_text": "Type: `file`, required, example: `input.h5mu`. Input h5mu file" - }, - - "obs_name_filter": { - "type": "string", - "description": "Type: `string`, default: `filter_with_scrublet`. In which ", - "help_text": "Type: `string`, default: `filter_with_scrublet`. In which .obs slot to store a boolean array corresponding to which observations should be filtered out.", - "default": "filter_with_scrublet" - }, - - "do_subset": { - "type": "boolean", - "description": "Type: `boolean_true`, default: `false`. Whether to subset before storing the output", - "help_text": "Type: `boolean_true`, default: `false`. 
Whether to subset before storing the output.", - "default": "False" - }, - - "obs_name_doublet_score": { - "type": "string", - "description": "Type: `string`, default: `scrublet_doublet_score`. Name of the doublet scores column in the obs slot of the returned object", - "help_text": "Type: `string`, default: `scrublet_doublet_score`. Name of the doublet scores column in the obs slot of the returned object.", - "default": "scrublet_doublet_score" - }, - - "min_counts": { - "type": "integer", - "description": "Type: `integer`, default: `2`. The number of minimal UMI counts per cell that have to be present for initial cell detection", - "help_text": "Type: `integer`, default: `2`. The number of minimal UMI counts per cell that have to be present for initial cell detection.", - "default": "2" - }, - - "min_cells": { - "type": "integer", - "description": "Type: `integer`, default: `3`. The number of cells in which UMIs for a gene were detected", - "help_text": "Type: `integer`, default: `3`. The number of cells in which UMIs for a gene were detected.", - "default": "3" - }, - - "min_gene_variablity_percent": { - "type": "number", - "description": "Type: `double`, default: `85`. Used for gene filtering prior to PCA", - "help_text": "Type: `double`, default: `85`. Used for gene filtering prior to PCA. Keep the most highly variable genes (in the top min_gene_variability_pctl percentile), as measured by the v-statistic [Klein et al., Cell 2015].", - "default": "85" - }, - - "num_pca_components": { - "type": "integer", - "description": "Type: `integer`, default: `30`. Number of principal components to use during PCA dimensionality reduction", - "help_text": "Type: `integer`, default: `30`. Number of principal components to use during PCA dimensionality reduction.", - "default": "30" - }, - - "distance_metric": { - "type": "string", - "description": "Type: `string`, default: `euclidean`. The distance metric used for computing similarities", - "help_text": "Type: `string`, default: `euclidean`. The distance metric used for computing similarities.", - "default": "euclidean" - }, - - "allow_automatic_threshold_detection_fail": { - "type": "boolean", - "description": "Type: `boolean_true`, default: `false`. When scrublet fails to automatically determine the double score threshold, \nallow the component to continue and set the output columns to NA", - "help_text": "Type: `boolean_true`, default: `false`. When scrublet fails to automatically determine the double score threshold, \nallow the component to continue and set the output columns to NA.\n", - "default": "False" - } - - } - }, - "nextflow input-output arguments" : { - "title": "Nextflow input-output arguments", - "type": "object", - "description": "Input/output parameters for Nextflow itself. Please note that both publishDir and publish_dir are supported but at least one has to be configured.", - "properties": { - - "publish_dir": { - "type": "string", - "description": "Type: `string`, required, example: `output/`. Path to an output directory", - "help_text": "Type: `string`, required, example: `output/`. Path to an output directory." - }, - - "param_list": { - "type": "string", - "description": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel", - "help_text": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel. 
A `param_list` can either be a list of maps, a csv file, a json file, a yaml file, or simply a yaml blob.\n\n* A list of maps (as-is) where the keys of each map corresponds to the arguments of the pipeline. Example: in a `nextflow.config` file: `param_list: [ [\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027], [\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027] ]`.\n* A csv file should have column names which correspond to the different arguments of this pipeline. Example: `--param_list data.csv` with columns `id,input`.\n* A json or a yaml file should be a list of maps, each of which has keys corresponding to the arguments of the pipeline. Example: `--param_list data.json` with contents `[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]`.\n* A yaml blob can also be passed directly as a string. Example: `--param_list \"[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]\"`.\n\nWhen passing a csv, json or yaml file, relative path names are relativized to the location of the parameter file. No relativation is performed when `param_list` is a list of maps (as-is) or a yaml blob.", - "hidden": true - } - - } - } + } + + + , + "modality": { + "type": + "string", + "description": "Type: `string`, default: `rna`. ", + "help_text": "Type: `string`, default: `rna`. " + , + "default": "rna" + } + + + , + "output": { + "type": + "string", + "description": "Type: `file`, default: `$id.$key.output.h5mu`, example: `output.h5mu`. Output h5mu file", + "help_text": "Type: `file`, default: `$id.$key.output.h5mu`, example: `output.h5mu`. Output h5mu file." + , + "default": "$id.$key.output.h5mu" + } + + + , + "output_compression": { + "type": + "string", + "description": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. The compression format to be used on the output h5mu object", + "help_text": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. The compression format to be used on the output h5mu object.", + "enum": ["gzip", "lzf"] + + + } + + + , + "obs_name_filter": { + "type": + "string", + "description": "Type: `string`, default: `filter_with_scrublet`. In which ", + "help_text": "Type: `string`, default: `filter_with_scrublet`. In which .obs slot to store a boolean array corresponding to which observations should be filtered out." + , + "default": "filter_with_scrublet" + } + + + , + "do_subset": { + "type": + "boolean", + "description": "Type: `boolean_true`, default: `false`. Whether to subset before storing the output", + "help_text": "Type: `boolean_true`, default: `false`. Whether to subset before storing the output." + , + "default": "False" + } + + + , + "obs_name_doublet_score": { + "type": + "string", + "description": "Type: `string`, default: `scrublet_doublet_score`. Name of the doublet scores column in the obs slot of the returned object", + "help_text": "Type: `string`, default: `scrublet_doublet_score`. Name of the doublet scores column in the obs slot of the returned object." + , + "default": "scrublet_doublet_score" + } + + + , + "min_counts": { + "type": + "integer", + "description": "Type: `integer`, default: `2`. The number of minimal UMI counts per cell that have to be present for initial cell detection", + "help_text": "Type: `integer`, default: `2`. 
The number of minimal UMI counts per cell that have to be present for initial cell detection." + , + "default": "2" + } + + + , + "min_cells": { + "type": + "integer", + "description": "Type: `integer`, default: `3`. The number of cells in which UMIs for a gene were detected", + "help_text": "Type: `integer`, default: `3`. The number of cells in which UMIs for a gene were detected." + , + "default": "3" + } + + + , + "min_gene_variablity_percent": { + "type": + "number", + "description": "Type: `double`, default: `85`. Used for gene filtering prior to PCA", + "help_text": "Type: `double`, default: `85`. Used for gene filtering prior to PCA. Keep the most highly variable genes (in the top min_gene_variability_pctl percentile), as measured by the v-statistic [Klein et al., Cell 2015]." + , + "default": "85" + } + + + , + "num_pca_components": { + "type": + "integer", + "description": "Type: `integer`, default: `30`. Number of principal components to use during PCA dimensionality reduction", + "help_text": "Type: `integer`, default: `30`. Number of principal components to use during PCA dimensionality reduction." + , + "default": "30" + } + + + , + "distance_metric": { + "type": + "string", + "description": "Type: `string`, default: `euclidean`. The distance metric used for computing similarities", + "help_text": "Type: `string`, default: `euclidean`. The distance metric used for computing similarities." + , + "default": "euclidean" + } + + + , + "allow_automatic_threshold_detection_fail": { + "type": + "boolean", + "description": "Type: `boolean_true`, default: `false`. When scrublet fails to automatically determine the double score threshold, \nallow the component to continue and set the output columns to NA", + "help_text": "Type: `boolean_true`, default: `false`. When scrublet fails to automatically determine the double score threshold, \nallow the component to continue and set the output columns to NA.\n" + , + "default": "False" + } + + +} +}, + + + "nextflow input-output arguments" : { + "title": "Nextflow input-output arguments", + "type": "object", + "description": "Input/output parameters for Nextflow itself. Please note that both publishDir and publish_dir are supported but at least one has to be configured.", + "properties": { + + + "publish_dir": { + "type": + "string", + "description": "Type: `string`, required, example: `output/`. Path to an output directory", + "help_text": "Type: `string`, required, example: `output/`. Path to an output directory." + + } + + + , + "param_list": { + "type": + "string", + "description": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel", + "help_text": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel. A `param_list` can either be a list of maps, a csv file, a json file, a yaml file, or simply a yaml blob.\n\n* A list of maps (as-is) where the keys of each map corresponds to the arguments of the pipeline. Example: in a `nextflow.config` file: `param_list: [ [\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027], [\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027] ]`.\n* A csv file should have column names which correspond to the different arguments of this pipeline. Example: `--param_list data.csv` with columns `id,input`.\n* A json or a yaml file should be a list of maps, each of which has keys corresponding to the arguments of the pipeline. 
Example: `--param_list data.json` with contents `[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]`.\n* A yaml blob can also be passed directly as a string. Example: `--param_list \"[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]\"`.\n\nWhen passing a csv, json or yaml file, relative path names are relativized to the location of the parameter file. No relativation is performed when `param_list` is a list of maps (as-is) or a yaml blob.", + "hidden": true + + } + + +} +} +}, +"allOf": [ + + { + "$ref": "#/definitions/arguments" }, - "allOf": [ - { - "$ref": "#/definitions/arguments" - }, - { - "$ref": "#/definitions/nextflow input-output arguments" - } - ] + + { + "$ref": "#/definitions/nextflow input-output arguments" + } +] } diff --git a/target/nextflow/filter/remove_modality/.config.vsh.yaml b/target/nextflow/filter/remove_modality/.config.vsh.yaml index 98d5c843268..a8f05f5f5dd 100644 --- a/target/nextflow/filter/remove_modality/.config.vsh.yaml +++ b/target/nextflow/filter/remove_modality/.config.vsh.yaml @@ -1,7 +1,7 @@ functionality: name: "remove_modality" namespace: "filter" - version: "0.12.3" + version: "0.12.4" authors: - name: "Dries Schaumont" roles: @@ -166,6 +166,6 @@ info: output: "/home/runner/work/openpipeline/openpipeline/target/nextflow/filter/remove_modality" executable: "/home/runner/work/openpipeline/openpipeline/target/nextflow/filter/remove_modality/remove_modality" viash_version: "0.7.5" - git_commit: "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" + git_commit: "a075b9f384e200b357c4c85801062a980ddb3383" git_remote: "https://github.com/openpipelines-bio/openpipeline" - git_tag: "0.12.2-3-g827d483cf7" + git_tag: "0.12.3-3-ga075b9f384" diff --git a/target/nextflow/filter/remove_modality/main.nf b/target/nextflow/filter/remove_modality/main.nf index 1d98b535470..28a25387313 100644 --- a/target/nextflow/filter/remove_modality/main.nf +++ b/target/nextflow/filter/remove_modality/main.nf @@ -1,4 +1,4 @@ -// remove_modality 0.12.3 +// remove_modality 0.12.4 // // This wrapper script is auto-generated by viash 0.7.5 and is thus a derivative // work thereof. 
This software comes with ABSOLUTELY NO WARRANTY from Data @@ -27,7 +27,7 @@ thisConfig = processConfig(jsonSlurper.parseText('''{ "functionality" : { "name" : "remove_modality", "namespace" : "filter", - "version" : "0.12.3", + "version" : "0.12.4", "authors" : [ { "name" : "Dries Schaumont", @@ -241,9 +241,9 @@ thisConfig = processConfig(jsonSlurper.parseText('''{ "platform" : "nextflow", "output" : "/home/runner/work/openpipeline/openpipeline/target/nextflow/filter/remove_modality", "viash_version" : "0.7.5", - "git_commit" : "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f", + "git_commit" : "a075b9f384e200b357c4c85801062a980ddb3383", "git_remote" : "https://github.com/openpipelines-bio/openpipeline", - "git_tag" : "0.12.2-3-g827d483cf7" + "git_tag" : "0.12.3-3-ga075b9f384" } }''')) diff --git a/target/nextflow/filter/remove_modality/nextflow.config b/target/nextflow/filter/remove_modality/nextflow.config index 9b3ea4864fc..a01d820977d 100644 --- a/target/nextflow/filter/remove_modality/nextflow.config +++ b/target/nextflow/filter/remove_modality/nextflow.config @@ -2,7 +2,7 @@ manifest { name = 'remove_modality' mainScript = 'main.nf' nextflowVersion = '!>=20.12.1-edge' - version = '0.12.3' + version = '0.12.4' description = 'Remove a modality from a .h5mu file\n' author = 'Dries Schaumont' } diff --git a/target/nextflow/filter/remove_modality/nextflow_schema.json b/target/nextflow/filter/remove_modality/nextflow_schema.json index e136f142cfe..1e597745256 100644 --- a/target/nextflow/filter/remove_modality/nextflow_schema.json +++ b/target/nextflow/filter/remove_modality/nextflow_schema.json @@ -1,72 +1,103 @@ { - "$schema": "http://json-schema.org/draft-07/schema", - "title": "remove_modality", - "description": "Remove a modality from a .h5mu file\n", +"$schema": "http://json-schema.org/draft-07/schema", +"title": "remove_modality", +"description": "Remove a modality from a .h5mu file\n", +"type": "object", +"definitions": { + + + + "arguments" : { + "title": "Arguments", "type": "object", - "definitions": { - "arguments" : { - "title": "Arguments", - "type": "object", - "description": "No description", - "properties": { - - "input": { - "type": "string", - "description": "Type: `file`, required, example: `input.h5mu`. Input h5mu file", - "help_text": "Type: `file`, required, example: `input.h5mu`. Input h5mu file" - }, - - "modality": { - "type": "string", - "description": "Type: List of `string`, required, multiple_sep: `\":\"`. ", - "help_text": "Type: List of `string`, required, multiple_sep: `\":\"`. " - }, - - "output": { - "type": "string", - "description": "Type: `file`, default: `$id.$key.output.h5mu`, example: `output.h5mu`. Output h5mu file", - "help_text": "Type: `file`, default: `$id.$key.output.h5mu`, example: `output.h5mu`. Output h5mu file.", - "default": "$id.$key.output.h5mu" - }, - - "output_compression": { - "type": "string", - "description": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. The compression format to be used on the output h5mu object", - "help_text": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. The compression format to be used on the output h5mu object.", - "enum": ["gzip", "lzf"] + "description": "No description", + "properties": { + + + "input": { + "type": + "string", + "description": "Type: `file`, required, example: `input.h5mu`. Input h5mu file", + "help_text": "Type: `file`, required, example: `input.h5mu`. 
Input h5mu file" - } - - } - }, - "nextflow input-output arguments" : { - "title": "Nextflow input-output arguments", - "type": "object", - "description": "Input/output parameters for Nextflow itself. Please note that both publishDir and publish_dir are supported but at least one has to be configured.", - "properties": { - - "publish_dir": { - "type": "string", - "description": "Type: `string`, required, example: `output/`. Path to an output directory", - "help_text": "Type: `string`, required, example: `output/`. Path to an output directory." - }, - - "param_list": { - "type": "string", - "description": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel", - "help_text": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel. A `param_list` can either be a list of maps, a csv file, a json file, a yaml file, or simply a yaml blob.\n\n* A list of maps (as-is) where the keys of each map corresponds to the arguments of the pipeline. Example: in a `nextflow.config` file: `param_list: [ [\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027], [\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027] ]`.\n* A csv file should have column names which correspond to the different arguments of this pipeline. Example: `--param_list data.csv` with columns `id,input`.\n* A json or a yaml file should be a list of maps, each of which has keys corresponding to the arguments of the pipeline. Example: `--param_list data.json` with contents `[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]`.\n* A yaml blob can also be passed directly as a string. Example: `--param_list \"[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]\"`.\n\nWhen passing a csv, json or yaml file, relative path names are relativized to the location of the parameter file. No relativation is performed when `param_list` is a list of maps (as-is) or a yaml blob.", - "hidden": true - } - - } - } + } + + + , + "modality": { + "type": + "string", + "description": "Type: List of `string`, required, multiple_sep: `\":\"`. ", + "help_text": "Type: List of `string`, required, multiple_sep: `\":\"`. " + + } + + + , + "output": { + "type": + "string", + "description": "Type: `file`, default: `$id.$key.output.h5mu`, example: `output.h5mu`. Output h5mu file", + "help_text": "Type: `file`, default: `$id.$key.output.h5mu`, example: `output.h5mu`. Output h5mu file." + , + "default": "$id.$key.output.h5mu" + } + + + , + "output_compression": { + "type": + "string", + "description": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. The compression format to be used on the output h5mu object", + "help_text": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. The compression format to be used on the output h5mu object.", + "enum": ["gzip", "lzf"] + + + } + + +} +}, + + + "nextflow input-output arguments" : { + "title": "Nextflow input-output arguments", + "type": "object", + "description": "Input/output parameters for Nextflow itself. Please note that both publishDir and publish_dir are supported but at least one has to be configured.", + "properties": { + + + "publish_dir": { + "type": + "string", + "description": "Type: `string`, required, example: `output/`. 
Path to an output directory", + "help_text": "Type: `string`, required, example: `output/`. Path to an output directory." + + } + + + , + "param_list": { + "type": + "string", + "description": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel", + "help_text": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel. A `param_list` can either be a list of maps, a csv file, a json file, a yaml file, or simply a yaml blob.\n\n* A list of maps (as-is) where the keys of each map corresponds to the arguments of the pipeline. Example: in a `nextflow.config` file: `param_list: [ [\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027], [\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027] ]`.\n* A csv file should have column names which correspond to the different arguments of this pipeline. Example: `--param_list data.csv` with columns `id,input`.\n* A json or a yaml file should be a list of maps, each of which has keys corresponding to the arguments of the pipeline. Example: `--param_list data.json` with contents `[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]`.\n* A yaml blob can also be passed directly as a string. Example: `--param_list \"[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]\"`.\n\nWhen passing a csv, json or yaml file, relative path names are relativized to the location of the parameter file. No relativation is performed when `param_list` is a list of maps (as-is) or a yaml blob.", + "hidden": true + + } + + +} +} +}, +"allOf": [ + + { + "$ref": "#/definitions/arguments" }, - "allOf": [ - { - "$ref": "#/definitions/arguments" - }, - { - "$ref": "#/definitions/nextflow input-output arguments" - } - ] + + { + "$ref": "#/definitions/nextflow input-output arguments" + } +] } diff --git a/target/nextflow/filter/subset_h5mu/.config.vsh.yaml b/target/nextflow/filter/subset_h5mu/.config.vsh.yaml index cdfe7d13abb..3e9366709b2 100644 --- a/target/nextflow/filter/subset_h5mu/.config.vsh.yaml +++ b/target/nextflow/filter/subset_h5mu/.config.vsh.yaml @@ -1,7 +1,7 @@ functionality: name: "subset_h5mu" namespace: "filter" - version: "0.12.3" + version: "0.12.4" authors: - name: "Dries Schaumont" roles: @@ -182,6 +182,6 @@ info: output: "/home/runner/work/openpipeline/openpipeline/target/nextflow/filter/subset_h5mu" executable: "/home/runner/work/openpipeline/openpipeline/target/nextflow/filter/subset_h5mu/subset_h5mu" viash_version: "0.7.5" - git_commit: "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" + git_commit: "a075b9f384e200b357c4c85801062a980ddb3383" git_remote: "https://github.com/openpipelines-bio/openpipeline" - git_tag: "0.12.2-3-g827d483cf7" + git_tag: "0.12.3-3-ga075b9f384" diff --git a/target/nextflow/filter/subset_h5mu/main.nf b/target/nextflow/filter/subset_h5mu/main.nf index 0def3d04067..7f941e35fae 100644 --- a/target/nextflow/filter/subset_h5mu/main.nf +++ b/target/nextflow/filter/subset_h5mu/main.nf @@ -1,4 +1,4 @@ -// subset_h5mu 0.12.3 +// subset_h5mu 0.12.4 // // This wrapper script is auto-generated by viash 0.7.5 and is thus a derivative // work thereof. 
This software comes with ABSOLUTELY NO WARRANTY from Data @@ -27,7 +27,7 @@ thisConfig = processConfig(jsonSlurper.parseText('''{ "functionality" : { "name" : "subset_h5mu", "namespace" : "filter", - "version" : "0.12.3", + "version" : "0.12.4", "authors" : [ { "name" : "Dries Schaumont", @@ -262,9 +262,9 @@ thisConfig = processConfig(jsonSlurper.parseText('''{ "platform" : "nextflow", "output" : "/home/runner/work/openpipeline/openpipeline/target/nextflow/filter/subset_h5mu", "viash_version" : "0.7.5", - "git_commit" : "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f", + "git_commit" : "a075b9f384e200b357c4c85801062a980ddb3383", "git_remote" : "https://github.com/openpipelines-bio/openpipeline", - "git_tag" : "0.12.2-3-g827d483cf7" + "git_tag" : "0.12.3-3-ga075b9f384" } }''')) diff --git a/target/nextflow/filter/subset_h5mu/nextflow.config b/target/nextflow/filter/subset_h5mu/nextflow.config index 5c5245bb5ad..2cd5ee0815c 100644 --- a/target/nextflow/filter/subset_h5mu/nextflow.config +++ b/target/nextflow/filter/subset_h5mu/nextflow.config @@ -2,7 +2,7 @@ manifest { name = 'subset_h5mu' mainScript = 'main.nf' nextflowVersion = '!>=20.12.1-edge' - version = '0.12.3' + version = '0.12.4' description = 'Create a subset of a mudata file by selecting the first number of observations\n' author = 'Dries Schaumont' } diff --git a/target/nextflow/filter/subset_h5mu/nextflow_schema.json b/target/nextflow/filter/subset_h5mu/nextflow_schema.json index b75335fbeae..0a6d451c5a9 100644 --- a/target/nextflow/filter/subset_h5mu/nextflow_schema.json +++ b/target/nextflow/filter/subset_h5mu/nextflow_schema.json @@ -1,79 +1,114 @@ { - "$schema": "http://json-schema.org/draft-07/schema", - "title": "subset_h5mu", - "description": "Create a subset of a mudata file by selecting the first number of observations\n", +"$schema": "http://json-schema.org/draft-07/schema", +"title": "subset_h5mu", +"description": "Create a subset of a mudata file by selecting the first number of observations\n", +"type": "object", +"definitions": { + + + + "arguments" : { + "title": "Arguments", "type": "object", - "definitions": { - "arguments" : { - "title": "Arguments", - "type": "object", - "description": "No description", - "properties": { - - "input": { - "type": "string", - "description": "Type: `file`, required, example: `input.h5mu`. Input h5mu file", - "help_text": "Type: `file`, required, example: `input.h5mu`. Input h5mu file" - }, - - "modality": { - "type": "string", - "description": "Type: `string`, default: `rna`. ", - "help_text": "Type: `string`, default: `rna`. ", - "default": "rna" - }, - - "output": { - "type": "string", - "description": "Type: `file`, default: `$id.$key.output.h5mu`, example: `output.h5mu`. Output h5mu file", - "help_text": "Type: `file`, default: `$id.$key.output.h5mu`, example: `output.h5mu`. Output h5mu file.", - "default": "$id.$key.output.h5mu" - }, - - "output_compression": { - "type": "string", - "description": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. The compression format to be used on the output h5mu object", - "help_text": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. The compression format to be used on the output h5mu object.", - "enum": ["gzip", "lzf"] + "description": "No description", + "properties": { + + + "input": { + "type": + "string", + "description": "Type: `file`, required, example: `input.h5mu`. Input h5mu file", + "help_text": "Type: `file`, required, example: `input.h5mu`. 
Input h5mu file" - }, - - "number_of_observations": { - "type": "integer", - "description": "Type: `integer`, example: `5`. Number of observations to be selected from the h5mu file", - "help_text": "Type: `integer`, example: `5`. Number of observations to be selected from the h5mu file." - } - - } - }, - "nextflow input-output arguments" : { - "title": "Nextflow input-output arguments", - "type": "object", - "description": "Input/output parameters for Nextflow itself. Please note that both publishDir and publish_dir are supported but at least one has to be configured.", - "properties": { - - "publish_dir": { - "type": "string", - "description": "Type: `string`, required, example: `output/`. Path to an output directory", - "help_text": "Type: `string`, required, example: `output/`. Path to an output directory." - }, - - "param_list": { - "type": "string", - "description": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel", - "help_text": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel. A `param_list` can either be a list of maps, a csv file, a json file, a yaml file, or simply a yaml blob.\n\n* A list of maps (as-is) where the keys of each map corresponds to the arguments of the pipeline. Example: in a `nextflow.config` file: `param_list: [ [\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027], [\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027] ]`.\n* A csv file should have column names which correspond to the different arguments of this pipeline. Example: `--param_list data.csv` with columns `id,input`.\n* A json or a yaml file should be a list of maps, each of which has keys corresponding to the arguments of the pipeline. Example: `--param_list data.json` with contents `[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]`.\n* A yaml blob can also be passed directly as a string. Example: `--param_list \"[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]\"`.\n\nWhen passing a csv, json or yaml file, relative path names are relativized to the location of the parameter file. No relativation is performed when `param_list` is a list of maps (as-is) or a yaml blob.", - "hidden": true - } - - } - } + } + + + , + "modality": { + "type": + "string", + "description": "Type: `string`, default: `rna`. ", + "help_text": "Type: `string`, default: `rna`. " + , + "default": "rna" + } + + + , + "output": { + "type": + "string", + "description": "Type: `file`, default: `$id.$key.output.h5mu`, example: `output.h5mu`. Output h5mu file", + "help_text": "Type: `file`, default: `$id.$key.output.h5mu`, example: `output.h5mu`. Output h5mu file." + , + "default": "$id.$key.output.h5mu" + } + + + , + "output_compression": { + "type": + "string", + "description": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. The compression format to be used on the output h5mu object", + "help_text": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. The compression format to be used on the output h5mu object.", + "enum": ["gzip", "lzf"] + + + } + + + , + "number_of_observations": { + "type": + "integer", + "description": "Type: `integer`, example: `5`. 
Number of observations to be selected from the h5mu file", + "help_text": "Type: `integer`, example: `5`. Number of observations to be selected from the h5mu file." + + } + + +} +}, + + + "nextflow input-output arguments" : { + "title": "Nextflow input-output arguments", + "type": "object", + "description": "Input/output parameters for Nextflow itself. Please note that both publishDir and publish_dir are supported but at least one has to be configured.", + "properties": { + + + "publish_dir": { + "type": + "string", + "description": "Type: `string`, required, example: `output/`. Path to an output directory", + "help_text": "Type: `string`, required, example: `output/`. Path to an output directory." + + } + + + , + "param_list": { + "type": + "string", + "description": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel", + "help_text": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel. A `param_list` can either be a list of maps, a csv file, a json file, a yaml file, or simply a yaml blob.\n\n* A list of maps (as-is) where the keys of each map corresponds to the arguments of the pipeline. Example: in a `nextflow.config` file: `param_list: [ [\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027], [\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027] ]`.\n* A csv file should have column names which correspond to the different arguments of this pipeline. Example: `--param_list data.csv` with columns `id,input`.\n* A json or a yaml file should be a list of maps, each of which has keys corresponding to the arguments of the pipeline. Example: `--param_list data.json` with contents `[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]`.\n* A yaml blob can also be passed directly as a string. Example: `--param_list \"[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]\"`.\n\nWhen passing a csv, json or yaml file, relative path names are relativized to the location of the parameter file. 
No relativation is performed when `param_list` is a list of maps (as-is) or a yaml blob.", + "hidden": true + + } + + +} +} +}, +"allOf": [ + + { + "$ref": "#/definitions/arguments" }, - "allOf": [ - { - "$ref": "#/definitions/arguments" - }, - { - "$ref": "#/definitions/nextflow input-output arguments" - } - ] + + { + "$ref": "#/definitions/nextflow input-output arguments" + } +] } diff --git a/target/nextflow/integrate/harmonypy/.config.vsh.yaml b/target/nextflow/integrate/harmonypy/.config.vsh.yaml index 14fe5ee1c33..0e5da0e1eed 100644 --- a/target/nextflow/integrate/harmonypy/.config.vsh.yaml +++ b/target/nextflow/integrate/harmonypy/.config.vsh.yaml @@ -1,7 +1,7 @@ functionality: name: "harmonypy" namespace: "integrate" - version: "0.12.3" + version: "0.12.4" authors: - name: "Dries Schaumont" roles: @@ -235,6 +235,6 @@ info: output: "/home/runner/work/openpipeline/openpipeline/target/nextflow/integrate/harmonypy" executable: "/home/runner/work/openpipeline/openpipeline/target/nextflow/integrate/harmonypy/harmonypy" viash_version: "0.7.5" - git_commit: "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" + git_commit: "a075b9f384e200b357c4c85801062a980ddb3383" git_remote: "https://github.com/openpipelines-bio/openpipeline" - git_tag: "0.12.2-3-g827d483cf7" + git_tag: "0.12.3-3-ga075b9f384" diff --git a/target/nextflow/integrate/harmonypy/main.nf b/target/nextflow/integrate/harmonypy/main.nf index 8fd0689c681..740db64988b 100644 --- a/target/nextflow/integrate/harmonypy/main.nf +++ b/target/nextflow/integrate/harmonypy/main.nf @@ -1,4 +1,4 @@ -// harmonypy 0.12.3 +// harmonypy 0.12.4 // // This wrapper script is auto-generated by viash 0.7.5 and is thus a derivative // work thereof. This software comes with ABSOLUTELY NO WARRANTY from Data @@ -28,7 +28,7 @@ thisConfig = processConfig(jsonSlurper.parseText('''{ "functionality" : { "name" : "harmonypy", "namespace" : "integrate", - "version" : "0.12.3", + "version" : "0.12.4", "authors" : [ { "name" : "Dries Schaumont", @@ -327,9 +327,9 @@ thisConfig = processConfig(jsonSlurper.parseText('''{ "platform" : "nextflow", "output" : "/home/runner/work/openpipeline/openpipeline/target/nextflow/integrate/harmonypy", "viash_version" : "0.7.5", - "git_commit" : "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f", + "git_commit" : "a075b9f384e200b357c4c85801062a980ddb3383", "git_remote" : "https://github.com/openpipelines-bio/openpipeline", - "git_tag" : "0.12.2-3-g827d483cf7" + "git_tag" : "0.12.3-3-ga075b9f384" } }''')) diff --git a/target/nextflow/integrate/harmonypy/nextflow.config b/target/nextflow/integrate/harmonypy/nextflow.config index 115cc75021f..5a039982997 100644 --- a/target/nextflow/integrate/harmonypy/nextflow.config +++ b/target/nextflow/integrate/harmonypy/nextflow.config @@ -2,7 +2,7 @@ manifest { name = 'harmonypy' mainScript = 'main.nf' nextflowVersion = '!>=20.12.1-edge' - version = '0.12.3' + version = '0.12.4' description = 'Performs Harmony integration based as described in https://github.com/immunogenomics/harmony. 
Based on an implementation in python from https://github.com/slowkow/harmonypy' author = 'Dries Schaumont, Robrecht Cannoodt' } diff --git a/target/nextflow/integrate/harmonypy/nextflow_schema.json b/target/nextflow/integrate/harmonypy/nextflow_schema.json index b46b9ac7451..2f7ae209630 100644 --- a/target/nextflow/integrate/harmonypy/nextflow_schema.json +++ b/target/nextflow/integrate/harmonypy/nextflow_schema.json @@ -1,100 +1,147 @@ { - "$schema": "http://json-schema.org/draft-07/schema", - "title": "harmonypy", - "description": "Performs Harmony integration based as described in https://github.com/immunogenomics/harmony. Based on an implementation in python from https://github.com/slowkow/harmonypy", +"$schema": "http://json-schema.org/draft-07/schema", +"title": "harmonypy", +"description": "Performs Harmony integration based as described in https://github.com/immunogenomics/harmony. Based on an implementation in python from https://github.com/slowkow/harmonypy", +"type": "object", +"definitions": { + + + + "arguments" : { + "title": "Arguments", "type": "object", - "definitions": { - "arguments" : { - "title": "Arguments", - "type": "object", - "description": "No description", - "properties": { - - "input": { - "type": "string", - "description": "Type: `file`, required. Input h5mu file", - "help_text": "Type: `file`, required. Input h5mu file" - }, - - "output": { - "type": "string", - "description": "Type: `file`, required, default: `$id.$key.output.output`. Output h5mu file", - "help_text": "Type: `file`, required, default: `$id.$key.output.output`. Output h5mu file.", - "default": "$id.$key.output.output" - }, - - "output_compression": { - "type": "string", - "description": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. The compression format to be used on the output h5mu object", - "help_text": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. The compression format to be used on the output h5mu object.", - "enum": ["gzip", "lzf"] + "description": "No description", + "properties": { + + + "input": { + "type": + "string", + "description": "Type: `file`, required. Input h5mu file", + "help_text": "Type: `file`, required. Input h5mu file" - }, - - "modality": { - "type": "string", - "description": "Type: `string`, default: `rna`. ", - "help_text": "Type: `string`, default: `rna`. ", - "default": "rna" - }, - - "obsm_input": { - "type": "string", - "description": "Type: `string`, default: `X_pca`. Which ", - "help_text": "Type: `string`, default: `X_pca`. Which .obsm slot to use as a starting PCA embedding.", - "default": "X_pca" - }, - - "obsm_output": { - "type": "string", - "description": "Type: `string`, default: `X_pca_integrated`. In which ", - "help_text": "Type: `string`, default: `X_pca_integrated`. In which .obsm slot to store the resulting integrated embedding.", - "default": "X_pca_integrated" - }, - - "theta": { - "type": "string", - "description": "Type: List of `double`, default: `2`, multiple_sep: `\":\"`. Diversity clustering penalty parameter", - "help_text": "Type: List of `double`, default: `2`, multiple_sep: `\":\"`. Diversity clustering penalty parameter. Specify for each variable in group.by.vars. theta=0 does not encourage any diversity. Larger values of theta result in more diverse clusters.", - "default": "2" - }, - - "obs_covariates": { - "type": "string", - "description": "Type: List of `string`, required, example: `batch:sample`, multiple_sep: `\":\"`. 
The ", - "help_text": "Type: List of `string`, required, example: `batch:sample`, multiple_sep: `\":\"`. The .obs field(s) that define the covariate(s) to regress out." - } - - } - }, - "nextflow input-output arguments" : { - "title": "Nextflow input-output arguments", - "type": "object", - "description": "Input/output parameters for Nextflow itself. Please note that both publishDir and publish_dir are supported but at least one has to be configured.", - "properties": { - - "publish_dir": { - "type": "string", - "description": "Type: `string`, required, example: `output/`. Path to an output directory", - "help_text": "Type: `string`, required, example: `output/`. Path to an output directory." - }, - - "param_list": { - "type": "string", - "description": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel", - "help_text": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel. A `param_list` can either be a list of maps, a csv file, a json file, a yaml file, or simply a yaml blob.\n\n* A list of maps (as-is) where the keys of each map corresponds to the arguments of the pipeline. Example: in a `nextflow.config` file: `param_list: [ [\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027], [\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027] ]`.\n* A csv file should have column names which correspond to the different arguments of this pipeline. Example: `--param_list data.csv` with columns `id,input`.\n* A json or a yaml file should be a list of maps, each of which has keys corresponding to the arguments of the pipeline. Example: `--param_list data.json` with contents `[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]`.\n* A yaml blob can also be passed directly as a string. Example: `--param_list \"[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]\"`.\n\nWhen passing a csv, json or yaml file, relative path names are relativized to the location of the parameter file. No relativation is performed when `param_list` is a list of maps (as-is) or a yaml blob.", - "hidden": true - } - - } - } + } + + + , + "output": { + "type": + "string", + "description": "Type: `file`, required, default: `$id.$key.output.output`. Output h5mu file", + "help_text": "Type: `file`, required, default: `$id.$key.output.output`. Output h5mu file." + , + "default": "$id.$key.output.output" + } + + + , + "output_compression": { + "type": + "string", + "description": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. The compression format to be used on the output h5mu object", + "help_text": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. The compression format to be used on the output h5mu object.", + "enum": ["gzip", "lzf"] + + + } + + + , + "modality": { + "type": + "string", + "description": "Type: `string`, default: `rna`. ", + "help_text": "Type: `string`, default: `rna`. " + , + "default": "rna" + } + + + , + "obsm_input": { + "type": + "string", + "description": "Type: `string`, default: `X_pca`. Which ", + "help_text": "Type: `string`, default: `X_pca`. Which .obsm slot to use as a starting PCA embedding." 
+ , + "default": "X_pca" + } + + + , + "obsm_output": { + "type": + "string", + "description": "Type: `string`, default: `X_pca_integrated`. In which ", + "help_text": "Type: `string`, default: `X_pca_integrated`. In which .obsm slot to store the resulting integrated embedding." + , + "default": "X_pca_integrated" + } + + + , + "theta": { + "type": + "string", + "description": "Type: List of `double`, default: `2`, multiple_sep: `\":\"`. Diversity clustering penalty parameter", + "help_text": "Type: List of `double`, default: `2`, multiple_sep: `\":\"`. Diversity clustering penalty parameter. Specify for each variable in group.by.vars. theta=0 does not encourage any diversity. Larger values of theta result in more diverse clusters." + , + "default": "2" + } + + + , + "obs_covariates": { + "type": + "string", + "description": "Type: List of `string`, required, example: `batch:sample`, multiple_sep: `\":\"`. The ", + "help_text": "Type: List of `string`, required, example: `batch:sample`, multiple_sep: `\":\"`. The .obs field(s) that define the covariate(s) to regress out." + + } + + +} +}, + + + "nextflow input-output arguments" : { + "title": "Nextflow input-output arguments", + "type": "object", + "description": "Input/output parameters for Nextflow itself. Please note that both publishDir and publish_dir are supported but at least one has to be configured.", + "properties": { + + + "publish_dir": { + "type": + "string", + "description": "Type: `string`, required, example: `output/`. Path to an output directory", + "help_text": "Type: `string`, required, example: `output/`. Path to an output directory." + + } + + + , + "param_list": { + "type": + "string", + "description": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel", + "help_text": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel. A `param_list` can either be a list of maps, a csv file, a json file, a yaml file, or simply a yaml blob.\n\n* A list of maps (as-is) where the keys of each map corresponds to the arguments of the pipeline. Example: in a `nextflow.config` file: `param_list: [ [\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027], [\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027] ]`.\n* A csv file should have column names which correspond to the different arguments of this pipeline. Example: `--param_list data.csv` with columns `id,input`.\n* A json or a yaml file should be a list of maps, each of which has keys corresponding to the arguments of the pipeline. Example: `--param_list data.json` with contents `[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]`.\n* A yaml blob can also be passed directly as a string. Example: `--param_list \"[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]\"`.\n\nWhen passing a csv, json or yaml file, relative path names are relativized to the location of the parameter file. 
No relativation is performed when `param_list` is a list of maps (as-is) or a yaml blob.", + "hidden": true + + } + + +} +} +}, +"allOf": [ + + { + "$ref": "#/definitions/arguments" }, - "allOf": [ - { - "$ref": "#/definitions/arguments" - }, - { - "$ref": "#/definitions/nextflow input-output arguments" - } - ] + + { + "$ref": "#/definitions/nextflow input-output arguments" + } +] } diff --git a/target/nextflow/integrate/scanorama/.config.vsh.yaml b/target/nextflow/integrate/scanorama/.config.vsh.yaml index 556f08c99b6..e94c8e6fd9f 100644 --- a/target/nextflow/integrate/scanorama/.config.vsh.yaml +++ b/target/nextflow/integrate/scanorama/.config.vsh.yaml @@ -1,7 +1,7 @@ functionality: name: "scanorama" namespace: "integrate" - version: "0.12.3" + version: "0.12.4" authors: - name: "Dries De Maeyer" roles: @@ -278,6 +278,6 @@ info: output: "/home/runner/work/openpipeline/openpipeline/target/nextflow/integrate/scanorama" executable: "/home/runner/work/openpipeline/openpipeline/target/nextflow/integrate/scanorama/scanorama" viash_version: "0.7.5" - git_commit: "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" + git_commit: "a075b9f384e200b357c4c85801062a980ddb3383" git_remote: "https://github.com/openpipelines-bio/openpipeline" - git_tag: "0.12.2-3-g827d483cf7" + git_tag: "0.12.3-3-ga075b9f384" diff --git a/target/nextflow/integrate/scanorama/main.nf b/target/nextflow/integrate/scanorama/main.nf index ba8863ed99a..0c851d8a821 100644 --- a/target/nextflow/integrate/scanorama/main.nf +++ b/target/nextflow/integrate/scanorama/main.nf @@ -1,4 +1,4 @@ -// scanorama 0.12.3 +// scanorama 0.12.4 // // This wrapper script is auto-generated by viash 0.7.5 and is thus a derivative // work thereof. This software comes with ABSOLUTELY NO WARRANTY from Data @@ -28,7 +28,7 @@ thisConfig = processConfig(jsonSlurper.parseText('''{ "functionality" : { "name" : "scanorama", "namespace" : "integrate", - "version" : "0.12.3", + "version" : "0.12.4", "authors" : [ { "name" : "Dries De Maeyer", @@ -376,9 +376,9 @@ thisConfig = processConfig(jsonSlurper.parseText('''{ "platform" : "nextflow", "output" : "/home/runner/work/openpipeline/openpipeline/target/nextflow/integrate/scanorama", "viash_version" : "0.7.5", - "git_commit" : "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f", + "git_commit" : "a075b9f384e200b357c4c85801062a980ddb3383", "git_remote" : "https://github.com/openpipelines-bio/openpipeline", - "git_tag" : "0.12.2-3-g827d483cf7" + "git_tag" : "0.12.3-3-ga075b9f384" } }''')) diff --git a/target/nextflow/integrate/scanorama/nextflow.config b/target/nextflow/integrate/scanorama/nextflow.config index 543c17e00db..a6d9c564e3d 100644 --- a/target/nextflow/integrate/scanorama/nextflow.config +++ b/target/nextflow/integrate/scanorama/nextflow.config @@ -2,7 +2,7 @@ manifest { name = 'scanorama' mainScript = 'main.nf' nextflowVersion = '!>=20.12.1-edge' - version = '0.12.3' + version = '0.12.4' description = 'Use Scanorama to integrate different experiments.\n' author = 'Dries De Maeyer, Dries Schaumont' } diff --git a/target/nextflow/integrate/scanorama/nextflow_schema.json b/target/nextflow/integrate/scanorama/nextflow_schema.json index 9ff3afff470..b04de18bec9 100644 --- a/target/nextflow/integrate/scanorama/nextflow_schema.json +++ b/target/nextflow/integrate/scanorama/nextflow_schema.json @@ -1,129 +1,192 @@ { - "$schema": "http://json-schema.org/draft-07/schema", - "title": "scanorama", - "description": "Use Scanorama to integrate different experiments.\n", +"$schema": "http://json-schema.org/draft-07/schema", +"title": 
"scanorama", +"description": "Use Scanorama to integrate different experiments.\n", +"type": "object", +"definitions": { + + + + "arguments" : { + "title": "Arguments", "type": "object", - "definitions": { - "arguments" : { - "title": "Arguments", - "type": "object", - "description": "No description", - "properties": { - - "input": { - "type": "string", - "description": "Type: `file`, required. Input h5mu file", - "help_text": "Type: `file`, required. Input h5mu file" - }, - - "modality": { - "type": "string", - "description": "Type: `string`, default: `rna`. ", - "help_text": "Type: `string`, default: `rna`. ", - "default": "rna" - }, - - "output": { - "type": "string", - "description": "Type: `file`, required, default: `$id.$key.output.h5ad`. Output ", - "help_text": "Type: `file`, required, default: `$id.$key.output.h5ad`. Output .h5mu file", - "default": "$id.$key.output.h5ad" - }, - - "output_compression": { - "type": "string", - "description": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. The compression format to be used on the output h5mu object", - "help_text": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. The compression format to be used on the output h5mu object.", - "enum": ["gzip", "lzf"] + "description": "No description", + "properties": { + + + "input": { + "type": + "string", + "description": "Type: `file`, required. Input h5mu file", + "help_text": "Type: `file`, required. Input h5mu file" - }, - - "obs_batch": { - "type": "string", - "description": "Type: `string`, default: `batch`. Column name discriminating between your batches", - "help_text": "Type: `string`, default: `batch`. Column name discriminating between your batches.", - "default": "batch" - }, - - "obsm_input": { - "type": "string", - "description": "Type: `string`, default: `X_pca`. Basis obsm slot to run scanorama on", - "help_text": "Type: `string`, default: `X_pca`. Basis obsm slot to run scanorama on.", - "default": "X_pca" - }, - - "obsm_output": { - "type": "string", - "description": "Type: `string`, default: `X_scanorama`. The name of the field in adata", - "help_text": "Type: `string`, default: `X_scanorama`. The name of the field in adata.obsm where the integrated embeddings will be stored after running this function. Defaults to X_scanorama.", - "default": "X_scanorama" - }, - - "knn": { - "type": "integer", - "description": "Type: `integer`, default: `20`. Number of nearest neighbors to use for matching", - "help_text": "Type: `integer`, default: `20`. Number of nearest neighbors to use for matching.", - "default": "20" - }, - - "batch_size": { - "type": "integer", - "description": "Type: `integer`, default: `5000`. The batch size used in the alignment vector computation", - "help_text": "Type: `integer`, default: `5000`. The batch size used in the alignment vector computation. Useful when integrating very large (\u003e100k samples) datasets. Set to large value that runs within available memory.", - "default": "5000" - }, - - "sigma": { - "type": "number", - "description": "Type: `double`, default: `15`. Correction smoothing parameter on Gaussian kernel", - "help_text": "Type: `double`, default: `15`. Correction smoothing parameter on Gaussian kernel.", - "default": "15" - }, - - "approx": { - "type": "boolean", - "description": "Type: `boolean`, default: `true`. Use approximate nearest neighbors with Python annoy; greatly speeds up matching runtime", - "help_text": "Type: `boolean`, default: `true`. 
Use approximate nearest neighbors with Python annoy; greatly speeds up matching runtime.", - "default": "True" - }, - - "alpha": { - "type": "number", - "description": "Type: `double`, default: `0.1`. Alignment score minimum cutoff", - "help_text": "Type: `double`, default: `0.1`. Alignment score minimum cutoff", - "default": "0.1" - } - - } - }, - "nextflow input-output arguments" : { - "title": "Nextflow input-output arguments", - "type": "object", - "description": "Input/output parameters for Nextflow itself. Please note that both publishDir and publish_dir are supported but at least one has to be configured.", - "properties": { - - "publish_dir": { - "type": "string", - "description": "Type: `string`, required, example: `output/`. Path to an output directory", - "help_text": "Type: `string`, required, example: `output/`. Path to an output directory." - }, - - "param_list": { - "type": "string", - "description": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel", - "help_text": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel. A `param_list` can either be a list of maps, a csv file, a json file, a yaml file, or simply a yaml blob.\n\n* A list of maps (as-is) where the keys of each map corresponds to the arguments of the pipeline. Example: in a `nextflow.config` file: `param_list: [ [\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027], [\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027] ]`.\n* A csv file should have column names which correspond to the different arguments of this pipeline. Example: `--param_list data.csv` with columns `id,input`.\n* A json or a yaml file should be a list of maps, each of which has keys corresponding to the arguments of the pipeline. Example: `--param_list data.json` with contents `[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]`.\n* A yaml blob can also be passed directly as a string. Example: `--param_list \"[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]\"`.\n\nWhen passing a csv, json or yaml file, relative path names are relativized to the location of the parameter file. No relativation is performed when `param_list` is a list of maps (as-is) or a yaml blob.", - "hidden": true - } - - } - } + } + + + , + "modality": { + "type": + "string", + "description": "Type: `string`, default: `rna`. ", + "help_text": "Type: `string`, default: `rna`. " + , + "default": "rna" + } + + + , + "output": { + "type": + "string", + "description": "Type: `file`, required, default: `$id.$key.output.h5ad`. Output ", + "help_text": "Type: `file`, required, default: `$id.$key.output.h5ad`. Output .h5mu file" + , + "default": "$id.$key.output.h5ad" + } + + + , + "output_compression": { + "type": + "string", + "description": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. The compression format to be used on the output h5mu object", + "help_text": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. The compression format to be used on the output h5mu object.", + "enum": ["gzip", "lzf"] + + + } + + + , + "obs_batch": { + "type": + "string", + "description": "Type: `string`, default: `batch`. 
Column name discriminating between your batches", + "help_text": "Type: `string`, default: `batch`. Column name discriminating between your batches." + , + "default": "batch" + } + + + , + "obsm_input": { + "type": + "string", + "description": "Type: `string`, default: `X_pca`. Basis obsm slot to run scanorama on", + "help_text": "Type: `string`, default: `X_pca`. Basis obsm slot to run scanorama on." + , + "default": "X_pca" + } + + + , + "obsm_output": { + "type": + "string", + "description": "Type: `string`, default: `X_scanorama`. The name of the field in adata", + "help_text": "Type: `string`, default: `X_scanorama`. The name of the field in adata.obsm where the integrated embeddings will be stored after running this function. Defaults to X_scanorama." + , + "default": "X_scanorama" + } + + + , + "knn": { + "type": + "integer", + "description": "Type: `integer`, default: `20`. Number of nearest neighbors to use for matching", + "help_text": "Type: `integer`, default: `20`. Number of nearest neighbors to use for matching." + , + "default": "20" + } + + + , + "batch_size": { + "type": + "integer", + "description": "Type: `integer`, default: `5000`. The batch size used in the alignment vector computation", + "help_text": "Type: `integer`, default: `5000`. The batch size used in the alignment vector computation. Useful when integrating very large (\u003e100k samples) datasets. Set to large value that runs within available memory." + , + "default": "5000" + } + + + , + "sigma": { + "type": + "number", + "description": "Type: `double`, default: `15`. Correction smoothing parameter on Gaussian kernel", + "help_text": "Type: `double`, default: `15`. Correction smoothing parameter on Gaussian kernel." + , + "default": "15" + } + + + , + "approx": { + "type": + "boolean", + "description": "Type: `boolean`, default: `true`. Use approximate nearest neighbors with Python annoy; greatly speeds up matching runtime", + "help_text": "Type: `boolean`, default: `true`. Use approximate nearest neighbors with Python annoy; greatly speeds up matching runtime." + , + "default": "True" + } + + + , + "alpha": { + "type": + "number", + "description": "Type: `double`, default: `0.1`. Alignment score minimum cutoff", + "help_text": "Type: `double`, default: `0.1`. Alignment score minimum cutoff" + , + "default": "0.1" + } + + +} +}, + + + "nextflow input-output arguments" : { + "title": "Nextflow input-output arguments", + "type": "object", + "description": "Input/output parameters for Nextflow itself. Please note that both publishDir and publish_dir are supported but at least one has to be configured.", + "properties": { + + + "publish_dir": { + "type": + "string", + "description": "Type: `string`, required, example: `output/`. Path to an output directory", + "help_text": "Type: `string`, required, example: `output/`. Path to an output directory." + + } + + + , + "param_list": { + "type": + "string", + "description": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel", + "help_text": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel. A `param_list` can either be a list of maps, a csv file, a json file, a yaml file, or simply a yaml blob.\n\n* A list of maps (as-is) where the keys of each map corresponds to the arguments of the pipeline. 
Example: in a `nextflow.config` file: `param_list: [ [\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027], [\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027] ]`.\n* A csv file should have column names which correspond to the different arguments of this pipeline. Example: `--param_list data.csv` with columns `id,input`.\n* A json or a yaml file should be a list of maps, each of which has keys corresponding to the arguments of the pipeline. Example: `--param_list data.json` with contents `[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]`.\n* A yaml blob can also be passed directly as a string. Example: `--param_list \"[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]\"`.\n\nWhen passing a csv, json or yaml file, relative path names are relativized to the location of the parameter file. No relativation is performed when `param_list` is a list of maps (as-is) or a yaml blob.", + "hidden": true + + } + + +} +} +}, +"allOf": [ + + { + "$ref": "#/definitions/arguments" }, - "allOf": [ - { - "$ref": "#/definitions/arguments" - }, - { - "$ref": "#/definitions/nextflow input-output arguments" - } - ] + + { + "$ref": "#/definitions/nextflow input-output arguments" + } +] } diff --git a/target/nextflow/integrate/scarches/.config.vsh.yaml b/target/nextflow/integrate/scarches/.config.vsh.yaml index dd77332a214..e4053a904aa 100644 --- a/target/nextflow/integrate/scarches/.config.vsh.yaml +++ b/target/nextflow/integrate/scarches/.config.vsh.yaml @@ -1,7 +1,7 @@ functionality: name: "scarches" namespace: "integrate" - version: "0.12.3" + version: "0.12.4" authors: - name: "Vladimir Shitov" info: @@ -326,6 +326,6 @@ info: output: "/home/runner/work/openpipeline/openpipeline/target/nextflow/integrate/scarches" executable: "/home/runner/work/openpipeline/openpipeline/target/nextflow/integrate/scarches/scarches" viash_version: "0.7.5" - git_commit: "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" + git_commit: "a075b9f384e200b357c4c85801062a980ddb3383" git_remote: "https://github.com/openpipelines-bio/openpipeline" - git_tag: "0.12.2-3-g827d483cf7" + git_tag: "0.12.3-3-ga075b9f384" diff --git a/target/nextflow/integrate/scarches/main.nf b/target/nextflow/integrate/scarches/main.nf index 4b928080a66..6d31cb50e1a 100644 --- a/target/nextflow/integrate/scarches/main.nf +++ b/target/nextflow/integrate/scarches/main.nf @@ -1,4 +1,4 @@ -// scarches 0.12.3 +// scarches 0.12.4 // // This wrapper script is auto-generated by viash 0.7.5 and is thus a derivative // work thereof. 
This software comes with ABSOLUTELY NO WARRANTY from Data @@ -27,7 +27,7 @@ thisConfig = processConfig(jsonSlurper.parseText('''{ "functionality" : { "name" : "scarches", "namespace" : "integrate", - "version" : "0.12.3", + "version" : "0.12.4", "authors" : [ { "name" : "Vladimir Shitov", @@ -433,9 +433,9 @@ thisConfig = processConfig(jsonSlurper.parseText('''{ "platform" : "nextflow", "output" : "/home/runner/work/openpipeline/openpipeline/target/nextflow/integrate/scarches", "viash_version" : "0.7.5", - "git_commit" : "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f", + "git_commit" : "a075b9f384e200b357c4c85801062a980ddb3383", "git_remote" : "https://github.com/openpipelines-bio/openpipeline", - "git_tag" : "0.12.2-3-g827d483cf7" + "git_tag" : "0.12.3-3-ga075b9f384" } }''')) diff --git a/target/nextflow/integrate/scarches/nextflow.config b/target/nextflow/integrate/scarches/nextflow.config index 9de392f5763..db9b4ed3ea7 100644 --- a/target/nextflow/integrate/scarches/nextflow.config +++ b/target/nextflow/integrate/scarches/nextflow.config @@ -2,7 +2,7 @@ manifest { name = 'scarches' mainScript = 'main.nf' nextflowVersion = '!>=20.12.1-edge' - version = '0.12.3' + version = '0.12.4' description = 'Performs reference mapping with scArches' author = 'Vladimir Shitov' } diff --git a/target/nextflow/integrate/scarches/nextflow_schema.json b/target/nextflow/integrate/scarches/nextflow_schema.json index 6d7ad3e97e4..d873eb1f878 100644 --- a/target/nextflow/integrate/scarches/nextflow_schema.json +++ b/target/nextflow/integrate/scarches/nextflow_schema.json @@ -1,189 +1,277 @@ { - "$schema": "http://json-schema.org/draft-07/schema", - "title": "scarches", - "description": "Performs reference mapping with scArches", +"$schema": "http://json-schema.org/draft-07/schema", +"title": "scarches", +"description": "Performs reference mapping with scArches", +"type": "object", +"definitions": { + + + + "inputs" : { + "title": "Inputs", "type": "object", - "definitions": { - "inputs" : { - "title": "Inputs", - "type": "object", - "description": "No description", - "properties": { - - "input": { - "type": "string", - "description": "Type: `file`, required. Input h5mu file to use as a query", - "help_text": "Type: `file`, required. Input h5mu file to use as a query" - }, - - "modality": { - "type": "string", - "description": "Type: `string`, default: `rna`. ", - "help_text": "Type: `string`, default: `rna`. ", - "default": "rna" - }, - - "reference": { - "type": "string", - "description": "Type: `file`, required. Path to the directory with reference model or a web link", - "help_text": "Type: `file`, required. Path to the directory with reference model or a web link. For HLCA use https://zenodo.org/record/6337966/files/HLCA_reference_model.zip" - }, - - "dataset_name": { - "type": "string", - "description": "Type: `string`, default: `test_dataset`. Name of query dataset to use as a batch name", - "help_text": "Type: `string`, default: `test_dataset`. Name of query dataset to use as a batch name. If not set, name of the input file is used", - "default": "test_dataset" - } - - } - }, - "outputs" : { - "title": "Outputs", - "type": "object", - "description": "No description", - "properties": { - - "output": { - "type": "string", - "description": "Type: `file`, required, default: `$id.$key.output.output`. Output h5mu file", - "help_text": "Type: `file`, required, default: `$id.$key.output.output`. 
Output h5mu file.", - "default": "$id.$key.output.output" - }, - - "output_compression": { - "type": "string", - "description": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. The compression format to be used on the output h5mu object", - "help_text": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. The compression format to be used on the output h5mu object.", - "enum": ["gzip", "lzf"] + "description": "No description", + "properties": { + + + "input": { + "type": + "string", + "description": "Type: `file`, required. Input h5mu file to use as a query", + "help_text": "Type: `file`, required. Input h5mu file to use as a query" - }, - - "model_output": { - "type": "string", - "description": "Type: `file`, default: `$id.$key.model_output.model_output`. Output directory for model", - "help_text": "Type: `file`, default: `$id.$key.model_output.model_output`. Output directory for model", - "default": "$id.$key.model_output.model_output" - }, - - "obsm_output": { - "type": "string", - "description": "Type: `string`, default: `X_integrated_scanvi`. In which ", - "help_text": "Type: `string`, default: `X_integrated_scanvi`. In which .obsm slot to store the resulting integrated embedding.", - "default": "X_integrated_scanvi" - } - - } - }, - "early stopping arguments" : { - "title": "Early stopping arguments", - "type": "object", - "description": "No description", - "properties": { - - "early_stopping": { - "type": "boolean", - "description": "Type: `boolean`. Whether to perform early stopping with respect to the validation set", - "help_text": "Type: `boolean`. Whether to perform early stopping with respect to the validation set." - }, - - "early_stopping_monitor": { - "type": "string", - "description": "Type: `string`, default: `elbo_validation`, choices: ``elbo_validation`, `reconstruction_loss_validation`, `kl_local_validation``. Metric logged during validation set epoch", - "help_text": "Type: `string`, default: `elbo_validation`, choices: ``elbo_validation`, `reconstruction_loss_validation`, `kl_local_validation``. Metric logged during validation set epoch.", - "enum": ["elbo_validation", "reconstruction_loss_validation", "kl_local_validation"] + } + + + , + "modality": { + "type": + "string", + "description": "Type: `string`, default: `rna`. ", + "help_text": "Type: `string`, default: `rna`. " , - "default": "elbo_validation" - }, - - "early_stopping_patience": { - "type": "integer", - "description": "Type: `integer`, default: `45`. Number of validation epochs with no improvement after which training will be stopped", - "help_text": "Type: `integer`, default: `45`. Number of validation epochs with no improvement after which training will be stopped.", - "default": "45" - }, - - "early_stopping_min_delta": { - "type": "number", - "description": "Type: `double`, default: `0.0`. Minimum change in the monitored quantity to qualify as an improvement, i", - "help_text": "Type: `double`, default: `0.0`. Minimum change in the monitored quantity to qualify as an improvement, i.e. an absolute change of less than min_delta, will count as no improvement.", - "default": "0.0" - } - - } - }, - "learning parameters" : { - "title": "Learning parameters", - "type": "object", - "description": "No description", - "properties": { - - "max_epochs": { - "type": "integer", - "description": "Type: `integer`, required. Number of passes through the dataset, defaults to (20000 / number of cells) * 400 or 400; whichever is smallest", - "help_text": "Type: `integer`, required. 
Number of passes through the dataset, defaults to (20000 / number of cells) * 400 or 400; whichever is smallest." - }, - - "reduce_lr_on_plateau": { - "type": "boolean", - "description": "Type: `boolean`, default: `true`. Whether to monitor validation loss and reduce learning rate when validation set `lr_scheduler_metric` plateaus", - "help_text": "Type: `boolean`, default: `true`. Whether to monitor validation loss and reduce learning rate when validation set `lr_scheduler_metric` plateaus.", - "default": "True" - }, - - "lr_factor": { - "type": "number", - "description": "Type: `double`, default: `0.6`. Factor to reduce learning rate", - "help_text": "Type: `double`, default: `0.6`. Factor to reduce learning rate.", - "default": "0.6" - }, - - "lr_patience": { - "type": "number", - "description": "Type: `double`, default: `30`. Number of epochs with no improvement after which learning rate will be reduced", - "help_text": "Type: `double`, default: `30`. Number of epochs with no improvement after which learning rate will be reduced.", - "default": "30" - } - - } - }, - "nextflow input-output arguments" : { - "title": "Nextflow input-output arguments", - "type": "object", - "description": "Input/output parameters for Nextflow itself. Please note that both publishDir and publish_dir are supported but at least one has to be configured.", - "properties": { - - "publish_dir": { - "type": "string", - "description": "Type: `string`, required, example: `output/`. Path to an output directory", - "help_text": "Type: `string`, required, example: `output/`. Path to an output directory." - }, - - "param_list": { - "type": "string", - "description": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel", - "help_text": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel. A `param_list` can either be a list of maps, a csv file, a json file, a yaml file, or simply a yaml blob.\n\n* A list of maps (as-is) where the keys of each map corresponds to the arguments of the pipeline. Example: in a `nextflow.config` file: `param_list: [ [\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027], [\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027] ]`.\n* A csv file should have column names which correspond to the different arguments of this pipeline. Example: `--param_list data.csv` with columns `id,input`.\n* A json or a yaml file should be a list of maps, each of which has keys corresponding to the arguments of the pipeline. Example: `--param_list data.json` with contents `[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]`.\n* A yaml blob can also be passed directly as a string. Example: `--param_list \"[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]\"`.\n\nWhen passing a csv, json or yaml file, relative path names are relativized to the location of the parameter file. No relativation is performed when `param_list` is a list of maps (as-is) or a yaml blob.", - "hidden": true - } - - } - } + "default": "rna" + } + + + , + "reference": { + "type": + "string", + "description": "Type: `file`, required. Path to the directory with reference model or a web link", + "help_text": "Type: `file`, required. 
Path to the directory with reference model or a web link. For HLCA use https://zenodo.org/record/6337966/files/HLCA_reference_model.zip" + + } + + + , + "dataset_name": { + "type": + "string", + "description": "Type: `string`, default: `test_dataset`. Name of query dataset to use as a batch name", + "help_text": "Type: `string`, default: `test_dataset`. Name of query dataset to use as a batch name. If not set, name of the input file is used" + , + "default": "test_dataset" + } + + +} +}, + + + "outputs" : { + "title": "Outputs", + "type": "object", + "description": "No description", + "properties": { + + + "output": { + "type": + "string", + "description": "Type: `file`, required, default: `$id.$key.output.output`. Output h5mu file", + "help_text": "Type: `file`, required, default: `$id.$key.output.output`. Output h5mu file." + , + "default": "$id.$key.output.output" + } + + + , + "output_compression": { + "type": + "string", + "description": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. The compression format to be used on the output h5mu object", + "help_text": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. The compression format to be used on the output h5mu object.", + "enum": ["gzip", "lzf"] + + + } + + + , + "model_output": { + "type": + "string", + "description": "Type: `file`, default: `$id.$key.model_output.model_output`. Output directory for model", + "help_text": "Type: `file`, default: `$id.$key.model_output.model_output`. Output directory for model" + , + "default": "$id.$key.model_output.model_output" + } + + + , + "obsm_output": { + "type": + "string", + "description": "Type: `string`, default: `X_integrated_scanvi`. In which ", + "help_text": "Type: `string`, default: `X_integrated_scanvi`. In which .obsm slot to store the resulting integrated embedding." + , + "default": "X_integrated_scanvi" + } + + +} +}, + + + "early stopping arguments" : { + "title": "Early stopping arguments", + "type": "object", + "description": "No description", + "properties": { + + + "early_stopping": { + "type": + "boolean", + "description": "Type: `boolean`. Whether to perform early stopping with respect to the validation set", + "help_text": "Type: `boolean`. Whether to perform early stopping with respect to the validation set." + + } + + + , + "early_stopping_monitor": { + "type": + "string", + "description": "Type: `string`, default: `elbo_validation`, choices: ``elbo_validation`, `reconstruction_loss_validation`, `kl_local_validation``. Metric logged during validation set epoch", + "help_text": "Type: `string`, default: `elbo_validation`, choices: ``elbo_validation`, `reconstruction_loss_validation`, `kl_local_validation``. Metric logged during validation set epoch.", + "enum": ["elbo_validation", "reconstruction_loss_validation", "kl_local_validation"] + + , + "default": "elbo_validation" + } + + + , + "early_stopping_patience": { + "type": + "integer", + "description": "Type: `integer`, default: `45`. Number of validation epochs with no improvement after which training will be stopped", + "help_text": "Type: `integer`, default: `45`. Number of validation epochs with no improvement after which training will be stopped." + , + "default": "45" + } + + + , + "early_stopping_min_delta": { + "type": + "number", + "description": "Type: `double`, default: `0.0`. Minimum change in the monitored quantity to qualify as an improvement, i", + "help_text": "Type: `double`, default: `0.0`. Minimum change in the monitored quantity to qualify as an improvement, i.e. 
an absolute change of less than min_delta, will count as no improvement." + , + "default": "0.0" + } + + +} +}, + + + "learning parameters" : { + "title": "Learning parameters", + "type": "object", + "description": "No description", + "properties": { + + + "max_epochs": { + "type": + "integer", + "description": "Type: `integer`, required. Number of passes through the dataset, defaults to (20000 / number of cells) * 400 or 400; whichever is smallest", + "help_text": "Type: `integer`, required. Number of passes through the dataset, defaults to (20000 / number of cells) * 400 or 400; whichever is smallest." + + } + + + , + "reduce_lr_on_plateau": { + "type": + "boolean", + "description": "Type: `boolean`, default: `true`. Whether to monitor validation loss and reduce learning rate when validation set `lr_scheduler_metric` plateaus", + "help_text": "Type: `boolean`, default: `true`. Whether to monitor validation loss and reduce learning rate when validation set `lr_scheduler_metric` plateaus." + , + "default": "True" + } + + + , + "lr_factor": { + "type": + "number", + "description": "Type: `double`, default: `0.6`. Factor to reduce learning rate", + "help_text": "Type: `double`, default: `0.6`. Factor to reduce learning rate." + , + "default": "0.6" + } + + + , + "lr_patience": { + "type": + "number", + "description": "Type: `double`, default: `30`. Number of epochs with no improvement after which learning rate will be reduced", + "help_text": "Type: `double`, default: `30`. Number of epochs with no improvement after which learning rate will be reduced." + , + "default": "30" + } + + +} +}, + + + "nextflow input-output arguments" : { + "title": "Nextflow input-output arguments", + "type": "object", + "description": "Input/output parameters for Nextflow itself. Please note that both publishDir and publish_dir are supported but at least one has to be configured.", + "properties": { + + + "publish_dir": { + "type": + "string", + "description": "Type: `string`, required, example: `output/`. Path to an output directory", + "help_text": "Type: `string`, required, example: `output/`. Path to an output directory." + + } + + + , + "param_list": { + "type": + "string", + "description": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel", + "help_text": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel. A `param_list` can either be a list of maps, a csv file, a json file, a yaml file, or simply a yaml blob.\n\n* A list of maps (as-is) where the keys of each map corresponds to the arguments of the pipeline. Example: in a `nextflow.config` file: `param_list: [ [\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027], [\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027] ]`.\n* A csv file should have column names which correspond to the different arguments of this pipeline. Example: `--param_list data.csv` with columns `id,input`.\n* A json or a yaml file should be a list of maps, each of which has keys corresponding to the arguments of the pipeline. Example: `--param_list data.json` with contents `[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]`.\n* A yaml blob can also be passed directly as a string. 
Example: `--param_list \"[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]\"`.\n\nWhen passing a csv, json or yaml file, relative path names are relativized to the location of the parameter file. No relativation is performed when `param_list` is a list of maps (as-is) or a yaml blob.", + "hidden": true + + } + + +} +} +}, +"allOf": [ + + { + "$ref": "#/definitions/inputs" + }, + + { + "$ref": "#/definitions/outputs" + }, + + { + "$ref": "#/definitions/early stopping arguments" + }, + + { + "$ref": "#/definitions/learning parameters" }, - "allOf": [ - { - "$ref": "#/definitions/inputs" - }, - { - "$ref": "#/definitions/outputs" - }, - { - "$ref": "#/definitions/early stopping arguments" - }, - { - "$ref": "#/definitions/learning parameters" - }, - { - "$ref": "#/definitions/nextflow input-output arguments" - } - ] + + { + "$ref": "#/definitions/nextflow input-output arguments" + } +] } diff --git a/target/nextflow/integrate/scvi/.config.vsh.yaml b/target/nextflow/integrate/scvi/.config.vsh.yaml index b0336fa962b..ed2ae349e77 100644 --- a/target/nextflow/integrate/scvi/.config.vsh.yaml +++ b/target/nextflow/integrate/scvi/.config.vsh.yaml @@ -1,7 +1,7 @@ functionality: name: "scvi" namespace: "integrate" - version: "0.12.3" + version: "0.12.4" authors: - name: "Malte D. Luecken" roles: @@ -586,6 +586,6 @@ info: output: "/home/runner/work/openpipeline/openpipeline/target/nextflow/integrate/scvi" executable: "/home/runner/work/openpipeline/openpipeline/target/nextflow/integrate/scvi/scvi" viash_version: "0.7.5" - git_commit: "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" + git_commit: "a075b9f384e200b357c4c85801062a980ddb3383" git_remote: "https://github.com/openpipelines-bio/openpipeline" - git_tag: "0.12.2-3-g827d483cf7" + git_tag: "0.12.3-3-ga075b9f384" diff --git a/target/nextflow/integrate/scvi/main.nf b/target/nextflow/integrate/scvi/main.nf index be868125220..9393402ffe5 100644 --- a/target/nextflow/integrate/scvi/main.nf +++ b/target/nextflow/integrate/scvi/main.nf @@ -1,4 +1,4 @@ -// scvi 0.12.3 +// scvi 0.12.4 // // This wrapper script is auto-generated by viash 0.7.5 and is thus a derivative // work thereof. This software comes with ABSOLUTELY NO WARRANTY from Data @@ -29,7 +29,7 @@ thisConfig = processConfig(jsonSlurper.parseText('''{ "functionality" : { "name" : "scvi", "namespace" : "integrate", - "version" : "0.12.3", + "version" : "0.12.4", "authors" : [ { "name" : "Malte D. 
Luecken", @@ -738,9 +738,9 @@ thisConfig = processConfig(jsonSlurper.parseText('''{ "platform" : "nextflow", "output" : "/home/runner/work/openpipeline/openpipeline/target/nextflow/integrate/scvi", "viash_version" : "0.7.5", - "git_commit" : "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f", + "git_commit" : "a075b9f384e200b357c4c85801062a980ddb3383", "git_remote" : "https://github.com/openpipelines-bio/openpipeline", - "git_tag" : "0.12.2-3-g827d483cf7" + "git_tag" : "0.12.3-3-ga075b9f384" } }''')) diff --git a/target/nextflow/integrate/scvi/nextflow.config b/target/nextflow/integrate/scvi/nextflow.config index d71811418ee..d1499cd34c3 100644 --- a/target/nextflow/integrate/scvi/nextflow.config +++ b/target/nextflow/integrate/scvi/nextflow.config @@ -2,7 +2,7 @@ manifest { name = 'scvi' mainScript = 'main.nf' nextflowVersion = '!>=20.12.1-edge' - version = '0.12.3' + version = '0.12.4' description = 'Performs scvi integration as done in the human lung cell atlas https://github.com/LungCellAtlas/HLCA' author = 'Malte D. Luecken, Dries Schaumont, Matthias Beyens' } diff --git a/target/nextflow/integrate/scvi/nextflow_schema.json b/target/nextflow/integrate/scvi/nextflow_schema.json index bf382a44e1f..c360d430245 100644 --- a/target/nextflow/integrate/scvi/nextflow_schema.json +++ b/target/nextflow/integrate/scvi/nextflow_schema.json @@ -1,351 +1,520 @@ { - "$schema": "http://json-schema.org/draft-07/schema", - "title": "scvi", - "description": "Performs scvi integration as done in the human lung cell atlas https://github.com/LungCellAtlas/HLCA", +"$schema": "http://json-schema.org/draft-07/schema", +"title": "scvi", +"description": "Performs scvi integration as done in the human lung cell atlas https://github.com/LungCellAtlas/HLCA", +"type": "object", +"definitions": { + + + + "inputs" : { + "title": "Inputs", "type": "object", - "definitions": { - "inputs" : { - "title": "Inputs", - "type": "object", - "description": "No description", - "properties": { - - "input": { - "type": "string", - "description": "Type: `file`, required. Input h5mu file", - "help_text": "Type: `file`, required. Input h5mu file" - }, - - "modality": { - "type": "string", - "description": "Type: `string`, default: `rna`. ", - "help_text": "Type: `string`, default: `rna`. ", - "default": "rna" - }, - - "input_layer": { - "type": "string", - "description": "Type: `string`. Input layer to use", - "help_text": "Type: `string`. Input layer to use. If None, X is used" - }, - - "obs_batch": { - "type": "string", - "description": "Type: `string`, default: `sample_id`. Column name discriminating between your batches", - "help_text": "Type: `string`, default: `sample_id`. Column name discriminating between your batches.", - "default": "sample_id" - }, - - "var_input": { - "type": "string", - "description": "Type: `string`. ", - "help_text": "Type: `string`. .var column containing highly variable genes. By default, do not subset genes." - }, - - "obs_labels": { - "type": "string", - "description": "Type: `string`. Key in adata", - "help_text": "Type: `string`. Key in adata.obs for label information. Categories will automatically be \nconverted into integer categories and saved to adata.obs[\u0027_scvi_labels\u0027].\nIf None, assigns the same label to all the data.\n" - }, - - "obs_size_factor": { - "type": "string", - "description": "Type: `string`. Key in adata", - "help_text": "Type: `string`. Key in adata.obs for size factor information. 
Instead of using library size as a size factor,\nthe provided size factor column will be used as offset in the mean of the likelihood.\nAssumed to be on linear scale.\n" - }, - - "obs_categorical_covariate": { - "type": "string", - "description": "Type: List of `string`, multiple_sep: `\":\"`. Keys in adata", - "help_text": "Type: List of `string`, multiple_sep: `\":\"`. Keys in adata.obs that correspond to categorical data. These covariates can be added in\naddition to the batch covariate and are also treated as nuisance factors\n(i.e., the model tries to minimize their effects on the latent space).\nThus, these should not be used for biologically-relevant factors that you do _not_ want to correct for.\n" - }, - - "obs_continuous_covariate": { - "type": "string", - "description": "Type: List of `string`, multiple_sep: `\":\"`. Keys in adata", - "help_text": "Type: List of `string`, multiple_sep: `\":\"`. Keys in adata.obs that correspond to continuous data. These covariates can be added in\naddition to the batch covariate and are also treated as nuisance factors\n(i.e., the model tries to minimize their effects on the latent space). Thus, these should not be\nused for biologically-relevant factors that you do _not_ want to correct for.\n" - } - - } - }, - "outputs" : { - "title": "Outputs", - "type": "object", - "description": "No description", - "properties": { - - "output": { - "type": "string", - "description": "Type: `file`, required, default: `$id.$key.output.output`. Output h5mu file", - "help_text": "Type: `file`, required, default: `$id.$key.output.output`. Output h5mu file.", - "default": "$id.$key.output.output" - }, - - "output_model": { - "type": "string", - "description": "Type: `file`, default: `$id.$key.output_model.output_model`. Folder where the state of the trained model will be saved to", - "help_text": "Type: `file`, default: `$id.$key.output_model.output_model`. Folder where the state of the trained model will be saved to.", - "default": "$id.$key.output_model.output_model" - }, - - "output_compression": { - "type": "string", - "description": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. The compression format to be used on the output h5mu object", - "help_text": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. The compression format to be used on the output h5mu object.", - "enum": ["gzip", "lzf"] + "description": "No description", + "properties": { + + + "input": { + "type": + "string", + "description": "Type: `file`, required. Input h5mu file", + "help_text": "Type: `file`, required. Input h5mu file" - }, - - "obsm_output": { - "type": "string", - "description": "Type: `string`, default: `X_scvi_integrated`. In which ", - "help_text": "Type: `string`, default: `X_scvi_integrated`. In which .obsm slot to store the resulting integrated embedding.", - "default": "X_scvi_integrated" - } - - } - }, - "scvi options" : { - "title": "SCVI options", - "type": "object", - "description": "No description", - "properties": { - - "n_hidden_nodes": { - "type": "integer", - "description": "Type: `integer`, default: `128`. Number of nodes per hidden layer", - "help_text": "Type: `integer`, default: `128`. Number of nodes per hidden layer.", - "default": "128" - }, - - "n_dimensions_latent_space": { - "type": "integer", - "description": "Type: `integer`, default: `30`. Dimensionality of the latent space", - "help_text": "Type: `integer`, default: `30`. 
Dimensionality of the latent space.", - "default": "30" - }, - - "n_hidden_layers": { - "type": "integer", - "description": "Type: `integer`, default: `2`. Number of hidden layers used for encoder and decoder neural-networks", - "help_text": "Type: `integer`, default: `2`. Number of hidden layers used for encoder and decoder neural-networks.", - "default": "2" - }, - - "dropout_rate": { - "type": "number", - "description": "Type: `double`, default: `0.1`. Dropout rate for the neural networks", - "help_text": "Type: `double`, default: `0.1`. Dropout rate for the neural networks.", - "default": "0.1" - }, - - "dispersion": { - "type": "string", - "description": "Type: `string`, default: `gene`, choices: ``gene`, `gene-batch`, `gene-label`, `gene-cell``. Set the behavior for the dispersion for negative binomial distributions:\n- gene: dispersion parameter of negative binomial is constant per gene across cells\n- gene-batch: dispersion can differ between different batches\n- gene-label: dispersion can differ between different labels\n- gene-cell: dispersion can differ for every gene in every cell\n", - "help_text": "Type: `string`, default: `gene`, choices: ``gene`, `gene-batch`, `gene-label`, `gene-cell``. Set the behavior for the dispersion for negative binomial distributions:\n- gene: dispersion parameter of negative binomial is constant per gene across cells\n- gene-batch: dispersion can differ between different batches\n- gene-label: dispersion can differ between different labels\n- gene-cell: dispersion can differ for every gene in every cell\n", - "enum": ["gene", "gene-batch", "gene-label", "gene-cell"] + } + + + , + "modality": { + "type": + "string", + "description": "Type: `string`, default: `rna`. ", + "help_text": "Type: `string`, default: `rna`. " , - "default": "gene" - }, - - "gene_likelihood": { - "type": "string", - "description": "Type: `string`, default: `nb`, choices: ``nb`, `zinb`, `poisson``. Model used to generate the expression data from a count-based likelihood distribution", - "help_text": "Type: `string`, default: `nb`, choices: ``nb`, `zinb`, `poisson``. Model used to generate the expression data from a count-based likelihood distribution.\n- nb: Negative binomial distribution\n- zinb: Zero-inflated negative binomial distribution\n- poisson: Poisson distribution\n", - "enum": ["nb", "zinb", "poisson"] + "default": "rna" + } + + + , + "input_layer": { + "type": + "string", + "description": "Type: `string`. Input layer to use", + "help_text": "Type: `string`. Input layer to use. If None, X is used" + + } + + + , + "obs_batch": { + "type": + "string", + "description": "Type: `string`, default: `sample_id`. Column name discriminating between your batches", + "help_text": "Type: `string`, default: `sample_id`. Column name discriminating between your batches." + , + "default": "sample_id" + } + + + , + "var_input": { + "type": + "string", + "description": "Type: `string`. ", + "help_text": "Type: `string`. .var column containing highly variable genes. By default, do not subset genes." + + } + + + , + "obs_labels": { + "type": + "string", + "description": "Type: `string`. Key in adata", + "help_text": "Type: `string`. Key in adata.obs for label information. Categories will automatically be \nconverted into integer categories and saved to adata.obs[\u0027_scvi_labels\u0027].\nIf None, assigns the same label to all the data.\n" + + } + + + , + "obs_size_factor": { + "type": + "string", + "description": "Type: `string`. Key in adata", + "help_text": "Type: `string`. 
Key in adata.obs for size factor information. Instead of using library size as a size factor,\nthe provided size factor column will be used as offset in the mean of the likelihood.\nAssumed to be on linear scale.\n" + + } + + + , + "obs_categorical_covariate": { + "type": + "string", + "description": "Type: List of `string`, multiple_sep: `\":\"`. Keys in adata", + "help_text": "Type: List of `string`, multiple_sep: `\":\"`. Keys in adata.obs that correspond to categorical data. These covariates can be added in\naddition to the batch covariate and are also treated as nuisance factors\n(i.e., the model tries to minimize their effects on the latent space).\nThus, these should not be used for biologically-relevant factors that you do _not_ want to correct for.\n" + + } + + + , + "obs_continuous_covariate": { + "type": + "string", + "description": "Type: List of `string`, multiple_sep: `\":\"`. Keys in adata", + "help_text": "Type: List of `string`, multiple_sep: `\":\"`. Keys in adata.obs that correspond to continuous data. These covariates can be added in\naddition to the batch covariate and are also treated as nuisance factors\n(i.e., the model tries to minimize their effects on the latent space). Thus, these should not be\nused for biologically-relevant factors that you do _not_ want to correct for.\n" + + } + + +} +}, + + + "outputs" : { + "title": "Outputs", + "type": "object", + "description": "No description", + "properties": { + + + "output": { + "type": + "string", + "description": "Type: `file`, required, default: `$id.$key.output.output`. Output h5mu file", + "help_text": "Type: `file`, required, default: `$id.$key.output.output`. Output h5mu file." + , + "default": "$id.$key.output.output" + } + + + , + "output_model": { + "type": + "string", + "description": "Type: `file`, default: `$id.$key.output_model.output_model`. Folder where the state of the trained model will be saved to", + "help_text": "Type: `file`, default: `$id.$key.output_model.output_model`. Folder where the state of the trained model will be saved to." + , + "default": "$id.$key.output_model.output_model" + } + + + , + "output_compression": { + "type": + "string", + "description": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. The compression format to be used on the output h5mu object", + "help_text": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. The compression format to be used on the output h5mu object.", + "enum": ["gzip", "lzf"] + + + } + + + , + "obsm_output": { + "type": + "string", + "description": "Type: `string`, default: `X_scvi_integrated`. In which ", + "help_text": "Type: `string`, default: `X_scvi_integrated`. In which .obsm slot to store the resulting integrated embedding." + , + "default": "X_scvi_integrated" + } + + +} +}, + + + "scvi options" : { + "title": "SCVI options", + "type": "object", + "description": "No description", + "properties": { + + + "n_hidden_nodes": { + "type": + "integer", + "description": "Type: `integer`, default: `128`. Number of nodes per hidden layer", + "help_text": "Type: `integer`, default: `128`. Number of nodes per hidden layer." + , + "default": "128" + } + + + , + "n_dimensions_latent_space": { + "type": + "integer", + "description": "Type: `integer`, default: `30`. Dimensionality of the latent space", + "help_text": "Type: `integer`, default: `30`. Dimensionality of the latent space." + , + "default": "30" + } + + + , + "n_hidden_layers": { + "type": + "integer", + "description": "Type: `integer`, default: `2`. 
Number of hidden layers used for encoder and decoder neural-networks", + "help_text": "Type: `integer`, default: `2`. Number of hidden layers used for encoder and decoder neural-networks." + , + "default": "2" + } + + + , + "dropout_rate": { + "type": + "number", + "description": "Type: `double`, default: `0.1`. Dropout rate for the neural networks", + "help_text": "Type: `double`, default: `0.1`. Dropout rate for the neural networks." + , + "default": "0.1" + } + + + , + "dispersion": { + "type": + "string", + "description": "Type: `string`, default: `gene`, choices: ``gene`, `gene-batch`, `gene-label`, `gene-cell``. Set the behavior for the dispersion for negative binomial distributions:\n- gene: dispersion parameter of negative binomial is constant per gene across cells\n- gene-batch: dispersion can differ between different batches\n- gene-label: dispersion can differ between different labels\n- gene-cell: dispersion can differ for every gene in every cell\n", + "help_text": "Type: `string`, default: `gene`, choices: ``gene`, `gene-batch`, `gene-label`, `gene-cell``. Set the behavior for the dispersion for negative binomial distributions:\n- gene: dispersion parameter of negative binomial is constant per gene across cells\n- gene-batch: dispersion can differ between different batches\n- gene-label: dispersion can differ between different labels\n- gene-cell: dispersion can differ for every gene in every cell\n", + "enum": ["gene", "gene-batch", "gene-label", "gene-cell"] + + , + "default": "gene" + } + + + , + "gene_likelihood": { + "type": + "string", + "description": "Type: `string`, default: `nb`, choices: ``nb`, `zinb`, `poisson``. Model used to generate the expression data from a count-based likelihood distribution", + "help_text": "Type: `string`, default: `nb`, choices: ``nb`, `zinb`, `poisson``. Model used to generate the expression data from a count-based likelihood distribution.\n- nb: Negative binomial distribution\n- zinb: Zero-inflated negative binomial distribution\n- poisson: Poisson distribution\n", + "enum": ["nb", "zinb", "poisson"] + + , + "default": "nb" + } + + +} +}, + + + "variational auto-encoder model options" : { + "title": "Variational auto-encoder model options", + "type": "object", + "description": "No description", + "properties": { + + + "use_layer_normalization": { + "type": + "string", + "description": "Type: `string`, default: `both`, choices: ``encoder`, `decoder`, `none`, `both``. Neural networks for which to enable layer normalization", + "help_text": "Type: `string`, default: `both`, choices: ``encoder`, `decoder`, `none`, `both``. Neural networks for which to enable layer normalization. \n", + "enum": ["encoder", "decoder", "none", "both"] + + , + "default": "both" + } + + + , + "use_batch_normalization": { + "type": + "string", + "description": "Type: `string`, default: `none`, choices: ``encoder`, `decoder`, `none`, `both``. Neural networks for which to enable batch normalization", + "help_text": "Type: `string`, default: `none`, choices: ``encoder`, `decoder`, `none`, `both``. Neural networks for which to enable batch normalization. \n", + "enum": ["encoder", "decoder", "none", "both"] + , - "default": "nb" - } - - } - }, - "variational auto-encoder model options" : { - "title": "Variational auto-encoder model options", - "type": "object", - "description": "No description", - "properties": { - - "use_layer_normalization": { - "type": "string", - "description": "Type: `string`, default: `both`, choices: ``encoder`, `decoder`, `none`, `both``. 
Neural networks for which to enable layer normalization", - "help_text": "Type: `string`, default: `both`, choices: ``encoder`, `decoder`, `none`, `both``. Neural networks for which to enable layer normalization. \n", - "enum": ["encoder", "decoder", "none", "both"] + "default": "none" + } + + + , + "encode_covariates": { + "type": + "boolean", + "description": "Type: `boolean_false`, default: `true`. Whether to concatenate covariates to expression in encoder", + "help_text": "Type: `boolean_false`, default: `true`. Whether to concatenate covariates to expression in encoder" , - "default": "both" - }, - - "use_batch_normalization": { - "type": "string", - "description": "Type: `string`, default: `none`, choices: ``encoder`, `decoder`, `none`, `both``. Neural networks for which to enable batch normalization", - "help_text": "Type: `string`, default: `none`, choices: ``encoder`, `decoder`, `none`, `both``. Neural networks for which to enable batch normalization. \n", - "enum": ["encoder", "decoder", "none", "both"] + "default": "True" + } + + + , + "deeply_inject_covariates": { + "type": + "boolean", + "description": "Type: `boolean_true`, default: `false`. Whether to concatenate covariates into output of hidden layers in encoder/decoder", + "help_text": "Type: `boolean_true`, default: `false`. Whether to concatenate covariates into output of hidden layers in encoder/decoder. \nThis option only applies when n_layers \u003e 1. The covariates are concatenated to\nthe input of subsequent hidden layers.\n" , - "default": "none" - }, - - "encode_covariates": { - "type": "boolean", - "description": "Type: `boolean_false`, default: `true`. Whether to concatenate covariates to expression in encoder", - "help_text": "Type: `boolean_false`, default: `true`. Whether to concatenate covariates to expression in encoder", - "default": "True" - }, - - "deeply_inject_covariates": { - "type": "boolean", - "description": "Type: `boolean_true`, default: `false`. Whether to concatenate covariates into output of hidden layers in encoder/decoder", - "help_text": "Type: `boolean_true`, default: `false`. Whether to concatenate covariates into output of hidden layers in encoder/decoder. \nThis option only applies when n_layers \u003e 1. The covariates are concatenated to\nthe input of subsequent hidden layers.\n", - "default": "False" - }, - - "use_observed_lib_size": { - "type": "boolean", - "description": "Type: `boolean_true`, default: `false`. Use observed library size for RNA as scaling factor in mean of conditional distribution", - "help_text": "Type: `boolean_true`, default: `false`. Use observed library size for RNA as scaling factor in mean of conditional distribution.\n", - "default": "False" - } - - } - }, - "early stopping arguments" : { - "title": "Early stopping arguments", - "type": "object", - "description": "No description", - "properties": { - - "early_stopping": { - "type": "boolean", - "description": "Type: `boolean`. Whether to perform early stopping with respect to the validation set", - "help_text": "Type: `boolean`. Whether to perform early stopping with respect to the validation set." - }, - - "early_stopping_monitor": { - "type": "string", - "description": "Type: `string`, default: `elbo_validation`, choices: ``elbo_validation`, `reconstruction_loss_validation`, `kl_local_validation``. Metric logged during validation set epoch", - "help_text": "Type: `string`, default: `elbo_validation`, choices: ``elbo_validation`, `reconstruction_loss_validation`, `kl_local_validation``. 
Metric logged during validation set epoch.", - "enum": ["elbo_validation", "reconstruction_loss_validation", "kl_local_validation"] + "default": "False" + } + + + , + "use_observed_lib_size": { + "type": + "boolean", + "description": "Type: `boolean_true`, default: `false`. Use observed library size for RNA as scaling factor in mean of conditional distribution", + "help_text": "Type: `boolean_true`, default: `false`. Use observed library size for RNA as scaling factor in mean of conditional distribution.\n" , - "default": "elbo_validation" - }, - - "early_stopping_patience": { - "type": "integer", - "description": "Type: `integer`, default: `45`. Number of validation epochs with no improvement after which training will be stopped", - "help_text": "Type: `integer`, default: `45`. Number of validation epochs with no improvement after which training will be stopped.", - "default": "45" - }, - - "early_stopping_min_delta": { - "type": "number", - "description": "Type: `double`, default: `0.0`. Minimum change in the monitored quantity to qualify as an improvement, i", - "help_text": "Type: `double`, default: `0.0`. Minimum change in the monitored quantity to qualify as an improvement, i.e. an absolute change of less than min_delta, will count as no improvement.", - "default": "0.0" - } - - } - }, - "learning parameters" : { - "title": "Learning parameters", - "type": "object", - "description": "No description", - "properties": { - - "max_epochs": { - "type": "integer", - "description": "Type: `integer`. Number of passes through the dataset, defaults to (20000 / number of cells) * 400 or 400; whichever is smallest", - "help_text": "Type: `integer`. Number of passes through the dataset, defaults to (20000 / number of cells) * 400 or 400; whichever is smallest." - }, - - "reduce_lr_on_plateau": { - "type": "boolean", - "description": "Type: `boolean`, default: `true`. Whether to monitor validation loss and reduce learning rate when validation set `lr_scheduler_metric` plateaus", - "help_text": "Type: `boolean`, default: `true`. Whether to monitor validation loss and reduce learning rate when validation set `lr_scheduler_metric` plateaus.", - "default": "True" - }, - - "lr_factor": { - "type": "number", - "description": "Type: `double`, default: `0.6`. Factor to reduce learning rate", - "help_text": "Type: `double`, default: `0.6`. Factor to reduce learning rate.", - "default": "0.6" - }, - - "lr_patience": { - "type": "number", - "description": "Type: `double`, default: `30`. Number of epochs with no improvement after which learning rate will be reduced", - "help_text": "Type: `double`, default: `30`. Number of epochs with no improvement after which learning rate will be reduced.", - "default": "30" - } - - } - }, - "data validition" : { - "title": "Data validition", - "type": "object", - "description": "No description", - "properties": { - - "n_obs_min_count": { - "type": "integer", - "description": "Type: `integer`, default: `0`. Minimum number of cells threshold ensuring that every obs_batch category has sufficient observations (cells) for model training", - "help_text": "Type: `integer`, default: `0`. Minimum number of cells threshold ensuring that every obs_batch category has sufficient observations (cells) for model training.", - "default": "0" - }, - - "n_var_min_count": { - "type": "integer", - "description": "Type: `integer`, default: `0`. 
Minimum number of genes threshold ensuring that every var_input filter has sufficient observations (genes) for model training", - "help_text": "Type: `integer`, default: `0`. Minimum number of genes threshold ensuring that every var_input filter has sufficient observations (genes) for model training.", - "default": "0" - } - - } - }, - "nextflow input-output arguments" : { - "title": "Nextflow input-output arguments", - "type": "object", - "description": "Input/output parameters for Nextflow itself. Please note that both publishDir and publish_dir are supported but at least one has to be configured.", - "properties": { - - "publish_dir": { - "type": "string", - "description": "Type: `string`, required, example: `output/`. Path to an output directory", - "help_text": "Type: `string`, required, example: `output/`. Path to an output directory." - }, - - "param_list": { - "type": "string", - "description": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel", - "help_text": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel. A `param_list` can either be a list of maps, a csv file, a json file, a yaml file, or simply a yaml blob.\n\n* A list of maps (as-is) where the keys of each map corresponds to the arguments of the pipeline. Example: in a `nextflow.config` file: `param_list: [ [\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027], [\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027] ]`.\n* A csv file should have column names which correspond to the different arguments of this pipeline. Example: `--param_list data.csv` with columns `id,input`.\n* A json or a yaml file should be a list of maps, each of which has keys corresponding to the arguments of the pipeline. Example: `--param_list data.json` with contents `[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]`.\n* A yaml blob can also be passed directly as a string. Example: `--param_list \"[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]\"`.\n\nWhen passing a csv, json or yaml file, relative path names are relativized to the location of the parameter file. No relativation is performed when `param_list` is a list of maps (as-is) or a yaml blob.", - "hidden": true - } - - } - } + "default": "False" + } + + +} +}, + + + "early stopping arguments" : { + "title": "Early stopping arguments", + "type": "object", + "description": "No description", + "properties": { + + + "early_stopping": { + "type": + "boolean", + "description": "Type: `boolean`. Whether to perform early stopping with respect to the validation set", + "help_text": "Type: `boolean`. Whether to perform early stopping with respect to the validation set." + + } + + + , + "early_stopping_monitor": { + "type": + "string", + "description": "Type: `string`, default: `elbo_validation`, choices: ``elbo_validation`, `reconstruction_loss_validation`, `kl_local_validation``. Metric logged during validation set epoch", + "help_text": "Type: `string`, default: `elbo_validation`, choices: ``elbo_validation`, `reconstruction_loss_validation`, `kl_local_validation``. 
Metric logged during validation set epoch.", + "enum": ["elbo_validation", "reconstruction_loss_validation", "kl_local_validation"] + + , + "default": "elbo_validation" + } + + + , + "early_stopping_patience": { + "type": + "integer", + "description": "Type: `integer`, default: `45`. Number of validation epochs with no improvement after which training will be stopped", + "help_text": "Type: `integer`, default: `45`. Number of validation epochs with no improvement after which training will be stopped." + , + "default": "45" + } + + + , + "early_stopping_min_delta": { + "type": + "number", + "description": "Type: `double`, default: `0.0`. Minimum change in the monitored quantity to qualify as an improvement, i", + "help_text": "Type: `double`, default: `0.0`. Minimum change in the monitored quantity to qualify as an improvement, i.e. an absolute change of less than min_delta, will count as no improvement." + , + "default": "0.0" + } + + +} +}, + + + "learning parameters" : { + "title": "Learning parameters", + "type": "object", + "description": "No description", + "properties": { + + + "max_epochs": { + "type": + "integer", + "description": "Type: `integer`. Number of passes through the dataset, defaults to (20000 / number of cells) * 400 or 400; whichever is smallest", + "help_text": "Type: `integer`. Number of passes through the dataset, defaults to (20000 / number of cells) * 400 or 400; whichever is smallest." + + } + + + , + "reduce_lr_on_plateau": { + "type": + "boolean", + "description": "Type: `boolean`, default: `true`. Whether to monitor validation loss and reduce learning rate when validation set `lr_scheduler_metric` plateaus", + "help_text": "Type: `boolean`, default: `true`. Whether to monitor validation loss and reduce learning rate when validation set `lr_scheduler_metric` plateaus." + , + "default": "True" + } + + + , + "lr_factor": { + "type": + "number", + "description": "Type: `double`, default: `0.6`. Factor to reduce learning rate", + "help_text": "Type: `double`, default: `0.6`. Factor to reduce learning rate." + , + "default": "0.6" + } + + + , + "lr_patience": { + "type": + "number", + "description": "Type: `double`, default: `30`. Number of epochs with no improvement after which learning rate will be reduced", + "help_text": "Type: `double`, default: `30`. Number of epochs with no improvement after which learning rate will be reduced." + , + "default": "30" + } + + +} +}, + + + "data validition" : { + "title": "Data validition", + "type": "object", + "description": "No description", + "properties": { + + + "n_obs_min_count": { + "type": + "integer", + "description": "Type: `integer`, default: `0`. Minimum number of cells threshold ensuring that every obs_batch category has sufficient observations (cells) for model training", + "help_text": "Type: `integer`, default: `0`. Minimum number of cells threshold ensuring that every obs_batch category has sufficient observations (cells) for model training." + , + "default": "0" + } + + + , + "n_var_min_count": { + "type": + "integer", + "description": "Type: `integer`, default: `0`. Minimum number of genes threshold ensuring that every var_input filter has sufficient observations (genes) for model training", + "help_text": "Type: `integer`, default: `0`. Minimum number of genes threshold ensuring that every var_input filter has sufficient observations (genes) for model training." 
+ , + "default": "0" + } + + +} +}, + + + "nextflow input-output arguments" : { + "title": "Nextflow input-output arguments", + "type": "object", + "description": "Input/output parameters for Nextflow itself. Please note that both publishDir and publish_dir are supported but at least one has to be configured.", + "properties": { + + + "publish_dir": { + "type": + "string", + "description": "Type: `string`, required, example: `output/`. Path to an output directory", + "help_text": "Type: `string`, required, example: `output/`. Path to an output directory." + + } + + + , + "param_list": { + "type": + "string", + "description": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel", + "help_text": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel. A `param_list` can either be a list of maps, a csv file, a json file, a yaml file, or simply a yaml blob.\n\n* A list of maps (as-is) where the keys of each map corresponds to the arguments of the pipeline. Example: in a `nextflow.config` file: `param_list: [ [\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027], [\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027] ]`.\n* A csv file should have column names which correspond to the different arguments of this pipeline. Example: `--param_list data.csv` with columns `id,input`.\n* A json or a yaml file should be a list of maps, each of which has keys corresponding to the arguments of the pipeline. Example: `--param_list data.json` with contents `[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]`.\n* A yaml blob can also be passed directly as a string. Example: `--param_list \"[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]\"`.\n\nWhen passing a csv, json or yaml file, relative path names are relativized to the location of the parameter file. 
No relativation is performed when `param_list` is a list of maps (as-is) or a yaml blob.", + "hidden": true + + } + + +} +} +}, +"allOf": [ + + { + "$ref": "#/definitions/inputs" + }, + + { + "$ref": "#/definitions/outputs" + }, + + { + "$ref": "#/definitions/scvi options" + }, + + { + "$ref": "#/definitions/variational auto-encoder model options" + }, + + { + "$ref": "#/definitions/early stopping arguments" + }, + + { + "$ref": "#/definitions/learning parameters" + }, + + { + "$ref": "#/definitions/data validition" }, - "allOf": [ - { - "$ref": "#/definitions/inputs" - }, - { - "$ref": "#/definitions/outputs" - }, - { - "$ref": "#/definitions/scvi options" - }, - { - "$ref": "#/definitions/variational auto-encoder model options" - }, - { - "$ref": "#/definitions/early stopping arguments" - }, - { - "$ref": "#/definitions/learning parameters" - }, - { - "$ref": "#/definitions/data validition" - }, - { - "$ref": "#/definitions/nextflow input-output arguments" - } - ] + + { + "$ref": "#/definitions/nextflow input-output arguments" + } +] } diff --git a/target/nextflow/integrate/totalvi/.config.vsh.yaml b/target/nextflow/integrate/totalvi/.config.vsh.yaml index b8dbc1cb07b..b5c8d3205e9 100644 --- a/target/nextflow/integrate/totalvi/.config.vsh.yaml +++ b/target/nextflow/integrate/totalvi/.config.vsh.yaml @@ -1,7 +1,7 @@ functionality: name: "totalvi" namespace: "integrate" - version: "0.12.3" + version: "0.12.4" authors: - name: "Vladimir Shitov" info: @@ -343,6 +343,6 @@ info: output: "/home/runner/work/openpipeline/openpipeline/target/nextflow/integrate/totalvi" executable: "/home/runner/work/openpipeline/openpipeline/target/nextflow/integrate/totalvi/totalvi" viash_version: "0.7.5" - git_commit: "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" + git_commit: "a075b9f384e200b357c4c85801062a980ddb3383" git_remote: "https://github.com/openpipelines-bio/openpipeline" - git_tag: "0.12.2-3-g827d483cf7" + git_tag: "0.12.3-3-ga075b9f384" diff --git a/target/nextflow/integrate/totalvi/main.nf b/target/nextflow/integrate/totalvi/main.nf index df2d508e7ee..0ac4451bc7a 100644 --- a/target/nextflow/integrate/totalvi/main.nf +++ b/target/nextflow/integrate/totalvi/main.nf @@ -1,4 +1,4 @@ -// totalvi 0.12.3 +// totalvi 0.12.4 // // This wrapper script is auto-generated by viash 0.7.5 and is thus a derivative // work thereof. 
This software comes with ABSOLUTELY NO WARRANTY from Data @@ -27,7 +27,7 @@ thisConfig = processConfig(jsonSlurper.parseText('''{ "functionality" : { "name" : "totalvi", "namespace" : "integrate", - "version" : "0.12.3", + "version" : "0.12.4", "authors" : [ { "name" : "Vladimir Shitov", @@ -453,9 +453,9 @@ thisConfig = processConfig(jsonSlurper.parseText('''{ "platform" : "nextflow", "output" : "/home/runner/work/openpipeline/openpipeline/target/nextflow/integrate/totalvi", "viash_version" : "0.7.5", - "git_commit" : "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f", + "git_commit" : "a075b9f384e200b357c4c85801062a980ddb3383", "git_remote" : "https://github.com/openpipelines-bio/openpipeline", - "git_tag" : "0.12.2-3-g827d483cf7" + "git_tag" : "0.12.3-3-ga075b9f384" } }''')) diff --git a/target/nextflow/integrate/totalvi/nextflow.config b/target/nextflow/integrate/totalvi/nextflow.config index fced150c165..459d6dbe239 100644 --- a/target/nextflow/integrate/totalvi/nextflow.config +++ b/target/nextflow/integrate/totalvi/nextflow.config @@ -2,7 +2,7 @@ manifest { name = 'totalvi' mainScript = 'main.nf' nextflowVersion = '!>=20.12.1-edge' - version = '0.12.3' + version = '0.12.4' description = 'Performs mapping to the reference by totalvi model: https://docs.scvi-tools.org/en/stable/tutorials/notebooks/scarches_scvi_tools.html#Reference-mapping-with-TOTALVI' author = 'Vladimir Shitov' } diff --git a/target/nextflow/integrate/totalvi/nextflow_schema.json b/target/nextflow/integrate/totalvi/nextflow_schema.json index 851c672489f..a4150f4d8e5 100644 --- a/target/nextflow/integrate/totalvi/nextflow_schema.json +++ b/target/nextflow/integrate/totalvi/nextflow_schema.json @@ -1,195 +1,292 @@ { - "$schema": "http://json-schema.org/draft-07/schema", - "title": "totalvi", - "description": "Performs mapping to the reference by totalvi model: https://docs.scvi-tools.org/en/stable/tutorials/notebooks/scarches_scvi_tools.html#Reference-mapping-with-TOTALVI", +"$schema": "http://json-schema.org/draft-07/schema", +"title": "totalvi", +"description": "Performs mapping to the reference by totalvi model: https://docs.scvi-tools.org/en/stable/tutorials/notebooks/scarches_scvi_tools.html#Reference-mapping-with-TOTALVI", +"type": "object", +"definitions": { + + + + "inputs" : { + "title": "Inputs", "type": "object", - "definitions": { - "inputs" : { - "title": "Inputs", - "type": "object", - "description": "No description", - "properties": { - - "input": { - "type": "string", - "description": "Type: `file`, required. Input h5mu file with query data to integrate with reference", - "help_text": "Type: `file`, required. Input h5mu file with query data to integrate with reference." - }, - - "reference": { - "type": "string", - "description": "Type: `file`, required. Input h5mu file with reference data to train the TOTALVI model", - "help_text": "Type: `file`, required. Input h5mu file with reference data to train the TOTALVI model." - }, - - "force_retrain": { - "type": "boolean", - "description": "Type: `boolean_true`, default: `false`. If true, retrain the model and save it to reference_model_path", - "help_text": "Type: `boolean_true`, default: `false`. If true, retrain the model and save it to reference_model_path", - "default": "False" - }, - - "query_modality": { - "type": "string", - "description": "Type: `string`, default: `rna`. ", - "help_text": "Type: `string`, default: `rna`. ", - "default": "rna" - }, - - "query_proteins_modality": { - "type": "string", - "description": "Type: `string`. 
Name of the modality in the input (query) h5mu file containing protein data", - "help_text": "Type: `string`. Name of the modality in the input (query) h5mu file containing protein data" - }, - - "reference_modality": { - "type": "string", - "description": "Type: `string`, default: `rna`. ", - "help_text": "Type: `string`, default: `rna`. ", - "default": "rna" - }, - - "reference_proteins_modality": { - "type": "string", - "description": "Type: `string`, default: `prot`. Name of the modality containing proteins in the reference", - "help_text": "Type: `string`, default: `prot`. Name of the modality containing proteins in the reference", - "default": "prot" - }, - - "input_layer": { - "type": "string", - "description": "Type: `string`. Input layer to use", - "help_text": "Type: `string`. Input layer to use. If None, X is used" - }, - - "obs_batch": { - "type": "string", - "description": "Type: `string`, default: `sample_id`. Column name discriminating between your batches", - "help_text": "Type: `string`, default: `sample_id`. Column name discriminating between your batches.", - "default": "sample_id" - }, - - "var_input": { - "type": "string", - "description": "Type: `string`. ", - "help_text": "Type: `string`. .var column containing highly variable genes. By default, do not subset genes." - } - - } - }, - "outputs" : { - "title": "Outputs", - "type": "object", - "description": "No description", - "properties": { - - "output": { - "type": "string", - "description": "Type: `file`, required, default: `$id.$key.output.output`. Output h5mu file", - "help_text": "Type: `file`, required, default: `$id.$key.output.output`. Output h5mu file.", - "default": "$id.$key.output.output" - }, - - "obsm_output": { - "type": "string", - "description": "Type: `string`, default: `X_integrated_totalvi`. In which ", - "help_text": "Type: `string`, default: `X_integrated_totalvi`. In which .obsm slot to store the resulting integrated embedding.", - "default": "X_integrated_totalvi" - }, - - "obsm_normalized_rna_output": { - "type": "string", - "description": "Type: `string`, default: `X_totalvi_normalized_rna`. In which ", - "help_text": "Type: `string`, default: `X_totalvi_normalized_rna`. In which .obsm slot to store the normalized RNA from TOTALVI.", - "default": "X_totalvi_normalized_rna" - }, - - "obsm_normalized_protein_output": { - "type": "string", - "description": "Type: `string`, default: `X_totalvi_normalized_protein`. In which ", - "help_text": "Type: `string`, default: `X_totalvi_normalized_protein`. In which .obsm slot to store the normalized protein data from TOTALVI.", - "default": "X_totalvi_normalized_protein" - }, - - "reference_model_path": { - "type": "string", - "description": "Type: `file`, default: `$id.$key.reference_model_path.reference_model_path`. Directory with the reference model", - "help_text": "Type: `file`, default: `$id.$key.reference_model_path.reference_model_path`. Directory with the reference model. If not exists, trained model will be saved there", - "default": "$id.$key.reference_model_path.reference_model_path" - }, - - "query_model_path": { - "type": "string", - "description": "Type: `file`, default: `$id.$key.query_model_path.query_model_path`. Directory, where the query model will be saved", - "help_text": "Type: `file`, default: `$id.$key.query_model_path.query_model_path`. 
Directory, where the query model will be saved", - "default": "$id.$key.query_model_path.query_model_path" - } - - } - }, - "learning parameters" : { - "title": "Learning parameters", - "type": "object", - "description": "No description", - "properties": { - - "max_epochs": { - "type": "integer", - "description": "Type: `integer`, default: `400`. Number of passes through the dataset", - "help_text": "Type: `integer`, default: `400`. Number of passes through the dataset", - "default": "400" - }, - - "max_query_epochs": { - "type": "integer", - "description": "Type: `integer`, default: `200`. Number of passes through the dataset, when fine-tuning model for query", - "help_text": "Type: `integer`, default: `200`. Number of passes through the dataset, when fine-tuning model for query", - "default": "200" - }, - - "weight_decay": { - "type": "number", - "description": "Type: `double`, default: `0.0`. Weight decay, when fine-tuning model for query", - "help_text": "Type: `double`, default: `0.0`. Weight decay, when fine-tuning model for query", - "default": "0.0" - } - - } - }, - "nextflow input-output arguments" : { - "title": "Nextflow input-output arguments", - "type": "object", - "description": "Input/output parameters for Nextflow itself. Please note that both publishDir and publish_dir are supported but at least one has to be configured.", - "properties": { - - "publish_dir": { - "type": "string", - "description": "Type: `string`, required, example: `output/`. Path to an output directory", - "help_text": "Type: `string`, required, example: `output/`. Path to an output directory." - }, - - "param_list": { - "type": "string", - "description": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel", - "help_text": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel. A `param_list` can either be a list of maps, a csv file, a json file, a yaml file, or simply a yaml blob.\n\n* A list of maps (as-is) where the keys of each map corresponds to the arguments of the pipeline. Example: in a `nextflow.config` file: `param_list: [ [\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027], [\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027] ]`.\n* A csv file should have column names which correspond to the different arguments of this pipeline. Example: `--param_list data.csv` with columns `id,input`.\n* A json or a yaml file should be a list of maps, each of which has keys corresponding to the arguments of the pipeline. Example: `--param_list data.json` with contents `[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]`.\n* A yaml blob can also be passed directly as a string. Example: `--param_list \"[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]\"`.\n\nWhen passing a csv, json or yaml file, relative path names are relativized to the location of the parameter file. No relativation is performed when `param_list` is a list of maps (as-is) or a yaml blob.", - "hidden": true - } - - } - } + "description": "No description", + "properties": { + + + "input": { + "type": + "string", + "description": "Type: `file`, required. Input h5mu file with query data to integrate with reference", + "help_text": "Type: `file`, required. 
Input h5mu file with query data to integrate with reference." + + } + + + , + "reference": { + "type": + "string", + "description": "Type: `file`, required. Input h5mu file with reference data to train the TOTALVI model", + "help_text": "Type: `file`, required. Input h5mu file with reference data to train the TOTALVI model." + + } + + + , + "force_retrain": { + "type": + "boolean", + "description": "Type: `boolean_true`, default: `false`. If true, retrain the model and save it to reference_model_path", + "help_text": "Type: `boolean_true`, default: `false`. If true, retrain the model and save it to reference_model_path" + , + "default": "False" + } + + + , + "query_modality": { + "type": + "string", + "description": "Type: `string`, default: `rna`. ", + "help_text": "Type: `string`, default: `rna`. " + , + "default": "rna" + } + + + , + "query_proteins_modality": { + "type": + "string", + "description": "Type: `string`. Name of the modality in the input (query) h5mu file containing protein data", + "help_text": "Type: `string`. Name of the modality in the input (query) h5mu file containing protein data" + + } + + + , + "reference_modality": { + "type": + "string", + "description": "Type: `string`, default: `rna`. ", + "help_text": "Type: `string`, default: `rna`. " + , + "default": "rna" + } + + + , + "reference_proteins_modality": { + "type": + "string", + "description": "Type: `string`, default: `prot`. Name of the modality containing proteins in the reference", + "help_text": "Type: `string`, default: `prot`. Name of the modality containing proteins in the reference" + , + "default": "prot" + } + + + , + "input_layer": { + "type": + "string", + "description": "Type: `string`. Input layer to use", + "help_text": "Type: `string`. Input layer to use. If None, X is used" + + } + + + , + "obs_batch": { + "type": + "string", + "description": "Type: `string`, default: `sample_id`. Column name discriminating between your batches", + "help_text": "Type: `string`, default: `sample_id`. Column name discriminating between your batches." + , + "default": "sample_id" + } + + + , + "var_input": { + "type": + "string", + "description": "Type: `string`. ", + "help_text": "Type: `string`. .var column containing highly variable genes. By default, do not subset genes." + + } + + +} +}, + + + "outputs" : { + "title": "Outputs", + "type": "object", + "description": "No description", + "properties": { + + + "output": { + "type": + "string", + "description": "Type: `file`, required, default: `$id.$key.output.output`. Output h5mu file", + "help_text": "Type: `file`, required, default: `$id.$key.output.output`. Output h5mu file." + , + "default": "$id.$key.output.output" + } + + + , + "obsm_output": { + "type": + "string", + "description": "Type: `string`, default: `X_integrated_totalvi`. In which ", + "help_text": "Type: `string`, default: `X_integrated_totalvi`. In which .obsm slot to store the resulting integrated embedding." + , + "default": "X_integrated_totalvi" + } + + + , + "obsm_normalized_rna_output": { + "type": + "string", + "description": "Type: `string`, default: `X_totalvi_normalized_rna`. In which ", + "help_text": "Type: `string`, default: `X_totalvi_normalized_rna`. In which .obsm slot to store the normalized RNA from TOTALVI." + , + "default": "X_totalvi_normalized_rna" + } + + + , + "obsm_normalized_protein_output": { + "type": + "string", + "description": "Type: `string`, default: `X_totalvi_normalized_protein`. 
In which ", + "help_text": "Type: `string`, default: `X_totalvi_normalized_protein`. In which .obsm slot to store the normalized protein data from TOTALVI." + , + "default": "X_totalvi_normalized_protein" + } + + + , + "reference_model_path": { + "type": + "string", + "description": "Type: `file`, default: `$id.$key.reference_model_path.reference_model_path`. Directory with the reference model", + "help_text": "Type: `file`, default: `$id.$key.reference_model_path.reference_model_path`. Directory with the reference model. If not exists, trained model will be saved there" + , + "default": "$id.$key.reference_model_path.reference_model_path" + } + + + , + "query_model_path": { + "type": + "string", + "description": "Type: `file`, default: `$id.$key.query_model_path.query_model_path`. Directory, where the query model will be saved", + "help_text": "Type: `file`, default: `$id.$key.query_model_path.query_model_path`. Directory, where the query model will be saved" + , + "default": "$id.$key.query_model_path.query_model_path" + } + + +} +}, + + + "learning parameters" : { + "title": "Learning parameters", + "type": "object", + "description": "No description", + "properties": { + + + "max_epochs": { + "type": + "integer", + "description": "Type: `integer`, default: `400`. Number of passes through the dataset", + "help_text": "Type: `integer`, default: `400`. Number of passes through the dataset" + , + "default": "400" + } + + + , + "max_query_epochs": { + "type": + "integer", + "description": "Type: `integer`, default: `200`. Number of passes through the dataset, when fine-tuning model for query", + "help_text": "Type: `integer`, default: `200`. Number of passes through the dataset, when fine-tuning model for query" + , + "default": "200" + } + + + , + "weight_decay": { + "type": + "number", + "description": "Type: `double`, default: `0.0`. Weight decay, when fine-tuning model for query", + "help_text": "Type: `double`, default: `0.0`. Weight decay, when fine-tuning model for query" + , + "default": "0.0" + } + + +} +}, + + + "nextflow input-output arguments" : { + "title": "Nextflow input-output arguments", + "type": "object", + "description": "Input/output parameters for Nextflow itself. Please note that both publishDir and publish_dir are supported but at least one has to be configured.", + "properties": { + + + "publish_dir": { + "type": + "string", + "description": "Type: `string`, required, example: `output/`. Path to an output directory", + "help_text": "Type: `string`, required, example: `output/`. Path to an output directory." + + } + + + , + "param_list": { + "type": + "string", + "description": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel", + "help_text": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel. A `param_list` can either be a list of maps, a csv file, a json file, a yaml file, or simply a yaml blob.\n\n* A list of maps (as-is) where the keys of each map corresponds to the arguments of the pipeline. Example: in a `nextflow.config` file: `param_list: [ [\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027], [\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027] ]`.\n* A csv file should have column names which correspond to the different arguments of this pipeline. 
Example: `--param_list data.csv` with columns `id,input`.\n* A json or a yaml file should be a list of maps, each of which has keys corresponding to the arguments of the pipeline. Example: `--param_list data.json` with contents `[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]`.\n* A yaml blob can also be passed directly as a string. Example: `--param_list \"[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]\"`.\n\nWhen passing a csv, json or yaml file, relative path names are relativized to the location of the parameter file. No relativation is performed when `param_list` is a list of maps (as-is) or a yaml blob.", + "hidden": true + + } + + +} +} +}, +"allOf": [ + + { + "$ref": "#/definitions/inputs" + }, + + { + "$ref": "#/definitions/outputs" + }, + + { + "$ref": "#/definitions/learning parameters" }, - "allOf": [ - { - "$ref": "#/definitions/inputs" - }, - { - "$ref": "#/definitions/outputs" - }, - { - "$ref": "#/definitions/learning parameters" - }, - { - "$ref": "#/definitions/nextflow input-output arguments" - } - ] + + { + "$ref": "#/definitions/nextflow input-output arguments" + } +] } diff --git a/target/nextflow/interpret/lianapy/.config.vsh.yaml b/target/nextflow/interpret/lianapy/.config.vsh.yaml index 1fe32159ae0..e6f8c299aeb 100644 --- a/target/nextflow/interpret/lianapy/.config.vsh.yaml +++ b/target/nextflow/interpret/lianapy/.config.vsh.yaml @@ -1,7 +1,7 @@ functionality: name: "lianapy" namespace: "interpret" - version: "0.12.3" + version: "0.12.4" authors: - name: "Mauro Saporita" roles: @@ -308,6 +308,6 @@ info: output: "/home/runner/work/openpipeline/openpipeline/target/nextflow/interpret/lianapy" executable: "/home/runner/work/openpipeline/openpipeline/target/nextflow/interpret/lianapy/lianapy" viash_version: "0.7.5" - git_commit: "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" + git_commit: "a075b9f384e200b357c4c85801062a980ddb3383" git_remote: "https://github.com/openpipelines-bio/openpipeline" - git_tag: "0.12.2-3-g827d483cf7" + git_tag: "0.12.3-3-ga075b9f384" diff --git a/target/nextflow/interpret/lianapy/main.nf b/target/nextflow/interpret/lianapy/main.nf index 32a8fe671ae..e3005106b51 100644 --- a/target/nextflow/interpret/lianapy/main.nf +++ b/target/nextflow/interpret/lianapy/main.nf @@ -1,4 +1,4 @@ -// lianapy 0.12.3 +// lianapy 0.12.4 // // This wrapper script is auto-generated by viash 0.7.5 and is thus a derivative // work thereof. 
This software comes with ABSOLUTELY NO WARRANTY from Data @@ -28,7 +28,7 @@ thisConfig = processConfig(jsonSlurper.parseText('''{ "functionality" : { "name" : "lianapy", "namespace" : "interpret", - "version" : "0.12.3", + "version" : "0.12.4", "authors" : [ { "name" : "Mauro Saporita", @@ -404,9 +404,9 @@ thisConfig = processConfig(jsonSlurper.parseText('''{ "platform" : "nextflow", "output" : "/home/runner/work/openpipeline/openpipeline/target/nextflow/interpret/lianapy", "viash_version" : "0.7.5", - "git_commit" : "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f", + "git_commit" : "a075b9f384e200b357c4c85801062a980ddb3383", "git_remote" : "https://github.com/openpipelines-bio/openpipeline", - "git_tag" : "0.12.2-3-g827d483cf7" + "git_tag" : "0.12.3-3-ga075b9f384" } }''')) diff --git a/target/nextflow/interpret/lianapy/nextflow.config b/target/nextflow/interpret/lianapy/nextflow.config index a30996e01ef..f3ce24b560c 100644 --- a/target/nextflow/interpret/lianapy/nextflow.config +++ b/target/nextflow/interpret/lianapy/nextflow.config @@ -2,7 +2,7 @@ manifest { name = 'lianapy' mainScript = 'main.nf' nextflowVersion = '!>=20.12.1-edge' - version = '0.12.3' + version = '0.12.4' description = 'Performs LIANA integration based as described in https://github.com/saezlab/liana-py' author = 'Mauro Saporita, Povilas Gibas' } diff --git a/target/nextflow/interpret/lianapy/nextflow_schema.json b/target/nextflow/interpret/lianapy/nextflow_schema.json index b9995300f44..b258b7deb01 100644 --- a/target/nextflow/interpret/lianapy/nextflow_schema.json +++ b/target/nextflow/interpret/lianapy/nextflow_schema.json @@ -1,140 +1,207 @@ { - "$schema": "http://json-schema.org/draft-07/schema", - "title": "lianapy", - "description": "Performs LIANA integration based as described in https://github.com/saezlab/liana-py", +"$schema": "http://json-schema.org/draft-07/schema", +"title": "lianapy", +"description": "Performs LIANA integration based as described in https://github.com/saezlab/liana-py", +"type": "object", +"definitions": { + + + + "arguments" : { + "title": "Arguments", "type": "object", - "definitions": { - "arguments" : { - "title": "Arguments", - "type": "object", - "description": "No description", - "properties": { - - "input": { - "type": "string", - "description": "Type: `file`, required. Input h5mu file", - "help_text": "Type: `file`, required. Input h5mu file" - }, - - "output": { - "type": "string", - "description": "Type: `file`, required, default: `$id.$key.output.output`. Output h5mu file", - "help_text": "Type: `file`, required, default: `$id.$key.output.output`. Output h5mu file.", - "default": "$id.$key.output.output" - }, - - "output_compression": { - "type": "string", - "description": "Type: `string`, default: `gzip`, choices: ``gzip`, `lzf``. ", - "help_text": "Type: `string`, default: `gzip`, choices: ``gzip`, `lzf``. ", - "enum": ["gzip", "lzf"] + "description": "No description", + "properties": { + + + "input": { + "type": + "string", + "description": "Type: `file`, required. Input h5mu file", + "help_text": "Type: `file`, required. Input h5mu file" + + } + + + , + "output": { + "type": + "string", + "description": "Type: `file`, required, default: `$id.$key.output.output`. Output h5mu file", + "help_text": "Type: `file`, required, default: `$id.$key.output.output`. Output h5mu file." , - "default": "gzip" - }, - - "modality": { - "type": "string", - "description": "Type: `string`, default: `rna`. ", - "help_text": "Type: `string`, default: `rna`. 
", - "default": "rna" - }, - - "layer": { - "type": "string", - "description": "Type: `string`. Layer in anndata", - "help_text": "Type: `string`. Layer in anndata.AnnData.layers to use. If None, use mudata.mod[modality].X." - }, - - "groupby": { - "type": "string", - "description": "Type: `string`, default: `bulk_labels`. The key of the observations grouping to consider", - "help_text": "Type: `string`, default: `bulk_labels`. The key of the observations grouping to consider.", - "default": "bulk_labels" - }, - - "resource_name": { - "type": "string", - "description": "Type: `string`, default: `consensus`, choices: ``baccin2019`, `cellcall`, `cellchatdb`, `cellinker`, `cellphonedb`, `celltalkdb`, `connectomedb2020`, `consensus`, `embrace`, `guide2pharma`, `hpmr`, `icellnet`, `italk`, `kirouac2010`, `lrdb`, `mouseconsensus`, `ramilowski2015``. Name of the resource to be loaded and use for ligand-receptor inference", - "help_text": "Type: `string`, default: `consensus`, choices: ``baccin2019`, `cellcall`, `cellchatdb`, `cellinker`, `cellphonedb`, `celltalkdb`, `connectomedb2020`, `consensus`, `embrace`, `guide2pharma`, `hpmr`, `icellnet`, `italk`, `kirouac2010`, `lrdb`, `mouseconsensus`, `ramilowski2015``. Name of the resource to be loaded and use for ligand-receptor inference.", - "enum": ["baccin2019", "cellcall", "cellchatdb", "cellinker", "cellphonedb", "celltalkdb", "connectomedb2020", "consensus", "embrace", "guide2pharma", "hpmr", "icellnet", "italk", "kirouac2010", "lrdb", "mouseconsensus", "ramilowski2015"] + "default": "$id.$key.output.output" + } + + + , + "output_compression": { + "type": + "string", + "description": "Type: `string`, default: `gzip`, choices: ``gzip`, `lzf``. ", + "help_text": "Type: `string`, default: `gzip`, choices: ``gzip`, `lzf``. ", + "enum": ["gzip", "lzf"] + , - "default": "consensus" - }, - - "gene_symbol": { - "type": "string", - "description": "Type: `string`, default: `gene_symbol`. Column name in var DataFrame in which gene symbol are stored", - "help_text": "Type: `string`, default: `gene_symbol`. Column name in var DataFrame in which gene symbol are stored.", - "default": "gene_symbol" - }, - - "expr_prop": { - "type": "number", - "description": "Type: `double`, default: `0.1`. Minimum expression proportion for the ligands/receptors (and their subunits) in the corresponding cell identities", - "help_text": "Type: `double`, default: `0.1`. Minimum expression proportion for the ligands/receptors (and their subunits) in the corresponding cell identities. Set to \u00270\u0027, to return unfiltered results.", - "default": "0.1" - }, - - "min_cells": { - "type": "integer", - "description": "Type: `integer`, default: `5`. Minimum cells per cell identity (\u0027groupby\u0027) to be considered for downstream analysis", - "help_text": "Type: `integer`, default: `5`. Minimum cells per cell identity (\u0027groupby\u0027) to be considered for downstream analysis.", - "default": "5" - }, - - "aggregate_method": { - "type": "string", - "description": "Type: `string`, default: `rra`, choices: ``mean`, `rra``. Method aggregation approach, one of [\u0027mean\u0027, \u0027rra\u0027], where \u0027mean\u0027 represents the mean rank, while \u0027rra\u0027 is the RobustRankAggregate (Kolde et al", - "help_text": "Type: `string`, default: `rra`, choices: ``mean`, `rra``. 
Method aggregation approach, one of [\u0027mean\u0027, \u0027rra\u0027], where \u0027mean\u0027 represents the mean rank, while \u0027rra\u0027 is the RobustRankAggregate (Kolde et al., 2014) of the interactions.", - "enum": ["mean", "rra"] + "default": "gzip" + } + + + , + "modality": { + "type": + "string", + "description": "Type: `string`, default: `rna`. ", + "help_text": "Type: `string`, default: `rna`. " , - "default": "rra" - }, - - "return_all_lrs": { - "type": "boolean", - "description": "Type: `boolean`, default: `false`. Bool whether to return all LRs, or only those that surpass the \u0027expr_prop\u0027 threshold", - "help_text": "Type: `boolean`, default: `false`. Bool whether to return all LRs, or only those that surpass the \u0027expr_prop\u0027 threshold. Those interactions that do not pass the \u0027expr_prop\u0027 threshold will be assigned to the *worst* score of the ones that do. \u0027False\u0027 by default.", - "default": "False" - }, - - "n_perms": { - "type": "integer", - "description": "Type: `integer`, default: `100`. Number of permutations for the permutation test", - "help_text": "Type: `integer`, default: `100`. Number of permutations for the permutation test. Note that this is relevant only for permutation-based methods - e.g. \u0027CellPhoneDB", - "default": "100" - } - - } - }, - "nextflow input-output arguments" : { - "title": "Nextflow input-output arguments", - "type": "object", - "description": "Input/output parameters for Nextflow itself. Please note that both publishDir and publish_dir are supported but at least one has to be configured.", - "properties": { - - "publish_dir": { - "type": "string", - "description": "Type: `string`, required, example: `output/`. Path to an output directory", - "help_text": "Type: `string`, required, example: `output/`. Path to an output directory." - }, - - "param_list": { - "type": "string", - "description": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel", - "help_text": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel. A `param_list` can either be a list of maps, a csv file, a json file, a yaml file, or simply a yaml blob.\n\n* A list of maps (as-is) where the keys of each map corresponds to the arguments of the pipeline. Example: in a `nextflow.config` file: `param_list: [ [\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027], [\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027] ]`.\n* A csv file should have column names which correspond to the different arguments of this pipeline. Example: `--param_list data.csv` with columns `id,input`.\n* A json or a yaml file should be a list of maps, each of which has keys corresponding to the arguments of the pipeline. Example: `--param_list data.json` with contents `[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]`.\n* A yaml blob can also be passed directly as a string. Example: `--param_list \"[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]\"`.\n\nWhen passing a csv, json or yaml file, relative path names are relativized to the location of the parameter file. 
No relativation is performed when `param_list` is a list of maps (as-is) or a yaml blob.", - "hidden": true - } - - } - } + "default": "rna" + } + + + , + "layer": { + "type": + "string", + "description": "Type: `string`. Layer in anndata", + "help_text": "Type: `string`. Layer in anndata.AnnData.layers to use. If None, use mudata.mod[modality].X." + + } + + + , + "groupby": { + "type": + "string", + "description": "Type: `string`, default: `bulk_labels`. The key of the observations grouping to consider", + "help_text": "Type: `string`, default: `bulk_labels`. The key of the observations grouping to consider." + , + "default": "bulk_labels" + } + + + , + "resource_name": { + "type": + "string", + "description": "Type: `string`, default: `consensus`, choices: ``baccin2019`, `cellcall`, `cellchatdb`, `cellinker`, `cellphonedb`, `celltalkdb`, `connectomedb2020`, `consensus`, `embrace`, `guide2pharma`, `hpmr`, `icellnet`, `italk`, `kirouac2010`, `lrdb`, `mouseconsensus`, `ramilowski2015``. Name of the resource to be loaded and use for ligand-receptor inference", + "help_text": "Type: `string`, default: `consensus`, choices: ``baccin2019`, `cellcall`, `cellchatdb`, `cellinker`, `cellphonedb`, `celltalkdb`, `connectomedb2020`, `consensus`, `embrace`, `guide2pharma`, `hpmr`, `icellnet`, `italk`, `kirouac2010`, `lrdb`, `mouseconsensus`, `ramilowski2015``. Name of the resource to be loaded and use for ligand-receptor inference.", + "enum": ["baccin2019", "cellcall", "cellchatdb", "cellinker", "cellphonedb", "celltalkdb", "connectomedb2020", "consensus", "embrace", "guide2pharma", "hpmr", "icellnet", "italk", "kirouac2010", "lrdb", "mouseconsensus", "ramilowski2015"] + + , + "default": "consensus" + } + + + , + "gene_symbol": { + "type": + "string", + "description": "Type: `string`, default: `gene_symbol`. Column name in var DataFrame in which gene symbol are stored", + "help_text": "Type: `string`, default: `gene_symbol`. Column name in var DataFrame in which gene symbol are stored." + , + "default": "gene_symbol" + } + + + , + "expr_prop": { + "type": + "number", + "description": "Type: `double`, default: `0.1`. Minimum expression proportion for the ligands/receptors (and their subunits) in the corresponding cell identities", + "help_text": "Type: `double`, default: `0.1`. Minimum expression proportion for the ligands/receptors (and their subunits) in the corresponding cell identities. Set to \u00270\u0027, to return unfiltered results." + , + "default": "0.1" + } + + + , + "min_cells": { + "type": + "integer", + "description": "Type: `integer`, default: `5`. Minimum cells per cell identity (\u0027groupby\u0027) to be considered for downstream analysis", + "help_text": "Type: `integer`, default: `5`. Minimum cells per cell identity (\u0027groupby\u0027) to be considered for downstream analysis." + , + "default": "5" + } + + + , + "aggregate_method": { + "type": + "string", + "description": "Type: `string`, default: `rra`, choices: ``mean`, `rra``. Method aggregation approach, one of [\u0027mean\u0027, \u0027rra\u0027], where \u0027mean\u0027 represents the mean rank, while \u0027rra\u0027 is the RobustRankAggregate (Kolde et al", + "help_text": "Type: `string`, default: `rra`, choices: ``mean`, `rra``. 
Method aggregation approach, one of [\u0027mean\u0027, \u0027rra\u0027], where \u0027mean\u0027 represents the mean rank, while \u0027rra\u0027 is the RobustRankAggregate (Kolde et al., 2014) of the interactions.", + "enum": ["mean", "rra"] + + , + "default": "rra" + } + + + , + "return_all_lrs": { + "type": + "boolean", + "description": "Type: `boolean`, default: `false`. Bool whether to return all LRs, or only those that surpass the \u0027expr_prop\u0027 threshold", + "help_text": "Type: `boolean`, default: `false`. Bool whether to return all LRs, or only those that surpass the \u0027expr_prop\u0027 threshold. Those interactions that do not pass the \u0027expr_prop\u0027 threshold will be assigned to the *worst* score of the ones that do. \u0027False\u0027 by default." + , + "default": "False" + } + + + , + "n_perms": { + "type": + "integer", + "description": "Type: `integer`, default: `100`. Number of permutations for the permutation test", + "help_text": "Type: `integer`, default: `100`. Number of permutations for the permutation test. Note that this is relevant only for permutation-based methods - e.g. \u0027CellPhoneDB" + , + "default": "100" + } + + +} +}, + + + "nextflow input-output arguments" : { + "title": "Nextflow input-output arguments", + "type": "object", + "description": "Input/output parameters for Nextflow itself. Please note that both publishDir and publish_dir are supported but at least one has to be configured.", + "properties": { + + + "publish_dir": { + "type": + "string", + "description": "Type: `string`, required, example: `output/`. Path to an output directory", + "help_text": "Type: `string`, required, example: `output/`. Path to an output directory." + + } + + + , + "param_list": { + "type": + "string", + "description": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel", + "help_text": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel. A `param_list` can either be a list of maps, a csv file, a json file, a yaml file, or simply a yaml blob.\n\n* A list of maps (as-is) where the keys of each map corresponds to the arguments of the pipeline. Example: in a `nextflow.config` file: `param_list: [ [\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027], [\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027] ]`.\n* A csv file should have column names which correspond to the different arguments of this pipeline. Example: `--param_list data.csv` with columns `id,input`.\n* A json or a yaml file should be a list of maps, each of which has keys corresponding to the arguments of the pipeline. Example: `--param_list data.json` with contents `[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]`.\n* A yaml blob can also be passed directly as a string. Example: `--param_list \"[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]\"`.\n\nWhen passing a csv, json or yaml file, relative path names are relativized to the location of the parameter file. 
No relativation is performed when `param_list` is a list of maps (as-is) or a yaml blob.", + "hidden": true + + } + + +} +} +}, +"allOf": [ + + { + "$ref": "#/definitions/arguments" }, - "allOf": [ - { - "$ref": "#/definitions/arguments" - }, - { - "$ref": "#/definitions/nextflow input-output arguments" - } - ] + + { + "$ref": "#/definitions/nextflow input-output arguments" + } +] } diff --git a/target/nextflow/labels_transfer/knn/.config.vsh.yaml b/target/nextflow/labels_transfer/knn/.config.vsh.yaml index 8f099934250..f1868c4828e 100644 --- a/target/nextflow/labels_transfer/knn/.config.vsh.yaml +++ b/target/nextflow/labels_transfer/knn/.config.vsh.yaml @@ -1,7 +1,7 @@ functionality: name: "knn" namespace: "labels_transfer" - version: "0.12.3" + version: "0.12.4" authors: - name: "Vladimir Shitov" roles: @@ -374,6 +374,6 @@ info: output: "/home/runner/work/openpipeline/openpipeline/target/nextflow/labels_transfer/knn" executable: "/home/runner/work/openpipeline/openpipeline/target/nextflow/labels_transfer/knn/knn" viash_version: "0.7.5" - git_commit: "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" + git_commit: "a075b9f384e200b357c4c85801062a980ddb3383" git_remote: "https://github.com/openpipelines-bio/openpipeline" - git_tag: "0.12.2-3-g827d483cf7" + git_tag: "0.12.3-3-ga075b9f384" diff --git a/target/nextflow/labels_transfer/knn/main.nf b/target/nextflow/labels_transfer/knn/main.nf index c65164cb08a..1cdb4932d0e 100644 --- a/target/nextflow/labels_transfer/knn/main.nf +++ b/target/nextflow/labels_transfer/knn/main.nf @@ -1,4 +1,4 @@ -// knn 0.12.3 +// knn 0.12.4 // // This wrapper script is auto-generated by viash 0.7.5 and is thus a derivative // work thereof. This software comes with ABSOLUTELY NO WARRANTY from Data @@ -27,7 +27,7 @@ thisConfig = processConfig(jsonSlurper.parseText('''{ "functionality" : { "name" : "knn", "namespace" : "labels_transfer", - "version" : "0.12.3", + "version" : "0.12.4", "authors" : [ { "name" : "Vladimir Shitov", @@ -500,9 +500,9 @@ thisConfig = processConfig(jsonSlurper.parseText('''{ "platform" : "nextflow", "output" : "/home/runner/work/openpipeline/openpipeline/target/nextflow/labels_transfer/knn", "viash_version" : "0.7.5", - "git_commit" : "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f", + "git_commit" : "a075b9f384e200b357c4c85801062a980ddb3383", "git_remote" : "https://github.com/openpipelines-bio/openpipeline", - "git_tag" : "0.12.2-3-g827d483cf7" + "git_tag" : "0.12.3-3-ga075b9f384" } }''')) diff --git a/target/nextflow/labels_transfer/knn/nextflow.config b/target/nextflow/labels_transfer/knn/nextflow.config index bb783120861..59895a9eecd 100644 --- a/target/nextflow/labels_transfer/knn/nextflow.config +++ b/target/nextflow/labels_transfer/knn/nextflow.config @@ -2,7 +2,7 @@ manifest { name = 'knn' mainScript = 'main.nf' nextflowVersion = '!>=20.12.1-edge' - version = '0.12.3' + version = '0.12.4' description = 'Performs label transfer from reference to query using KNN classifier' author = 'Vladimir Shitov' } diff --git a/target/nextflow/labels_transfer/knn/nextflow_schema.json b/target/nextflow/labels_transfer/knn/nextflow_schema.json index 7af9ecce014..2e268c2a852 100644 --- a/target/nextflow/labels_transfer/knn/nextflow_schema.json +++ b/target/nextflow/labels_transfer/knn/nextflow_schema.json @@ -1,51 +1,70 @@ { - "$schema": "http://json-schema.org/draft-07/schema", - "title": "knn", - "description": "Performs label transfer from reference to query using KNN classifier", +"$schema": "http://json-schema.org/draft-07/schema", +"title": "knn", 
+"description": "Performs label transfer from reference to query using KNN classifier", +"type": "object", +"definitions": { + + + + "learning parameters" : { + "title": "Learning parameters", "type": "object", - "definitions": { - "learning parameters" : { - "title": "Learning parameters", - "type": "object", - "description": "No description", - "properties": { - - "n_neighbors": { - "type": "integer", - "description": "Type: `integer`, required. Number of nearest neighbors to use for classification", - "help_text": "Type: `integer`, required. Number of nearest neighbors to use for classification" - } - - } - }, - "nextflow input-output arguments" : { - "title": "Nextflow input-output arguments", - "type": "object", - "description": "Input/output parameters for Nextflow itself. Please note that both publishDir and publish_dir are supported but at least one has to be configured.", - "properties": { - - "publish_dir": { - "type": "string", - "description": "Type: `string`, required, example: `output/`. Path to an output directory", - "help_text": "Type: `string`, required, example: `output/`. Path to an output directory." - }, - - "param_list": { - "type": "string", - "description": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel", - "help_text": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel. A `param_list` can either be a list of maps, a csv file, a json file, a yaml file, or simply a yaml blob.\n\n* A list of maps (as-is) where the keys of each map corresponds to the arguments of the pipeline. Example: in a `nextflow.config` file: `param_list: [ [\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027], [\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027] ]`.\n* A csv file should have column names which correspond to the different arguments of this pipeline. Example: `--param_list data.csv` with columns `id,input`.\n* A json or a yaml file should be a list of maps, each of which has keys corresponding to the arguments of the pipeline. Example: `--param_list data.json` with contents `[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]`.\n* A yaml blob can also be passed directly as a string. Example: `--param_list \"[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]\"`.\n\nWhen passing a csv, json or yaml file, relative path names are relativized to the location of the parameter file. No relativation is performed when `param_list` is a list of maps (as-is) or a yaml blob.", - "hidden": true - } - - } - } + "description": "No description", + "properties": { + + + "n_neighbors": { + "type": + "integer", + "description": "Type: `integer`, required. Number of nearest neighbors to use for classification", + "help_text": "Type: `integer`, required. Number of nearest neighbors to use for classification" + + } + + +} +}, + + + "nextflow input-output arguments" : { + "title": "Nextflow input-output arguments", + "type": "object", + "description": "Input/output parameters for Nextflow itself. Please note that both publishDir and publish_dir are supported but at least one has to be configured.", + "properties": { + + + "publish_dir": { + "type": + "string", + "description": "Type: `string`, required, example: `output/`. 
Path to an output directory", + "help_text": "Type: `string`, required, example: `output/`. Path to an output directory." + + } + + + , + "param_list": { + "type": + "string", + "description": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel", + "help_text": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel. A `param_list` can either be a list of maps, a csv file, a json file, a yaml file, or simply a yaml blob.\n\n* A list of maps (as-is) where the keys of each map corresponds to the arguments of the pipeline. Example: in a `nextflow.config` file: `param_list: [ [\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027], [\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027] ]`.\n* A csv file should have column names which correspond to the different arguments of this pipeline. Example: `--param_list data.csv` with columns `id,input`.\n* A json or a yaml file should be a list of maps, each of which has keys corresponding to the arguments of the pipeline. Example: `--param_list data.json` with contents `[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]`.\n* A yaml blob can also be passed directly as a string. Example: `--param_list \"[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]\"`.\n\nWhen passing a csv, json or yaml file, relative path names are relativized to the location of the parameter file. No relativation is performed when `param_list` is a list of maps (as-is) or a yaml blob.", + "hidden": true + + } + + +} +} +}, +"allOf": [ + + { + "$ref": "#/definitions/learning parameters" }, - "allOf": [ - { - "$ref": "#/definitions/learning parameters" - }, - { - "$ref": "#/definitions/nextflow input-output arguments" - } - ] + + { + "$ref": "#/definitions/nextflow input-output arguments" + } +] } diff --git a/target/nextflow/labels_transfer/xgboost/.config.vsh.yaml b/target/nextflow/labels_transfer/xgboost/.config.vsh.yaml index e2887b6c124..f1a68d5a4e8 100644 --- a/target/nextflow/labels_transfer/xgboost/.config.vsh.yaml +++ b/target/nextflow/labels_transfer/xgboost/.config.vsh.yaml @@ -1,7 +1,7 @@ functionality: name: "xgboost" namespace: "labels_transfer" - version: "0.12.3" + version: "0.12.4" authors: - name: "Vladimir Shitov" roles: @@ -589,6 +589,6 @@ info: output: "/home/runner/work/openpipeline/openpipeline/target/nextflow/labels_transfer/xgboost" executable: "/home/runner/work/openpipeline/openpipeline/target/nextflow/labels_transfer/xgboost/xgboost" viash_version: "0.7.5" - git_commit: "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" + git_commit: "a075b9f384e200b357c4c85801062a980ddb3383" git_remote: "https://github.com/openpipelines-bio/openpipeline" - git_tag: "0.12.2-3-g827d483cf7" + git_tag: "0.12.3-3-ga075b9f384" diff --git a/target/nextflow/labels_transfer/xgboost/main.nf b/target/nextflow/labels_transfer/xgboost/main.nf index 161cfb54a5e..f2a13f82c74 100644 --- a/target/nextflow/labels_transfer/xgboost/main.nf +++ b/target/nextflow/labels_transfer/xgboost/main.nf @@ -1,4 +1,4 @@ -// xgboost 0.12.3 +// xgboost 0.12.4 // // This wrapper script is auto-generated by viash 0.7.5 and is thus a derivative // work thereof. 
This software comes with ABSOLUTELY NO WARRANTY from Data @@ -27,7 +27,7 @@ thisConfig = processConfig(jsonSlurper.parseText('''{ "functionality" : { "name" : "xgboost", "namespace" : "labels_transfer", - "version" : "0.12.3", + "version" : "0.12.4", "authors" : [ { "name" : "Vladimir Shitov", @@ -736,9 +736,9 @@ thisConfig = processConfig(jsonSlurper.parseText('''{ "platform" : "nextflow", "output" : "/home/runner/work/openpipeline/openpipeline/target/nextflow/labels_transfer/xgboost", "viash_version" : "0.7.5", - "git_commit" : "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f", + "git_commit" : "a075b9f384e200b357c4c85801062a980ddb3383", "git_remote" : "https://github.com/openpipelines-bio/openpipeline", - "git_tag" : "0.12.2-3-g827d483cf7" + "git_tag" : "0.12.3-3-ga075b9f384" } }''')) diff --git a/target/nextflow/labels_transfer/xgboost/nextflow.config b/target/nextflow/labels_transfer/xgboost/nextflow.config index 2b25dee2d7a..30a52321bbc 100644 --- a/target/nextflow/labels_transfer/xgboost/nextflow.config +++ b/target/nextflow/labels_transfer/xgboost/nextflow.config @@ -2,7 +2,7 @@ manifest { name = 'xgboost' mainScript = 'main.nf' nextflowVersion = '!>=20.12.1-edge' - version = '0.12.3' + version = '0.12.4' description = 'Performs label transfer from reference to query using XGBoost classifier' author = 'Vladimir Shitov' } diff --git a/target/nextflow/labels_transfer/xgboost/nextflow_schema.json b/target/nextflow/labels_transfer/xgboost/nextflow_schema.json index 70f8bb777f0..a5eb170bb64 100644 --- a/target/nextflow/labels_transfer/xgboost/nextflow_schema.json +++ b/target/nextflow/labels_transfer/xgboost/nextflow_schema.json @@ -1,177 +1,263 @@ { - "$schema": "http://json-schema.org/draft-07/schema", - "title": "xgboost", - "description": "Performs label transfer from reference to query using XGBoost classifier", +"$schema": "http://json-schema.org/draft-07/schema", +"title": "xgboost", +"description": "Performs label transfer from reference to query using XGBoost classifier", +"type": "object", +"definitions": { + + + + "execution arguments" : { + "title": "Execution arguments", "type": "object", - "definitions": { - "execution arguments" : { - "title": "Execution arguments", - "type": "object", - "description": "No description", - "properties": { - - "force_retrain": { - "type": "boolean", - "description": "Type: `boolean_true`, default: `false`. Retrain models on the reference even if model_output directory already has trained classifiers", - "help_text": "Type: `boolean_true`, default: `false`. Retrain models on the reference even if model_output directory already has trained classifiers. WARNING! It will rewrite existing classifiers for targets in the model_output directory!", - "default": "False" - }, - - "use_gpu": { - "type": "boolean", - "description": "Type: `boolean`, default: `false`. Use GPU during models training and inference (recommended)", - "help_text": "Type: `boolean`, default: `false`. Use GPU during models training and inference (recommended).", - "default": "False" - }, - - "verbosity": { - "type": "integer", - "description": "Type: `integer`, default: `1`. The verbosity level for evaluation of the classifier from the range [0,2]", - "help_text": "Type: `integer`, default: `1`. The verbosity level for evaluation of the classifier from the range [0,2]", - "default": "1" - }, - - "model_output": { - "type": "string", - "description": "Type: `file`, default: `$id.$key.model_output.model_output`. 
Output directory for model", - "help_text": "Type: `file`, default: `$id.$key.model_output.model_output`. Output directory for model", - "default": "$id.$key.model_output.model_output" - } - - } - }, - "learning parameters" : { - "title": "Learning parameters", - "type": "object", - "description": "No description", - "properties": { - - "learning_rate": { - "type": "number", - "description": "Type: `double`, default: `0.3`. Step size shrinkage used in update to prevents overfitting", - "help_text": "Type: `double`, default: `0.3`. Step size shrinkage used in update to prevents overfitting. Range: [0,1]. See https://xgboost.readthedocs.io/en/stable/parameter.html#parameters-for-tree-booster for the reference", - "default": "0.3" - }, - - "min_split_loss": { - "type": "number", - "description": "Type: `double`, default: `0`. Minimum loss reduction required to make a further partition on a leaf node of the tree", - "help_text": "Type: `double`, default: `0`. Minimum loss reduction required to make a further partition on a leaf node of the tree. See https://xgboost.readthedocs.io/en/stable/parameter.html#parameters-for-tree-booster for the reference", - "default": "0" - }, - - "max_depth": { - "type": "integer", - "description": "Type: `integer`, default: `6`. Maximum depth of a tree", - "help_text": "Type: `integer`, default: `6`. Maximum depth of a tree. See https://xgboost.readthedocs.io/en/stable/parameter.html#parameters-for-tree-booster for the reference", - "default": "6" - }, - - "min_child_weight": { - "type": "integer", - "description": "Type: `integer`, default: `1`. Minimum sum of instance weight (hessian) needed in a child", - "help_text": "Type: `integer`, default: `1`. Minimum sum of instance weight (hessian) needed in a child. See https://xgboost.readthedocs.io/en/stable/parameter.html#parameters-for-tree-booster for the reference", - "default": "1" - }, - - "max_delta_step": { - "type": "number", - "description": "Type: `double`, default: `0`. Maximum delta step we allow each leaf output to be", - "help_text": "Type: `double`, default: `0`. Maximum delta step we allow each leaf output to be. See https://xgboost.readthedocs.io/en/stable/parameter.html#parameters-for-tree-booster for the reference", - "default": "0" - }, - - "subsample": { - "type": "number", - "description": "Type: `double`, default: `1`. Subsample ratio of the training instances", - "help_text": "Type: `double`, default: `1`. Subsample ratio of the training instances. See https://xgboost.readthedocs.io/en/stable/parameter.html#parameters-for-tree-booster for the reference", - "default": "1" - }, - - "sampling_method": { - "type": "string", - "description": "Type: `string`, default: `uniform`, choices: ``uniform`, `gradient_based``. The method to use to sample the training instances", - "help_text": "Type: `string`, default: `uniform`, choices: ``uniform`, `gradient_based``. The method to use to sample the training instances. See https://xgboost.readthedocs.io/en/stable/parameter.html#parameters-for-tree-booster for the reference", - "enum": ["uniform", "gradient_based"] - , - "default": "uniform" - }, - - "colsample_bytree": { - "type": "number", - "description": "Type: `double`, default: `1`. Fraction of columns to be subsampled", - "help_text": "Type: `double`, default: `1`. Fraction of columns to be subsampled. Range (0, 1]. 
See https://xgboost.readthedocs.io/en/stable/parameter.html#parameters-for-tree-booster for the reference", - "default": "1" - }, - - "colsample_bylevel": { - "type": "number", - "description": "Type: `double`, default: `1`. Subsample ratio of columns for each level", - "help_text": "Type: `double`, default: `1`. Subsample ratio of columns for each level. Range (0, 1]. See https://xgboost.readthedocs.io/en/stable/parameter.html#parameters-for-tree-booster for the reference", - "default": "1" - }, - - "colsample_bynode": { - "type": "number", - "description": "Type: `double`, default: `1`. Subsample ratio of columns for each node (split)", - "help_text": "Type: `double`, default: `1`. Subsample ratio of columns for each node (split). Range (0, 1]. See https://xgboost.readthedocs.io/en/stable/parameter.html#parameters-for-tree-booster for the reference", - "default": "1" - }, - - "reg_lambda": { - "type": "number", - "description": "Type: `double`, default: `1`. L2 regularization term on weights", - "help_text": "Type: `double`, default: `1`. L2 regularization term on weights. See https://xgboost.readthedocs.io/en/stable/parameter.html#parameters-for-tree-booster for the reference", - "default": "1" - }, - - "reg_alpha": { - "type": "number", - "description": "Type: `double`, default: `0`. L1 regularization term on weights", - "help_text": "Type: `double`, default: `0`. L1 regularization term on weights. See https://xgboost.readthedocs.io/en/stable/parameter.html#parameters-for-tree-booster for the reference", - "default": "0" - }, - - "scale_pos_weight": { - "type": "number", - "description": "Type: `double`, default: `1`. Control the balance of positive and negative weights, useful for unbalanced classes", - "help_text": "Type: `double`, default: `1`. Control the balance of positive and negative weights, useful for unbalanced classes. See https://xgboost.readthedocs.io/en/stable/parameter.html#parameters-for-tree-booster for the reference", - "default": "1" - } - - } - }, - "nextflow input-output arguments" : { - "title": "Nextflow input-output arguments", - "type": "object", - "description": "Input/output parameters for Nextflow itself. Please note that both publishDir and publish_dir are supported but at least one has to be configured.", - "properties": { - - "publish_dir": { - "type": "string", - "description": "Type: `string`, required, example: `output/`. Path to an output directory", - "help_text": "Type: `string`, required, example: `output/`. Path to an output directory." - }, - - "param_list": { - "type": "string", - "description": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel", - "help_text": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel. A `param_list` can either be a list of maps, a csv file, a json file, a yaml file, or simply a yaml blob.\n\n* A list of maps (as-is) where the keys of each map corresponds to the arguments of the pipeline. Example: in a `nextflow.config` file: `param_list: [ [\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027], [\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027] ]`.\n* A csv file should have column names which correspond to the different arguments of this pipeline. Example: `--param_list data.csv` with columns `id,input`.\n* A json or a yaml file should be a list of maps, each of which has keys corresponding to the arguments of the pipeline. 
Example: `--param_list data.json` with contents `[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]`.\n* A yaml blob can also be passed directly as a string. Example: `--param_list \"[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]\"`.\n\nWhen passing a csv, json or yaml file, relative path names are relativized to the location of the parameter file. No relativation is performed when `param_list` is a list of maps (as-is) or a yaml blob.", - "hidden": true - } - - } - } + "description": "No description", + "properties": { + + + "force_retrain": { + "type": + "boolean", + "description": "Type: `boolean_true`, default: `false`. Retrain models on the reference even if model_output directory already has trained classifiers", + "help_text": "Type: `boolean_true`, default: `false`. Retrain models on the reference even if model_output directory already has trained classifiers. WARNING! It will rewrite existing classifiers for targets in the model_output directory!" + , + "default": "False" + } + + + , + "use_gpu": { + "type": + "boolean", + "description": "Type: `boolean`, default: `false`. Use GPU during models training and inference (recommended)", + "help_text": "Type: `boolean`, default: `false`. Use GPU during models training and inference (recommended)." + , + "default": "False" + } + + + , + "verbosity": { + "type": + "integer", + "description": "Type: `integer`, default: `1`. The verbosity level for evaluation of the classifier from the range [0,2]", + "help_text": "Type: `integer`, default: `1`. The verbosity level for evaluation of the classifier from the range [0,2]" + , + "default": "1" + } + + + , + "model_output": { + "type": + "string", + "description": "Type: `file`, default: `$id.$key.model_output.model_output`. Output directory for model", + "help_text": "Type: `file`, default: `$id.$key.model_output.model_output`. Output directory for model" + , + "default": "$id.$key.model_output.model_output" + } + + +} +}, + + + "learning parameters" : { + "title": "Learning parameters", + "type": "object", + "description": "No description", + "properties": { + + + "learning_rate": { + "type": + "number", + "description": "Type: `double`, default: `0.3`. Step size shrinkage used in update to prevents overfitting", + "help_text": "Type: `double`, default: `0.3`. Step size shrinkage used in update to prevents overfitting. Range: [0,1]. See https://xgboost.readthedocs.io/en/stable/parameter.html#parameters-for-tree-booster for the reference" + , + "default": "0.3" + } + + + , + "min_split_loss": { + "type": + "number", + "description": "Type: `double`, default: `0`. Minimum loss reduction required to make a further partition on a leaf node of the tree", + "help_text": "Type: `double`, default: `0`. Minimum loss reduction required to make a further partition on a leaf node of the tree. See https://xgboost.readthedocs.io/en/stable/parameter.html#parameters-for-tree-booster for the reference" + , + "default": "0" + } + + + , + "max_depth": { + "type": + "integer", + "description": "Type: `integer`, default: `6`. Maximum depth of a tree", + "help_text": "Type: `integer`, default: `6`. Maximum depth of a tree. 
See https://xgboost.readthedocs.io/en/stable/parameter.html#parameters-for-tree-booster for the reference" + , + "default": "6" + } + + + , + "min_child_weight": { + "type": + "integer", + "description": "Type: `integer`, default: `1`. Minimum sum of instance weight (hessian) needed in a child", + "help_text": "Type: `integer`, default: `1`. Minimum sum of instance weight (hessian) needed in a child. See https://xgboost.readthedocs.io/en/stable/parameter.html#parameters-for-tree-booster for the reference" + , + "default": "1" + } + + + , + "max_delta_step": { + "type": + "number", + "description": "Type: `double`, default: `0`. Maximum delta step we allow each leaf output to be", + "help_text": "Type: `double`, default: `0`. Maximum delta step we allow each leaf output to be. See https://xgboost.readthedocs.io/en/stable/parameter.html#parameters-for-tree-booster for the reference" + , + "default": "0" + } + + + , + "subsample": { + "type": + "number", + "description": "Type: `double`, default: `1`. Subsample ratio of the training instances", + "help_text": "Type: `double`, default: `1`. Subsample ratio of the training instances. See https://xgboost.readthedocs.io/en/stable/parameter.html#parameters-for-tree-booster for the reference" + , + "default": "1" + } + + + , + "sampling_method": { + "type": + "string", + "description": "Type: `string`, default: `uniform`, choices: ``uniform`, `gradient_based``. The method to use to sample the training instances", + "help_text": "Type: `string`, default: `uniform`, choices: ``uniform`, `gradient_based``. The method to use to sample the training instances. See https://xgboost.readthedocs.io/en/stable/parameter.html#parameters-for-tree-booster for the reference", + "enum": ["uniform", "gradient_based"] + + , + "default": "uniform" + } + + + , + "colsample_bytree": { + "type": + "number", + "description": "Type: `double`, default: `1`. Fraction of columns to be subsampled", + "help_text": "Type: `double`, default: `1`. Fraction of columns to be subsampled. Range (0, 1]. See https://xgboost.readthedocs.io/en/stable/parameter.html#parameters-for-tree-booster for the reference" + , + "default": "1" + } + + + , + "colsample_bylevel": { + "type": + "number", + "description": "Type: `double`, default: `1`. Subsample ratio of columns for each level", + "help_text": "Type: `double`, default: `1`. Subsample ratio of columns for each level. Range (0, 1]. See https://xgboost.readthedocs.io/en/stable/parameter.html#parameters-for-tree-booster for the reference" + , + "default": "1" + } + + + , + "colsample_bynode": { + "type": + "number", + "description": "Type: `double`, default: `1`. Subsample ratio of columns for each node (split)", + "help_text": "Type: `double`, default: `1`. Subsample ratio of columns for each node (split). Range (0, 1]. See https://xgboost.readthedocs.io/en/stable/parameter.html#parameters-for-tree-booster for the reference" + , + "default": "1" + } + + + , + "reg_lambda": { + "type": + "number", + "description": "Type: `double`, default: `1`. L2 regularization term on weights", + "help_text": "Type: `double`, default: `1`. L2 regularization term on weights. See https://xgboost.readthedocs.io/en/stable/parameter.html#parameters-for-tree-booster for the reference" + , + "default": "1" + } + + + , + "reg_alpha": { + "type": + "number", + "description": "Type: `double`, default: `0`. L1 regularization term on weights", + "help_text": "Type: `double`, default: `0`. L1 regularization term on weights. 
See https://xgboost.readthedocs.io/en/stable/parameter.html#parameters-for-tree-booster for the reference" + , + "default": "0" + } + + + , + "scale_pos_weight": { + "type": + "number", + "description": "Type: `double`, default: `1`. Control the balance of positive and negative weights, useful for unbalanced classes", + "help_text": "Type: `double`, default: `1`. Control the balance of positive and negative weights, useful for unbalanced classes. See https://xgboost.readthedocs.io/en/stable/parameter.html#parameters-for-tree-booster for the reference" + , + "default": "1" + } + + +} +}, + + + "nextflow input-output arguments" : { + "title": "Nextflow input-output arguments", + "type": "object", + "description": "Input/output parameters for Nextflow itself. Please note that both publishDir and publish_dir are supported but at least one has to be configured.", + "properties": { + + + "publish_dir": { + "type": + "string", + "description": "Type: `string`, required, example: `output/`. Path to an output directory", + "help_text": "Type: `string`, required, example: `output/`. Path to an output directory." + + } + + + , + "param_list": { + "type": + "string", + "description": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel", + "help_text": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel. A `param_list` can either be a list of maps, a csv file, a json file, a yaml file, or simply a yaml blob.\n\n* A list of maps (as-is) where the keys of each map corresponds to the arguments of the pipeline. Example: in a `nextflow.config` file: `param_list: [ [\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027], [\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027] ]`.\n* A csv file should have column names which correspond to the different arguments of this pipeline. Example: `--param_list data.csv` with columns `id,input`.\n* A json or a yaml file should be a list of maps, each of which has keys corresponding to the arguments of the pipeline. Example: `--param_list data.json` with contents `[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]`.\n* A yaml blob can also be passed directly as a string. Example: `--param_list \"[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]\"`.\n\nWhen passing a csv, json or yaml file, relative path names are relativized to the location of the parameter file. 
No relativation is performed when `param_list` is a list of maps (as-is) or a yaml blob.", + "hidden": true + + } + + +} +} +}, +"allOf": [ + + { + "$ref": "#/definitions/execution arguments" + }, + + { + "$ref": "#/definitions/learning parameters" }, - "allOf": [ - { - "$ref": "#/definitions/execution arguments" - }, - { - "$ref": "#/definitions/learning parameters" - }, - { - "$ref": "#/definitions/nextflow input-output arguments" - } - ] + + { + "$ref": "#/definitions/nextflow input-output arguments" + } +] } diff --git a/target/nextflow/mapping/bd_rhapsody/.config.vsh.yaml b/target/nextflow/mapping/bd_rhapsody/.config.vsh.yaml index 72f3da797a9..771016b4cc9 100644 --- a/target/nextflow/mapping/bd_rhapsody/.config.vsh.yaml +++ b/target/nextflow/mapping/bd_rhapsody/.config.vsh.yaml @@ -1,7 +1,7 @@ functionality: name: "bd_rhapsody" namespace: "mapping" - version: "0.12.3" + version: "0.12.4" authors: - name: "Robrecht Cannoodt" roles: @@ -412,6 +412,6 @@ info: output: "/home/runner/work/openpipeline/openpipeline/target/nextflow/mapping/bd_rhapsody" executable: "/home/runner/work/openpipeline/openpipeline/target/nextflow/mapping/bd_rhapsody/bd_rhapsody" viash_version: "0.7.5" - git_commit: "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" + git_commit: "a075b9f384e200b357c4c85801062a980ddb3383" git_remote: "https://github.com/openpipelines-bio/openpipeline" - git_tag: "0.12.2-3-g827d483cf7" + git_tag: "0.12.3-3-ga075b9f384" diff --git a/target/nextflow/mapping/bd_rhapsody/main.nf b/target/nextflow/mapping/bd_rhapsody/main.nf index 4698ab4afbd..e6330397e3b 100644 --- a/target/nextflow/mapping/bd_rhapsody/main.nf +++ b/target/nextflow/mapping/bd_rhapsody/main.nf @@ -1,4 +1,4 @@ -// bd_rhapsody 0.12.3 +// bd_rhapsody 0.12.4 // // This wrapper script is auto-generated by viash 0.7.5 and is thus a derivative // work thereof. 
This software comes with ABSOLUTELY NO WARRANTY from Data @@ -27,7 +27,7 @@ thisConfig = processConfig(jsonSlurper.parseText('''{ "functionality" : { "name" : "bd_rhapsody", "namespace" : "mapping", - "version" : "0.12.3", + "version" : "0.12.4", "authors" : [ { "name" : "Robrecht Cannoodt", @@ -532,9 +532,9 @@ thisConfig = processConfig(jsonSlurper.parseText('''{ "platform" : "nextflow", "output" : "/home/runner/work/openpipeline/openpipeline/target/nextflow/mapping/bd_rhapsody", "viash_version" : "0.7.5", - "git_commit" : "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f", + "git_commit" : "a075b9f384e200b357c4c85801062a980ddb3383", "git_remote" : "https://github.com/openpipelines-bio/openpipeline", - "git_tag" : "0.12.2-3-g827d483cf7" + "git_tag" : "0.12.3-3-ga075b9f384" } }''')) diff --git a/target/nextflow/mapping/bd_rhapsody/nextflow.config b/target/nextflow/mapping/bd_rhapsody/nextflow.config index 9e8ade0d47a..c875dd85b99 100644 --- a/target/nextflow/mapping/bd_rhapsody/nextflow.config +++ b/target/nextflow/mapping/bd_rhapsody/nextflow.config @@ -2,7 +2,7 @@ manifest { name = 'bd_rhapsody' mainScript = 'main.nf' nextflowVersion = '!>=20.12.1-edge' - version = '0.12.3' + version = '0.12.4' description = 'A wrapper for the BD Rhapsody Analysis CWL v1.10.1 pipeline.\n\nThe CWL pipeline file is obtained by cloning \'https://bitbucket.org/CRSwDev/cwl/src/master/\' and removing all objects with class \'DockerRequirement\' from the YML.\n\nThis pipeline can be used for a targeted analysis (with `--mode targeted`) or for a whole transcriptome analysis (with `--mode wta`).\n\n* If mode is `"targeted"`, then either the `--reference` or `--abseq_reference` parameters must be defined.\n* If mode is `"wta"`, then `--reference` and `--transcriptome_annotation` must be defined, `--abseq_reference` and `--supplemental_reference` is optional.\n\nThe reference_genome and transcriptome_annotation files can be generated with the make_reference pipeline.\nAlternatively, BD also provides standard references which can be downloaded from these locations:\n\n - Human: http://bd-rhapsody-public.s3-website-us-east-1.amazonaws.com/Rhapsody-WTA/GRCh38-PhiX-gencodev29/\n - Mouse: http://bd-rhapsody-public.s3-website-us-east-1.amazonaws.com/Rhapsody-WTA/GRCm38-PhiX-gencodevM19/\n' author = 'Robrecht Cannoodt' } diff --git a/target/nextflow/mapping/bd_rhapsody/nextflow_schema.json b/target/nextflow/mapping/bd_rhapsody/nextflow_schema.json index 2f080179cc6..78b4c85dd34 100644 --- a/target/nextflow/mapping/bd_rhapsody/nextflow_schema.json +++ b/target/nextflow/mapping/bd_rhapsody/nextflow_schema.json @@ -1,239 +1,348 @@ { - "$schema": "http://json-schema.org/draft-07/schema", - "title": "bd_rhapsody", - "description": "A wrapper for the BD Rhapsody Analysis CWL v1.10.1 pipeline.\n\nThe CWL pipeline file is obtained by cloning \u0027https://bitbucket.org/CRSwDev/cwl/src/master/\u0027 and removing all objects with class \u0027DockerRequirement\u0027 from the YML.\n\nThis pipeline can be used for a targeted analysis (with `--mode targeted`) or for a whole transcriptome analysis (with `--mode wta`).\n\n* If mode is `\"targeted\"`, then either the `--reference` or `--abseq_reference` parameters must be defined.\n* If mode is `\"wta\"`, then `--reference` and `--transcriptome_annotation` must be defined, `--abseq_reference` and `--supplemental_reference` is optional.\n\nThe reference_genome and transcriptome_annotation files can be generated with the make_reference pipeline.\nAlternatively, BD also provides standard references 
which can be downloaded from these locations:\n\n - Human: http://bd-rhapsody-public.s3-website-us-east-1.amazonaws.com/Rhapsody-WTA/GRCh38-PhiX-gencodev29/\n - Mouse: http://bd-rhapsody-public.s3-website-us-east-1.amazonaws.com/Rhapsody-WTA/GRCm38-PhiX-gencodevM19/\n", +"$schema": "http://json-schema.org/draft-07/schema", +"title": "bd_rhapsody", +"description": "A wrapper for the BD Rhapsody Analysis CWL v1.10.1 pipeline.\n\nThe CWL pipeline file is obtained by cloning \u0027https://bitbucket.org/CRSwDev/cwl/src/master/\u0027 and removing all objects with class \u0027DockerRequirement\u0027 from the YML.\n\nThis pipeline can be used for a targeted analysis (with `--mode targeted`) or for a whole transcriptome analysis (with `--mode wta`).\n\n* If mode is `\"targeted\"`, then either the `--reference` or `--abseq_reference` parameters must be defined.\n* If mode is `\"wta\"`, then `--reference` and `--transcriptome_annotation` must be defined, `--abseq_reference` and `--supplemental_reference` is optional.\n\nThe reference_genome and transcriptome_annotation files can be generated with the make_reference pipeline.\nAlternatively, BD also provides standard references which can be downloaded from these locations:\n\n - Human: http://bd-rhapsody-public.s3-website-us-east-1.amazonaws.com/Rhapsody-WTA/GRCh38-PhiX-gencodev29/\n - Mouse: http://bd-rhapsody-public.s3-website-us-east-1.amazonaws.com/Rhapsody-WTA/GRCm38-PhiX-gencodevM19/\n", +"type": "object", +"definitions": { + + + + "inputs" : { + "title": "Inputs", "type": "object", - "definitions": { - "inputs" : { - "title": "Inputs", - "type": "object", - "description": "No description", - "properties": { - - "mode": { - "type": "string", - "description": "Type: `string`, required, example: `wta`, choices: ``wta`, `targeted``. Whether to run a whole transcriptome analysis (WTA) or a targeted analysis", - "help_text": "Type: `string`, required, example: `wta`, choices: ``wta`, `targeted``. Whether to run a whole transcriptome analysis (WTA) or a targeted analysis.", - "enum": ["wta", "targeted"] - - }, - - "input": { - "type": "string", - "description": "Type: List of `file`, required, example: `input.fastq.gz`, multiple_sep: `\";\"`. Path to your read files in the FASTQ", - "help_text": "Type: List of `file`, required, example: `input.fastq.gz`, multiple_sep: `\";\"`. Path to your read files in the FASTQ.GZ format. You may specify as many R1/R2 read pairs as you want." - }, - - "reference": { - "type": "string", - "description": "Type: List of `file`, required, example: `reference_genome.tar.gz|reference.fasta`, multiple_sep: `\";\"`. Refence to map to", - "help_text": "Type: List of `file`, required, example: `reference_genome.tar.gz|reference.fasta`, multiple_sep: `\";\"`. Refence to map to. For `--mode wta`, this is the path to STAR index as a tar.gz file. For `--mode targeted`, this is the path to mRNA reference file for pre-designed, supplemental, or custom panel, in FASTA format" - }, - - "transcriptome_annotation": { - "type": "string", - "description": "Type: `file`, example: `transcriptome.gtf`. Path to GTF annotation file (only for `--mode wta`)", - "help_text": "Type: `file`, example: `transcriptome.gtf`. Path to GTF annotation file (only for `--mode wta`)." - }, - - "abseq_reference": { - "type": "string", - "description": "Type: List of `file`, example: `abseq_reference.fasta`, multiple_sep: `\";\"`. 
Path to the AbSeq reference file in FASTA format", - "help_text": "Type: List of `file`, example: `abseq_reference.fasta`, multiple_sep: `\";\"`. Path to the AbSeq reference file in FASTA format. Only needed if BD AbSeq Ab-Oligos are used." - }, - - "supplemental_reference": { - "type": "string", - "description": "Type: List of `file`, example: `supplemental_reference.fasta`, multiple_sep: `\";\"`. Path to the supplemental reference file in FASTA format", - "help_text": "Type: List of `file`, example: `supplemental_reference.fasta`, multiple_sep: `\";\"`. Path to the supplemental reference file in FASTA format. Only needed if there are additional transgene sequences used in the experiment (only for `--mode wta`)." - }, - - "sample_prefix": { - "type": "string", - "description": "Type: `string`, default: `sample`. Specify a run name to use as the output file base name", - "help_text": "Type: `string`, default: `sample`. Specify a run name to use as the output file base name. Use only letters, numbers, or hyphens. Do not use special characters or spaces.", - "default": "sample" - } - - } - }, - "outputs" : { - "title": "Outputs", - "type": "object", - "description": "No description", - "properties": { - - "output": { - "type": "string", - "description": "Type: `file`, required, default: `$id.$key.output.output`, example: `output_dir/`. Output folder", - "help_text": "Type: `file`, required, default: `$id.$key.output.output`, example: `output_dir/`. Output folder. Output still needs to be processed further.", - "default": "$id.$key.output.output" - } - - } - }, - "putative cell calling settings" : { - "title": "Putative cell calling settings", - "type": "object", - "description": "No description", - "properties": { - - "putative_cell_call": { - "type": "string", - "description": "Type: `string`, example: `mRNA`, choices: ``mRNA`, `AbSeq_Experimental``. Specify the dataset to be used for putative cell calling", - "help_text": "Type: `string`, example: `mRNA`, choices: ``mRNA`, `AbSeq_Experimental``. Specify the dataset to be used for putative cell calling. For putative cell calling using an AbSeq dataset, please provide an AbSeq_Reference fasta file above.", - "enum": ["mRNA", "AbSeq_Experimental"] - - }, - - "exact_cell_count": { - "type": "integer", - "description": "Type: `integer`, example: `10000`. Exact cell count - Set a specific number (\u003e=1) of cells as putative, based on those with the highest error-corrected read count", - "help_text": "Type: `integer`, example: `10000`. Exact cell count - Set a specific number (\u003e=1) of cells as putative, based on those with the highest error-corrected read count" - }, - - "disable_putative_calling": { - "type": "boolean", - "description": "Type: `boolean_true`, default: `false`. Disable Refined Putative Cell Calling - Determine putative cells using only the basic algorithm (minimum second derivative along the cumulative reads curve)", - "help_text": "Type: `boolean_true`, default: `false`. Disable Refined Putative Cell Calling - Determine putative cells using only the basic algorithm (minimum second derivative along the cumulative reads curve). The refined algorithm attempts to remove false positives and recover false negatives, but may not be ideal for certain complex mixtures of cell types. 
Does not apply if Exact Cell Count is set.", - "default": "False" - } - - } - }, - "subsample arguments" : { - "title": "Subsample arguments", - "type": "object", - "description": "No description", - "properties": { - - "subsample": { - "type": "number", - "description": "Type: `double`, example: `0.01`. A number \u003e1 or fraction (0 \u003c n \u003c 1) to indicate the number or percentage of reads to subsample", - "help_text": "Type: `double`, example: `0.01`. A number \u003e1 or fraction (0 \u003c n \u003c 1) to indicate the number or percentage of reads to subsample." - }, - - "subsample_seed": { - "type": "integer", - "description": "Type: `integer`, example: `3445`. A seed for replicating a previous subsampled run", - "help_text": "Type: `integer`, example: `3445`. A seed for replicating a previous subsampled run." - } - - } - }, - "multiplex arguments" : { - "title": "Multiplex arguments", - "type": "object", - "description": "No description", - "properties": { - - "sample_tags_version": { - "type": "string", - "description": "Type: `string`, example: `human`, choices: ``human`, `hs`, `mouse`, `mm``. Specify if multiplexed run", - "help_text": "Type: `string`, example: `human`, choices: ``human`, `hs`, `mouse`, `mm``. Specify if multiplexed run.", - "enum": ["human", "hs", "mouse", "mm"] - - }, - - "tag_names": { - "type": "string", - "description": "Type: List of `string`, example: `4-mySample:9-myOtherSample:6-alsoThisSample`, multiple_sep: `\":\"`. Tag_Names (optional) - Specify the tag number followed by \u0027-\u0027 and the desired sample name to appear in Sample_Tag_Metrics", - "help_text": "Type: List of `string`, example: `4-mySample:9-myOtherSample:6-alsoThisSample`, multiple_sep: `\":\"`. Tag_Names (optional) - Specify the tag number followed by \u0027-\u0027 and the desired sample name to appear in Sample_Tag_Metrics.csv.\nDo not use the special characters: \u0026, (), [], {}, \u003c\u003e, ?, |\n" - } - - } - }, - "vdj arguments" : { - "title": "VDJ arguments", - "type": "object", - "description": "No description", - "properties": { - - "vdj_version": { - "type": "string", - "description": "Type: `string`, example: `human`, choices: ``human`, `mouse`, `humanBCR`, `humanBCR`, `humanTCR`, `mouseBCR``. Specify if VDJ run", - "help_text": "Type: `string`, example: `human`, choices: ``human`, `mouse`, `humanBCR`, `humanBCR`, `humanTCR`, `mouseBCR``. Specify if VDJ run.", - "enum": ["human", "mouse", "humanBCR", "humanBCR", "humanTCR", "mouseBCR"] - - } - - } - }, - "cwl-runner arguments" : { - "title": "CWL-runner arguments", - "type": "object", - "description": "No description", - "properties": { - - "parallel": { - "type": "boolean", - "description": "Type: `boolean`, default: `true`. Run jobs in parallel", - "help_text": "Type: `boolean`, default: `true`. Run jobs in parallel.", - "default": "True" - }, - - "timestamps": { - "type": "boolean", - "description": "Type: `boolean_true`, default: `false`. Add timestamps to the errors, warnings, and notifications", - "help_text": "Type: `boolean_true`, default: `false`. Add timestamps to the errors, warnings, and notifications.", - "default": "False" - }, - - "dryrun": { - "type": "boolean", - "description": "Type: `boolean_true`, default: `false`. If true, the output directory will only contain the CWL input files, but the pipeline itself will not be executed", - "help_text": "Type: `boolean_true`, default: `false`. 
If true, the output directory will only contain the CWL input files, but the pipeline itself will not be executed.", - "default": "False" - } - - } - }, - "nextflow input-output arguments" : { - "title": "Nextflow input-output arguments", - "type": "object", - "description": "Input/output parameters for Nextflow itself. Please note that both publishDir and publish_dir are supported but at least one has to be configured.", - "properties": { - - "publish_dir": { - "type": "string", - "description": "Type: `string`, required, example: `output/`. Path to an output directory", - "help_text": "Type: `string`, required, example: `output/`. Path to an output directory." - }, - - "param_list": { - "type": "string", - "description": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel", - "help_text": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel. A `param_list` can either be a list of maps, a csv file, a json file, a yaml file, or simply a yaml blob.\n\n* A list of maps (as-is) where the keys of each map corresponds to the arguments of the pipeline. Example: in a `nextflow.config` file: `param_list: [ [\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027], [\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027] ]`.\n* A csv file should have column names which correspond to the different arguments of this pipeline. Example: `--param_list data.csv` with columns `id,input`.\n* A json or a yaml file should be a list of maps, each of which has keys corresponding to the arguments of the pipeline. Example: `--param_list data.json` with contents `[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]`.\n* A yaml blob can also be passed directly as a string. Example: `--param_list \"[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]\"`.\n\nWhen passing a csv, json or yaml file, relative path names are relativized to the location of the parameter file. No relativation is performed when `param_list` is a list of maps (as-is) or a yaml blob.", - "hidden": true - } - - } - } + "description": "No description", + "properties": { + + + "mode": { + "type": + "string", + "description": "Type: `string`, required, example: `wta`, choices: ``wta`, `targeted``. Whether to run a whole transcriptome analysis (WTA) or a targeted analysis", + "help_text": "Type: `string`, required, example: `wta`, choices: ``wta`, `targeted``. Whether to run a whole transcriptome analysis (WTA) or a targeted analysis.", + "enum": ["wta", "targeted"] + + + } + + + , + "input": { + "type": + "string", + "description": "Type: List of `file`, required, example: `input.fastq.gz`, multiple_sep: `\";\"`. Path to your read files in the FASTQ", + "help_text": "Type: List of `file`, required, example: `input.fastq.gz`, multiple_sep: `\";\"`. Path to your read files in the FASTQ.GZ format. You may specify as many R1/R2 read pairs as you want." + + } + + + , + "reference": { + "type": + "string", + "description": "Type: List of `file`, required, example: `reference_genome.tar.gz|reference.fasta`, multiple_sep: `\";\"`. Refence to map to", + "help_text": "Type: List of `file`, required, example: `reference_genome.tar.gz|reference.fasta`, multiple_sep: `\";\"`. Refence to map to. 
For `--mode wta`, this is the path to STAR index as a tar.gz file. For `--mode targeted`, this is the path to mRNA reference file for pre-designed, supplemental, or custom panel, in FASTA format" + + } + + + , + "transcriptome_annotation": { + "type": + "string", + "description": "Type: `file`, example: `transcriptome.gtf`. Path to GTF annotation file (only for `--mode wta`)", + "help_text": "Type: `file`, example: `transcriptome.gtf`. Path to GTF annotation file (only for `--mode wta`)." + + } + + + , + "abseq_reference": { + "type": + "string", + "description": "Type: List of `file`, example: `abseq_reference.fasta`, multiple_sep: `\";\"`. Path to the AbSeq reference file in FASTA format", + "help_text": "Type: List of `file`, example: `abseq_reference.fasta`, multiple_sep: `\";\"`. Path to the AbSeq reference file in FASTA format. Only needed if BD AbSeq Ab-Oligos are used." + + } + + + , + "supplemental_reference": { + "type": + "string", + "description": "Type: List of `file`, example: `supplemental_reference.fasta`, multiple_sep: `\";\"`. Path to the supplemental reference file in FASTA format", + "help_text": "Type: List of `file`, example: `supplemental_reference.fasta`, multiple_sep: `\";\"`. Path to the supplemental reference file in FASTA format. Only needed if there are additional transgene sequences used in the experiment (only for `--mode wta`)." + + } + + + , + "sample_prefix": { + "type": + "string", + "description": "Type: `string`, default: `sample`. Specify a run name to use as the output file base name", + "help_text": "Type: `string`, default: `sample`. Specify a run name to use as the output file base name. Use only letters, numbers, or hyphens. Do not use special characters or spaces." + , + "default": "sample" + } + + +} +}, + + + "outputs" : { + "title": "Outputs", + "type": "object", + "description": "No description", + "properties": { + + + "output": { + "type": + "string", + "description": "Type: `file`, required, default: `$id.$key.output.output`, example: `output_dir/`. Output folder", + "help_text": "Type: `file`, required, default: `$id.$key.output.output`, example: `output_dir/`. Output folder. Output still needs to be processed further." + , + "default": "$id.$key.output.output" + } + + +} +}, + + + "putative cell calling settings" : { + "title": "Putative cell calling settings", + "type": "object", + "description": "No description", + "properties": { + + + "putative_cell_call": { + "type": + "string", + "description": "Type: `string`, example: `mRNA`, choices: ``mRNA`, `AbSeq_Experimental``. Specify the dataset to be used for putative cell calling", + "help_text": "Type: `string`, example: `mRNA`, choices: ``mRNA`, `AbSeq_Experimental``. Specify the dataset to be used for putative cell calling. For putative cell calling using an AbSeq dataset, please provide an AbSeq_Reference fasta file above.", + "enum": ["mRNA", "AbSeq_Experimental"] + + + } + + + , + "exact_cell_count": { + "type": + "integer", + "description": "Type: `integer`, example: `10000`. Exact cell count - Set a specific number (\u003e=1) of cells as putative, based on those with the highest error-corrected read count", + "help_text": "Type: `integer`, example: `10000`. Exact cell count - Set a specific number (\u003e=1) of cells as putative, based on those with the highest error-corrected read count" + + } + + + , + "disable_putative_calling": { + "type": + "boolean", + "description": "Type: `boolean_true`, default: `false`. 
Disable Refined Putative Cell Calling - Determine putative cells using only the basic algorithm (minimum second derivative along the cumulative reads curve)", + "help_text": "Type: `boolean_true`, default: `false`. Disable Refined Putative Cell Calling - Determine putative cells using only the basic algorithm (minimum second derivative along the cumulative reads curve). The refined algorithm attempts to remove false positives and recover false negatives, but may not be ideal for certain complex mixtures of cell types. Does not apply if Exact Cell Count is set." + , + "default": "False" + } + + +} +}, + + + "subsample arguments" : { + "title": "Subsample arguments", + "type": "object", + "description": "No description", + "properties": { + + + "subsample": { + "type": + "number", + "description": "Type: `double`, example: `0.01`. A number \u003e1 or fraction (0 \u003c n \u003c 1) to indicate the number or percentage of reads to subsample", + "help_text": "Type: `double`, example: `0.01`. A number \u003e1 or fraction (0 \u003c n \u003c 1) to indicate the number or percentage of reads to subsample." + + } + + + , + "subsample_seed": { + "type": + "integer", + "description": "Type: `integer`, example: `3445`. A seed for replicating a previous subsampled run", + "help_text": "Type: `integer`, example: `3445`. A seed for replicating a previous subsampled run." + + } + + +} +}, + + + "multiplex arguments" : { + "title": "Multiplex arguments", + "type": "object", + "description": "No description", + "properties": { + + + "sample_tags_version": { + "type": + "string", + "description": "Type: `string`, example: `human`, choices: ``human`, `hs`, `mouse`, `mm``. Specify if multiplexed run", + "help_text": "Type: `string`, example: `human`, choices: ``human`, `hs`, `mouse`, `mm``. Specify if multiplexed run.", + "enum": ["human", "hs", "mouse", "mm"] + + + } + + + , + "tag_names": { + "type": + "string", + "description": "Type: List of `string`, example: `4-mySample:9-myOtherSample:6-alsoThisSample`, multiple_sep: `\":\"`. Tag_Names (optional) - Specify the tag number followed by \u0027-\u0027 and the desired sample name to appear in Sample_Tag_Metrics", + "help_text": "Type: List of `string`, example: `4-mySample:9-myOtherSample:6-alsoThisSample`, multiple_sep: `\":\"`. Tag_Names (optional) - Specify the tag number followed by \u0027-\u0027 and the desired sample name to appear in Sample_Tag_Metrics.csv.\nDo not use the special characters: \u0026, (), [], {}, \u003c\u003e, ?, |\n" + + } + + +} +}, + + + "vdj arguments" : { + "title": "VDJ arguments", + "type": "object", + "description": "No description", + "properties": { + + + "vdj_version": { + "type": + "string", + "description": "Type: `string`, example: `human`, choices: ``human`, `mouse`, `humanBCR`, `humanBCR`, `humanTCR`, `mouseBCR``. Specify if VDJ run", + "help_text": "Type: `string`, example: `human`, choices: ``human`, `mouse`, `humanBCR`, `humanBCR`, `humanTCR`, `mouseBCR``. Specify if VDJ run.", + "enum": ["human", "mouse", "humanBCR", "humanBCR", "humanTCR", "mouseBCR"] + + + } + + +} +}, + + + "cwl-runner arguments" : { + "title": "CWL-runner arguments", + "type": "object", + "description": "No description", + "properties": { + + + "parallel": { + "type": + "boolean", + "description": "Type: `boolean`, default: `true`. Run jobs in parallel", + "help_text": "Type: `boolean`, default: `true`. Run jobs in parallel." 
+ , + "default": "True" + } + + + , + "timestamps": { + "type": + "boolean", + "description": "Type: `boolean_true`, default: `false`. Add timestamps to the errors, warnings, and notifications", + "help_text": "Type: `boolean_true`, default: `false`. Add timestamps to the errors, warnings, and notifications." + , + "default": "False" + } + + + , + "dryrun": { + "type": + "boolean", + "description": "Type: `boolean_true`, default: `false`. If true, the output directory will only contain the CWL input files, but the pipeline itself will not be executed", + "help_text": "Type: `boolean_true`, default: `false`. If true, the output directory will only contain the CWL input files, but the pipeline itself will not be executed." + , + "default": "False" + } + + +} +}, + + + "nextflow input-output arguments" : { + "title": "Nextflow input-output arguments", + "type": "object", + "description": "Input/output parameters for Nextflow itself. Please note that both publishDir and publish_dir are supported but at least one has to be configured.", + "properties": { + + + "publish_dir": { + "type": + "string", + "description": "Type: `string`, required, example: `output/`. Path to an output directory", + "help_text": "Type: `string`, required, example: `output/`. Path to an output directory." + + } + + + , + "param_list": { + "type": + "string", + "description": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel", + "help_text": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel. A `param_list` can either be a list of maps, a csv file, a json file, a yaml file, or simply a yaml blob.\n\n* A list of maps (as-is) where the keys of each map corresponds to the arguments of the pipeline. Example: in a `nextflow.config` file: `param_list: [ [\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027], [\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027] ]`.\n* A csv file should have column names which correspond to the different arguments of this pipeline. Example: `--param_list data.csv` with columns `id,input`.\n* A json or a yaml file should be a list of maps, each of which has keys corresponding to the arguments of the pipeline. Example: `--param_list data.json` with contents `[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]`.\n* A yaml blob can also be passed directly as a string. Example: `--param_list \"[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]\"`.\n\nWhen passing a csv, json or yaml file, relative path names are relativized to the location of the parameter file. 
No relativation is performed when `param_list` is a list of maps (as-is) or a yaml blob.", + "hidden": true + + } + + +} +} +}, +"allOf": [ + + { + "$ref": "#/definitions/inputs" + }, + + { + "$ref": "#/definitions/outputs" + }, + + { + "$ref": "#/definitions/putative cell calling settings" + }, + + { + "$ref": "#/definitions/subsample arguments" + }, + + { + "$ref": "#/definitions/multiplex arguments" + }, + + { + "$ref": "#/definitions/vdj arguments" + }, + + { + "$ref": "#/definitions/cwl-runner arguments" }, - "allOf": [ - { - "$ref": "#/definitions/inputs" - }, - { - "$ref": "#/definitions/outputs" - }, - { - "$ref": "#/definitions/putative cell calling settings" - }, - { - "$ref": "#/definitions/subsample arguments" - }, - { - "$ref": "#/definitions/multiplex arguments" - }, - { - "$ref": "#/definitions/vdj arguments" - }, - { - "$ref": "#/definitions/cwl-runner arguments" - }, - { - "$ref": "#/definitions/nextflow input-output arguments" - } - ] + + { + "$ref": "#/definitions/nextflow input-output arguments" + } +] } diff --git a/target/nextflow/mapping/cellranger_count/.config.vsh.yaml b/target/nextflow/mapping/cellranger_count/.config.vsh.yaml index b754fc36012..589b46ac27b 100644 --- a/target/nextflow/mapping/cellranger_count/.config.vsh.yaml +++ b/target/nextflow/mapping/cellranger_count/.config.vsh.yaml @@ -1,7 +1,7 @@ functionality: name: "cellranger_count" namespace: "mapping" - version: "0.12.3" + version: "0.12.4" authors: - name: "Angela Oliveira Pisco" roles: @@ -261,6 +261,6 @@ info: output: "/home/runner/work/openpipeline/openpipeline/target/nextflow/mapping/cellranger_count" executable: "/home/runner/work/openpipeline/openpipeline/target/nextflow/mapping/cellranger_count/cellranger_count" viash_version: "0.7.5" - git_commit: "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" + git_commit: "a075b9f384e200b357c4c85801062a980ddb3383" git_remote: "https://github.com/openpipelines-bio/openpipeline" - git_tag: "0.12.2-3-g827d483cf7" + git_tag: "0.12.3-3-ga075b9f384" diff --git a/target/nextflow/mapping/cellranger_count/main.nf b/target/nextflow/mapping/cellranger_count/main.nf index 56b530e78e1..f3e52a88a97 100644 --- a/target/nextflow/mapping/cellranger_count/main.nf +++ b/target/nextflow/mapping/cellranger_count/main.nf @@ -1,4 +1,4 @@ -// cellranger_count 0.12.3 +// cellranger_count 0.12.4 // // This wrapper script is auto-generated by viash 0.7.5 and is thus a derivative // work thereof. 
This software comes with ABSOLUTELY NO WARRANTY from Data @@ -29,7 +29,7 @@ thisConfig = processConfig(jsonSlurper.parseText('''{ "functionality" : { "name" : "cellranger_count", "namespace" : "mapping", - "version" : "0.12.3", + "version" : "0.12.4", "authors" : [ { "name" : "Angela Oliveira Pisco", @@ -363,9 +363,9 @@ thisConfig = processConfig(jsonSlurper.parseText('''{ "platform" : "nextflow", "output" : "/home/runner/work/openpipeline/openpipeline/target/nextflow/mapping/cellranger_count", "viash_version" : "0.7.5", - "git_commit" : "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f", + "git_commit" : "a075b9f384e200b357c4c85801062a980ddb3383", "git_remote" : "https://github.com/openpipelines-bio/openpipeline", - "git_tag" : "0.12.2-3-g827d483cf7" + "git_tag" : "0.12.3-3-ga075b9f384" } }''')) diff --git a/target/nextflow/mapping/cellranger_count/nextflow.config b/target/nextflow/mapping/cellranger_count/nextflow.config index c34d92a1a3f..447bb8b9239 100644 --- a/target/nextflow/mapping/cellranger_count/nextflow.config +++ b/target/nextflow/mapping/cellranger_count/nextflow.config @@ -2,7 +2,7 @@ manifest { name = 'cellranger_count' mainScript = 'main.nf' nextflowVersion = '!>=20.12.1-edge' - version = '0.12.3' + version = '0.12.4' description = 'Align fastq files using Cell Ranger count.' author = 'Angela Oliveira Pisco, Samuel D\'Souza, Robrecht Cannoodt' } diff --git a/target/nextflow/mapping/cellranger_count/nextflow_schema.json b/target/nextflow/mapping/cellranger_count/nextflow_schema.json index 4f76cbff566..e6f42eec020 100644 --- a/target/nextflow/mapping/cellranger_count/nextflow_schema.json +++ b/target/nextflow/mapping/cellranger_count/nextflow_schema.json @@ -1,122 +1,175 @@ { - "$schema": "http://json-schema.org/draft-07/schema", - "title": "cellranger_count", - "description": "Align fastq files using Cell Ranger count.", +"$schema": "http://json-schema.org/draft-07/schema", +"title": "cellranger_count", +"description": "Align fastq files using Cell Ranger count.", +"type": "object", +"definitions": { + + + + "inputs" : { + "title": "Inputs", "type": "object", - "definitions": { - "inputs" : { - "title": "Inputs", - "type": "object", - "description": "No description", - "properties": { - - "input": { - "type": "string", - "description": "Type: List of `file`, required, example: `sample_S1_L001_R1_001.fastq.gz;sample_S1_L001_R2_001.fastq.gz`, multiple_sep: `\";\"`. The fastq", - "help_text": "Type: List of `file`, required, example: `sample_S1_L001_R1_001.fastq.gz;sample_S1_L001_R2_001.fastq.gz`, multiple_sep: `\";\"`. The fastq.gz files to align. Can also be a single directory containing fastq.gz files." - }, - - "reference": { - "type": "string", - "description": "Type: `file`, required, example: `reference.tar.gz`. The path to Cell Ranger reference tar", - "help_text": "Type: `file`, required, example: `reference.tar.gz`. The path to Cell Ranger reference tar.gz file. Can also be a directory." - } - - } - }, - "outputs" : { - "title": "Outputs", - "type": "object", - "description": "No description", - "properties": { - - "output": { - "type": "string", - "description": "Type: `file`, required, default: `$id.$key.output.output`, example: `/path/to/output`. The folder to store the alignment results", - "help_text": "Type: `file`, required, default: `$id.$key.output.output`, example: `/path/to/output`. 
The folder to store the alignment results.", - "default": "$id.$key.output.output" - } - - } - }, - "arguments" : { - "title": "Arguments", - "type": "object", - "description": "No description", - "properties": { - - "expect_cells": { - "type": "integer", - "description": "Type: `integer`, example: `3000`. Expected number of recovered cells, used as input to cell calling algorithm", - "help_text": "Type: `integer`, example: `3000`. Expected number of recovered cells, used as input to cell calling algorithm." - }, - - "chemistry": { - "type": "string", - "description": "Type: `string`, default: `auto`, choices: ``auto`, `threeprime`, `fiveprime`, `SC3Pv1`, `SC3Pv2`, `SC3Pv3`, `SC3Pv3LT`, `SC3Pv3HT`, `SC5P-PE`, `SC5P-R2`, `SC-FB``. Assay configuration", - "help_text": "Type: `string`, default: `auto`, choices: ``auto`, `threeprime`, `fiveprime`, `SC3Pv1`, `SC3Pv2`, `SC3Pv3`, `SC3Pv3LT`, `SC3Pv3HT`, `SC5P-PE`, `SC5P-R2`, `SC-FB``. Assay configuration.\n- auto: autodetect mode\n- threeprime: Single Cell 3\u0027\n- fiveprime: Single Cell 5\u0027\n- SC3Pv1: Single Cell 3\u0027 v1\n- SC3Pv2: Single Cell 3\u0027 v2\n- SC3Pv3: Single Cell 3\u0027 v3\n- SC3Pv3LT: Single Cell 3\u0027 v3 LT\n- SC3Pv3HT: Single Cell 3\u0027 v3 HT\n- SC5P-PE: Single Cell 5\u0027 paired-end\n- SC5P-R2: Single Cell 5\u0027 R2-only\n- SC-FB: Single Cell Antibody-only 3\u0027 v2 or 5\u0027\nSee https://kb.10xgenomics.com/hc/en-us/articles/115003764132-How-does-Cell-Ranger-auto-detect-chemistry- for more information.\n", - "enum": ["auto", "threeprime", "fiveprime", "SC3Pv1", "SC3Pv2", "SC3Pv3", "SC3Pv3LT", "SC3Pv3HT", "SC5P-PE", "SC5P-R2", "SC-FB"] + "description": "No description", + "properties": { + + + "input": { + "type": + "string", + "description": "Type: List of `file`, required, example: `sample_S1_L001_R1_001.fastq.gz;sample_S1_L001_R2_001.fastq.gz`, multiple_sep: `\";\"`. The fastq", + "help_text": "Type: List of `file`, required, example: `sample_S1_L001_R1_001.fastq.gz;sample_S1_L001_R2_001.fastq.gz`, multiple_sep: `\";\"`. The fastq.gz files to align. Can also be a single directory containing fastq.gz files." + + } + + + , + "reference": { + "type": + "string", + "description": "Type: `file`, required, example: `reference.tar.gz`. The path to Cell Ranger reference tar", + "help_text": "Type: `file`, required, example: `reference.tar.gz`. The path to Cell Ranger reference tar.gz file. Can also be a directory." + + } + + +} +}, + + + "outputs" : { + "title": "Outputs", + "type": "object", + "description": "No description", + "properties": { + + + "output": { + "type": + "string", + "description": "Type: `file`, required, default: `$id.$key.output.output`, example: `/path/to/output`. The folder to store the alignment results", + "help_text": "Type: `file`, required, default: `$id.$key.output.output`, example: `/path/to/output`. The folder to store the alignment results." + , + "default": "$id.$key.output.output" + } + + +} +}, + + + "arguments" : { + "title": "Arguments", + "type": "object", + "description": "No description", + "properties": { + + + "expect_cells": { + "type": + "integer", + "description": "Type: `integer`, example: `3000`. Expected number of recovered cells, used as input to cell calling algorithm", + "help_text": "Type: `integer`, example: `3000`. Expected number of recovered cells, used as input to cell calling algorithm." 
+ + } + + + , + "chemistry": { + "type": + "string", + "description": "Type: `string`, default: `auto`, choices: ``auto`, `threeprime`, `fiveprime`, `SC3Pv1`, `SC3Pv2`, `SC3Pv3`, `SC3Pv3LT`, `SC3Pv3HT`, `SC5P-PE`, `SC5P-R2`, `SC-FB``. Assay configuration", + "help_text": "Type: `string`, default: `auto`, choices: ``auto`, `threeprime`, `fiveprime`, `SC3Pv1`, `SC3Pv2`, `SC3Pv3`, `SC3Pv3LT`, `SC3Pv3HT`, `SC5P-PE`, `SC5P-R2`, `SC-FB``. Assay configuration.\n- auto: autodetect mode\n- threeprime: Single Cell 3\u0027\n- fiveprime: Single Cell 5\u0027\n- SC3Pv1: Single Cell 3\u0027 v1\n- SC3Pv2: Single Cell 3\u0027 v2\n- SC3Pv3: Single Cell 3\u0027 v3\n- SC3Pv3LT: Single Cell 3\u0027 v3 LT\n- SC3Pv3HT: Single Cell 3\u0027 v3 HT\n- SC5P-PE: Single Cell 5\u0027 paired-end\n- SC5P-R2: Single Cell 5\u0027 R2-only\n- SC-FB: Single Cell Antibody-only 3\u0027 v2 or 5\u0027\nSee https://kb.10xgenomics.com/hc/en-us/articles/115003764132-How-does-Cell-Ranger-auto-detect-chemistry- for more information.\n", + "enum": ["auto", "threeprime", "fiveprime", "SC3Pv1", "SC3Pv2", "SC3Pv3", "SC3Pv3LT", "SC3Pv3HT", "SC5P-PE", "SC5P-R2", "SC-FB"] + + , + "default": "auto" + } + + + , + "secondary_analysis": { + "type": + "boolean", + "description": "Type: `boolean`, default: `false`. Whether or not to run the secondary analysis e", + "help_text": "Type: `boolean`, default: `false`. Whether or not to run the secondary analysis e.g. clustering." + , + "default": "False" + } + + + , + "generate_bam": { + "type": + "boolean", + "description": "Type: `boolean`, default: `true`. Whether to generate a BAM file", + "help_text": "Type: `boolean`, default: `true`. Whether to generate a BAM file." , - "default": "auto" - }, - - "secondary_analysis": { - "type": "boolean", - "description": "Type: `boolean`, default: `false`. Whether or not to run the secondary analysis e", - "help_text": "Type: `boolean`, default: `false`. Whether or not to run the secondary analysis e.g. clustering.", - "default": "False" - }, - - "generate_bam": { - "type": "boolean", - "description": "Type: `boolean`, default: `true`. Whether to generate a BAM file", - "help_text": "Type: `boolean`, default: `true`. Whether to generate a BAM file.", - "default": "True" - }, - - "include_introns": { - "type": "boolean", - "description": "Type: `boolean`, default: `true`. Include intronic reads in count (default=true unless --target-panel is specified in which case default=false)", - "help_text": "Type: `boolean`, default: `true`. Include intronic reads in count (default=true unless --target-panel is specified in which case default=false)", - "default": "True" - } - - } - }, - "nextflow input-output arguments" : { - "title": "Nextflow input-output arguments", - "type": "object", - "description": "Input/output parameters for Nextflow itself. Please note that both publishDir and publish_dir are supported but at least one has to be configured.", - "properties": { - - "publish_dir": { - "type": "string", - "description": "Type: `string`, required, example: `output/`. Path to an output directory", - "help_text": "Type: `string`, required, example: `output/`. Path to an output directory." - }, - - "param_list": { - "type": "string", - "description": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel", - "help_text": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel. 
A `param_list` can either be a list of maps, a csv file, a json file, a yaml file, or simply a yaml blob.\n\n* A list of maps (as-is) where the keys of each map corresponds to the arguments of the pipeline. Example: in a `nextflow.config` file: `param_list: [ [\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027], [\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027] ]`.\n* A csv file should have column names which correspond to the different arguments of this pipeline. Example: `--param_list data.csv` with columns `id,input`.\n* A json or a yaml file should be a list of maps, each of which has keys corresponding to the arguments of the pipeline. Example: `--param_list data.json` with contents `[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]`.\n* A yaml blob can also be passed directly as a string. Example: `--param_list \"[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]\"`.\n\nWhen passing a csv, json or yaml file, relative path names are relativized to the location of the parameter file. No relativation is performed when `param_list` is a list of maps (as-is) or a yaml blob.", - "hidden": true - } - - } - } + "default": "True" + } + + + , + "include_introns": { + "type": + "boolean", + "description": "Type: `boolean`, default: `true`. Include intronic reads in count (default=true unless --target-panel is specified in which case default=false)", + "help_text": "Type: `boolean`, default: `true`. Include intronic reads in count (default=true unless --target-panel is specified in which case default=false)" + , + "default": "True" + } + + +} +}, + + + "nextflow input-output arguments" : { + "title": "Nextflow input-output arguments", + "type": "object", + "description": "Input/output parameters for Nextflow itself. Please note that both publishDir and publish_dir are supported but at least one has to be configured.", + "properties": { + + + "publish_dir": { + "type": + "string", + "description": "Type: `string`, required, example: `output/`. Path to an output directory", + "help_text": "Type: `string`, required, example: `output/`. Path to an output directory." + + } + + + , + "param_list": { + "type": + "string", + "description": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel", + "help_text": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel. A `param_list` can either be a list of maps, a csv file, a json file, a yaml file, or simply a yaml blob.\n\n* A list of maps (as-is) where the keys of each map corresponds to the arguments of the pipeline. Example: in a `nextflow.config` file: `param_list: [ [\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027], [\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027] ]`.\n* A csv file should have column names which correspond to the different arguments of this pipeline. Example: `--param_list data.csv` with columns `id,input`.\n* A json or a yaml file should be a list of maps, each of which has keys corresponding to the arguments of the pipeline. 
Example: `--param_list data.json` with contents `[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]`.\n* A yaml blob can also be passed directly as a string. Example: `--param_list \"[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]\"`.\n\nWhen passing a csv, json or yaml file, relative path names are relativized to the location of the parameter file. No relativation is performed when `param_list` is a list of maps (as-is) or a yaml blob.", + "hidden": true + + } + + +} +} +}, +"allOf": [ + + { + "$ref": "#/definitions/inputs" + }, + + { + "$ref": "#/definitions/outputs" + }, + + { + "$ref": "#/definitions/arguments" }, - "allOf": [ - { - "$ref": "#/definitions/inputs" - }, - { - "$ref": "#/definitions/outputs" - }, - { - "$ref": "#/definitions/arguments" - }, - { - "$ref": "#/definitions/nextflow input-output arguments" - } - ] + + { + "$ref": "#/definitions/nextflow input-output arguments" + } +] } diff --git a/target/nextflow/mapping/cellranger_count_split/.config.vsh.yaml b/target/nextflow/mapping/cellranger_count_split/.config.vsh.yaml index fac2388e7eb..4c86d993a7f 100644 --- a/target/nextflow/mapping/cellranger_count_split/.config.vsh.yaml +++ b/target/nextflow/mapping/cellranger_count_split/.config.vsh.yaml @@ -1,7 +1,7 @@ functionality: name: "cellranger_count_split" namespace: "mapping" - version: "0.12.3" + version: "0.12.4" authors: - name: "Angela Oliveira Pisco" roles: @@ -213,6 +213,6 @@ info: output: "/home/runner/work/openpipeline/openpipeline/target/nextflow/mapping/cellranger_count_split" executable: "/home/runner/work/openpipeline/openpipeline/target/nextflow/mapping/cellranger_count_split/cellranger_count_split" viash_version: "0.7.5" - git_commit: "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" + git_commit: "a075b9f384e200b357c4c85801062a980ddb3383" git_remote: "https://github.com/openpipelines-bio/openpipeline" - git_tag: "0.12.2-3-g827d483cf7" + git_tag: "0.12.3-3-ga075b9f384" diff --git a/target/nextflow/mapping/cellranger_count_split/main.nf b/target/nextflow/mapping/cellranger_count_split/main.nf index 26ff26016ed..93c8774e3f5 100644 --- a/target/nextflow/mapping/cellranger_count_split/main.nf +++ b/target/nextflow/mapping/cellranger_count_split/main.nf @@ -1,4 +1,4 @@ -// cellranger_count_split 0.12.3 +// cellranger_count_split 0.12.4 // // This wrapper script is auto-generated by viash 0.7.5 and is thus a derivative // work thereof. 
This software comes with ABSOLUTELY NO WARRANTY from Data @@ -29,7 +29,7 @@ thisConfig = processConfig(jsonSlurper.parseText('''{ "functionality" : { "name" : "cellranger_count_split", "namespace" : "mapping", - "version" : "0.12.3", + "version" : "0.12.4", "authors" : [ { "name" : "Angela Oliveira Pisco", @@ -301,9 +301,9 @@ thisConfig = processConfig(jsonSlurper.parseText('''{ "platform" : "nextflow", "output" : "/home/runner/work/openpipeline/openpipeline/target/nextflow/mapping/cellranger_count_split", "viash_version" : "0.7.5", - "git_commit" : "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f", + "git_commit" : "a075b9f384e200b357c4c85801062a980ddb3383", "git_remote" : "https://github.com/openpipelines-bio/openpipeline", - "git_tag" : "0.12.2-3-g827d483cf7" + "git_tag" : "0.12.3-3-ga075b9f384" } }''')) diff --git a/target/nextflow/mapping/cellranger_count_split/nextflow.config b/target/nextflow/mapping/cellranger_count_split/nextflow.config index a6a1e141d9d..3aa03c9d330 100644 --- a/target/nextflow/mapping/cellranger_count_split/nextflow.config +++ b/target/nextflow/mapping/cellranger_count_split/nextflow.config @@ -2,7 +2,7 @@ manifest { name = 'cellranger_count_split' mainScript = 'main.nf' nextflowVersion = '!>=20.12.1-edge' - version = '0.12.3' + version = '0.12.4' description = 'Split 10x Cell Ranger output directory into separate output fields.' author = 'Angela Oliveira Pisco, Samuel D\'Souza, Robrecht Cannoodt' } diff --git a/target/nextflow/mapping/cellranger_count_split/nextflow_schema.json b/target/nextflow/mapping/cellranger_count_split/nextflow_schema.json index 4f3d065b1e0..e8746190f71 100644 --- a/target/nextflow/mapping/cellranger_count_split/nextflow_schema.json +++ b/target/nextflow/mapping/cellranger_count_split/nextflow_schema.json @@ -1,93 +1,136 @@ { - "$schema": "http://json-schema.org/draft-07/schema", - "title": "cellranger_count_split", - "description": "Split 10x Cell Ranger output directory into separate output fields.", +"$schema": "http://json-schema.org/draft-07/schema", +"title": "cellranger_count_split", +"description": "Split 10x Cell Ranger output directory into separate output fields.", +"type": "object", +"definitions": { + + + + "arguments" : { + "title": "Arguments", "type": "object", - "definitions": { - "arguments" : { - "title": "Arguments", - "type": "object", - "description": "No description", - "properties": { - - "input": { - "type": "string", - "description": "Type: `file`, required, example: `input_dir`. Output directory from a Cell Ranger count run", - "help_text": "Type: `file`, required, example: `input_dir`. Output directory from a Cell Ranger count run." - }, - - "filtered_h5": { - "type": "string", - "description": "Type: `file`, default: `$id.$key.filtered_h5.h5`, example: `filtered_feature_bc_matrix.h5`. ", - "help_text": "Type: `file`, default: `$id.$key.filtered_h5.h5`, example: `filtered_feature_bc_matrix.h5`. ", - "default": "$id.$key.filtered_h5.h5" - }, - - "metrics_summary": { - "type": "string", - "description": "Type: `file`, default: `$id.$key.metrics_summary.csv`, example: `metrics_summary.csv`. ", - "help_text": "Type: `file`, default: `$id.$key.metrics_summary.csv`, example: `metrics_summary.csv`. ", - "default": "$id.$key.metrics_summary.csv" - }, - - "molecule_info": { - "type": "string", - "description": "Type: `file`, default: `$id.$key.molecule_info.h5`, example: `molecule_info.h5`. ", - "help_text": "Type: `file`, default: `$id.$key.molecule_info.h5`, example: `molecule_info.h5`. 
", - "default": "$id.$key.molecule_info.h5" - }, - - "bam": { - "type": "string", - "description": "Type: `file`, default: `$id.$key.bam.bam`, example: `possorted_genome_bam.bam`. ", - "help_text": "Type: `file`, default: `$id.$key.bam.bam`, example: `possorted_genome_bam.bam`. ", - "default": "$id.$key.bam.bam" - }, - - "bai": { - "type": "string", - "description": "Type: `file`, default: `$id.$key.bai.bai`, example: `possorted_genome_bam.bam.bai`. ", - "help_text": "Type: `file`, default: `$id.$key.bai.bai`, example: `possorted_genome_bam.bam.bai`. ", - "default": "$id.$key.bai.bai" - }, - - "raw_h5": { - "type": "string", - "description": "Type: `file`, default: `$id.$key.raw_h5.h5`, example: `raw_feature_bc_matrix.h5`. ", - "help_text": "Type: `file`, default: `$id.$key.raw_h5.h5`, example: `raw_feature_bc_matrix.h5`. ", - "default": "$id.$key.raw_h5.h5" - } - - } - }, - "nextflow input-output arguments" : { - "title": "Nextflow input-output arguments", - "type": "object", - "description": "Input/output parameters for Nextflow itself. Please note that both publishDir and publish_dir are supported but at least one has to be configured.", - "properties": { - - "publish_dir": { - "type": "string", - "description": "Type: `string`, required, example: `output/`. Path to an output directory", - "help_text": "Type: `string`, required, example: `output/`. Path to an output directory." - }, - - "param_list": { - "type": "string", - "description": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel", - "help_text": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel. A `param_list` can either be a list of maps, a csv file, a json file, a yaml file, or simply a yaml blob.\n\n* A list of maps (as-is) where the keys of each map corresponds to the arguments of the pipeline. Example: in a `nextflow.config` file: `param_list: [ [\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027], [\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027] ]`.\n* A csv file should have column names which correspond to the different arguments of this pipeline. Example: `--param_list data.csv` with columns `id,input`.\n* A json or a yaml file should be a list of maps, each of which has keys corresponding to the arguments of the pipeline. Example: `--param_list data.json` with contents `[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]`.\n* A yaml blob can also be passed directly as a string. Example: `--param_list \"[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]\"`.\n\nWhen passing a csv, json or yaml file, relative path names are relativized to the location of the parameter file. No relativation is performed when `param_list` is a list of maps (as-is) or a yaml blob.", - "hidden": true - } - - } - } + "description": "No description", + "properties": { + + + "input": { + "type": + "string", + "description": "Type: `file`, required, example: `input_dir`. Output directory from a Cell Ranger count run", + "help_text": "Type: `file`, required, example: `input_dir`. Output directory from a Cell Ranger count run." 
+ + } + + + , + "filtered_h5": { + "type": + "string", + "description": "Type: `file`, default: `$id.$key.filtered_h5.h5`, example: `filtered_feature_bc_matrix.h5`. ", + "help_text": "Type: `file`, default: `$id.$key.filtered_h5.h5`, example: `filtered_feature_bc_matrix.h5`. " + , + "default": "$id.$key.filtered_h5.h5" + } + + + , + "metrics_summary": { + "type": + "string", + "description": "Type: `file`, default: `$id.$key.metrics_summary.csv`, example: `metrics_summary.csv`. ", + "help_text": "Type: `file`, default: `$id.$key.metrics_summary.csv`, example: `metrics_summary.csv`. " + , + "default": "$id.$key.metrics_summary.csv" + } + + + , + "molecule_info": { + "type": + "string", + "description": "Type: `file`, default: `$id.$key.molecule_info.h5`, example: `molecule_info.h5`. ", + "help_text": "Type: `file`, default: `$id.$key.molecule_info.h5`, example: `molecule_info.h5`. " + , + "default": "$id.$key.molecule_info.h5" + } + + + , + "bam": { + "type": + "string", + "description": "Type: `file`, default: `$id.$key.bam.bam`, example: `possorted_genome_bam.bam`. ", + "help_text": "Type: `file`, default: `$id.$key.bam.bam`, example: `possorted_genome_bam.bam`. " + , + "default": "$id.$key.bam.bam" + } + + + , + "bai": { + "type": + "string", + "description": "Type: `file`, default: `$id.$key.bai.bai`, example: `possorted_genome_bam.bam.bai`. ", + "help_text": "Type: `file`, default: `$id.$key.bai.bai`, example: `possorted_genome_bam.bam.bai`. " + , + "default": "$id.$key.bai.bai" + } + + + , + "raw_h5": { + "type": + "string", + "description": "Type: `file`, default: `$id.$key.raw_h5.h5`, example: `raw_feature_bc_matrix.h5`. ", + "help_text": "Type: `file`, default: `$id.$key.raw_h5.h5`, example: `raw_feature_bc_matrix.h5`. " + , + "default": "$id.$key.raw_h5.h5" + } + + +} +}, + + + "nextflow input-output arguments" : { + "title": "Nextflow input-output arguments", + "type": "object", + "description": "Input/output parameters for Nextflow itself. Please note that both publishDir and publish_dir are supported but at least one has to be configured.", + "properties": { + + + "publish_dir": { + "type": + "string", + "description": "Type: `string`, required, example: `output/`. Path to an output directory", + "help_text": "Type: `string`, required, example: `output/`. Path to an output directory." + + } + + + , + "param_list": { + "type": + "string", + "description": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel", + "help_text": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel. A `param_list` can either be a list of maps, a csv file, a json file, a yaml file, or simply a yaml blob.\n\n* A list of maps (as-is) where the keys of each map corresponds to the arguments of the pipeline. Example: in a `nextflow.config` file: `param_list: [ [\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027], [\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027] ]`.\n* A csv file should have column names which correspond to the different arguments of this pipeline. Example: `--param_list data.csv` with columns `id,input`.\n* A json or a yaml file should be a list of maps, each of which has keys corresponding to the arguments of the pipeline. 
Example: `--param_list data.json` with contents `[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]`.\n* A yaml blob can also be passed directly as a string. Example: `--param_list \"[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]\"`.\n\nWhen passing a csv, json or yaml file, relative path names are relativized to the location of the parameter file. No relativation is performed when `param_list` is a list of maps (as-is) or a yaml blob.", + "hidden": true + + } + + +} +} +}, +"allOf": [ + + { + "$ref": "#/definitions/arguments" }, - "allOf": [ - { - "$ref": "#/definitions/arguments" - }, - { - "$ref": "#/definitions/nextflow input-output arguments" - } - ] + + { + "$ref": "#/definitions/nextflow input-output arguments" + } +] } diff --git a/target/nextflow/mapping/cellranger_multi/.config.vsh.yaml b/target/nextflow/mapping/cellranger_multi/.config.vsh.yaml index 7e9ca75621a..e0e7c0a576f 100644 --- a/target/nextflow/mapping/cellranger_multi/.config.vsh.yaml +++ b/target/nextflow/mapping/cellranger_multi/.config.vsh.yaml @@ -1,7 +1,7 @@ functionality: name: "cellranger_multi" namespace: "mapping" - version: "0.12.3" + version: "0.12.4" authors: - name: "Angela Oliveira Pisco" roles: @@ -418,6 +418,6 @@ info: output: "/home/runner/work/openpipeline/openpipeline/target/nextflow/mapping/cellranger_multi" executable: "/home/runner/work/openpipeline/openpipeline/target/nextflow/mapping/cellranger_multi/cellranger_multi" viash_version: "0.7.5" - git_commit: "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" + git_commit: "a075b9f384e200b357c4c85801062a980ddb3383" git_remote: "https://github.com/openpipelines-bio/openpipeline" - git_tag: "0.12.2-3-g827d483cf7" + git_tag: "0.12.3-3-ga075b9f384" diff --git a/target/nextflow/mapping/cellranger_multi/main.nf b/target/nextflow/mapping/cellranger_multi/main.nf index 3ba8bbecac8..ac1eb88faf5 100644 --- a/target/nextflow/mapping/cellranger_multi/main.nf +++ b/target/nextflow/mapping/cellranger_multi/main.nf @@ -1,4 +1,4 @@ -// cellranger_multi 0.12.3 +// cellranger_multi 0.12.4 // // This wrapper script is auto-generated by viash 0.7.5 and is thus a derivative // work thereof. 
This software comes with ABSOLUTELY NO WARRANTY from Data @@ -29,7 +29,7 @@ thisConfig = processConfig(jsonSlurper.parseText('''{ "functionality" : { "name" : "cellranger_multi", "namespace" : "mapping", - "version" : "0.12.3", + "version" : "0.12.4", "authors" : [ { "name" : "Angela Oliveira Pisco", @@ -545,9 +545,9 @@ thisConfig = processConfig(jsonSlurper.parseText('''{ "platform" : "nextflow", "output" : "/home/runner/work/openpipeline/openpipeline/target/nextflow/mapping/cellranger_multi", "viash_version" : "0.7.5", - "git_commit" : "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f", + "git_commit" : "a075b9f384e200b357c4c85801062a980ddb3383", "git_remote" : "https://github.com/openpipelines-bio/openpipeline", - "git_tag" : "0.12.2-3-g827d483cf7" + "git_tag" : "0.12.3-3-ga075b9f384" } }''')) diff --git a/target/nextflow/mapping/cellranger_multi/nextflow.config b/target/nextflow/mapping/cellranger_multi/nextflow.config index f0df0196fa6..a27a28f2b6c 100644 --- a/target/nextflow/mapping/cellranger_multi/nextflow.config +++ b/target/nextflow/mapping/cellranger_multi/nextflow.config @@ -2,7 +2,7 @@ manifest { name = 'cellranger_multi' mainScript = 'main.nf' nextflowVersion = '!>=20.12.1-edge' - version = '0.12.3' + version = '0.12.4' description = 'Align fastq files using Cell Ranger multi.' author = 'Angela Oliveira Pisco, Robrecht Cannoodt, Dries De Maeyer' } diff --git a/target/nextflow/mapping/cellranger_multi/nextflow_schema.json b/target/nextflow/mapping/cellranger_multi/nextflow_schema.json index b18565e7dd3..ad0f1ffda48 100644 --- a/target/nextflow/mapping/cellranger_multi/nextflow_schema.json +++ b/target/nextflow/mapping/cellranger_multi/nextflow_schema.json @@ -1,222 +1,328 @@ { - "$schema": "http://json-schema.org/draft-07/schema", - "title": "cellranger_multi", - "description": "Align fastq files using Cell Ranger multi.", +"$schema": "http://json-schema.org/draft-07/schema", +"title": "cellranger_multi", +"description": "Align fastq files using Cell Ranger multi.", +"type": "object", +"definitions": { + + + + "outputs" : { + "title": "Outputs", "type": "object", - "definitions": { - "outputs" : { - "title": "Outputs", - "type": "object", - "description": "No description", - "properties": { - - "output": { - "type": "string", - "description": "Type: `file`, required, default: `$id.$key.output.output`, example: `/path/to/output`. The folder to store the alignment results", - "help_text": "Type: `file`, required, default: `$id.$key.output.output`, example: `/path/to/output`. The folder to store the alignment results.", - "default": "$id.$key.output.output" - } - - } - }, - "input files" : { - "title": "Input files", - "type": "object", - "description": "No description", - "properties": { - - "input": { - "type": "string", - "description": "Type: List of `file`, required, example: `mysample_S1_L001_R1_001.fastq.gz;mysample_S1_L001_R2_001.fastq.gz`, multiple_sep: `\";\"`. The FASTQ files to be analyzed", - "help_text": "Type: List of `file`, required, example: `mysample_S1_L001_R1_001.fastq.gz;mysample_S1_L001_R2_001.fastq.gz`, multiple_sep: `\";\"`. The FASTQ files to be analyzed. FASTQ files should conform to the naming conventions of bcl2fastq and mkfastq:\n`[Sample Name]_S[Sample Index]_L00[Lane Number]_[Read Type]_001.fastq.gz`\n" - }, - - "gex_reference": { - "type": "string", - "description": "Type: `file`, required, example: `reference_genome.tar.gz`. Genome refence index built by Cell Ranger mkref", - "help_text": "Type: `file`, required, example: `reference_genome.tar.gz`. 
Genome refence index built by Cell Ranger mkref." - }, - - "vdj_reference": { - "type": "string", - "description": "Type: `file`, example: `reference_vdj.tar.gz`. VDJ refence index built by Cell Ranger mkref", - "help_text": "Type: `file`, example: `reference_vdj.tar.gz`. VDJ refence index built by Cell Ranger mkref." - }, - - "vdj_inner_enrichment_primers": { - "type": "string", - "description": "Type: `file`, example: `enrichment_primers.txt`. V(D)J Immune Profiling libraries: if inner enrichment primers other than those provided \nin the 10x Genomics kits are used, they need to be specified here as a\ntext file with one primer per line", - "help_text": "Type: `file`, example: `enrichment_primers.txt`. V(D)J Immune Profiling libraries: if inner enrichment primers other than those provided \nin the 10x Genomics kits are used, they need to be specified here as a\ntext file with one primer per line.\n" - }, - - "feature_reference": { - "type": "string", - "description": "Type: `file`, example: `feature_reference.csv`. Path to the Feature reference CSV file, declaring Feature Barcode constructs and associated barcodes", - "help_text": "Type: `file`, example: `feature_reference.csv`. Path to the Feature reference CSV file, declaring Feature Barcode constructs and associated barcodes. Required only for Antibody Capture or CRISPR Guide Capture libraries. See https://support.10xgenomics.com/single-cell-gene-expression/software/pipelines/latest/using/feature-bc-analysis#feature-ref for more information." - } - - } - }, - "library arguments" : { - "title": "Library arguments", - "type": "object", - "description": "No description", - "properties": { - - "library_id": { - "type": "string", - "description": "Type: List of `string`, required, example: `mysample1`, multiple_sep: `\";\"`. The Illumina sample name to analyze", - "help_text": "Type: List of `string`, required, example: `mysample1`, multiple_sep: `\";\"`. The Illumina sample name to analyze. This must exactly match the \u0027Sample Name\u0027 part of the FASTQ files specified in the `--input` argument." - }, - - "library_type": { - "type": "string", - "description": "Type: List of `string`, required, example: `Gene Expression`, multiple_sep: `\";\"`. The underlying feature type of the library", - "help_text": "Type: List of `string`, required, example: `Gene Expression`, multiple_sep: `\";\"`. The underlying feature type of the library.\nPossible values: \"Gene Expression\", \"VDJ\", \"VDJ-T\", \"VDJ-B\", \"Antibody Capture\", \"CRISPR Guide Capture\", \"Multiplexing Capture\"\n" - }, - - "library_subsample": { - "type": "string", - "description": "Type: List of `string`, example: `0.5`, multiple_sep: `\";\"`. Optional", - "help_text": "Type: List of `string`, example: `0.5`, multiple_sep: `\";\"`. Optional. The rate at which reads from the provided FASTQ files are sampled. Must be strictly greater than 0 and less than or equal to 1." - }, - - "library_lanes": { - "type": "string", - "description": "Type: List of `string`, example: `1-4`, multiple_sep: `\";\"`. Lanes associated with this sample", - "help_text": "Type: List of `string`, example: `1-4`, multiple_sep: `\";\"`. Lanes associated with this sample. Defaults to using all lanes." - } - - } - }, - "gene expression arguments" : { - "title": "Gene expression arguments", - "type": "object", - "description": "Arguments relevant to the analysis of gene expression data.", - "properties": { - - "gex_expect_cells": { - "type": "integer", - "description": "Type: `integer`, example: `3000`. 
Expected number of recovered cells, used as input to cell calling algorithm", - "help_text": "Type: `integer`, example: `3000`. Expected number of recovered cells, used as input to cell calling algorithm." - }, - - "gex_chemistry": { - "type": "string", - "description": "Type: `string`, default: `auto`, choices: ``auto`, `threeprime`, `fiveprime`, `SC3Pv1`, `SC3Pv2`, `SC3Pv3`, `SC3Pv3LT`, `SC3Pv3HT`, `SC5P-PE`, `SC5P-R2`, `SC-FB``. Assay configuration", - "help_text": "Type: `string`, default: `auto`, choices: ``auto`, `threeprime`, `fiveprime`, `SC3Pv1`, `SC3Pv2`, `SC3Pv3`, `SC3Pv3LT`, `SC3Pv3HT`, `SC5P-PE`, `SC5P-R2`, `SC-FB``. Assay configuration.\n- auto: autodetect mode\n- threeprime: Single Cell 3\u0027\n- fiveprime: Single Cell 5\u0027\n- SC3Pv1: Single Cell 3\u0027 v1\n- SC3Pv2: Single Cell 3\u0027 v2\n- SC3Pv3: Single Cell 3\u0027 v3\n- SC3Pv3LT: Single Cell 3\u0027 v3 LT\n- SC3Pv3HT: Single Cell 3\u0027 v3 HT\n- SC5P-PE: Single Cell 5\u0027 paired-end\n- SC5P-R2: Single Cell 5\u0027 R2-only\n- SC-FB: Single Cell Antibody-only 3\u0027 v2 or 5\u0027\nSee https://kb.10xgenomics.com/hc/en-us/articles/115003764132-How-does-Cell-Ranger-auto-detect-chemistry- for more information.\n", - "enum": ["auto", "threeprime", "fiveprime", "SC3Pv1", "SC3Pv2", "SC3Pv3", "SC3Pv3LT", "SC3Pv3HT", "SC5P-PE", "SC5P-R2", "SC-FB"] + "description": "No description", + "properties": { + + + "output": { + "type": + "string", + "description": "Type: `file`, required, default: `$id.$key.output.output`, example: `/path/to/output`. The folder to store the alignment results", + "help_text": "Type: `file`, required, default: `$id.$key.output.output`, example: `/path/to/output`. The folder to store the alignment results." , - "default": "auto" - }, - - "gex_secondary_analysis": { - "type": "boolean", - "description": "Type: `boolean`, default: `false`. Whether or not to run the secondary analysis e", - "help_text": "Type: `boolean`, default: `false`. Whether or not to run the secondary analysis e.g. clustering.", - "default": "False" - }, - - "gex_generate_bam": { - "type": "boolean", - "description": "Type: `boolean`, default: `false`. Whether to generate a BAM file", - "help_text": "Type: `boolean`, default: `false`. Whether to generate a BAM file.", - "default": "False" - }, - - "gex_include_introns": { - "type": "boolean", - "description": "Type: `boolean`, default: `true`. Include intronic reads in count (default=true unless --target-panel is specified in which case default=false)", - "help_text": "Type: `boolean`, default: `true`. Include intronic reads in count (default=true unless --target-panel is specified in which case default=false)", - "default": "True" - } - - } - }, - "cell multiplexing parameters" : { - "title": "Cell multiplexing parameters", - "type": "object", - "description": "Arguments related to cell multiplexing.", - "properties": { - - "cell_multiplex_sample_id": { - "type": "string", - "description": "Type: `string`. A name to identify a multiplexed sample", - "help_text": "Type: `string`. A name to identify a multiplexed sample. Must be alphanumeric with hyphens and/or underscores, and less than 64 characters. Required for Cell Multiplexing libraries." - }, - - "cell_multiplex_oligo_ids": { - "type": "string", - "description": "Type: `string`. The Cell Multiplexing oligo IDs used to multiplex this sample", - "help_text": "Type: `string`. The Cell Multiplexing oligo IDs used to multiplex this sample. If multiple CMOs were used for a sample, separate IDs with a pipe (e.g., CMO301|CMO302). 
Required for Cell Multiplexing libraries." - }, - - "cell_multiplex_description": { - "type": "string", - "description": "Type: `string`. A description for the sample", - "help_text": "Type: `string`. A description for the sample." - } - - } - }, - "executor arguments" : { - "title": "Executor arguments", - "type": "object", - "description": "No description", - "properties": { - - "dryrun": { - "type": "boolean", - "description": "Type: `boolean_true`, default: `false`. If true, the output directory will only contain the CWL input files, but the pipeline itself will not be executed", - "help_text": "Type: `boolean_true`, default: `false`. If true, the output directory will only contain the CWL input files, but the pipeline itself will not be executed.", - "default": "False" - } - - } - }, - "nextflow input-output arguments" : { - "title": "Nextflow input-output arguments", - "type": "object", - "description": "Input/output parameters for Nextflow itself. Please note that both publishDir and publish_dir are supported but at least one has to be configured.", - "properties": { - - "publish_dir": { - "type": "string", - "description": "Type: `string`, required, example: `output/`. Path to an output directory", - "help_text": "Type: `string`, required, example: `output/`. Path to an output directory." - }, - - "param_list": { - "type": "string", - "description": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel", - "help_text": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel. A `param_list` can either be a list of maps, a csv file, a json file, a yaml file, or simply a yaml blob.\n\n* A list of maps (as-is) where the keys of each map corresponds to the arguments of the pipeline. Example: in a `nextflow.config` file: `param_list: [ [\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027], [\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027] ]`.\n* A csv file should have column names which correspond to the different arguments of this pipeline. Example: `--param_list data.csv` with columns `id,input`.\n* A json or a yaml file should be a list of maps, each of which has keys corresponding to the arguments of the pipeline. Example: `--param_list data.json` with contents `[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]`.\n* A yaml blob can also be passed directly as a string. Example: `--param_list \"[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]\"`.\n\nWhen passing a csv, json or yaml file, relative path names are relativized to the location of the parameter file. No relativation is performed when `param_list` is a list of maps (as-is) or a yaml blob.", - "hidden": true - } - - } - } + "default": "$id.$key.output.output" + } + + +} +}, + + + "input files" : { + "title": "Input files", + "type": "object", + "description": "No description", + "properties": { + + + "input": { + "type": + "string", + "description": "Type: List of `file`, required, example: `mysample_S1_L001_R1_001.fastq.gz;mysample_S1_L001_R2_001.fastq.gz`, multiple_sep: `\";\"`. 
The FASTQ files to be analyzed", + "help_text": "Type: List of `file`, required, example: `mysample_S1_L001_R1_001.fastq.gz;mysample_S1_L001_R2_001.fastq.gz`, multiple_sep: `\";\"`. The FASTQ files to be analyzed. FASTQ files should conform to the naming conventions of bcl2fastq and mkfastq:\n`[Sample Name]_S[Sample Index]_L00[Lane Number]_[Read Type]_001.fastq.gz`\n" + + } + + + , + "gex_reference": { + "type": + "string", + "description": "Type: `file`, required, example: `reference_genome.tar.gz`. Genome refence index built by Cell Ranger mkref", + "help_text": "Type: `file`, required, example: `reference_genome.tar.gz`. Genome refence index built by Cell Ranger mkref." + + } + + + , + "vdj_reference": { + "type": + "string", + "description": "Type: `file`, example: `reference_vdj.tar.gz`. VDJ refence index built by Cell Ranger mkref", + "help_text": "Type: `file`, example: `reference_vdj.tar.gz`. VDJ refence index built by Cell Ranger mkref." + + } + + + , + "vdj_inner_enrichment_primers": { + "type": + "string", + "description": "Type: `file`, example: `enrichment_primers.txt`. V(D)J Immune Profiling libraries: if inner enrichment primers other than those provided \nin the 10x Genomics kits are used, they need to be specified here as a\ntext file with one primer per line", + "help_text": "Type: `file`, example: `enrichment_primers.txt`. V(D)J Immune Profiling libraries: if inner enrichment primers other than those provided \nin the 10x Genomics kits are used, they need to be specified here as a\ntext file with one primer per line.\n" + + } + + + , + "feature_reference": { + "type": + "string", + "description": "Type: `file`, example: `feature_reference.csv`. Path to the Feature reference CSV file, declaring Feature Barcode constructs and associated barcodes", + "help_text": "Type: `file`, example: `feature_reference.csv`. Path to the Feature reference CSV file, declaring Feature Barcode constructs and associated barcodes. Required only for Antibody Capture or CRISPR Guide Capture libraries. See https://support.10xgenomics.com/single-cell-gene-expression/software/pipelines/latest/using/feature-bc-analysis#feature-ref for more information." + + } + + +} +}, + + + "library arguments" : { + "title": "Library arguments", + "type": "object", + "description": "No description", + "properties": { + + + "library_id": { + "type": + "string", + "description": "Type: List of `string`, required, example: `mysample1`, multiple_sep: `\";\"`. The Illumina sample name to analyze", + "help_text": "Type: List of `string`, required, example: `mysample1`, multiple_sep: `\";\"`. The Illumina sample name to analyze. This must exactly match the \u0027Sample Name\u0027 part of the FASTQ files specified in the `--input` argument." + + } + + + , + "library_type": { + "type": + "string", + "description": "Type: List of `string`, required, example: `Gene Expression`, multiple_sep: `\";\"`. The underlying feature type of the library", + "help_text": "Type: List of `string`, required, example: `Gene Expression`, multiple_sep: `\";\"`. The underlying feature type of the library.\nPossible values: \"Gene Expression\", \"VDJ\", \"VDJ-T\", \"VDJ-B\", \"Antibody Capture\", \"CRISPR Guide Capture\", \"Multiplexing Capture\"\n" + + } + + + , + "library_subsample": { + "type": + "string", + "description": "Type: List of `string`, example: `0.5`, multiple_sep: `\";\"`. Optional", + "help_text": "Type: List of `string`, example: `0.5`, multiple_sep: `\";\"`. Optional. 
The rate at which reads from the provided FASTQ files are sampled. Must be strictly greater than 0 and less than or equal to 1." + + } + + + , + "library_lanes": { + "type": + "string", + "description": "Type: List of `string`, example: `1-4`, multiple_sep: `\";\"`. Lanes associated with this sample", + "help_text": "Type: List of `string`, example: `1-4`, multiple_sep: `\";\"`. Lanes associated with this sample. Defaults to using all lanes." + + } + + +} +}, + + + "gene expression arguments" : { + "title": "Gene expression arguments", + "type": "object", + "description": "Arguments relevant to the analysis of gene expression data.", + "properties": { + + + "gex_expect_cells": { + "type": + "integer", + "description": "Type: `integer`, example: `3000`. Expected number of recovered cells, used as input to cell calling algorithm", + "help_text": "Type: `integer`, example: `3000`. Expected number of recovered cells, used as input to cell calling algorithm." + + } + + + , + "gex_chemistry": { + "type": + "string", + "description": "Type: `string`, default: `auto`, choices: ``auto`, `threeprime`, `fiveprime`, `SC3Pv1`, `SC3Pv2`, `SC3Pv3`, `SC3Pv3LT`, `SC3Pv3HT`, `SC5P-PE`, `SC5P-R2`, `SC-FB``. Assay configuration", + "help_text": "Type: `string`, default: `auto`, choices: ``auto`, `threeprime`, `fiveprime`, `SC3Pv1`, `SC3Pv2`, `SC3Pv3`, `SC3Pv3LT`, `SC3Pv3HT`, `SC5P-PE`, `SC5P-R2`, `SC-FB``. Assay configuration.\n- auto: autodetect mode\n- threeprime: Single Cell 3\u0027\n- fiveprime: Single Cell 5\u0027\n- SC3Pv1: Single Cell 3\u0027 v1\n- SC3Pv2: Single Cell 3\u0027 v2\n- SC3Pv3: Single Cell 3\u0027 v3\n- SC3Pv3LT: Single Cell 3\u0027 v3 LT\n- SC3Pv3HT: Single Cell 3\u0027 v3 HT\n- SC5P-PE: Single Cell 5\u0027 paired-end\n- SC5P-R2: Single Cell 5\u0027 R2-only\n- SC-FB: Single Cell Antibody-only 3\u0027 v2 or 5\u0027\nSee https://kb.10xgenomics.com/hc/en-us/articles/115003764132-How-does-Cell-Ranger-auto-detect-chemistry- for more information.\n", + "enum": ["auto", "threeprime", "fiveprime", "SC3Pv1", "SC3Pv2", "SC3Pv3", "SC3Pv3LT", "SC3Pv3HT", "SC5P-PE", "SC5P-R2", "SC-FB"] + + , + "default": "auto" + } + + + , + "gex_secondary_analysis": { + "type": + "boolean", + "description": "Type: `boolean`, default: `false`. Whether or not to run the secondary analysis e", + "help_text": "Type: `boolean`, default: `false`. Whether or not to run the secondary analysis e.g. clustering." + , + "default": "False" + } + + + , + "gex_generate_bam": { + "type": + "boolean", + "description": "Type: `boolean`, default: `false`. Whether to generate a BAM file", + "help_text": "Type: `boolean`, default: `false`. Whether to generate a BAM file." + , + "default": "False" + } + + + , + "gex_include_introns": { + "type": + "boolean", + "description": "Type: `boolean`, default: `true`. Include intronic reads in count (default=true unless --target-panel is specified in which case default=false)", + "help_text": "Type: `boolean`, default: `true`. Include intronic reads in count (default=true unless --target-panel is specified in which case default=false)" + , + "default": "True" + } + + +} +}, + + + "cell multiplexing parameters" : { + "title": "Cell multiplexing parameters", + "type": "object", + "description": "Arguments related to cell multiplexing.", + "properties": { + + + "cell_multiplex_sample_id": { + "type": + "string", + "description": "Type: `string`. A name to identify a multiplexed sample", + "help_text": "Type: `string`. A name to identify a multiplexed sample. 
Must be alphanumeric with hyphens and/or underscores, and less than 64 characters. Required for Cell Multiplexing libraries." + + } + + + , + "cell_multiplex_oligo_ids": { + "type": + "string", + "description": "Type: `string`. The Cell Multiplexing oligo IDs used to multiplex this sample", + "help_text": "Type: `string`. The Cell Multiplexing oligo IDs used to multiplex this sample. If multiple CMOs were used for a sample, separate IDs with a pipe (e.g., CMO301|CMO302). Required for Cell Multiplexing libraries." + + } + + + , + "cell_multiplex_description": { + "type": + "string", + "description": "Type: `string`. A description for the sample", + "help_text": "Type: `string`. A description for the sample." + + } + + +} +}, + + + "executor arguments" : { + "title": "Executor arguments", + "type": "object", + "description": "No description", + "properties": { + + + "dryrun": { + "type": + "boolean", + "description": "Type: `boolean_true`, default: `false`. If true, the output directory will only contain the CWL input files, but the pipeline itself will not be executed", + "help_text": "Type: `boolean_true`, default: `false`. If true, the output directory will only contain the CWL input files, but the pipeline itself will not be executed." + , + "default": "False" + } + + +} +}, + + + "nextflow input-output arguments" : { + "title": "Nextflow input-output arguments", + "type": "object", + "description": "Input/output parameters for Nextflow itself. Please note that both publishDir and publish_dir are supported but at least one has to be configured.", + "properties": { + + + "publish_dir": { + "type": + "string", + "description": "Type: `string`, required, example: `output/`. Path to an output directory", + "help_text": "Type: `string`, required, example: `output/`. Path to an output directory." + + } + + + , + "param_list": { + "type": + "string", + "description": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel", + "help_text": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel. A `param_list` can either be a list of maps, a csv file, a json file, a yaml file, or simply a yaml blob.\n\n* A list of maps (as-is) where the keys of each map corresponds to the arguments of the pipeline. Example: in a `nextflow.config` file: `param_list: [ [\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027], [\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027] ]`.\n* A csv file should have column names which correspond to the different arguments of this pipeline. Example: `--param_list data.csv` with columns `id,input`.\n* A json or a yaml file should be a list of maps, each of which has keys corresponding to the arguments of the pipeline. Example: `--param_list data.json` with contents `[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]`.\n* A yaml blob can also be passed directly as a string. Example: `--param_list \"[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]\"`.\n\nWhen passing a csv, json or yaml file, relative path names are relativized to the location of the parameter file. 
No relativation is performed when `param_list` is a list of maps (as-is) or a yaml blob.", + "hidden": true + + } + + +} +} +}, +"allOf": [ + + { + "$ref": "#/definitions/outputs" + }, + + { + "$ref": "#/definitions/input files" + }, + + { + "$ref": "#/definitions/library arguments" + }, + + { + "$ref": "#/definitions/gene expression arguments" + }, + + { + "$ref": "#/definitions/cell multiplexing parameters" + }, + + { + "$ref": "#/definitions/executor arguments" }, - "allOf": [ - { - "$ref": "#/definitions/outputs" - }, - { - "$ref": "#/definitions/input files" - }, - { - "$ref": "#/definitions/library arguments" - }, - { - "$ref": "#/definitions/gene expression arguments" - }, - { - "$ref": "#/definitions/cell multiplexing parameters" - }, - { - "$ref": "#/definitions/executor arguments" - }, - { - "$ref": "#/definitions/nextflow input-output arguments" - } - ] + + { + "$ref": "#/definitions/nextflow input-output arguments" + } +] } diff --git a/target/nextflow/mapping/htseq_count/.config.vsh.yaml b/target/nextflow/mapping/htseq_count/.config.vsh.yaml index ced73cd1c29..1b58e73d30c 100644 --- a/target/nextflow/mapping/htseq_count/.config.vsh.yaml +++ b/target/nextflow/mapping/htseq_count/.config.vsh.yaml @@ -1,7 +1,7 @@ functionality: name: "htseq_count" namespace: "mapping" - version: "0.12.3" + version: "0.12.4" authors: - name: "Robrecht Cannoodt" roles: @@ -413,6 +413,6 @@ info: output: "/home/runner/work/openpipeline/openpipeline/target/nextflow/mapping/htseq_count" executable: "/home/runner/work/openpipeline/openpipeline/target/nextflow/mapping/htseq_count/htseq_count" viash_version: "0.7.5" - git_commit: "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" + git_commit: "a075b9f384e200b357c4c85801062a980ddb3383" git_remote: "https://github.com/openpipelines-bio/openpipeline" - git_tag: "0.12.2-3-g827d483cf7" + git_tag: "0.12.3-3-ga075b9f384" diff --git a/target/nextflow/mapping/htseq_count/main.nf b/target/nextflow/mapping/htseq_count/main.nf index 6877e869652..76bba96a6a3 100644 --- a/target/nextflow/mapping/htseq_count/main.nf +++ b/target/nextflow/mapping/htseq_count/main.nf @@ -1,4 +1,4 @@ -// htseq_count 0.12.3 +// htseq_count 0.12.4 // // This wrapper script is auto-generated by viash 0.7.5 and is thus a derivative // work thereof. 
This software comes with ABSOLUTELY NO WARRANTY from Data @@ -28,7 +28,7 @@ thisConfig = processConfig(jsonSlurper.parseText('''{ "functionality" : { "name" : "htseq_count", "namespace" : "mapping", - "version" : "0.12.3", + "version" : "0.12.4", "authors" : [ { "name" : "Robrecht Cannoodt", @@ -553,9 +553,9 @@ thisConfig = processConfig(jsonSlurper.parseText('''{ "platform" : "nextflow", "output" : "/home/runner/work/openpipeline/openpipeline/target/nextflow/mapping/htseq_count", "viash_version" : "0.7.5", - "git_commit" : "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f", + "git_commit" : "a075b9f384e200b357c4c85801062a980ddb3383", "git_remote" : "https://github.com/openpipelines-bio/openpipeline", - "git_tag" : "0.12.2-3-g827d483cf7" + "git_tag" : "0.12.3-3-ga075b9f384" } }''')) diff --git a/target/nextflow/mapping/htseq_count/nextflow.config b/target/nextflow/mapping/htseq_count/nextflow.config index 3601e4198af..c652b269db6 100644 --- a/target/nextflow/mapping/htseq_count/nextflow.config +++ b/target/nextflow/mapping/htseq_count/nextflow.config @@ -2,7 +2,7 @@ manifest { name = 'htseq_count' mainScript = 'main.nf' nextflowVersion = '!>=20.12.1-edge' - version = '0.12.3' + version = '0.12.4' description = 'Quantify gene expression for subsequent testing for differential expression.\n\nThis script takes one or more alignment files in SAM/BAM format and a feature file in GFF format and calculates for each feature the number of reads mapping to it. \n\nSee http://htseq.readthedocs.io/en/master/count.html for details.\n' author = 'Robrecht Cannoodt, Angela Oliveira Pisco' } diff --git a/target/nextflow/mapping/htseq_count/nextflow_schema.json b/target/nextflow/mapping/htseq_count/nextflow_schema.json index 0e8a3b3d6d4..d426d557a63 100644 --- a/target/nextflow/mapping/htseq_count/nextflow_schema.json +++ b/target/nextflow/mapping/htseq_count/nextflow_schema.json @@ -1,198 +1,291 @@ { - "$schema": "http://json-schema.org/draft-07/schema", - "title": "htseq_count", - "description": "Quantify gene expression for subsequent testing for differential expression.\n\nThis script takes one or more alignment files in SAM/BAM format and a feature file in GFF format and calculates for each feature the number of reads mapping to it. \n\nSee http://htseq.readthedocs.io/en/master/count.html for details.\n", +"$schema": "http://json-schema.org/draft-07/schema", +"title": "htseq_count", +"description": "Quantify gene expression for subsequent testing for differential expression.\n\nThis script takes one or more alignment files in SAM/BAM format and a feature file in GFF format and calculates for each feature the number of reads mapping to it. \n\nSee http://htseq.readthedocs.io/en/master/count.html for details.\n", +"type": "object", +"definitions": { + + + + "arguments" : { + "title": "Arguments", "type": "object", - "definitions": { - "arguments" : { - "title": "Arguments", - "type": "object", - "description": "No description", - "properties": { - - "order": { - "type": "string", - "description": "Type: `string`, default: `name`, choices: ``pos`, `name``. Sorting order of \u003calignment_file\u003e", - "help_text": "Type: `string`, default: `name`, choices: ``pos`, `name``. Sorting order of \u003calignment_file\u003e. Paired-end sequencing data must be sorted either by position or\nby read name, and the sorting order must be specified. 
Ignored for single-end data.\n", - "enum": ["pos", "name"] + "description": "No description", + "properties": { + + + "order": { + "type": + "string", + "description": "Type: `string`, default: `name`, choices: ``pos`, `name``. Sorting order of \u003calignment_file\u003e", + "help_text": "Type: `string`, default: `name`, choices: ``pos`, `name``. Sorting order of \u003calignment_file\u003e. Paired-end sequencing data must be sorted either by position or\nby read name, and the sorting order must be specified. Ignored for single-end data.\n", + "enum": ["pos", "name"] + , - "default": "name" - }, - - "stranded": { - "type": "string", - "description": "Type: `string`, default: `yes`, choices: ``yes`, `no`, `reverse``. Whether the data is from a strand-specific assay", - "help_text": "Type: `string`, default: `yes`, choices: ``yes`, `no`, `reverse``. Whether the data is from a strand-specific assay. \u0027reverse\u0027 means \u0027yes\u0027 with reversed strand interpretation.", - "enum": ["yes", "no", "reverse"] + "default": "name" + } + + + , + "stranded": { + "type": + "string", + "description": "Type: `string`, default: `yes`, choices: ``yes`, `no`, `reverse``. Whether the data is from a strand-specific assay", + "help_text": "Type: `string`, default: `yes`, choices: ``yes`, `no`, `reverse``. Whether the data is from a strand-specific assay. \u0027reverse\u0027 means \u0027yes\u0027 with reversed strand interpretation.", + "enum": ["yes", "no", "reverse"] + , - "default": "yes" - }, - - "minimum_alignment_quality": { - "type": "integer", - "description": "Type: `integer`, default: `10`. Skip all reads with MAPQ alignment quality lower than the given minimum value", - "help_text": "Type: `integer`, default: `10`. Skip all reads with MAPQ alignment quality lower than the given minimum value. \nMAPQ is the 5th column of a SAM/BAM file and its usage depends on the software \nused to map the reads.\n", - "default": "10" - }, - - "type": { - "type": "string", - "description": "Type: `string`, example: `exon`. Feature type (3rd column in GTF file) to be used, all features of other type are ignored (default, suitable for Ensembl GTF files: exon)", - "help_text": "Type: `string`, example: `exon`. Feature type (3rd column in GTF file) to be used, all features of other type are ignored (default, suitable for Ensembl GTF files: exon)" - }, - - "id_attribute": { - "type": "string", - "description": "Type: List of `string`, example: `gene_id`, multiple_sep: `\":\"`. GTF attribute to be used as feature ID (default, suitable for Ensembl GTF files: gene_id)", - "help_text": "Type: List of `string`, example: `gene_id`, multiple_sep: `\":\"`. GTF attribute to be used as feature ID (default, suitable for Ensembl GTF files: gene_id).\nAll feature of the right type (see -t option) within the same GTF attribute will be added\ntogether. The typical way of using this option is to count all exonic reads from each gene\nand add the exons but other uses are possible as well. You can call this option multiple\ntimes: in that case, the combination of all attributes separated by colons (:) will be used\nas a unique identifier, e.g. for exons you might use -i gene_id -i exon_number.\n" - }, - - "additional_attributes": { - "type": "string", - "description": "Type: List of `string`, example: `gene_name`, multiple_sep: `\":\"`. Additional feature attributes (suitable for Ensembl GTF files: gene_name)", - "help_text": "Type: List of `string`, example: `gene_name`, multiple_sep: `\":\"`. 
Additional feature attributes (suitable for Ensembl GTF files: gene_name). Use multiple times\nfor more than one additional attribute. These attributes are only used as annotations in the\noutput, while the determination of how the counts are added together is done based on option -i.\n" - }, - - "add_chromosome_info": { - "type": "boolean", - "description": "Type: `boolean_true`, default: `false`. Store information about the chromosome of each feature as an additional attribute\n(e", - "help_text": "Type: `boolean_true`, default: `false`. Store information about the chromosome of each feature as an additional attribute\n(e.g. colunm in the TSV output file).\n", - "default": "False" - }, - - "mode": { - "type": "string", - "description": "Type: `string`, default: `union`, choices: ``union`, `intersection-strict`, `intersection-nonempty``. Mode to handle reads overlapping more than one feature", - "help_text": "Type: `string`, default: `union`, choices: ``union`, `intersection-strict`, `intersection-nonempty``. Mode to handle reads overlapping more than one feature.", - "enum": ["union", "intersection-strict", "intersection-nonempty"] + "default": "yes" + } + + + , + "minimum_alignment_quality": { + "type": + "integer", + "description": "Type: `integer`, default: `10`. Skip all reads with MAPQ alignment quality lower than the given minimum value", + "help_text": "Type: `integer`, default: `10`. Skip all reads with MAPQ alignment quality lower than the given minimum value. \nMAPQ is the 5th column of a SAM/BAM file and its usage depends on the software \nused to map the reads.\n" , - "default": "union" - }, - - "non_unique": { - "type": "string", - "description": "Type: `string`, default: `none`, choices: ``none`, `all`, `fraction`, `random``. Whether and how to score reads that are not uniquely aligned or ambiguously assigned to features", - "help_text": "Type: `string`, default: `none`, choices: ``none`, `all`, `fraction`, `random``. Whether and how to score reads that are not uniquely aligned or ambiguously assigned to features.", - "enum": ["none", "all", "fraction", "random"] + "default": "10" + } + + + , + "type": { + "type": + "string", + "description": "Type: `string`, example: `exon`. Feature type (3rd column in GTF file) to be used, all features of other type are ignored (default, suitable for Ensembl GTF files: exon)", + "help_text": "Type: `string`, example: `exon`. Feature type (3rd column in GTF file) to be used, all features of other type are ignored (default, suitable for Ensembl GTF files: exon)" + + } + + + , + "id_attribute": { + "type": + "string", + "description": "Type: List of `string`, example: `gene_id`, multiple_sep: `\":\"`. GTF attribute to be used as feature ID (default, suitable for Ensembl GTF files: gene_id)", + "help_text": "Type: List of `string`, example: `gene_id`, multiple_sep: `\":\"`. GTF attribute to be used as feature ID (default, suitable for Ensembl GTF files: gene_id).\nAll feature of the right type (see -t option) within the same GTF attribute will be added\ntogether. The typical way of using this option is to count all exonic reads from each gene\nand add the exons but other uses are possible as well. You can call this option multiple\ntimes: in that case, the combination of all attributes separated by colons (:) will be used\nas a unique identifier, e.g. 
for exons you might use -i gene_id -i exon_number.\n" + + } + + + , + "additional_attributes": { + "type": + "string", + "description": "Type: List of `string`, example: `gene_name`, multiple_sep: `\":\"`. Additional feature attributes (suitable for Ensembl GTF files: gene_name)", + "help_text": "Type: List of `string`, example: `gene_name`, multiple_sep: `\":\"`. Additional feature attributes (suitable for Ensembl GTF files: gene_name). Use multiple times\nfor more than one additional attribute. These attributes are only used as annotations in the\noutput, while the determination of how the counts are added together is done based on option -i.\n" + + } + + + , + "add_chromosome_info": { + "type": + "boolean", + "description": "Type: `boolean_true`, default: `false`. Store information about the chromosome of each feature as an additional attribute\n(e", + "help_text": "Type: `boolean_true`, default: `false`. Store information about the chromosome of each feature as an additional attribute\n(e.g. colunm in the TSV output file).\n" + , + "default": "False" + } + + + , + "mode": { + "type": + "string", + "description": "Type: `string`, default: `union`, choices: ``union`, `intersection-strict`, `intersection-nonempty``. Mode to handle reads overlapping more than one feature", + "help_text": "Type: `string`, default: `union`, choices: ``union`, `intersection-strict`, `intersection-nonempty``. Mode to handle reads overlapping more than one feature.", + "enum": ["union", "intersection-strict", "intersection-nonempty"] + + , + "default": "union" + } + + + , + "non_unique": { + "type": + "string", + "description": "Type: `string`, default: `none`, choices: ``none`, `all`, `fraction`, `random``. Whether and how to score reads that are not uniquely aligned or ambiguously assigned to features", + "help_text": "Type: `string`, default: `none`, choices: ``none`, `all`, `fraction`, `random``. Whether and how to score reads that are not uniquely aligned or ambiguously assigned to features.", + "enum": ["none", "all", "fraction", "random"] + , - "default": "none" - }, - - "secondary_alignments": { - "type": "string", - "description": "Type: `string`, choices: ``score`, `ignore``. Whether to score secondary alignments (0x100 flag)", - "help_text": "Type: `string`, choices: ``score`, `ignore``. Whether to score secondary alignments (0x100 flag).", - "enum": ["score", "ignore"] - - }, - - "supplementary_alignments": { - "type": "string", - "description": "Type: `string`, choices: ``score`, `ignore``. Whether to score supplementary alignments (0x800 flag)", - "help_text": "Type: `string`, choices: ``score`, `ignore``. Whether to score supplementary alignments (0x800 flag).", - "enum": ["score", "ignore"] - - }, - - "counts_output_sparse": { - "type": "boolean", - "description": "Type: `boolean_true`, default: `false`. Store the counts as a sparse matrix (mtx, h5ad, loom)", - "help_text": "Type: `boolean_true`, default: `false`. Store the counts as a sparse matrix (mtx, h5ad, loom).", - "default": "False" - } - - } - }, - "input" : { - "title": "Input", - "type": "object", - "description": "No description", - "properties": { - - "input": { - "type": "string", - "description": "Type: List of `file`, required, example: `mysample1.BAM;mysample2.BAM`, multiple_sep: `\";\"`. Path to the SAM/BAM files containing the mapped reads", - "help_text": "Type: List of `file`, required, example: `mysample1.BAM;mysample2.BAM`, multiple_sep: `\";\"`. Path to the SAM/BAM files containing the mapped reads." 
- }, - - "reference": { - "type": "string", - "description": "Type: `file`, required, example: `reference.gtf`. Path to the GTF file containing the features", - "help_text": "Type: `file`, required, example: `reference.gtf`. Path to the GTF file containing the features." - } - - } - }, - "output" : { - "title": "Output", - "type": "object", - "description": "No description", - "properties": { - - "output": { - "type": "string", - "description": "Type: `file`, required, default: `$id.$key.output.tsv`, example: `htseq-count.tsv`. Filename to output the counts to", - "help_text": "Type: `file`, required, default: `$id.$key.output.tsv`, example: `htseq-count.tsv`. Filename to output the counts to.", - "default": "$id.$key.output.tsv" - }, - - "output_delimiter": { - "type": "string", - "description": "Type: `string`, example: `\t`. Column delimiter in output", - "help_text": "Type: `string`, example: `\t`. Column delimiter in output." - }, - - "output_sam": { - "type": "string", - "description": "Type: List of `file`, default: `$id.$key.output_sam_*.BAM`, example: `mysample1_out.BAM;mysample2_out.BAM`, multiple_sep: `\";\"`. Write out all SAM alignment records into SAM/BAM files (one per input file needed), \nannotating each line with its feature assignment (as an optional field with tag \u0027XF\u0027)", - "help_text": "Type: List of `file`, default: `$id.$key.output_sam_*.BAM`, example: `mysample1_out.BAM;mysample2_out.BAM`, multiple_sep: `\";\"`. Write out all SAM alignment records into SAM/BAM files (one per input file needed), \nannotating each line with its feature assignment (as an optional field with tag \u0027XF\u0027). \nSee the -p option to use BAM instead of SAM.\n", - "default": "$id.$key.output_sam_*.BAM" - }, - - "output_sam_format": { - "type": "string", - "description": "Type: `string`, choices: ``sam`, `bam``. Format to use with the --output_sam argument", - "help_text": "Type: `string`, choices: ``sam`, `bam``. Format to use with the --output_sam argument.", - "enum": ["sam", "bam"] - - } - - } - }, - "nextflow input-output arguments" : { - "title": "Nextflow input-output arguments", - "type": "object", - "description": "Input/output parameters for Nextflow itself. Please note that both publishDir and publish_dir are supported but at least one has to be configured.", - "properties": { - - "publish_dir": { - "type": "string", - "description": "Type: `string`, required, example: `output/`. Path to an output directory", - "help_text": "Type: `string`, required, example: `output/`. Path to an output directory." - }, - - "param_list": { - "type": "string", - "description": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel", - "help_text": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel. A `param_list` can either be a list of maps, a csv file, a json file, a yaml file, or simply a yaml blob.\n\n* A list of maps (as-is) where the keys of each map corresponds to the arguments of the pipeline. Example: in a `nextflow.config` file: `param_list: [ [\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027], [\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027] ]`.\n* A csv file should have column names which correspond to the different arguments of this pipeline. 
Example: `--param_list data.csv` with columns `id,input`.\n* A json or a yaml file should be a list of maps, each of which has keys corresponding to the arguments of the pipeline. Example: `--param_list data.json` with contents `[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]`.\n* A yaml blob can also be passed directly as a string. Example: `--param_list \"[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]\"`.\n\nWhen passing a csv, json or yaml file, relative path names are relativized to the location of the parameter file. No relativation is performed when `param_list` is a list of maps (as-is) or a yaml blob.", - "hidden": true - } - - } - } + "default": "none" + } + + + , + "secondary_alignments": { + "type": + "string", + "description": "Type: `string`, choices: ``score`, `ignore``. Whether to score secondary alignments (0x100 flag)", + "help_text": "Type: `string`, choices: ``score`, `ignore``. Whether to score secondary alignments (0x100 flag).", + "enum": ["score", "ignore"] + + + } + + + , + "supplementary_alignments": { + "type": + "string", + "description": "Type: `string`, choices: ``score`, `ignore``. Whether to score supplementary alignments (0x800 flag)", + "help_text": "Type: `string`, choices: ``score`, `ignore``. Whether to score supplementary alignments (0x800 flag).", + "enum": ["score", "ignore"] + + + } + + + , + "counts_output_sparse": { + "type": + "boolean", + "description": "Type: `boolean_true`, default: `false`. Store the counts as a sparse matrix (mtx, h5ad, loom)", + "help_text": "Type: `boolean_true`, default: `false`. Store the counts as a sparse matrix (mtx, h5ad, loom)." + , + "default": "False" + } + + +} +}, + + + "input" : { + "title": "Input", + "type": "object", + "description": "No description", + "properties": { + + + "input": { + "type": + "string", + "description": "Type: List of `file`, required, example: `mysample1.BAM;mysample2.BAM`, multiple_sep: `\";\"`. Path to the SAM/BAM files containing the mapped reads", + "help_text": "Type: List of `file`, required, example: `mysample1.BAM;mysample2.BAM`, multiple_sep: `\";\"`. Path to the SAM/BAM files containing the mapped reads." + + } + + + , + "reference": { + "type": + "string", + "description": "Type: `file`, required, example: `reference.gtf`. Path to the GTF file containing the features", + "help_text": "Type: `file`, required, example: `reference.gtf`. Path to the GTF file containing the features." + + } + + +} +}, + + + "output" : { + "title": "Output", + "type": "object", + "description": "No description", + "properties": { + + + "output": { + "type": + "string", + "description": "Type: `file`, required, default: `$id.$key.output.tsv`, example: `htseq-count.tsv`. Filename to output the counts to", + "help_text": "Type: `file`, required, default: `$id.$key.output.tsv`, example: `htseq-count.tsv`. Filename to output the counts to." + , + "default": "$id.$key.output.tsv" + } + + + , + "output_delimiter": { + "type": + "string", + "description": "Type: `string`, example: `\t`. Column delimiter in output", + "help_text": "Type: `string`, example: `\t`. Column delimiter in output." + + } + + + , + "output_sam": { + "type": + "string", + "description": "Type: List of `file`, default: `$id.$key.output_sam_*.BAM`, example: `mysample1_out.BAM;mysample2_out.BAM`, multiple_sep: `\";\"`. 
Write out all SAM alignment records into SAM/BAM files (one per input file needed), \nannotating each line with its feature assignment (as an optional field with tag \u0027XF\u0027)", + "help_text": "Type: List of `file`, default: `$id.$key.output_sam_*.BAM`, example: `mysample1_out.BAM;mysample2_out.BAM`, multiple_sep: `\";\"`. Write out all SAM alignment records into SAM/BAM files (one per input file needed), \nannotating each line with its feature assignment (as an optional field with tag \u0027XF\u0027). \nSee the -p option to use BAM instead of SAM.\n" + , + "default": "$id.$key.output_sam_*.BAM" + } + + + , + "output_sam_format": { + "type": + "string", + "description": "Type: `string`, choices: ``sam`, `bam``. Format to use with the --output_sam argument", + "help_text": "Type: `string`, choices: ``sam`, `bam``. Format to use with the --output_sam argument.", + "enum": ["sam", "bam"] + + + } + + +} +}, + + + "nextflow input-output arguments" : { + "title": "Nextflow input-output arguments", + "type": "object", + "description": "Input/output parameters for Nextflow itself. Please note that both publishDir and publish_dir are supported but at least one has to be configured.", + "properties": { + + + "publish_dir": { + "type": + "string", + "description": "Type: `string`, required, example: `output/`. Path to an output directory", + "help_text": "Type: `string`, required, example: `output/`. Path to an output directory." + + } + + + , + "param_list": { + "type": + "string", + "description": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel", + "help_text": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel. A `param_list` can either be a list of maps, a csv file, a json file, a yaml file, or simply a yaml blob.\n\n* A list of maps (as-is) where the keys of each map corresponds to the arguments of the pipeline. Example: in a `nextflow.config` file: `param_list: [ [\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027], [\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027] ]`.\n* A csv file should have column names which correspond to the different arguments of this pipeline. Example: `--param_list data.csv` with columns `id,input`.\n* A json or a yaml file should be a list of maps, each of which has keys corresponding to the arguments of the pipeline. Example: `--param_list data.json` with contents `[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]`.\n* A yaml blob can also be passed directly as a string. Example: `--param_list \"[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]\"`.\n\nWhen passing a csv, json or yaml file, relative path names are relativized to the location of the parameter file. 
No relativation is performed when `param_list` is a list of maps (as-is) or a yaml blob.", + "hidden": true + + } + + +} +} +}, +"allOf": [ + + { + "$ref": "#/definitions/arguments" + }, + + { + "$ref": "#/definitions/input" + }, + + { + "$ref": "#/definitions/output" }, - "allOf": [ - { - "$ref": "#/definitions/arguments" - }, - { - "$ref": "#/definitions/input" - }, - { - "$ref": "#/definitions/output" - }, - { - "$ref": "#/definitions/nextflow input-output arguments" - } - ] + + { + "$ref": "#/definitions/nextflow input-output arguments" + } +] } diff --git a/target/nextflow/mapping/htseq_count_to_h5mu/.config.vsh.yaml b/target/nextflow/mapping/htseq_count_to_h5mu/.config.vsh.yaml index 98e9f4afa34..7c3a327a49f 100644 --- a/target/nextflow/mapping/htseq_count_to_h5mu/.config.vsh.yaml +++ b/target/nextflow/mapping/htseq_count_to_h5mu/.config.vsh.yaml @@ -1,7 +1,7 @@ functionality: name: "htseq_count_to_h5mu" namespace: "mapping" - version: "0.12.3" + version: "0.12.4" authors: - name: "Robrecht Cannoodt" roles: @@ -204,6 +204,6 @@ info: output: "/home/runner/work/openpipeline/openpipeline/target/nextflow/mapping/htseq_count_to_h5mu" executable: "/home/runner/work/openpipeline/openpipeline/target/nextflow/mapping/htseq_count_to_h5mu/htseq_count_to_h5mu" viash_version: "0.7.5" - git_commit: "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" + git_commit: "a075b9f384e200b357c4c85801062a980ddb3383" git_remote: "https://github.com/openpipelines-bio/openpipeline" - git_tag: "0.12.2-3-g827d483cf7" + git_tag: "0.12.3-3-ga075b9f384" diff --git a/target/nextflow/mapping/htseq_count_to_h5mu/main.nf b/target/nextflow/mapping/htseq_count_to_h5mu/main.nf index 7e64b6f4658..ccfb32f36c6 100644 --- a/target/nextflow/mapping/htseq_count_to_h5mu/main.nf +++ b/target/nextflow/mapping/htseq_count_to_h5mu/main.nf @@ -1,4 +1,4 @@ -// htseq_count_to_h5mu 0.12.3 +// htseq_count_to_h5mu 0.12.4 // // This wrapper script is auto-generated by viash 0.7.5 and is thus a derivative // work thereof. 
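Note: the regenerated nextflow_schema.json files in this diff all share the same shape — argument groups live under "definitions" and are composed through an "allOf" list of "$ref" pointers, as in the htseq_count schema just above. As a rough illustration only (the schema path is taken from this diff, but running this is not part of the change), the groups and their parameters can be listed with a few lines of Python:

    import json

    # Path as it appears in this diff; adjust if the target directory differs.
    schema_path = "target/nextflow/mapping/htseq_count/nextflow_schema.json"

    with open(schema_path) as fh:
        schema = json.load(fh)

    # Each entry in "allOf" is a {"$ref": "#/definitions/<group>"} pointer.
    for ref in schema.get("allOf", []):
        group = ref["$ref"].split("/")[-1]
        props = schema["definitions"][group].get("properties", {})
        print(group, "->", ", ".join(props))
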
This software comes with ABSOLUTELY NO WARRANTY from Data @@ -28,7 +28,7 @@ thisConfig = processConfig(jsonSlurper.parseText('''{ "functionality" : { "name" : "htseq_count_to_h5mu", "namespace" : "mapping", - "version" : "0.12.3", + "version" : "0.12.4", "authors" : [ { "name" : "Robrecht Cannoodt", @@ -298,9 +298,9 @@ thisConfig = processConfig(jsonSlurper.parseText('''{ "platform" : "nextflow", "output" : "/home/runner/work/openpipeline/openpipeline/target/nextflow/mapping/htseq_count_to_h5mu", "viash_version" : "0.7.5", - "git_commit" : "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f", + "git_commit" : "a075b9f384e200b357c4c85801062a980ddb3383", "git_remote" : "https://github.com/openpipelines-bio/openpipeline", - "git_tag" : "0.12.2-3-g827d483cf7" + "git_tag" : "0.12.3-3-ga075b9f384" } }''')) diff --git a/target/nextflow/mapping/htseq_count_to_h5mu/nextflow.config b/target/nextflow/mapping/htseq_count_to_h5mu/nextflow.config index ab14c87d688..6f5cfc8c813 100644 --- a/target/nextflow/mapping/htseq_count_to_h5mu/nextflow.config +++ b/target/nextflow/mapping/htseq_count_to_h5mu/nextflow.config @@ -2,7 +2,7 @@ manifest { name = 'htseq_count_to_h5mu' mainScript = 'main.nf' nextflowVersion = '!>=20.12.1-edge' - version = '0.12.3' + version = '0.12.4' description = 'Convert the htseq table to a h5mu.\n' author = 'Robrecht Cannoodt, Angela Oliveira Pisco' } diff --git a/target/nextflow/mapping/htseq_count_to_h5mu/nextflow_schema.json b/target/nextflow/mapping/htseq_count_to_h5mu/nextflow_schema.json index b69f2d0fb5f..290bc0b9f25 100644 --- a/target/nextflow/mapping/htseq_count_to_h5mu/nextflow_schema.json +++ b/target/nextflow/mapping/htseq_count_to_h5mu/nextflow_schema.json @@ -1,89 +1,127 @@ { - "$schema": "http://json-schema.org/draft-07/schema", - "title": "htseq_count_to_h5mu", - "description": "Convert the htseq table to a h5mu.\n", +"$schema": "http://json-schema.org/draft-07/schema", +"title": "htseq_count_to_h5mu", +"description": "Convert the htseq table to a h5mu.\n", +"type": "object", +"definitions": { + + + + "outputs" : { + "title": "Outputs", "type": "object", - "definitions": { - "outputs" : { - "title": "Outputs", - "type": "object", - "description": "No description", - "properties": { - - "output": { - "type": "string", - "description": "Type: `file`, required, default: `$id.$key.output.h5mu`, example: `output.h5mu`. Output h5mu file", - "help_text": "Type: `file`, required, default: `$id.$key.output.h5mu`, example: `output.h5mu`. Output h5mu file.", - "default": "$id.$key.output.h5mu" - }, - - "output_compression": { - "type": "string", - "description": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. The compression format to be used on the output h5mu object", - "help_text": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. The compression format to be used on the output h5mu object.", - "enum": ["gzip", "lzf"] + "description": "No description", + "properties": { + + + "output": { + "type": + "string", + "description": "Type: `file`, required, default: `$id.$key.output.h5mu`, example: `output.h5mu`. Output h5mu file", + "help_text": "Type: `file`, required, default: `$id.$key.output.h5mu`, example: `output.h5mu`. Output h5mu file." + , + "default": "$id.$key.output.h5mu" + } + + + , + "output_compression": { + "type": + "string", + "description": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. The compression format to be used on the output h5mu object", + "help_text": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. 
The compression format to be used on the output h5mu object.", + "enum": ["gzip", "lzf"] - } - - } - }, - "input" : { - "title": "Input", - "type": "object", - "description": "No description", - "properties": { - - "input_id": { - "type": "string", - "description": "Type: List of `string`, required, example: `foo`, multiple_sep: `\";\"`. The obs index for the counts", - "help_text": "Type: List of `string`, required, example: `foo`, multiple_sep: `\";\"`. The obs index for the counts" - }, - - "input_counts": { - "type": "string", - "description": "Type: List of `file`, required, example: `counts.tsv`, multiple_sep: `\";\"`. The counts as a TSV file as output by HTSeq", - "help_text": "Type: List of `file`, required, example: `counts.tsv`, multiple_sep: `\";\"`. The counts as a TSV file as output by HTSeq." - }, - - "reference": { - "type": "string", - "description": "Type: `file`, required, example: `gencode_v41_star`. The GTF file", - "help_text": "Type: `file`, required, example: `gencode_v41_star`. The GTF file." - } - - } - }, - "nextflow input-output arguments" : { - "title": "Nextflow input-output arguments", - "type": "object", - "description": "Input/output parameters for Nextflow itself. Please note that both publishDir and publish_dir are supported but at least one has to be configured.", - "properties": { - - "publish_dir": { - "type": "string", - "description": "Type: `string`, required, example: `output/`. Path to an output directory", - "help_text": "Type: `string`, required, example: `output/`. Path to an output directory." - }, - - "param_list": { - "type": "string", - "description": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel", - "help_text": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel. A `param_list` can either be a list of maps, a csv file, a json file, a yaml file, or simply a yaml blob.\n\n* A list of maps (as-is) where the keys of each map corresponds to the arguments of the pipeline. Example: in a `nextflow.config` file: `param_list: [ [\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027], [\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027] ]`.\n* A csv file should have column names which correspond to the different arguments of this pipeline. Example: `--param_list data.csv` with columns `id,input`.\n* A json or a yaml file should be a list of maps, each of which has keys corresponding to the arguments of the pipeline. Example: `--param_list data.json` with contents `[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]`.\n* A yaml blob can also be passed directly as a string. Example: `--param_list \"[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]\"`.\n\nWhen passing a csv, json or yaml file, relative path names are relativized to the location of the parameter file. No relativation is performed when `param_list` is a list of maps (as-is) or a yaml blob.", - "hidden": true - } - - } - } + + } + + +} +}, + + + "input" : { + "title": "Input", + "type": "object", + "description": "No description", + "properties": { + + + "input_id": { + "type": + "string", + "description": "Type: List of `string`, required, example: `foo`, multiple_sep: `\";\"`. 
The obs index for the counts", + "help_text": "Type: List of `string`, required, example: `foo`, multiple_sep: `\";\"`. The obs index for the counts" + + } + + + , + "input_counts": { + "type": + "string", + "description": "Type: List of `file`, required, example: `counts.tsv`, multiple_sep: `\";\"`. The counts as a TSV file as output by HTSeq", + "help_text": "Type: List of `file`, required, example: `counts.tsv`, multiple_sep: `\";\"`. The counts as a TSV file as output by HTSeq." + + } + + + , + "reference": { + "type": + "string", + "description": "Type: `file`, required, example: `gencode_v41_star`. The GTF file", + "help_text": "Type: `file`, required, example: `gencode_v41_star`. The GTF file." + + } + + +} +}, + + + "nextflow input-output arguments" : { + "title": "Nextflow input-output arguments", + "type": "object", + "description": "Input/output parameters for Nextflow itself. Please note that both publishDir and publish_dir are supported but at least one has to be configured.", + "properties": { + + + "publish_dir": { + "type": + "string", + "description": "Type: `string`, required, example: `output/`. Path to an output directory", + "help_text": "Type: `string`, required, example: `output/`. Path to an output directory." + + } + + + , + "param_list": { + "type": + "string", + "description": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel", + "help_text": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel. A `param_list` can either be a list of maps, a csv file, a json file, a yaml file, or simply a yaml blob.\n\n* A list of maps (as-is) where the keys of each map corresponds to the arguments of the pipeline. Example: in a `nextflow.config` file: `param_list: [ [\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027], [\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027] ]`.\n* A csv file should have column names which correspond to the different arguments of this pipeline. Example: `--param_list data.csv` with columns `id,input`.\n* A json or a yaml file should be a list of maps, each of which has keys corresponding to the arguments of the pipeline. Example: `--param_list data.json` with contents `[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]`.\n* A yaml blob can also be passed directly as a string. Example: `--param_list \"[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]\"`.\n\nWhen passing a csv, json or yaml file, relative path names are relativized to the location of the parameter file. 
No relativation is performed when `param_list` is a list of maps (as-is) or a yaml blob.", + "hidden": true + + } + + +} +} +}, +"allOf": [ + + { + "$ref": "#/definitions/outputs" + }, + + { + "$ref": "#/definitions/input" }, - "allOf": [ - { - "$ref": "#/definitions/outputs" - }, - { - "$ref": "#/definitions/input" - }, - { - "$ref": "#/definitions/nextflow input-output arguments" - } - ] + + { + "$ref": "#/definitions/nextflow input-output arguments" + } +] } diff --git a/target/nextflow/mapping/multi_star/.config.vsh.yaml b/target/nextflow/mapping/multi_star/.config.vsh.yaml index e891bb4bbb9..2fcfe48d2e0 100644 --- a/target/nextflow/mapping/multi_star/.config.vsh.yaml +++ b/target/nextflow/mapping/multi_star/.config.vsh.yaml @@ -1,7 +1,7 @@ functionality: name: "multi_star" namespace: "mapping" - version: "0.12.3" + version: "0.12.4" authors: - name: "Angela Oliveira Pisco" roles: @@ -3075,6 +3075,6 @@ info: output: "/home/runner/work/openpipeline/openpipeline/target/nextflow/mapping/multi_star" executable: "/home/runner/work/openpipeline/openpipeline/target/nextflow/mapping/multi_star/multi_star" viash_version: "0.7.5" - git_commit: "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" + git_commit: "a075b9f384e200b357c4c85801062a980ddb3383" git_remote: "https://github.com/openpipelines-bio/openpipeline" - git_tag: "0.12.2-3-g827d483cf7" + git_tag: "0.12.3-3-ga075b9f384" diff --git a/target/nextflow/mapping/multi_star/main.nf b/target/nextflow/mapping/multi_star/main.nf index b40985a9bf4..311ea13d63f 100644 --- a/target/nextflow/mapping/multi_star/main.nf +++ b/target/nextflow/mapping/multi_star/main.nf @@ -1,4 +1,4 @@ -// multi_star 0.12.3 +// multi_star 0.12.4 // // This wrapper script is auto-generated by viash 0.7.5 and is thus a derivative // work thereof. This software comes with ABSOLUTELY NO WARRANTY from Data @@ -28,7 +28,7 @@ thisConfig = processConfig(jsonSlurper.parseText('''{ "functionality" : { "name" : "multi_star", "namespace" : "mapping", - "version" : "0.12.3", + "version" : "0.12.4", "authors" : [ { "name" : "Angela Oliveira Pisco", @@ -3595,9 +3595,9 @@ thisConfig = processConfig(jsonSlurper.parseText('''{ "platform" : "nextflow", "output" : "/home/runner/work/openpipeline/openpipeline/target/nextflow/mapping/multi_star", "viash_version" : "0.7.5", - "git_commit" : "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f", + "git_commit" : "a075b9f384e200b357c4c85801062a980ddb3383", "git_remote" : "https://github.com/openpipelines-bio/openpipeline", - "git_tag" : "0.12.2-3-g827d483cf7" + "git_tag" : "0.12.3-3-ga075b9f384" } }''')) diff --git a/target/nextflow/mapping/multi_star/nextflow.config b/target/nextflow/mapping/multi_star/nextflow.config index 7a054623f93..af23dbf650b 100644 --- a/target/nextflow/mapping/multi_star/nextflow.config +++ b/target/nextflow/mapping/multi_star/nextflow.config @@ -2,7 +2,7 @@ manifest { name = 'multi_star' mainScript = 'main.nf' nextflowVersion = '!>=20.12.1-edge' - version = '0.12.3' + version = '0.12.4' description = 'Align fastq files using STAR.' 
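Note: each of these schemas documents the same hidden `param_list` option for feeding multiple parameter sets into a workflow. A minimal sketch of producing such a file from Python is shown below; it reuses the placeholder `foo`/`bar` entries from the schema's own help text and assumes PyYAML is installed — it is not part of this change.

    import yaml  # PyYAML, assumed to be available

    # The same placeholder entries used in the schema's help text.
    param_list = [
        {"id": "foo", "input": "foo.txt"},
        {"id": "bar", "input": "bar.txt"},
    ]

    with open("my_params.yaml", "w") as fh:
        yaml.safe_dump(param_list, fh)

    # Pass it to the workflow as: --param_list my_params.yaml
    # Relative paths inside the file are resolved against its location;
    # a list of maps or an inline yaml blob is taken as-is.
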
author = 'Angela Oliveira Pisco, Robrecht Cannoodt' } diff --git a/target/nextflow/mapping/multi_star/nextflow_schema.json b/target/nextflow/mapping/multi_star/nextflow_schema.json index 5e4a7de6707..37ac21f8779 100644 --- a/target/nextflow/mapping/multi_star/nextflow_schema.json +++ b/target/nextflow/mapping/multi_star/nextflow_schema.json @@ -1,114 +1,168 @@ { - "$schema": "http://json-schema.org/draft-07/schema", - "title": "multi_star", - "description": "Align fastq files using STAR.", +"$schema": "http://json-schema.org/draft-07/schema", +"title": "multi_star", +"description": "Align fastq files using STAR.", +"type": "object", +"definitions": { + + + + "input/output" : { + "title": "Input/Output", "type": "object", - "definitions": { - "input/output" : { - "title": "Input/Output", - "type": "object", - "description": "No description", - "properties": { - - "input_id": { - "type": "string", - "description": "Type: List of `string`, required, example: `mysample;mysample`, multiple_sep: `\";\"`. The ID of the sample being processed", - "help_text": "Type: List of `string`, required, example: `mysample;mysample`, multiple_sep: `\";\"`. The ID of the sample being processed. This vector should have the same length as the `--input_r1` argument." - }, - - "input_r1": { - "type": "string", - "description": "Type: List of `file`, required, example: `mysample_S1_L001_R1_001.fastq.gz;mysample_S1_L002_R1_001.fastq.gz`, multiple_sep: `\";\"`. Paths to the sequences to be mapped", - "help_text": "Type: List of `file`, required, example: `mysample_S1_L001_R1_001.fastq.gz;mysample_S1_L002_R1_001.fastq.gz`, multiple_sep: `\";\"`. Paths to the sequences to be mapped. If using Illumina paired-end reads, only the R1 files should be passed." - }, - - "input_r2": { - "type": "string", - "description": "Type: List of `file`, example: `mysample_S1_L001_R2_001.fastq.gz;mysample_S1_L002_R2_001.fastq.gz`, multiple_sep: `\";\"`. Paths to the sequences to be mapped", - "help_text": "Type: List of `file`, example: `mysample_S1_L001_R2_001.fastq.gz;mysample_S1_L002_R2_001.fastq.gz`, multiple_sep: `\";\"`. Paths to the sequences to be mapped. If using Illumina paired-end reads, only the R2 files should be passed." - }, - - "reference_index": { - "type": "string", - "description": "Type: `file`, required, example: `/path/to/reference`. Path to the reference built by star_build_reference", - "help_text": "Type: `file`, required, example: `/path/to/reference`. Path to the reference built by star_build_reference. Corresponds to the --genomeDir argument in the STAR command." - }, - - "reference_gtf": { - "type": "string", - "description": "Type: `file`, required, example: `genes.gtf`. Path to the gtf reference file", - "help_text": "Type: `file`, required, example: `genes.gtf`. Path to the gtf reference file." - }, - - "output": { - "type": "string", - "description": "Type: `file`, required, default: `$id.$key.output.output`, example: `/path/to/foo`. Path to output directory", - "help_text": "Type: `file`, required, default: `$id.$key.output.output`, example: `/path/to/foo`. Path to output directory. Corresponds to the --outFileNamePrefix argument in the STAR command.", - "default": "$id.$key.output.output" - } - - } - }, - "processing arguments" : { - "title": "Processing arguments", - "type": "object", - "description": "No description", - "properties": { - - "run_htseq_count": { - "type": "boolean", - "description": "Type: `boolean`, default: `true`. 
Whether or not to also run htseq-count after STAR", - "help_text": "Type: `boolean`, default: `true`. Whether or not to also run htseq-count after STAR.", - "default": "True" - }, - - "run_multiqc": { - "type": "boolean", - "description": "Type: `boolean`, default: `true`. Whether or not to also run MultiQC at the end", - "help_text": "Type: `boolean`, default: `true`. Whether or not to also run MultiQC at the end.", - "default": "True" - }, - - "min_success_rate": { - "type": "number", - "description": "Type: `double`, default: `0.5`. Fail when the success rate is below this threshold", - "help_text": "Type: `double`, default: `0.5`. Fail when the success rate is below this threshold.", - "default": "0.5" - } - - } - }, - "nextflow input-output arguments" : { - "title": "Nextflow input-output arguments", - "type": "object", - "description": "Input/output parameters for Nextflow itself. Please note that both publishDir and publish_dir are supported but at least one has to be configured.", - "properties": { - - "publish_dir": { - "type": "string", - "description": "Type: `string`, required, example: `output/`. Path to an output directory", - "help_text": "Type: `string`, required, example: `output/`. Path to an output directory." - }, - - "param_list": { - "type": "string", - "description": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel", - "help_text": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel. A `param_list` can either be a list of maps, a csv file, a json file, a yaml file, or simply a yaml blob.\n\n* A list of maps (as-is) where the keys of each map corresponds to the arguments of the pipeline. Example: in a `nextflow.config` file: `param_list: [ [\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027], [\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027] ]`.\n* A csv file should have column names which correspond to the different arguments of this pipeline. Example: `--param_list data.csv` with columns `id,input`.\n* A json or a yaml file should be a list of maps, each of which has keys corresponding to the arguments of the pipeline. Example: `--param_list data.json` with contents `[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]`.\n* A yaml blob can also be passed directly as a string. Example: `--param_list \"[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]\"`.\n\nWhen passing a csv, json or yaml file, relative path names are relativized to the location of the parameter file. No relativation is performed when `param_list` is a list of maps (as-is) or a yaml blob.", - "hidden": true - } - - } - } + "description": "No description", + "properties": { + + + "input_id": { + "type": + "string", + "description": "Type: List of `string`, required, example: `mysample;mysample`, multiple_sep: `\";\"`. The ID of the sample being processed", + "help_text": "Type: List of `string`, required, example: `mysample;mysample`, multiple_sep: `\";\"`. The ID of the sample being processed. This vector should have the same length as the `--input_r1` argument." 
+ + } + + + , + "input_r1": { + "type": + "string", + "description": "Type: List of `file`, required, example: `mysample_S1_L001_R1_001.fastq.gz;mysample_S1_L002_R1_001.fastq.gz`, multiple_sep: `\";\"`. Paths to the sequences to be mapped", + "help_text": "Type: List of `file`, required, example: `mysample_S1_L001_R1_001.fastq.gz;mysample_S1_L002_R1_001.fastq.gz`, multiple_sep: `\";\"`. Paths to the sequences to be mapped. If using Illumina paired-end reads, only the R1 files should be passed." + + } + + + , + "input_r2": { + "type": + "string", + "description": "Type: List of `file`, example: `mysample_S1_L001_R2_001.fastq.gz;mysample_S1_L002_R2_001.fastq.gz`, multiple_sep: `\";\"`. Paths to the sequences to be mapped", + "help_text": "Type: List of `file`, example: `mysample_S1_L001_R2_001.fastq.gz;mysample_S1_L002_R2_001.fastq.gz`, multiple_sep: `\";\"`. Paths to the sequences to be mapped. If using Illumina paired-end reads, only the R2 files should be passed." + + } + + + , + "reference_index": { + "type": + "string", + "description": "Type: `file`, required, example: `/path/to/reference`. Path to the reference built by star_build_reference", + "help_text": "Type: `file`, required, example: `/path/to/reference`. Path to the reference built by star_build_reference. Corresponds to the --genomeDir argument in the STAR command." + + } + + + , + "reference_gtf": { + "type": + "string", + "description": "Type: `file`, required, example: `genes.gtf`. Path to the gtf reference file", + "help_text": "Type: `file`, required, example: `genes.gtf`. Path to the gtf reference file." + + } + + + , + "output": { + "type": + "string", + "description": "Type: `file`, required, default: `$id.$key.output.output`, example: `/path/to/foo`. Path to output directory", + "help_text": "Type: `file`, required, default: `$id.$key.output.output`, example: `/path/to/foo`. Path to output directory. Corresponds to the --outFileNamePrefix argument in the STAR command." + , + "default": "$id.$key.output.output" + } + + +} +}, + + + "processing arguments" : { + "title": "Processing arguments", + "type": "object", + "description": "No description", + "properties": { + + + "run_htseq_count": { + "type": + "boolean", + "description": "Type: `boolean`, default: `true`. Whether or not to also run htseq-count after STAR", + "help_text": "Type: `boolean`, default: `true`. Whether or not to also run htseq-count after STAR." + , + "default": "True" + } + + + , + "run_multiqc": { + "type": + "boolean", + "description": "Type: `boolean`, default: `true`. Whether or not to also run MultiQC at the end", + "help_text": "Type: `boolean`, default: `true`. Whether or not to also run MultiQC at the end." + , + "default": "True" + } + + + , + "min_success_rate": { + "type": + "number", + "description": "Type: `double`, default: `0.5`. Fail when the success rate is below this threshold", + "help_text": "Type: `double`, default: `0.5`. Fail when the success rate is below this threshold." + , + "default": "0.5" + } + + +} +}, + + + "nextflow input-output arguments" : { + "title": "Nextflow input-output arguments", + "type": "object", + "description": "Input/output parameters for Nextflow itself. Please note that both publishDir and publish_dir are supported but at least one has to be configured.", + "properties": { + + + "publish_dir": { + "type": + "string", + "description": "Type: `string`, required, example: `output/`. Path to an output directory", + "help_text": "Type: `string`, required, example: `output/`. 
Path to an output directory." + + } + + + , + "param_list": { + "type": + "string", + "description": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel", + "help_text": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel. A `param_list` can either be a list of maps, a csv file, a json file, a yaml file, or simply a yaml blob.\n\n* A list of maps (as-is) where the keys of each map corresponds to the arguments of the pipeline. Example: in a `nextflow.config` file: `param_list: [ [\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027], [\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027] ]`.\n* A csv file should have column names which correspond to the different arguments of this pipeline. Example: `--param_list data.csv` with columns `id,input`.\n* A json or a yaml file should be a list of maps, each of which has keys corresponding to the arguments of the pipeline. Example: `--param_list data.json` with contents `[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]`.\n* A yaml blob can also be passed directly as a string. Example: `--param_list \"[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]\"`.\n\nWhen passing a csv, json or yaml file, relative path names are relativized to the location of the parameter file. No relativation is performed when `param_list` is a list of maps (as-is) or a yaml blob.", + "hidden": true + + } + + +} +} +}, +"allOf": [ + + { + "$ref": "#/definitions/input/output" + }, + + { + "$ref": "#/definitions/processing arguments" }, - "allOf": [ - { - "$ref": "#/definitions/input/output" - }, - { - "$ref": "#/definitions/processing arguments" - }, - { - "$ref": "#/definitions/nextflow input-output arguments" - } - ] + + { + "$ref": "#/definitions/nextflow input-output arguments" + } +] } diff --git a/target/nextflow/mapping/multi_star_to_h5mu/.config.vsh.yaml b/target/nextflow/mapping/multi_star_to_h5mu/.config.vsh.yaml index ba85db61dc0..8e486e6e2c0 100644 --- a/target/nextflow/mapping/multi_star_to_h5mu/.config.vsh.yaml +++ b/target/nextflow/mapping/multi_star_to_h5mu/.config.vsh.yaml @@ -1,7 +1,7 @@ functionality: name: "multi_star_to_h5mu" namespace: "mapping" - version: "0.12.3" + version: "0.12.4" authors: - name: "Robrecht Cannoodt" roles: @@ -174,6 +174,6 @@ info: output: "/home/runner/work/openpipeline/openpipeline/target/nextflow/mapping/multi_star_to_h5mu" executable: "/home/runner/work/openpipeline/openpipeline/target/nextflow/mapping/multi_star_to_h5mu/multi_star_to_h5mu" viash_version: "0.7.5" - git_commit: "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" + git_commit: "a075b9f384e200b357c4c85801062a980ddb3383" git_remote: "https://github.com/openpipelines-bio/openpipeline" - git_tag: "0.12.2-3-g827d483cf7" + git_tag: "0.12.3-3-ga075b9f384" diff --git a/target/nextflow/mapping/multi_star_to_h5mu/main.nf b/target/nextflow/mapping/multi_star_to_h5mu/main.nf index 1da6d7a6576..470483407d2 100644 --- a/target/nextflow/mapping/multi_star_to_h5mu/main.nf +++ b/target/nextflow/mapping/multi_star_to_h5mu/main.nf @@ -1,4 +1,4 @@ -// multi_star_to_h5mu 0.12.3 +// multi_star_to_h5mu 0.12.4 // // This wrapper script is auto-generated by viash 0.7.5 and is thus a derivative // work thereof. 
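Note: the multi_star schema above expects `--input_id`, `--input_r1` and `--input_r2` as parallel, semicolon-separated lists of equal length. A small sketch of assembling them (the FASTQ names are the examples from the schema, used here purely for illustration):

    # Hypothetical per-lane FASTQ files for one sample.
    samples = [
        ("mysample", "mysample_S1_L001_R1_001.fastq.gz", "mysample_S1_L001_R2_001.fastq.gz"),
        ("mysample", "mysample_S1_L002_R1_001.fastq.gz", "mysample_S1_L002_R2_001.fastq.gz"),
    ]

    input_id = ";".join(s[0] for s in samples)
    input_r1 = ";".join(s[1] for s in samples)
    input_r2 = ";".join(s[2] for s in samples)

    # All three strings now have the same number of entries, as the schema requires.
    print(input_id, input_r1, input_r2, sep="\n")
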
This software comes with ABSOLUTELY NO WARRANTY from Data @@ -28,7 +28,7 @@ thisConfig = processConfig(jsonSlurper.parseText('''{ "functionality" : { "name" : "multi_star_to_h5mu", "namespace" : "mapping", - "version" : "0.12.3", + "version" : "0.12.4", "authors" : [ { "name" : "Robrecht Cannoodt", @@ -258,9 +258,9 @@ thisConfig = processConfig(jsonSlurper.parseText('''{ "platform" : "nextflow", "output" : "/home/runner/work/openpipeline/openpipeline/target/nextflow/mapping/multi_star_to_h5mu", "viash_version" : "0.7.5", - "git_commit" : "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f", + "git_commit" : "a075b9f384e200b357c4c85801062a980ddb3383", "git_remote" : "https://github.com/openpipelines-bio/openpipeline", - "git_tag" : "0.12.2-3-g827d483cf7" + "git_tag" : "0.12.3-3-ga075b9f384" } }''')) diff --git a/target/nextflow/mapping/multi_star_to_h5mu/nextflow.config b/target/nextflow/mapping/multi_star_to_h5mu/nextflow.config index c5f453f114e..75b89fe2947 100644 --- a/target/nextflow/mapping/multi_star_to_h5mu/nextflow.config +++ b/target/nextflow/mapping/multi_star_to_h5mu/nextflow.config @@ -2,7 +2,7 @@ manifest { name = 'multi_star_to_h5mu' mainScript = 'main.nf' nextflowVersion = '!>=20.12.1-edge' - version = '0.12.3' + version = '0.12.4' description = 'Convert the output of `multi_star` to a h5mu.\n' author = 'Robrecht Cannoodt, Angela Oliveira Pisco' } diff --git a/target/nextflow/mapping/multi_star_to_h5mu/nextflow_schema.json b/target/nextflow/mapping/multi_star_to_h5mu/nextflow_schema.json index f40c1335697..a38f5162445 100644 --- a/target/nextflow/mapping/multi_star_to_h5mu/nextflow_schema.json +++ b/target/nextflow/mapping/multi_star_to_h5mu/nextflow_schema.json @@ -1,66 +1,93 @@ { - "$schema": "http://json-schema.org/draft-07/schema", - "title": "multi_star_to_h5mu", - "description": "Convert the output of `multi_star` to a h5mu.\n", +"$schema": "http://json-schema.org/draft-07/schema", +"title": "multi_star_to_h5mu", +"description": "Convert the output of `multi_star` to a h5mu.\n", +"type": "object", +"definitions": { + + + + "arguments" : { + "title": "Arguments", "type": "object", - "definitions": { - "arguments" : { - "title": "Arguments", - "type": "object", - "description": "No description", - "properties": { - - "input": { - "type": "string", - "description": "Type: `file`, required, example: `/path/to/foo`. The directory created by `multi_star`", - "help_text": "Type: `file`, required, example: `/path/to/foo`. The directory created by `multi_star`" - }, - - "output": { - "type": "string", - "description": "Type: `file`, required, default: `$id.$key.output.h5mu`, example: `output.h5mu`. Output h5mu file", - "help_text": "Type: `file`, required, default: `$id.$key.output.h5mu`, example: `output.h5mu`. Output h5mu file.", - "default": "$id.$key.output.h5mu" - }, - - "output_compression": { - "type": "string", - "description": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. The compression format to be used on the output h5mu object", - "help_text": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. The compression format to be used on the output h5mu object.", - "enum": ["gzip", "lzf"] + "description": "No description", + "properties": { + + + "input": { + "type": + "string", + "description": "Type: `file`, required, example: `/path/to/foo`. The directory created by `multi_star`", + "help_text": "Type: `file`, required, example: `/path/to/foo`. 
The directory created by `multi_star`" - } - - } - }, - "nextflow input-output arguments" : { - "title": "Nextflow input-output arguments", - "type": "object", - "description": "Input/output parameters for Nextflow itself. Please note that both publishDir and publish_dir are supported but at least one has to be configured.", - "properties": { - - "publish_dir": { - "type": "string", - "description": "Type: `string`, required, example: `output/`. Path to an output directory", - "help_text": "Type: `string`, required, example: `output/`. Path to an output directory." - }, - - "param_list": { - "type": "string", - "description": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel", - "help_text": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel. A `param_list` can either be a list of maps, a csv file, a json file, a yaml file, or simply a yaml blob.\n\n* A list of maps (as-is) where the keys of each map corresponds to the arguments of the pipeline. Example: in a `nextflow.config` file: `param_list: [ [\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027], [\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027] ]`.\n* A csv file should have column names which correspond to the different arguments of this pipeline. Example: `--param_list data.csv` with columns `id,input`.\n* A json or a yaml file should be a list of maps, each of which has keys corresponding to the arguments of the pipeline. Example: `--param_list data.json` with contents `[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]`.\n* A yaml blob can also be passed directly as a string. Example: `--param_list \"[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]\"`.\n\nWhen passing a csv, json or yaml file, relative path names are relativized to the location of the parameter file. No relativation is performed when `param_list` is a list of maps (as-is) or a yaml blob.", - "hidden": true - } - - } - } + } + + + , + "output": { + "type": + "string", + "description": "Type: `file`, required, default: `$id.$key.output.h5mu`, example: `output.h5mu`. Output h5mu file", + "help_text": "Type: `file`, required, default: `$id.$key.output.h5mu`, example: `output.h5mu`. Output h5mu file." + , + "default": "$id.$key.output.h5mu" + } + + + , + "output_compression": { + "type": + "string", + "description": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. The compression format to be used on the output h5mu object", + "help_text": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. The compression format to be used on the output h5mu object.", + "enum": ["gzip", "lzf"] + + + } + + +} +}, + + + "nextflow input-output arguments" : { + "title": "Nextflow input-output arguments", + "type": "object", + "description": "Input/output parameters for Nextflow itself. Please note that both publishDir and publish_dir are supported but at least one has to be configured.", + "properties": { + + + "publish_dir": { + "type": + "string", + "description": "Type: `string`, required, example: `output/`. Path to an output directory", + "help_text": "Type: `string`, required, example: `output/`. Path to an output directory." 
+ + } + + + , + "param_list": { + "type": + "string", + "description": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel", + "help_text": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel. A `param_list` can either be a list of maps, a csv file, a json file, a yaml file, or simply a yaml blob.\n\n* A list of maps (as-is) where the keys of each map corresponds to the arguments of the pipeline. Example: in a `nextflow.config` file: `param_list: [ [\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027], [\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027] ]`.\n* A csv file should have column names which correspond to the different arguments of this pipeline. Example: `--param_list data.csv` with columns `id,input`.\n* A json or a yaml file should be a list of maps, each of which has keys corresponding to the arguments of the pipeline. Example: `--param_list data.json` with contents `[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]`.\n* A yaml blob can also be passed directly as a string. Example: `--param_list \"[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]\"`.\n\nWhen passing a csv, json or yaml file, relative path names are relativized to the location of the parameter file. No relativation is performed when `param_list` is a list of maps (as-is) or a yaml blob.", + "hidden": true + + } + + +} +} +}, +"allOf": [ + + { + "$ref": "#/definitions/arguments" }, - "allOf": [ - { - "$ref": "#/definitions/arguments" - }, - { - "$ref": "#/definitions/nextflow input-output arguments" - } - ] + + { + "$ref": "#/definitions/nextflow input-output arguments" + } +] } diff --git a/target/nextflow/mapping/samtools_sort/.config.vsh.yaml b/target/nextflow/mapping/samtools_sort/.config.vsh.yaml index dedbf23de8e..1f4dbb4f456 100644 --- a/target/nextflow/mapping/samtools_sort/.config.vsh.yaml +++ b/target/nextflow/mapping/samtools_sort/.config.vsh.yaml @@ -1,7 +1,7 @@ functionality: name: "samtools_sort" namespace: "mapping" - version: "0.12.3" + version: "0.12.4" authors: - name: "Robrecht Cannoodt" roles: @@ -265,6 +265,6 @@ info: output: "/home/runner/work/openpipeline/openpipeline/target/nextflow/mapping/samtools_sort" executable: "/home/runner/work/openpipeline/openpipeline/target/nextflow/mapping/samtools_sort/samtools_sort" viash_version: "0.7.5" - git_commit: "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" + git_commit: "a075b9f384e200b357c4c85801062a980ddb3383" git_remote: "https://github.com/openpipelines-bio/openpipeline" - git_tag: "0.12.2-3-g827d483cf7" + git_tag: "0.12.3-3-ga075b9f384" diff --git a/target/nextflow/mapping/samtools_sort/main.nf b/target/nextflow/mapping/samtools_sort/main.nf index 4580e8e1798..678e00c4d7b 100644 --- a/target/nextflow/mapping/samtools_sort/main.nf +++ b/target/nextflow/mapping/samtools_sort/main.nf @@ -1,4 +1,4 @@ -// samtools_sort 0.12.3 +// samtools_sort 0.12.4 // // This wrapper script is auto-generated by viash 0.7.5 and is thus a derivative // work thereof. 
This software comes with ABSOLUTELY NO WARRANTY from Data @@ -28,7 +28,7 @@ thisConfig = processConfig(jsonSlurper.parseText('''{ "functionality" : { "name" : "samtools_sort", "namespace" : "mapping", - "version" : "0.12.3", + "version" : "0.12.4", "authors" : [ { "name" : "Robrecht Cannoodt", @@ -370,9 +370,9 @@ thisConfig = processConfig(jsonSlurper.parseText('''{ "platform" : "nextflow", "output" : "/home/runner/work/openpipeline/openpipeline/target/nextflow/mapping/samtools_sort", "viash_version" : "0.7.5", - "git_commit" : "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f", + "git_commit" : "a075b9f384e200b357c4c85801062a980ddb3383", "git_remote" : "https://github.com/openpipelines-bio/openpipeline", - "git_tag" : "0.12.2-3-g827d483cf7" + "git_tag" : "0.12.3-3-ga075b9f384" } }''')) diff --git a/target/nextflow/mapping/samtools_sort/nextflow.config b/target/nextflow/mapping/samtools_sort/nextflow.config index eb0386297d9..0fd6f643c35 100644 --- a/target/nextflow/mapping/samtools_sort/nextflow.config +++ b/target/nextflow/mapping/samtools_sort/nextflow.config @@ -2,7 +2,7 @@ manifest { name = 'samtools_sort' mainScript = 'main.nf' nextflowVersion = '!>=20.12.1-edge' - version = '0.12.3' + version = '0.12.4' description = 'Sort and (optionally) index alignments.\n\nReads are sorted by leftmost coordinates, or by read name when `--sort_by_read_names` is used.\n\nAn appropriate `@HD-SO` sort order header tag will be added or an existing one updated if necessary.\n\nNote that to generate an index file (by specifying `--output_bai`), the default coordinate sort must be used.\nThus the `--sort_by_read_names` and `--sort_by ` options are incompatible with `--output_bai`. \n' author = 'Robrecht Cannoodt, Angela Oliveira Pisco' } diff --git a/target/nextflow/mapping/samtools_sort/nextflow_schema.json b/target/nextflow/mapping/samtools_sort/nextflow_schema.json index 141979bf12b..3e9b8d6c456 100644 --- a/target/nextflow/mapping/samtools_sort/nextflow_schema.json +++ b/target/nextflow/mapping/samtools_sort/nextflow_schema.json @@ -1,134 +1,195 @@ { - "$schema": "http://json-schema.org/draft-07/schema", - "title": "samtools_sort", - "description": "Sort and (optionally) index alignments.\n\nReads are sorted by leftmost coordinates, or by read name when `--sort_by_read_names` is used.\n\nAn appropriate `@HD-SO` sort order header tag will be added or an existing one updated if necessary.\n\nNote that to generate an index file (by specifying `--output_bai`), the default coordinate sort must be used.\nThus the `--sort_by_read_names` and `--sort_by \u003cTAG\u003e` options are incompatible with `--output_bai`. \n", +"$schema": "http://json-schema.org/draft-07/schema", +"title": "samtools_sort", +"description": "Sort and (optionally) index alignments.\n\nReads are sorted by leftmost coordinates, or by read name when `--sort_by_read_names` is used.\n\nAn appropriate `@HD-SO` sort order header tag will be added or an existing one updated if necessary.\n\nNote that to generate an index file (by specifying `--output_bai`), the default coordinate sort must be used.\nThus the `--sort_by_read_names` and `--sort_by \u003cTAG\u003e` options are incompatible with `--output_bai`. \n", +"type": "object", +"definitions": { + + + + "arguments" : { + "title": "Arguments", "type": "object", - "definitions": { - "arguments" : { - "title": "Arguments", - "type": "object", - "description": "No description", - "properties": { - - "minimizer_cluster": { - "type": "boolean", - "description": "Type: `boolean_true`, default: `false`. 
Sort unmapped reads (those in chromosome \"*\") by their sequence minimiser (Schleimer et al", - "help_text": "Type: `boolean_true`, default: `false`. Sort unmapped reads (those in chromosome \"*\") by their sequence minimiser (Schleimer et al., 2003; Roberts et al., 2004), \nalso reverse complementing as appropriate. This has the effect of collating some similar data together, improving the \ncompressibility of the unmapped sequence. The minimiser kmer size is adjusted using the -K option. Note data compressed \nin this manner may need to be name collated prior to conversion back to fastq.\n\nMapped sequences are sorted by chromosome and position. \n", - "default": "False" - }, - - "minimizer_kmer": { - "type": "integer", - "description": "Type: `integer`, example: `20`. Sets the kmer size to be used in the -M option", - "help_text": "Type: `integer`, example: `20`. Sets the kmer size to be used in the -M option." - }, - - "sort_by_read_names": { - "type": "boolean", - "description": "Type: `boolean_true`, default: `false`. Sort by read names (i", - "help_text": "Type: `boolean_true`, default: `false`. Sort by read names (i.e., the QNAME field) rather than by chromosomal coordinates.", - "default": "False" - }, - - "sort_by": { - "type": "string", - "description": "Type: `string`. Sort first by this value in the alignment tag, then by position or name (if also using -n)", - "help_text": "Type: `string`. Sort first by this value in the alignment tag, then by position or name (if also using -n)." - }, - - "no_pg": { - "type": "boolean", - "description": "Type: `boolean_true`, default: `false`. Do not add a @PG line to the header of the output file", - "help_text": "Type: `boolean_true`, default: `false`. Do not add a @PG line to the header of the output file.", - "default": "False" - } - - } - }, - "input" : { - "title": "Input", - "type": "object", - "description": "No description", - "properties": { - - "input": { - "type": "string", - "description": "Type: `file`, required, example: `input.bam`. Path to the SAM/BAM/CRAM files containing the mapped reads", - "help_text": "Type: `file`, required, example: `input.bam`. Path to the SAM/BAM/CRAM files containing the mapped reads." - } - - } - }, - "output" : { - "title": "Output", - "type": "object", - "description": "No description", - "properties": { - - "output_bam": { - "type": "string", - "description": "Type: `file`, required, default: `$id.$key.output_bam.bam`, example: `output.bam`. Filename to output the counts to", - "help_text": "Type: `file`, required, default: `$id.$key.output_bam.bam`, example: `output.bam`. Filename to output the counts to.", - "default": "$id.$key.output_bam.bam" - }, - - "output_bai": { - "type": "string", - "description": "Type: `file`, default: `$id.$key.output_bai.bai`, example: `output.bam.bai`. BAI-format index for BAM file", - "help_text": "Type: `file`, default: `$id.$key.output_bai.bai`, example: `output.bam.bai`. BAI-format index for BAM file.", - "default": "$id.$key.output_bai.bai" - }, - - "output_format": { - "type": "string", - "description": "Type: `string`, example: `bam`, choices: ``sam`, `bam`, `cram``. The output format", - "help_text": "Type: `string`, example: `bam`, choices: ``sam`, `bam`, `cram``. The output format. 
By default, samtools tries to select a format based on the -o filename extension; if output is to standard output or no format can be deduced, bam is selected.", - "enum": ["sam", "bam", "cram"] + "description": "No description", + "properties": { + + + "minimizer_cluster": { + "type": + "boolean", + "description": "Type: `boolean_true`, default: `false`. Sort unmapped reads (those in chromosome \"*\") by their sequence minimiser (Schleimer et al", + "help_text": "Type: `boolean_true`, default: `false`. Sort unmapped reads (those in chromosome \"*\") by their sequence minimiser (Schleimer et al., 2003; Roberts et al., 2004), \nalso reverse complementing as appropriate. This has the effect of collating some similar data together, improving the \ncompressibility of the unmapped sequence. The minimiser kmer size is adjusted using the -K option. Note data compressed \nin this manner may need to be name collated prior to conversion back to fastq.\n\nMapped sequences are sorted by chromosome and position. \n" + , + "default": "False" + } + + + , + "minimizer_kmer": { + "type": + "integer", + "description": "Type: `integer`, example: `20`. Sets the kmer size to be used in the -M option", + "help_text": "Type: `integer`, example: `20`. Sets the kmer size to be used in the -M option." - }, - - "compression": { - "type": "integer", - "description": "Type: `integer`, example: `5`. Compression level, from 0 (uncompressed) to 9 (best", - "help_text": "Type: `integer`, example: `5`. Compression level, from 0 (uncompressed) to 9 (best" - } - - } - }, - "nextflow input-output arguments" : { - "title": "Nextflow input-output arguments", - "type": "object", - "description": "Input/output parameters for Nextflow itself. Please note that both publishDir and publish_dir are supported but at least one has to be configured.", - "properties": { - - "publish_dir": { - "type": "string", - "description": "Type: `string`, required, example: `output/`. Path to an output directory", - "help_text": "Type: `string`, required, example: `output/`. Path to an output directory." - }, - - "param_list": { - "type": "string", - "description": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel", - "help_text": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel. A `param_list` can either be a list of maps, a csv file, a json file, a yaml file, or simply a yaml blob.\n\n* A list of maps (as-is) where the keys of each map corresponds to the arguments of the pipeline. Example: in a `nextflow.config` file: `param_list: [ [\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027], [\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027] ]`.\n* A csv file should have column names which correspond to the different arguments of this pipeline. Example: `--param_list data.csv` with columns `id,input`.\n* A json or a yaml file should be a list of maps, each of which has keys corresponding to the arguments of the pipeline. Example: `--param_list data.json` with contents `[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]`.\n* A yaml blob can also be passed directly as a string. 
Example: `--param_list \"[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]\"`.\n\nWhen passing a csv, json or yaml file, relative path names are relativized to the location of the parameter file. No relativation is performed when `param_list` is a list of maps (as-is) or a yaml blob.", - "hidden": true - } - - } - } + } + + + , + "sort_by_read_names": { + "type": + "boolean", + "description": "Type: `boolean_true`, default: `false`. Sort by read names (i", + "help_text": "Type: `boolean_true`, default: `false`. Sort by read names (i.e., the QNAME field) rather than by chromosomal coordinates." + , + "default": "False" + } + + + , + "sort_by": { + "type": + "string", + "description": "Type: `string`. Sort first by this value in the alignment tag, then by position or name (if also using -n)", + "help_text": "Type: `string`. Sort first by this value in the alignment tag, then by position or name (if also using -n)." + + } + + + , + "no_pg": { + "type": + "boolean", + "description": "Type: `boolean_true`, default: `false`. Do not add a @PG line to the header of the output file", + "help_text": "Type: `boolean_true`, default: `false`. Do not add a @PG line to the header of the output file." + , + "default": "False" + } + + +} +}, + + + "input" : { + "title": "Input", + "type": "object", + "description": "No description", + "properties": { + + + "input": { + "type": + "string", + "description": "Type: `file`, required, example: `input.bam`. Path to the SAM/BAM/CRAM files containing the mapped reads", + "help_text": "Type: `file`, required, example: `input.bam`. Path to the SAM/BAM/CRAM files containing the mapped reads." + + } + + +} +}, + + + "output" : { + "title": "Output", + "type": "object", + "description": "No description", + "properties": { + + + "output_bam": { + "type": + "string", + "description": "Type: `file`, required, default: `$id.$key.output_bam.bam`, example: `output.bam`. Filename to output the counts to", + "help_text": "Type: `file`, required, default: `$id.$key.output_bam.bam`, example: `output.bam`. Filename to output the counts to." + , + "default": "$id.$key.output_bam.bam" + } + + + , + "output_bai": { + "type": + "string", + "description": "Type: `file`, default: `$id.$key.output_bai.bai`, example: `output.bam.bai`. BAI-format index for BAM file", + "help_text": "Type: `file`, default: `$id.$key.output_bai.bai`, example: `output.bam.bai`. BAI-format index for BAM file." + , + "default": "$id.$key.output_bai.bai" + } + + + , + "output_format": { + "type": + "string", + "description": "Type: `string`, example: `bam`, choices: ``sam`, `bam`, `cram``. The output format", + "help_text": "Type: `string`, example: `bam`, choices: ``sam`, `bam`, `cram``. The output format. By default, samtools tries to select a format based on the -o filename extension; if output is to standard output or no format can be deduced, bam is selected.", + "enum": ["sam", "bam", "cram"] + + + } + + + , + "compression": { + "type": + "integer", + "description": "Type: `integer`, example: `5`. Compression level, from 0 (uncompressed) to 9 (best", + "help_text": "Type: `integer`, example: `5`. Compression level, from 0 (uncompressed) to 9 (best" + + } + + +} +}, + + + "nextflow input-output arguments" : { + "title": "Nextflow input-output arguments", + "type": "object", + "description": "Input/output parameters for Nextflow itself. 
Please note that both publishDir and publish_dir are supported but at least one has to be configured.", + "properties": { + + + "publish_dir": { + "type": + "string", + "description": "Type: `string`, required, example: `output/`. Path to an output directory", + "help_text": "Type: `string`, required, example: `output/`. Path to an output directory." + + } + + + , + "param_list": { + "type": + "string", + "description": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel", + "help_text": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel. A `param_list` can either be a list of maps, a csv file, a json file, a yaml file, or simply a yaml blob.\n\n* A list of maps (as-is) where the keys of each map corresponds to the arguments of the pipeline. Example: in a `nextflow.config` file: `param_list: [ [\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027], [\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027] ]`.\n* A csv file should have column names which correspond to the different arguments of this pipeline. Example: `--param_list data.csv` with columns `id,input`.\n* A json or a yaml file should be a list of maps, each of which has keys corresponding to the arguments of the pipeline. Example: `--param_list data.json` with contents `[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]`.\n* A yaml blob can also be passed directly as a string. Example: `--param_list \"[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]\"`.\n\nWhen passing a csv, json or yaml file, relative path names are relativized to the location of the parameter file. 
No relativation is performed when `param_list` is a list of maps (as-is) or a yaml blob.", + "hidden": true + + } + + +} +} +}, +"allOf": [ + + { + "$ref": "#/definitions/arguments" + }, + + { + "$ref": "#/definitions/input" + }, + + { + "$ref": "#/definitions/output" }, - "allOf": [ - { - "$ref": "#/definitions/arguments" - }, - { - "$ref": "#/definitions/input" - }, - { - "$ref": "#/definitions/output" - }, - { - "$ref": "#/definitions/nextflow input-output arguments" - } - ] + + { + "$ref": "#/definitions/nextflow input-output arguments" + } +] } diff --git a/target/nextflow/mapping/star_align/.config.vsh.yaml b/target/nextflow/mapping/star_align/.config.vsh.yaml index 5b9a13670cf..16d6a77a3a0 100644 --- a/target/nextflow/mapping/star_align/.config.vsh.yaml +++ b/target/nextflow/mapping/star_align/.config.vsh.yaml @@ -1,7 +1,7 @@ functionality: name: "star_align" namespace: "mapping" - version: "0.12.3" + version: "0.12.4" authors: - name: "Angela Oliveira Pisco" roles: @@ -2530,6 +2530,6 @@ info: output: "/home/runner/work/openpipeline/openpipeline/target/nextflow/mapping/star_align" executable: "/home/runner/work/openpipeline/openpipeline/target/nextflow/mapping/star_align/star_align" viash_version: "0.7.5" - git_commit: "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" + git_commit: "a075b9f384e200b357c4c85801062a980ddb3383" git_remote: "https://github.com/openpipelines-bio/openpipeline" - git_tag: "0.12.2-3-g827d483cf7" + git_tag: "0.12.3-3-ga075b9f384" diff --git a/target/nextflow/mapping/star_align/main.nf b/target/nextflow/mapping/star_align/main.nf index a01860e8fa0..055bb223f25 100644 --- a/target/nextflow/mapping/star_align/main.nf +++ b/target/nextflow/mapping/star_align/main.nf @@ -1,4 +1,4 @@ -// star_align 0.12.3 +// star_align 0.12.4 // // This wrapper script is auto-generated by viash 0.7.5 and is thus a derivative // work thereof. This software comes with ABSOLUTELY NO WARRANTY from Data @@ -28,7 +28,7 @@ thisConfig = processConfig(jsonSlurper.parseText('''{ "functionality" : { "name" : "star_align", "namespace" : "mapping", - "version" : "0.12.3", + "version" : "0.12.4", "authors" : [ { "name" : "Angela Oliveira Pisco", @@ -2644,9 +2644,9 @@ thisConfig = processConfig(jsonSlurper.parseText('''{ "platform" : "nextflow", "output" : "/home/runner/work/openpipeline/openpipeline/target/nextflow/mapping/star_align", "viash_version" : "0.7.5", - "git_commit" : "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f", + "git_commit" : "a075b9f384e200b357c4c85801062a980ddb3383", "git_remote" : "https://github.com/openpipelines-bio/openpipeline", - "git_tag" : "0.12.2-3-g827d483cf7" + "git_tag" : "0.12.3-3-ga075b9f384" } }''')) diff --git a/target/nextflow/mapping/star_align/nextflow.config b/target/nextflow/mapping/star_align/nextflow.config index 3709eda85c7..4d7fd9f8386 100644 --- a/target/nextflow/mapping/star_align/nextflow.config +++ b/target/nextflow/mapping/star_align/nextflow.config @@ -2,7 +2,7 @@ manifest { name = 'star_align' mainScript = 'main.nf' nextflowVersion = '!>=20.12.1-edge' - version = '0.12.3' + version = '0.12.4' description = 'Align fastq files using STAR.' 
author = 'Angela Oliveira Pisco, Robrecht Cannoodt' } diff --git a/target/nextflow/mapping/star_align/nextflow_schema.json b/target/nextflow/mapping/star_align/nextflow_schema.json index 161b59159b8..5dba8c5ef4b 100644 --- a/target/nextflow/mapping/star_align/nextflow_schema.json +++ b/target/nextflow/mapping/star_align/nextflow_schema.json @@ -1,64 +1,91 @@ { - "$schema": "http://json-schema.org/draft-07/schema", - "title": "star_align", - "description": "Align fastq files using STAR.", +"$schema": "http://json-schema.org/draft-07/schema", +"title": "star_align", +"description": "Align fastq files using STAR.", +"type": "object", +"definitions": { + + + + "input/output" : { + "title": "Input/Output", "type": "object", - "definitions": { - "input/output" : { - "title": "Input/Output", - "type": "object", - "description": "No description", - "properties": { - - "input": { - "type": "string", - "description": "Type: List of `file`, required, example: `mysample_S1_L001_R1_001.fastq.gz;mysample_S1_L001_R2_001.fastq.gz`, multiple_sep: `\";\"`. The FASTQ files to be analyzed", - "help_text": "Type: List of `file`, required, example: `mysample_S1_L001_R1_001.fastq.gz;mysample_S1_L001_R2_001.fastq.gz`, multiple_sep: `\";\"`. The FASTQ files to be analyzed. Corresponds to the --readFilesIn argument in the STAR command." - }, - - "reference": { - "type": "string", - "description": "Type: `file`, required, example: `/path/to/reference`. Path to the reference built by star_build_reference", - "help_text": "Type: `file`, required, example: `/path/to/reference`. Path to the reference built by star_build_reference. Corresponds to the --genomeDir argument in the STAR command." - }, - - "output": { - "type": "string", - "description": "Type: `file`, required, default: `$id.$key.output.output`, example: `/path/to/foo`. Path to output directory", - "help_text": "Type: `file`, required, default: `$id.$key.output.output`, example: `/path/to/foo`. Path to output directory. Corresponds to the --outFileNamePrefix argument in the STAR command.", - "default": "$id.$key.output.output" - } - - } - }, - "nextflow input-output arguments" : { - "title": "Nextflow input-output arguments", - "type": "object", - "description": "Input/output parameters for Nextflow itself. Please note that both publishDir and publish_dir are supported but at least one has to be configured.", - "properties": { - - "publish_dir": { - "type": "string", - "description": "Type: `string`, required, example: `output/`. Path to an output directory", - "help_text": "Type: `string`, required, example: `output/`. Path to an output directory." - }, - - "param_list": { - "type": "string", - "description": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel", - "help_text": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel. A `param_list` can either be a list of maps, a csv file, a json file, a yaml file, or simply a yaml blob.\n\n* A list of maps (as-is) where the keys of each map corresponds to the arguments of the pipeline. Example: in a `nextflow.config` file: `param_list: [ [\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027], [\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027] ]`.\n* A csv file should have column names which correspond to the different arguments of this pipeline. 
Example: `--param_list data.csv` with columns `id,input`.\n* A json or a yaml file should be a list of maps, each of which has keys corresponding to the arguments of the pipeline. Example: `--param_list data.json` with contents `[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]`.\n* A yaml blob can also be passed directly as a string. Example: `--param_list \"[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]\"`.\n\nWhen passing a csv, json or yaml file, relative path names are relativized to the location of the parameter file. No relativation is performed when `param_list` is a list of maps (as-is) or a yaml blob.", - "hidden": true - } - - } - } + "description": "No description", + "properties": { + + + "input": { + "type": + "string", + "description": "Type: List of `file`, required, example: `mysample_S1_L001_R1_001.fastq.gz;mysample_S1_L001_R2_001.fastq.gz`, multiple_sep: `\";\"`. The FASTQ files to be analyzed", + "help_text": "Type: List of `file`, required, example: `mysample_S1_L001_R1_001.fastq.gz;mysample_S1_L001_R2_001.fastq.gz`, multiple_sep: `\";\"`. The FASTQ files to be analyzed. Corresponds to the --readFilesIn argument in the STAR command." + + } + + + , + "reference": { + "type": + "string", + "description": "Type: `file`, required, example: `/path/to/reference`. Path to the reference built by star_build_reference", + "help_text": "Type: `file`, required, example: `/path/to/reference`. Path to the reference built by star_build_reference. Corresponds to the --genomeDir argument in the STAR command." + + } + + + , + "output": { + "type": + "string", + "description": "Type: `file`, required, default: `$id.$key.output.output`, example: `/path/to/foo`. Path to output directory", + "help_text": "Type: `file`, required, default: `$id.$key.output.output`, example: `/path/to/foo`. Path to output directory. Corresponds to the --outFileNamePrefix argument in the STAR command." + , + "default": "$id.$key.output.output" + } + + +} +}, + + + "nextflow input-output arguments" : { + "title": "Nextflow input-output arguments", + "type": "object", + "description": "Input/output parameters for Nextflow itself. Please note that both publishDir and publish_dir are supported but at least one has to be configured.", + "properties": { + + + "publish_dir": { + "type": + "string", + "description": "Type: `string`, required, example: `output/`. Path to an output directory", + "help_text": "Type: `string`, required, example: `output/`. Path to an output directory." + + } + + + , + "param_list": { + "type": + "string", + "description": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel", + "help_text": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel. A `param_list` can either be a list of maps, a csv file, a json file, a yaml file, or simply a yaml blob.\n\n* A list of maps (as-is) where the keys of each map corresponds to the arguments of the pipeline. Example: in a `nextflow.config` file: `param_list: [ [\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027], [\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027] ]`.\n* A csv file should have column names which correspond to the different arguments of this pipeline. 
Example: `--param_list data.csv` with columns `id,input`.\n* A json or a yaml file should be a list of maps, each of which has keys corresponding to the arguments of the pipeline. Example: `--param_list data.json` with contents `[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]`.\n* A yaml blob can also be passed directly as a string. Example: `--param_list \"[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]\"`.\n\nWhen passing a csv, json or yaml file, relative path names are relativized to the location of the parameter file. No relativation is performed when `param_list` is a list of maps (as-is) or a yaml blob.", + "hidden": true + + } + + +} +} +}, +"allOf": [ + + { + "$ref": "#/definitions/input/output" }, - "allOf": [ - { - "$ref": "#/definitions/input/output" - }, - { - "$ref": "#/definitions/nextflow input-output arguments" - } - ] + + { + "$ref": "#/definitions/nextflow input-output arguments" + } +] } diff --git a/target/nextflow/mapping/star_align_v273a/.config.vsh.yaml b/target/nextflow/mapping/star_align_v273a/.config.vsh.yaml index dcb92f67bbf..f115181214f 100644 --- a/target/nextflow/mapping/star_align_v273a/.config.vsh.yaml +++ b/target/nextflow/mapping/star_align_v273a/.config.vsh.yaml @@ -1,7 +1,7 @@ functionality: name: "star_align_v273a" namespace: "mapping" - version: "0.12.3" + version: "0.12.4" authors: - name: "Angela Oliveira Pisco" roles: @@ -2530,6 +2530,6 @@ info: output: "/home/runner/work/openpipeline/openpipeline/target/nextflow/mapping/star_align_v273a" executable: "/home/runner/work/openpipeline/openpipeline/target/nextflow/mapping/star_align_v273a/star_align_v273a" viash_version: "0.7.5" - git_commit: "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" + git_commit: "a075b9f384e200b357c4c85801062a980ddb3383" git_remote: "https://github.com/openpipelines-bio/openpipeline" - git_tag: "0.12.2-3-g827d483cf7" + git_tag: "0.12.3-3-ga075b9f384" diff --git a/target/nextflow/mapping/star_align_v273a/main.nf b/target/nextflow/mapping/star_align_v273a/main.nf index 03682104bb8..852b3df7a16 100644 --- a/target/nextflow/mapping/star_align_v273a/main.nf +++ b/target/nextflow/mapping/star_align_v273a/main.nf @@ -1,4 +1,4 @@ -// star_align_v273a 0.12.3 +// star_align_v273a 0.12.4 // // This wrapper script is auto-generated by viash 0.7.5 and is thus a derivative // work thereof. 
This software comes with ABSOLUTELY NO WARRANTY from Data @@ -28,7 +28,7 @@ thisConfig = processConfig(jsonSlurper.parseText('''{ "functionality" : { "name" : "star_align_v273a", "namespace" : "mapping", - "version" : "0.12.3", + "version" : "0.12.4", "authors" : [ { "name" : "Angela Oliveira Pisco", @@ -2644,9 +2644,9 @@ thisConfig = processConfig(jsonSlurper.parseText('''{ "platform" : "nextflow", "output" : "/home/runner/work/openpipeline/openpipeline/target/nextflow/mapping/star_align_v273a", "viash_version" : "0.7.5", - "git_commit" : "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f", + "git_commit" : "a075b9f384e200b357c4c85801062a980ddb3383", "git_remote" : "https://github.com/openpipelines-bio/openpipeline", - "git_tag" : "0.12.2-3-g827d483cf7" + "git_tag" : "0.12.3-3-ga075b9f384" } }''')) diff --git a/target/nextflow/mapping/star_align_v273a/nextflow.config b/target/nextflow/mapping/star_align_v273a/nextflow.config index 12713a49708..bc4302335ff 100644 --- a/target/nextflow/mapping/star_align_v273a/nextflow.config +++ b/target/nextflow/mapping/star_align_v273a/nextflow.config @@ -2,7 +2,7 @@ manifest { name = 'star_align_v273a' mainScript = 'main.nf' nextflowVersion = '!>=20.12.1-edge' - version = '0.12.3' + version = '0.12.4' description = 'Align fastq files using STAR.' author = 'Angela Oliveira Pisco, Robrecht Cannoodt' } diff --git a/target/nextflow/mapping/star_align_v273a/nextflow_schema.json b/target/nextflow/mapping/star_align_v273a/nextflow_schema.json index 93a4b284c3b..1997c0616a5 100644 --- a/target/nextflow/mapping/star_align_v273a/nextflow_schema.json +++ b/target/nextflow/mapping/star_align_v273a/nextflow_schema.json @@ -1,64 +1,91 @@ { - "$schema": "http://json-schema.org/draft-07/schema", - "title": "star_align_v273a", - "description": "Align fastq files using STAR.", +"$schema": "http://json-schema.org/draft-07/schema", +"title": "star_align_v273a", +"description": "Align fastq files using STAR.", +"type": "object", +"definitions": { + + + + "input/output" : { + "title": "Input/Output", "type": "object", - "definitions": { - "input/output" : { - "title": "Input/Output", - "type": "object", - "description": "No description", - "properties": { - - "input": { - "type": "string", - "description": "Type: List of `file`, required, example: `mysample_S1_L001_R1_001.fastq.gz;mysample_S1_L001_R2_001.fastq.gz`, multiple_sep: `\";\"`. The FASTQ files to be analyzed", - "help_text": "Type: List of `file`, required, example: `mysample_S1_L001_R1_001.fastq.gz;mysample_S1_L001_R2_001.fastq.gz`, multiple_sep: `\";\"`. The FASTQ files to be analyzed. Corresponds to the --readFilesIn in the STAR command." - }, - - "reference": { - "type": "string", - "description": "Type: `file`, required, example: `/path/to/reference`. Path to the reference built by star_build_reference", - "help_text": "Type: `file`, required, example: `/path/to/reference`. Path to the reference built by star_build_reference. Corresponds to the --genomeDir in the STAR command." - }, - - "output": { - "type": "string", - "description": "Type: `file`, required, default: `$id.$key.output.output`, example: `/path/to/foo`. Path to output directory", - "help_text": "Type: `file`, required, default: `$id.$key.output.output`, example: `/path/to/foo`. Path to output directory. 
Corresponds to the --outFileNamePrefix in the STAR command.", - "default": "$id.$key.output.output" - } - - } - }, - "nextflow input-output arguments" : { - "title": "Nextflow input-output arguments", - "type": "object", - "description": "Input/output parameters for Nextflow itself. Please note that both publishDir and publish_dir are supported but at least one has to be configured.", - "properties": { - - "publish_dir": { - "type": "string", - "description": "Type: `string`, required, example: `output/`. Path to an output directory", - "help_text": "Type: `string`, required, example: `output/`. Path to an output directory." - }, - - "param_list": { - "type": "string", - "description": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel", - "help_text": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel. A `param_list` can either be a list of maps, a csv file, a json file, a yaml file, or simply a yaml blob.\n\n* A list of maps (as-is) where the keys of each map corresponds to the arguments of the pipeline. Example: in a `nextflow.config` file: `param_list: [ [\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027], [\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027] ]`.\n* A csv file should have column names which correspond to the different arguments of this pipeline. Example: `--param_list data.csv` with columns `id,input`.\n* A json or a yaml file should be a list of maps, each of which has keys corresponding to the arguments of the pipeline. Example: `--param_list data.json` with contents `[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]`.\n* A yaml blob can also be passed directly as a string. Example: `--param_list \"[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]\"`.\n\nWhen passing a csv, json or yaml file, relative path names are relativized to the location of the parameter file. No relativation is performed when `param_list` is a list of maps (as-is) or a yaml blob.", - "hidden": true - } - - } - } + "description": "No description", + "properties": { + + + "input": { + "type": + "string", + "description": "Type: List of `file`, required, example: `mysample_S1_L001_R1_001.fastq.gz;mysample_S1_L001_R2_001.fastq.gz`, multiple_sep: `\";\"`. The FASTQ files to be analyzed", + "help_text": "Type: List of `file`, required, example: `mysample_S1_L001_R1_001.fastq.gz;mysample_S1_L001_R2_001.fastq.gz`, multiple_sep: `\";\"`. The FASTQ files to be analyzed. Corresponds to the --readFilesIn in the STAR command." + + } + + + , + "reference": { + "type": + "string", + "description": "Type: `file`, required, example: `/path/to/reference`. Path to the reference built by star_build_reference", + "help_text": "Type: `file`, required, example: `/path/to/reference`. Path to the reference built by star_build_reference. Corresponds to the --genomeDir in the STAR command." + + } + + + , + "output": { + "type": + "string", + "description": "Type: `file`, required, default: `$id.$key.output.output`, example: `/path/to/foo`. Path to output directory", + "help_text": "Type: `file`, required, default: `$id.$key.output.output`, example: `/path/to/foo`. Path to output directory. 
Corresponds to the --outFileNamePrefix in the STAR command." + , + "default": "$id.$key.output.output" + } + + +} +}, + + + "nextflow input-output arguments" : { + "title": "Nextflow input-output arguments", + "type": "object", + "description": "Input/output parameters for Nextflow itself. Please note that both publishDir and publish_dir are supported but at least one has to be configured.", + "properties": { + + + "publish_dir": { + "type": + "string", + "description": "Type: `string`, required, example: `output/`. Path to an output directory", + "help_text": "Type: `string`, required, example: `output/`. Path to an output directory." + + } + + + , + "param_list": { + "type": + "string", + "description": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel", + "help_text": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel. A `param_list` can either be a list of maps, a csv file, a json file, a yaml file, or simply a yaml blob.\n\n* A list of maps (as-is) where the keys of each map corresponds to the arguments of the pipeline. Example: in a `nextflow.config` file: `param_list: [ [\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027], [\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027] ]`.\n* A csv file should have column names which correspond to the different arguments of this pipeline. Example: `--param_list data.csv` with columns `id,input`.\n* A json or a yaml file should be a list of maps, each of which has keys corresponding to the arguments of the pipeline. Example: `--param_list data.json` with contents `[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]`.\n* A yaml blob can also be passed directly as a string. Example: `--param_list \"[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]\"`.\n\nWhen passing a csv, json or yaml file, relative path names are relativized to the location of the parameter file. 
No relativation is performed when `param_list` is a list of maps (as-is) or a yaml blob.", + "hidden": true + + } + + +} +} +}, +"allOf": [ + + { + "$ref": "#/definitions/input/output" }, - "allOf": [ - { - "$ref": "#/definitions/input/output" - }, - { - "$ref": "#/definitions/nextflow input-output arguments" - } - ] + + { + "$ref": "#/definitions/nextflow input-output arguments" + } +] } diff --git a/target/nextflow/mapping/star_build_reference/.config.vsh.yaml b/target/nextflow/mapping/star_build_reference/.config.vsh.yaml index 39f24970175..7b3b6c9ad69 100644 --- a/target/nextflow/mapping/star_build_reference/.config.vsh.yaml +++ b/target/nextflow/mapping/star_build_reference/.config.vsh.yaml @@ -1,7 +1,7 @@ functionality: name: "star_build_reference" namespace: "mapping" - version: "0.12.3" + version: "0.12.4" authors: - name: "Dries Schaumont" roles: @@ -185,6 +185,6 @@ info: output: "/home/runner/work/openpipeline/openpipeline/target/nextflow/mapping/star_build_reference" executable: "/home/runner/work/openpipeline/openpipeline/target/nextflow/mapping/star_build_reference/star_build_reference" viash_version: "0.7.5" - git_commit: "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" + git_commit: "a075b9f384e200b357c4c85801062a980ddb3383" git_remote: "https://github.com/openpipelines-bio/openpipeline" - git_tag: "0.12.2-3-g827d483cf7" + git_tag: "0.12.3-3-ga075b9f384" diff --git a/target/nextflow/mapping/star_build_reference/main.nf b/target/nextflow/mapping/star_build_reference/main.nf index 9d59580527a..4c8ac615877 100644 --- a/target/nextflow/mapping/star_build_reference/main.nf +++ b/target/nextflow/mapping/star_build_reference/main.nf @@ -1,4 +1,4 @@ -// star_build_reference 0.12.3 +// star_build_reference 0.12.4 // // This wrapper script is auto-generated by viash 0.7.5 and is thus a derivative // work thereof. This software comes with ABSOLUTELY NO WARRANTY from Data @@ -27,7 +27,7 @@ thisConfig = processConfig(jsonSlurper.parseText('''{ "functionality" : { "name" : "star_build_reference", "namespace" : "mapping", - "version" : "0.12.3", + "version" : "0.12.4", "authors" : [ { "name" : "Dries Schaumont", @@ -254,9 +254,9 @@ thisConfig = processConfig(jsonSlurper.parseText('''{ "platform" : "nextflow", "output" : "/home/runner/work/openpipeline/openpipeline/target/nextflow/mapping/star_build_reference", "viash_version" : "0.7.5", - "git_commit" : "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f", + "git_commit" : "a075b9f384e200b357c4c85801062a980ddb3383", "git_remote" : "https://github.com/openpipelines-bio/openpipeline", - "git_tag" : "0.12.2-3-g827d483cf7" + "git_tag" : "0.12.3-3-ga075b9f384" } }''')) diff --git a/target/nextflow/mapping/star_build_reference/nextflow.config b/target/nextflow/mapping/star_build_reference/nextflow.config index 54bf431f934..fda6dff482d 100644 --- a/target/nextflow/mapping/star_build_reference/nextflow.config +++ b/target/nextflow/mapping/star_build_reference/nextflow.config @@ -2,7 +2,7 @@ manifest { name = 'star_build_reference' mainScript = 'main.nf' nextflowVersion = '!>=20.12.1-edge' - version = '0.12.3' + version = '0.12.4' description = 'Create a reference for STAR from a set of fasta files.' 
author = 'Dries Schaumont' } diff --git a/target/nextflow/mapping/star_build_reference/nextflow_schema.json b/target/nextflow/mapping/star_build_reference/nextflow_schema.json index 221ec30f588..1c7cf142af1 100644 --- a/target/nextflow/mapping/star_build_reference/nextflow_schema.json +++ b/target/nextflow/mapping/star_build_reference/nextflow_schema.json @@ -1,82 +1,116 @@ { - "$schema": "http://json-schema.org/draft-07/schema", - "title": "star_build_reference", - "description": "Create a reference for STAR from a set of fasta files.", +"$schema": "http://json-schema.org/draft-07/schema", +"title": "star_build_reference", +"description": "Create a reference for STAR from a set of fasta files.", +"type": "object", +"definitions": { + + + + "input/output" : { + "title": "Input/Output", "type": "object", - "definitions": { - "input/output" : { - "title": "Input/Output", - "type": "object", - "description": "No description", - "properties": { - - "genome_fasta": { - "type": "string", - "description": "Type: List of `file`, required, example: `chr1.fasta chr2.fasta`, multiple_sep: `\" \"`. The fasta files to be included in the reference", - "help_text": "Type: List of `file`, required, example: `chr1.fasta chr2.fasta`, multiple_sep: `\" \"`. The fasta files to be included in the reference. Corresponds to the --genomeFastaFiles argument in the STAR command." - }, - - "transcriptome_gtf": { - "type": "string", - "description": "Type: `file`. Specifies the path to the file with annotated transcripts in the standard GTF\nformat", - "help_text": "Type: `file`. Specifies the path to the file with annotated transcripts in the standard GTF\nformat. STAR will extract splice junctions from this file and use them to greatly improve\naccuracy of the mapping. Corresponds to the --sjdbGTFfile argument in the STAR command.\n" - }, - - "output": { - "type": "string", - "description": "Type: `file`, required, default: `$id.$key.output.output`, example: `/path/to/foo`. Path to output directory", - "help_text": "Type: `file`, required, default: `$id.$key.output.output`, example: `/path/to/foo`. Path to output directory. Corresponds to the --genomeDir argument in the STAR command.", - "default": "$id.$key.output.output" - } - - } - }, - "genome indexing arguments" : { - "title": "Genome indexing arguments", - "type": "object", - "description": "No description", - "properties": { - - "genomeSAindexNbases": { - "type": "integer", - "description": "Type: `integer`, default: `14`. Length (bases) of the SA pre-indexing string", - "help_text": "Type: `integer`, default: `14`. Length (bases) of the SA pre-indexing string. Typically between 10 and 15.\nLonger strings will use much more memory, but allow faster searches. For small\ngenomes, the parameter {genomeSAindexNbases must be scaled down to\nmin(14, log2(GenomeLength)/2 - 1).\n", - "default": "14" - } - - } - }, - "nextflow input-output arguments" : { - "title": "Nextflow input-output arguments", - "type": "object", - "description": "Input/output parameters for Nextflow itself. Please note that both publishDir and publish_dir are supported but at least one has to be configured.", - "properties": { - - "publish_dir": { - "type": "string", - "description": "Type: `string`, required, example: `output/`. Path to an output directory", - "help_text": "Type: `string`, required, example: `output/`. Path to an output directory." - }, - - "param_list": { - "type": "string", - "description": "Type: `string`, example: `my_params.yaml`. 
Allows inputting multiple parameter sets to initialise a Nextflow channel", - "help_text": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel. A `param_list` can either be a list of maps, a csv file, a json file, a yaml file, or simply a yaml blob.\n\n* A list of maps (as-is) where the keys of each map corresponds to the arguments of the pipeline. Example: in a `nextflow.config` file: `param_list: [ [\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027], [\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027] ]`.\n* A csv file should have column names which correspond to the different arguments of this pipeline. Example: `--param_list data.csv` with columns `id,input`.\n* A json or a yaml file should be a list of maps, each of which has keys corresponding to the arguments of the pipeline. Example: `--param_list data.json` with contents `[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]`.\n* A yaml blob can also be passed directly as a string. Example: `--param_list \"[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]\"`.\n\nWhen passing a csv, json or yaml file, relative path names are relativized to the location of the parameter file. No relativation is performed when `param_list` is a list of maps (as-is) or a yaml blob.", - "hidden": true - } - - } - } + "description": "No description", + "properties": { + + + "genome_fasta": { + "type": + "string", + "description": "Type: List of `file`, required, example: `chr1.fasta chr2.fasta`, multiple_sep: `\" \"`. The fasta files to be included in the reference", + "help_text": "Type: List of `file`, required, example: `chr1.fasta chr2.fasta`, multiple_sep: `\" \"`. The fasta files to be included in the reference. Corresponds to the --genomeFastaFiles argument in the STAR command." + + } + + + , + "transcriptome_gtf": { + "type": + "string", + "description": "Type: `file`. Specifies the path to the file with annotated transcripts in the standard GTF\nformat", + "help_text": "Type: `file`. Specifies the path to the file with annotated transcripts in the standard GTF\nformat. STAR will extract splice junctions from this file and use them to greatly improve\naccuracy of the mapping. Corresponds to the --sjdbGTFfile argument in the STAR command.\n" + + } + + + , + "output": { + "type": + "string", + "description": "Type: `file`, required, default: `$id.$key.output.output`, example: `/path/to/foo`. Path to output directory", + "help_text": "Type: `file`, required, default: `$id.$key.output.output`, example: `/path/to/foo`. Path to output directory. Corresponds to the --genomeDir argument in the STAR command." + , + "default": "$id.$key.output.output" + } + + +} +}, + + + "genome indexing arguments" : { + "title": "Genome indexing arguments", + "type": "object", + "description": "No description", + "properties": { + + + "genomeSAindexNbases": { + "type": + "integer", + "description": "Type: `integer`, default: `14`. Length (bases) of the SA pre-indexing string", + "help_text": "Type: `integer`, default: `14`. Length (bases) of the SA pre-indexing string. Typically between 10 and 15.\nLonger strings will use much more memory, but allow faster searches. 
For small\ngenomes, the parameter {genomeSAindexNbases must be scaled down to\nmin(14, log2(GenomeLength)/2 - 1).\n" + , + "default": "14" + } + + +} +}, + + + "nextflow input-output arguments" : { + "title": "Nextflow input-output arguments", + "type": "object", + "description": "Input/output parameters for Nextflow itself. Please note that both publishDir and publish_dir are supported but at least one has to be configured.", + "properties": { + + + "publish_dir": { + "type": + "string", + "description": "Type: `string`, required, example: `output/`. Path to an output directory", + "help_text": "Type: `string`, required, example: `output/`. Path to an output directory." + + } + + + , + "param_list": { + "type": + "string", + "description": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel", + "help_text": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel. A `param_list` can either be a list of maps, a csv file, a json file, a yaml file, or simply a yaml blob.\n\n* A list of maps (as-is) where the keys of each map corresponds to the arguments of the pipeline. Example: in a `nextflow.config` file: `param_list: [ [\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027], [\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027] ]`.\n* A csv file should have column names which correspond to the different arguments of this pipeline. Example: `--param_list data.csv` with columns `id,input`.\n* A json or a yaml file should be a list of maps, each of which has keys corresponding to the arguments of the pipeline. Example: `--param_list data.json` with contents `[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]`.\n* A yaml blob can also be passed directly as a string. Example: `--param_list \"[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]\"`.\n\nWhen passing a csv, json or yaml file, relative path names are relativized to the location of the parameter file. 
No relativation is performed when `param_list` is a list of maps (as-is) or a yaml blob.", + "hidden": true + + } + + +} +} +}, +"allOf": [ + + { + "$ref": "#/definitions/input/output" + }, + + { + "$ref": "#/definitions/genome indexing arguments" }, - "allOf": [ - { - "$ref": "#/definitions/input/output" - }, - { - "$ref": "#/definitions/genome indexing arguments" - }, - { - "$ref": "#/definitions/nextflow input-output arguments" - } - ] + + { + "$ref": "#/definitions/nextflow input-output arguments" + } +] } diff --git a/target/nextflow/metadata/add_id/.config.vsh.yaml b/target/nextflow/metadata/add_id/.config.vsh.yaml index be9a9a8efa7..1626d853a61 100644 --- a/target/nextflow/metadata/add_id/.config.vsh.yaml +++ b/target/nextflow/metadata/add_id/.config.vsh.yaml @@ -1,7 +1,7 @@ functionality: name: "add_id" namespace: "metadata" - version: "0.12.3" + version: "0.12.4" authors: - name: "Dries Schaumont" roles: @@ -192,6 +192,6 @@ info: output: "/home/runner/work/openpipeline/openpipeline/target/nextflow/metadata/add_id" executable: "/home/runner/work/openpipeline/openpipeline/target/nextflow/metadata/add_id/add_id" viash_version: "0.7.5" - git_commit: "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" + git_commit: "a075b9f384e200b357c4c85801062a980ddb3383" git_remote: "https://github.com/openpipelines-bio/openpipeline" - git_tag: "0.12.2-3-g827d483cf7" + git_tag: "0.12.3-3-ga075b9f384" diff --git a/target/nextflow/metadata/add_id/main.nf b/target/nextflow/metadata/add_id/main.nf index b298c27a847..f84c84cede8 100644 --- a/target/nextflow/metadata/add_id/main.nf +++ b/target/nextflow/metadata/add_id/main.nf @@ -1,4 +1,4 @@ -// add_id 0.12.3 +// add_id 0.12.4 // // This wrapper script is auto-generated by viash 0.7.5 and is thus a derivative // work thereof. This software comes with ABSOLUTELY NO WARRANTY from Data @@ -27,7 +27,7 @@ thisConfig = processConfig(jsonSlurper.parseText('''{ "functionality" : { "name" : "add_id", "namespace" : "metadata", - "version" : "0.12.3", + "version" : "0.12.4", "authors" : [ { "name" : "Dries Schaumont", @@ -276,9 +276,9 @@ thisConfig = processConfig(jsonSlurper.parseText('''{ "platform" : "nextflow", "output" : "/home/runner/work/openpipeline/openpipeline/target/nextflow/metadata/add_id", "viash_version" : "0.7.5", - "git_commit" : "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f", + "git_commit" : "a075b9f384e200b357c4c85801062a980ddb3383", "git_remote" : "https://github.com/openpipelines-bio/openpipeline", - "git_tag" : "0.12.2-3-g827d483cf7" + "git_tag" : "0.12.3-3-ga075b9f384" } }''')) diff --git a/target/nextflow/metadata/add_id/nextflow.config b/target/nextflow/metadata/add_id/nextflow.config index 88b79808515..ea37a6fa8b8 100644 --- a/target/nextflow/metadata/add_id/nextflow.config +++ b/target/nextflow/metadata/add_id/nextflow.config @@ -2,7 +2,7 @@ manifest { name = 'add_id' mainScript = 'main.nf' nextflowVersion = '!>=20.12.1-edge' - version = '0.12.3' + version = '0.12.4' description = 'Add id of .obs. Also allows to make .obs_names (the .obs index) unique \nby prefixing the values with an unique id per .h5mu file.\n' author = 'Dries Schaumont' } diff --git a/target/nextflow/metadata/add_id/nextflow_schema.json b/target/nextflow/metadata/add_id/nextflow_schema.json index e95aa967ada..29df98bbad8 100644 --- a/target/nextflow/metadata/add_id/nextflow_schema.json +++ b/target/nextflow/metadata/add_id/nextflow_schema.json @@ -1,86 +1,125 @@ { - "$schema": "http://json-schema.org/draft-07/schema", - "title": "add_id", - "description": "Add id of .obs. 
Also allows to make .obs_names (the .obs index) unique \nby prefixing the values with an unique id per .h5mu file.\n", +"$schema": "http://json-schema.org/draft-07/schema", +"title": "add_id", +"description": "Add id of .obs. Also allows to make .obs_names (the .obs index) unique \nby prefixing the values with an unique id per .h5mu file.\n", +"type": "object", +"definitions": { + + + + "arguments" : { + "title": "Arguments", "type": "object", - "definitions": { - "arguments" : { - "title": "Arguments", - "type": "object", - "description": "No description", - "properties": { - - "input": { - "type": "string", - "description": "Type: `file`, required, example: `sample_path`. Path to the input ", - "help_text": "Type: `file`, required, example: `sample_path`. Path to the input .h5mu." - }, - - "input_id": { - "type": "string", - "description": "Type: `string`, required. The input id", - "help_text": "Type: `string`, required. The input id." - }, - - "obs_output": { - "type": "string", - "description": "Type: `string`, default: `sample_id`. Name of the ", - "help_text": "Type: `string`, default: `sample_id`. Name of the .obs column where to store the id.", - "default": "sample_id" - }, - - "output": { - "type": "string", - "description": "Type: `file`, default: `$id.$key.output.h5mu`, example: `output.h5mu`. ", - "help_text": "Type: `file`, default: `$id.$key.output.h5mu`, example: `output.h5mu`. ", - "default": "$id.$key.output.h5mu" - }, - - "output_compression": { - "type": "string", - "description": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. The compression format to be used on the output h5mu object", - "help_text": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. The compression format to be used on the output h5mu object.", - "enum": ["gzip", "lzf"] + "description": "No description", + "properties": { + + + "input": { + "type": + "string", + "description": "Type: `file`, required, example: `sample_path`. Path to the input ", + "help_text": "Type: `file`, required, example: `sample_path`. Path to the input .h5mu." - }, - - "make_observation_keys_unique": { - "type": "boolean", - "description": "Type: `boolean_true`, default: `false`. Join the id to the ", - "help_text": "Type: `boolean_true`, default: `false`. Join the id to the .obs index (.obs_names).", - "default": "False" - } - - } - }, - "nextflow input-output arguments" : { - "title": "Nextflow input-output arguments", - "type": "object", - "description": "Input/output parameters for Nextflow itself. Please note that both publishDir and publish_dir are supported but at least one has to be configured.", - "properties": { - - "publish_dir": { - "type": "string", - "description": "Type: `string`, required, example: `output/`. Path to an output directory", - "help_text": "Type: `string`, required, example: `output/`. Path to an output directory." - }, - - "param_list": { - "type": "string", - "description": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel", - "help_text": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel. A `param_list` can either be a list of maps, a csv file, a json file, a yaml file, or simply a yaml blob.\n\n* A list of maps (as-is) where the keys of each map corresponds to the arguments of the pipeline. 
Example: in a `nextflow.config` file: `param_list: [ [\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027], [\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027] ]`.\n* A csv file should have column names which correspond to the different arguments of this pipeline. Example: `--param_list data.csv` with columns `id,input`.\n* A json or a yaml file should be a list of maps, each of which has keys corresponding to the arguments of the pipeline. Example: `--param_list data.json` with contents `[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]`.\n* A yaml blob can also be passed directly as a string. Example: `--param_list \"[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]\"`.\n\nWhen passing a csv, json or yaml file, relative path names are relativized to the location of the parameter file. No relativation is performed when `param_list` is a list of maps (as-is) or a yaml blob.", - "hidden": true - } - - } - } + } + + + , + "input_id": { + "type": + "string", + "description": "Type: `string`, required. The input id", + "help_text": "Type: `string`, required. The input id." + + } + + + , + "obs_output": { + "type": + "string", + "description": "Type: `string`, default: `sample_id`. Name of the ", + "help_text": "Type: `string`, default: `sample_id`. Name of the .obs column where to store the id." + , + "default": "sample_id" + } + + + , + "output": { + "type": + "string", + "description": "Type: `file`, default: `$id.$key.output.h5mu`, example: `output.h5mu`. ", + "help_text": "Type: `file`, default: `$id.$key.output.h5mu`, example: `output.h5mu`. " + , + "default": "$id.$key.output.h5mu" + } + + + , + "output_compression": { + "type": + "string", + "description": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. The compression format to be used on the output h5mu object", + "help_text": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. The compression format to be used on the output h5mu object.", + "enum": ["gzip", "lzf"] + + + } + + + , + "make_observation_keys_unique": { + "type": + "boolean", + "description": "Type: `boolean_true`, default: `false`. Join the id to the ", + "help_text": "Type: `boolean_true`, default: `false`. Join the id to the .obs index (.obs_names)." + , + "default": "False" + } + + +} +}, + + + "nextflow input-output arguments" : { + "title": "Nextflow input-output arguments", + "type": "object", + "description": "Input/output parameters for Nextflow itself. Please note that both publishDir and publish_dir are supported but at least one has to be configured.", + "properties": { + + + "publish_dir": { + "type": + "string", + "description": "Type: `string`, required, example: `output/`. Path to an output directory", + "help_text": "Type: `string`, required, example: `output/`. Path to an output directory." + + } + + + , + "param_list": { + "type": + "string", + "description": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel", + "help_text": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel. 
A `param_list` can either be a list of maps, a csv file, a json file, a yaml file, or simply a yaml blob.\n\n* A list of maps (as-is) where the keys of each map corresponds to the arguments of the pipeline. Example: in a `nextflow.config` file: `param_list: [ [\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027], [\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027] ]`.\n* A csv file should have column names which correspond to the different arguments of this pipeline. Example: `--param_list data.csv` with columns `id,input`.\n* A json or a yaml file should be a list of maps, each of which has keys corresponding to the arguments of the pipeline. Example: `--param_list data.json` with contents `[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]`.\n* A yaml blob can also be passed directly as a string. Example: `--param_list \"[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]\"`.\n\nWhen passing a csv, json or yaml file, relative path names are relativized to the location of the parameter file. No relativation is performed when `param_list` is a list of maps (as-is) or a yaml blob.", + "hidden": true + + } + + +} +} +}, +"allOf": [ + + { + "$ref": "#/definitions/arguments" }, - "allOf": [ - { - "$ref": "#/definitions/arguments" - }, - { - "$ref": "#/definitions/nextflow input-output arguments" - } - ] + + { + "$ref": "#/definitions/nextflow input-output arguments" + } +] } diff --git a/target/nextflow/metadata/grep_annotation_column/.config.vsh.yaml b/target/nextflow/metadata/grep_annotation_column/.config.vsh.yaml index 3a6f571dc37..e50c809bd05 100644 --- a/target/nextflow/metadata/grep_annotation_column/.config.vsh.yaml +++ b/target/nextflow/metadata/grep_annotation_column/.config.vsh.yaml @@ -1,7 +1,7 @@ functionality: name: "grep_annotation_column" namespace: "metadata" - version: "0.12.3" + version: "0.12.4" authors: - name: "Dries Schaumont" roles: @@ -239,6 +239,6 @@ info: output: "/home/runner/work/openpipeline/openpipeline/target/nextflow/metadata/grep_annotation_column" executable: "/home/runner/work/openpipeline/openpipeline/target/nextflow/metadata/grep_annotation_column/grep_annotation_column" viash_version: "0.7.5" - git_commit: "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" + git_commit: "a075b9f384e200b357c4c85801062a980ddb3383" git_remote: "https://github.com/openpipelines-bio/openpipeline" - git_tag: "0.12.2-3-g827d483cf7" + git_tag: "0.12.3-3-ga075b9f384" diff --git a/target/nextflow/metadata/grep_annotation_column/main.nf b/target/nextflow/metadata/grep_annotation_column/main.nf index 060de1169c2..894f743fae6 100644 --- a/target/nextflow/metadata/grep_annotation_column/main.nf +++ b/target/nextflow/metadata/grep_annotation_column/main.nf @@ -1,4 +1,4 @@ -// grep_annotation_column 0.12.3 +// grep_annotation_column 0.12.4 // // This wrapper script is auto-generated by viash 0.7.5 and is thus a derivative // work thereof. 
This software comes with ABSOLUTELY NO WARRANTY from Data
@@ -27,7 +27,7 @@ thisConfig = processConfig(jsonSlurper.parseText('''{
 "functionality" : {
 "name" : "grep_annotation_column",
 "namespace" : "metadata",
- "version" : "0.12.3",
+ "version" : "0.12.4",
 "authors" : [
 {
 "name" : "Dries Schaumont",
@@ -332,9 +332,9 @@ thisConfig = processConfig(jsonSlurper.parseText('''{
 "platform" : "nextflow",
 "output" : "/home/runner/work/openpipeline/openpipeline/target/nextflow/metadata/grep_annotation_column",
 "viash_version" : "0.7.5",
- "git_commit" : "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f",
+ "git_commit" : "a075b9f384e200b357c4c85801062a980ddb3383",
 "git_remote" : "https://github.com/openpipelines-bio/openpipeline",
- "git_tag" : "0.12.2-3-g827d483cf7"
+ "git_tag" : "0.12.3-3-ga075b9f384"
 }
 }'''))
diff --git a/target/nextflow/metadata/grep_annotation_column/nextflow.config b/target/nextflow/metadata/grep_annotation_column/nextflow.config
index 3a7a5d0b395..a2ddbb9c962 100644
--- a/target/nextflow/metadata/grep_annotation_column/nextflow.config
+++ b/target/nextflow/metadata/grep_annotation_column/nextflow.config
@@ -2,7 +2,7 @@ manifest {
 name = 'grep_annotation_column'
 mainScript = 'main.nf'
 nextflowVersion = '!>=20.12.1-edge'
- version = '0.12.3'
+ version = '0.12.4'
 description = 'Perform a regex lookup on a column from the annotation matrices .obs or .var.\nThe annotation matrix can originate from either a modality, or all modalities (global .var or .obs).\n'
 author = 'Dries Schaumont'
 }
diff --git a/target/nextflow/metadata/grep_annotation_column/nextflow_schema.json b/target/nextflow/metadata/grep_annotation_column/nextflow_schema.json
index ceae9c3ec04..7af99853125 100644
--- a/target/nextflow/metadata/grep_annotation_column/nextflow_schema.json
+++ b/target/nextflow/metadata/grep_annotation_column/nextflow_schema.json
@@ -1,126 +1,183 @@
 {
- "$schema": "http://json-schema.org/draft-07/schema",
- "title": "grep_annotation_column",
- "description": "Perform a regex lookup on a column from the annotation matrices .obs or .var.\nThe annotation matrix can originate from either a modality, or all modalities (global .var or .obs).\n",
+"$schema": "http://json-schema.org/draft-07/schema",
+"title": "grep_annotation_column",
+"description": "Perform a regex lookup on a column from the annotation matrices .obs or .var.\nThe annotation matrix can originate from either a modality, or all modalities (global .var or .obs).\n",
+"type": "object",
+"definitions": {
+
+
+
+ "inputs" : {
+ "title": "Inputs",
 "type": "object",
- "definitions": {
- "inputs" : {
- "title": "Inputs",
- "type": "object",
- "description": "Arguments related to the input dataset.",
- "properties": {
-
- "input": {
- "type": "string",
- "description": "Type: `file`, required, example: `sample_path`. Path to the input ",
- "help_text": "Type: `file`, required, example: `sample_path`. Path to the input .h5mu."
- },
-
- "input_column": {
- "type": "string",
- "description": "Type: `string`. Column to query",
- "help_text": "Type: `string`. Column to query. If not specified, use .var_names or .obs_names, depending on the value of --matrix"
- },
-
- "modality": {
- "type": "string",
- "description": "Type: `string`, required, example: `rna`. Which modality to get the annotation matrix from",
- "help_text": "Type: `string`, required, example: `rna`. Which modality to get the annotation matrix from.\n"
- },
-
- "matrix": {
- "type": "string",
- "description": "Type: `string`, example: `var`, choices: ``var`, `obs``. 
Matrix to fetch the column from that will be searched", - "help_text": "Type: `string`, example: `var`, choices: ``var`, `obs``. Matrix to fetch the column from that will be searched.", - "enum": ["var", "obs"] + "description": "Arguments related to the input dataset.", + "properties": { + + + "input": { + "type": + "string", + "description": "Type: `file`, required, example: `sample_path`. Path to the input ", + "help_text": "Type: `file`, required, example: `sample_path`. Path to the input .h5mu." - } - - } - }, - "outputs" : { - "title": "Outputs", - "type": "object", - "description": "Arguments related to how the output will be written.", - "properties": { - - "output": { - "type": "string", - "description": "Type: `file`, default: `$id.$key.output.h5mu`, example: `output.h5mu`. ", - "help_text": "Type: `file`, default: `$id.$key.output.h5mu`, example: `output.h5mu`. ", - "default": "$id.$key.output.h5mu" - }, - - "output_compression": { - "type": "string", - "description": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. The compression format to be used on the output h5mu object", - "help_text": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. The compression format to be used on the output h5mu object.", - "enum": ["gzip", "lzf"] + } + + + , + "input_column": { + "type": + "string", + "description": "Type: `string`. Column to query", + "help_text": "Type: `string`. Column to query. If not specified, use .var_names or .obs_names, depending on the value of --matrix" - }, - - "output_match_column": { - "type": "string", - "description": "Type: `string`, required. Name of the column to write the result to", - "help_text": "Type: `string`, required. Name of the column to write the result to." - }, - - "output_fraction_column": { - "type": "string", - "description": "Type: `string`. For the opposite axis, name of the column to write the fraction of \nobservations that matches to the pattern", - "help_text": "Type: `string`. For the opposite axis, name of the column to write the fraction of \nobservations that matches to the pattern.\n" - } - - } - }, - "query options" : { - "title": "Query options", - "type": "object", - "description": "Options related to the query", - "properties": { - - "regex_pattern": { - "type": "string", - "description": "Type: `string`, required, example: `^[mM][tT]-`. Regex to use to match with the input column", - "help_text": "Type: `string`, required, example: `^[mM][tT]-`. Regex to use to match with the input column." - } - - } - }, - "nextflow input-output arguments" : { - "title": "Nextflow input-output arguments", - "type": "object", - "description": "Input/output parameters for Nextflow itself. Please note that both publishDir and publish_dir are supported but at least one has to be configured.", - "properties": { - - "publish_dir": { - "type": "string", - "description": "Type: `string`, required, example: `output/`. Path to an output directory", - "help_text": "Type: `string`, required, example: `output/`. Path to an output directory." - }, - - "param_list": { - "type": "string", - "description": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel", - "help_text": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel. 
A `param_list` can either be a list of maps, a csv file, a json file, a yaml file, or simply a yaml blob.\n\n* A list of maps (as-is) where the keys of each map corresponds to the arguments of the pipeline. Example: in a `nextflow.config` file: `param_list: [ [\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027], [\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027] ]`.\n* A csv file should have column names which correspond to the different arguments of this pipeline. Example: `--param_list data.csv` with columns `id,input`.\n* A json or a yaml file should be a list of maps, each of which has keys corresponding to the arguments of the pipeline. Example: `--param_list data.json` with contents `[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]`.\n* A yaml blob can also be passed directly as a string. Example: `--param_list \"[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]\"`.\n\nWhen passing a csv, json or yaml file, relative path names are relativized to the location of the parameter file. No relativation is performed when `param_list` is a list of maps (as-is) or a yaml blob.", - "hidden": true - } - - } - } + } + + + , + "modality": { + "type": + "string", + "description": "Type: `string`, required, example: `rna`. Which modality to get the annotation matrix from", + "help_text": "Type: `string`, required, example: `rna`. Which modality to get the annotation matrix from.\n" + + } + + + , + "matrix": { + "type": + "string", + "description": "Type: `string`, example: `var`, choices: ``var`, `obs``. Matrix to fetch the column from that will be searched", + "help_text": "Type: `string`, example: `var`, choices: ``var`, `obs``. Matrix to fetch the column from that will be searched.", + "enum": ["var", "obs"] + + + } + + +} +}, + + + "outputs" : { + "title": "Outputs", + "type": "object", + "description": "Arguments related to how the output will be written.", + "properties": { + + + "output": { + "type": + "string", + "description": "Type: `file`, default: `$id.$key.output.h5mu`, example: `output.h5mu`. ", + "help_text": "Type: `file`, default: `$id.$key.output.h5mu`, example: `output.h5mu`. " + , + "default": "$id.$key.output.h5mu" + } + + + , + "output_compression": { + "type": + "string", + "description": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. The compression format to be used on the output h5mu object", + "help_text": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. The compression format to be used on the output h5mu object.", + "enum": ["gzip", "lzf"] + + + } + + + , + "output_match_column": { + "type": + "string", + "description": "Type: `string`, required. Name of the column to write the result to", + "help_text": "Type: `string`, required. Name of the column to write the result to." + + } + + + , + "output_fraction_column": { + "type": + "string", + "description": "Type: `string`. For the opposite axis, name of the column to write the fraction of \nobservations that matches to the pattern", + "help_text": "Type: `string`. 
For the opposite axis, name of the column to write the fraction of \nobservations that matches to the pattern.\n" + + } + + +} +}, + + + "query options" : { + "title": "Query options", + "type": "object", + "description": "Options related to the query", + "properties": { + + + "regex_pattern": { + "type": + "string", + "description": "Type: `string`, required, example: `^[mM][tT]-`. Regex to use to match with the input column", + "help_text": "Type: `string`, required, example: `^[mM][tT]-`. Regex to use to match with the input column." + + } + + +} +}, + + + "nextflow input-output arguments" : { + "title": "Nextflow input-output arguments", + "type": "object", + "description": "Input/output parameters for Nextflow itself. Please note that both publishDir and publish_dir are supported but at least one has to be configured.", + "properties": { + + + "publish_dir": { + "type": + "string", + "description": "Type: `string`, required, example: `output/`. Path to an output directory", + "help_text": "Type: `string`, required, example: `output/`. Path to an output directory." + + } + + + , + "param_list": { + "type": + "string", + "description": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel", + "help_text": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel. A `param_list` can either be a list of maps, a csv file, a json file, a yaml file, or simply a yaml blob.\n\n* A list of maps (as-is) where the keys of each map corresponds to the arguments of the pipeline. Example: in a `nextflow.config` file: `param_list: [ [\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027], [\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027] ]`.\n* A csv file should have column names which correspond to the different arguments of this pipeline. Example: `--param_list data.csv` with columns `id,input`.\n* A json or a yaml file should be a list of maps, each of which has keys corresponding to the arguments of the pipeline. Example: `--param_list data.json` with contents `[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]`.\n* A yaml blob can also be passed directly as a string. Example: `--param_list \"[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]\"`.\n\nWhen passing a csv, json or yaml file, relative path names are relativized to the location of the parameter file. 
No relativation is performed when `param_list` is a list of maps (as-is) or a yaml blob.", + "hidden": true + + } + + +} +} +}, +"allOf": [ + + { + "$ref": "#/definitions/inputs" + }, + + { + "$ref": "#/definitions/outputs" + }, + + { + "$ref": "#/definitions/query options" }, - "allOf": [ - { - "$ref": "#/definitions/inputs" - }, - { - "$ref": "#/definitions/outputs" - }, - { - "$ref": "#/definitions/query options" - }, - { - "$ref": "#/definitions/nextflow input-output arguments" - } - ] + + { + "$ref": "#/definitions/nextflow input-output arguments" + } +] } diff --git a/target/nextflow/metadata/join_csv/.config.vsh.yaml b/target/nextflow/metadata/join_csv/.config.vsh.yaml index 3de4208c579..a83cca1757c 100644 --- a/target/nextflow/metadata/join_csv/.config.vsh.yaml +++ b/target/nextflow/metadata/join_csv/.config.vsh.yaml @@ -1,7 +1,7 @@ functionality: name: "join_csv" namespace: "metadata" - version: "0.12.3" + version: "0.12.4" authors: - name: "Dries Schaumont" roles: @@ -224,6 +224,6 @@ info: output: "/home/runner/work/openpipeline/openpipeline/target/nextflow/metadata/join_csv" executable: "/home/runner/work/openpipeline/openpipeline/target/nextflow/metadata/join_csv/join_csv" viash_version: "0.7.5" - git_commit: "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" + git_commit: "a075b9f384e200b357c4c85801062a980ddb3383" git_remote: "https://github.com/openpipelines-bio/openpipeline" - git_tag: "0.12.2-3-g827d483cf7" + git_tag: "0.12.3-3-ga075b9f384" diff --git a/target/nextflow/metadata/join_csv/main.nf b/target/nextflow/metadata/join_csv/main.nf index 93d41508581..868f1e5f9de 100644 --- a/target/nextflow/metadata/join_csv/main.nf +++ b/target/nextflow/metadata/join_csv/main.nf @@ -1,4 +1,4 @@ -// join_csv 0.12.3 +// join_csv 0.12.4 // // This wrapper script is auto-generated by viash 0.7.5 and is thus a derivative // work thereof. This software comes with ABSOLUTELY NO WARRANTY from Data @@ -27,7 +27,7 @@ thisConfig = processConfig(jsonSlurper.parseText('''{ "functionality" : { "name" : "join_csv", "namespace" : "metadata", - "version" : "0.12.3", + "version" : "0.12.4", "authors" : [ { "name" : "Dries Schaumont", @@ -313,9 +313,9 @@ thisConfig = processConfig(jsonSlurper.parseText('''{ "platform" : "nextflow", "output" : "/home/runner/work/openpipeline/openpipeline/target/nextflow/metadata/join_csv", "viash_version" : "0.7.5", - "git_commit" : "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f", + "git_commit" : "a075b9f384e200b357c4c85801062a980ddb3383", "git_remote" : "https://github.com/openpipelines-bio/openpipeline", - "git_tag" : "0.12.2-3-g827d483cf7" + "git_tag" : "0.12.3-3-ga075b9f384" } }''')) diff --git a/target/nextflow/metadata/join_csv/nextflow.config b/target/nextflow/metadata/join_csv/nextflow.config index 900d214369f..09b472f385b 100644 --- a/target/nextflow/metadata/join_csv/nextflow.config +++ b/target/nextflow/metadata/join_csv/nextflow.config @@ -2,7 +2,7 @@ manifest { name = 'join_csv' mainScript = 'main.nf' nextflowVersion = '!>=20.12.1-edge' - version = '0.12.3' + version = '0.12.4' description = 'Join a csv containing metadata to the .obs or .var field of a mudata file.' 
author = 'Dries Schaumont' } diff --git a/target/nextflow/metadata/join_csv/nextflow_schema.json b/target/nextflow/metadata/join_csv/nextflow_schema.json index 2fa3592ef1b..8b29808ecde 100644 --- a/target/nextflow/metadata/join_csv/nextflow_schema.json +++ b/target/nextflow/metadata/join_csv/nextflow_schema.json @@ -1,120 +1,173 @@ { - "$schema": "http://json-schema.org/draft-07/schema", - "title": "join_csv", - "description": "Join a csv containing metadata to the .obs or .var field of a mudata file.", +"$schema": "http://json-schema.org/draft-07/schema", +"title": "join_csv", +"description": "Join a csv containing metadata to the .obs or .var field of a mudata file.", +"type": "object", +"definitions": { + + + + "mudata input" : { + "title": "MuData Input", "type": "object", - "definitions": { - "mudata input" : { - "title": "MuData Input", - "type": "object", - "description": "No description", - "properties": { - - "input": { - "type": "string", - "description": "Type: `file`, required, example: `input.h5mu`. Input h5mu file", - "help_text": "Type: `file`, required, example: `input.h5mu`. Input h5mu file" - }, - - "modality": { - "type": "string", - "description": "Type: `string`, default: `rna`. ", - "help_text": "Type: `string`, default: `rna`. ", - "default": "rna" - }, - - "obs_key": { - "type": "string", - "description": "Type: `string`. Obs column name where the sample id can be found for each observation to join on", - "help_text": "Type: `string`. Obs column name where the sample id can be found for each observation to join on.\nUseful when adding metadata to concatenated samples.\nMutually exclusive with `--var_key`.\"\n" - }, - - "var_key": { - "type": "string", - "description": "Type: `string`. Var column name where the sample id can be found for each variable to join on", - "help_text": "Type: `string`. Var column name where the sample id can be found for each variable to join on.\nMutually exclusive with `--obs_key`.\"\n" - } - - } - }, - "mudata output" : { - "title": "MuData Output", - "type": "object", - "description": "No description", - "properties": { - - "output": { - "type": "string", - "description": "Type: `file`, required, default: `$id.$key.output.h5mu`, example: `output.h5mu`. Output h5mu file", - "help_text": "Type: `file`, required, default: `$id.$key.output.h5mu`, example: `output.h5mu`. Output h5mu file.", - "default": "$id.$key.output.h5mu" - }, - - "output_compression": { - "type": "string", - "description": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. The compression format to be used on the output h5mu object", - "help_text": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. The compression format to be used on the output h5mu object.", - "enum": ["gzip", "lzf"] + "description": "No description", + "properties": { + + + "input": { + "type": + "string", + "description": "Type: `file`, required, example: `input.h5mu`. Input h5mu file", + "help_text": "Type: `file`, required, example: `input.h5mu`. Input h5mu file" - } - - } - }, - "metadata input" : { - "title": "Metadata Input", - "type": "object", - "description": "No description", - "properties": { - - "input_csv": { - "type": "string", - "description": "Type: `file`, required, example: `metadata.csv`. ", - "help_text": "Type: `file`, required, example: `metadata.csv`. .csv file containing metadata" - }, - - "csv_key": { - "type": "string", - "description": "Type: `string`, default: `id`. 
column of the the csv that corresponds to the sample id", - "help_text": "Type: `string`, default: `id`. column of the the csv that corresponds to the sample id.", - "default": "id" - } - - } - }, - "nextflow input-output arguments" : { - "title": "Nextflow input-output arguments", - "type": "object", - "description": "Input/output parameters for Nextflow itself. Please note that both publishDir and publish_dir are supported but at least one has to be configured.", - "properties": { - - "publish_dir": { - "type": "string", - "description": "Type: `string`, required, example: `output/`. Path to an output directory", - "help_text": "Type: `string`, required, example: `output/`. Path to an output directory." - }, - - "param_list": { - "type": "string", - "description": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel", - "help_text": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel. A `param_list` can either be a list of maps, a csv file, a json file, a yaml file, or simply a yaml blob.\n\n* A list of maps (as-is) where the keys of each map corresponds to the arguments of the pipeline. Example: in a `nextflow.config` file: `param_list: [ [\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027], [\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027] ]`.\n* A csv file should have column names which correspond to the different arguments of this pipeline. Example: `--param_list data.csv` with columns `id,input`.\n* A json or a yaml file should be a list of maps, each of which has keys corresponding to the arguments of the pipeline. Example: `--param_list data.json` with contents `[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]`.\n* A yaml blob can also be passed directly as a string. Example: `--param_list \"[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]\"`.\n\nWhen passing a csv, json or yaml file, relative path names are relativized to the location of the parameter file. No relativation is performed when `param_list` is a list of maps (as-is) or a yaml blob.", - "hidden": true - } - - } - } + } + + + , + "modality": { + "type": + "string", + "description": "Type: `string`, default: `rna`. ", + "help_text": "Type: `string`, default: `rna`. " + , + "default": "rna" + } + + + , + "obs_key": { + "type": + "string", + "description": "Type: `string`. Obs column name where the sample id can be found for each observation to join on", + "help_text": "Type: `string`. Obs column name where the sample id can be found for each observation to join on.\nUseful when adding metadata to concatenated samples.\nMutually exclusive with `--var_key`.\"\n" + + } + + + , + "var_key": { + "type": + "string", + "description": "Type: `string`. Var column name where the sample id can be found for each variable to join on", + "help_text": "Type: `string`. 
Var column name where the sample id can be found for each variable to join on.\nMutually exclusive with `--obs_key`.\"\n" + + } + + +} +}, + + + "mudata output" : { + "title": "MuData Output", + "type": "object", + "description": "No description", + "properties": { + + + "output": { + "type": + "string", + "description": "Type: `file`, required, default: `$id.$key.output.h5mu`, example: `output.h5mu`. Output h5mu file", + "help_text": "Type: `file`, required, default: `$id.$key.output.h5mu`, example: `output.h5mu`. Output h5mu file." + , + "default": "$id.$key.output.h5mu" + } + + + , + "output_compression": { + "type": + "string", + "description": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. The compression format to be used on the output h5mu object", + "help_text": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. The compression format to be used on the output h5mu object.", + "enum": ["gzip", "lzf"] + + + } + + +} +}, + + + "metadata input" : { + "title": "Metadata Input", + "type": "object", + "description": "No description", + "properties": { + + + "input_csv": { + "type": + "string", + "description": "Type: `file`, required, example: `metadata.csv`. ", + "help_text": "Type: `file`, required, example: `metadata.csv`. .csv file containing metadata" + + } + + + , + "csv_key": { + "type": + "string", + "description": "Type: `string`, default: `id`. column of the the csv that corresponds to the sample id", + "help_text": "Type: `string`, default: `id`. column of the the csv that corresponds to the sample id." + , + "default": "id" + } + + +} +}, + + + "nextflow input-output arguments" : { + "title": "Nextflow input-output arguments", + "type": "object", + "description": "Input/output parameters for Nextflow itself. Please note that both publishDir and publish_dir are supported but at least one has to be configured.", + "properties": { + + + "publish_dir": { + "type": + "string", + "description": "Type: `string`, required, example: `output/`. Path to an output directory", + "help_text": "Type: `string`, required, example: `output/`. Path to an output directory." + + } + + + , + "param_list": { + "type": + "string", + "description": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel", + "help_text": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel. A `param_list` can either be a list of maps, a csv file, a json file, a yaml file, or simply a yaml blob.\n\n* A list of maps (as-is) where the keys of each map corresponds to the arguments of the pipeline. Example: in a `nextflow.config` file: `param_list: [ [\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027], [\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027] ]`.\n* A csv file should have column names which correspond to the different arguments of this pipeline. Example: `--param_list data.csv` with columns `id,input`.\n* A json or a yaml file should be a list of maps, each of which has keys corresponding to the arguments of the pipeline. Example: `--param_list data.json` with contents `[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]`.\n* A yaml blob can also be passed directly as a string. 
Example: `--param_list \"[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]\"`.\n\nWhen passing a csv, json or yaml file, relative path names are relativized to the location of the parameter file. No relativation is performed when `param_list` is a list of maps (as-is) or a yaml blob.", + "hidden": true + + } + + +} +} +}, +"allOf": [ + + { + "$ref": "#/definitions/mudata input" + }, + + { + "$ref": "#/definitions/mudata output" + }, + + { + "$ref": "#/definitions/metadata input" }, - "allOf": [ - { - "$ref": "#/definitions/mudata input" - }, - { - "$ref": "#/definitions/mudata output" - }, - { - "$ref": "#/definitions/metadata input" - }, - { - "$ref": "#/definitions/nextflow input-output arguments" - } - ] + + { + "$ref": "#/definitions/nextflow input-output arguments" + } +] } diff --git a/target/nextflow/metadata/join_uns_to_obs/.config.vsh.yaml b/target/nextflow/metadata/join_uns_to_obs/.config.vsh.yaml index cc71bc00f6f..97ee148bbb7 100644 --- a/target/nextflow/metadata/join_uns_to_obs/.config.vsh.yaml +++ b/target/nextflow/metadata/join_uns_to_obs/.config.vsh.yaml @@ -1,7 +1,7 @@ functionality: name: "join_uns_to_obs" namespace: "metadata" - version: "0.12.3" + version: "0.12.4" arguments: - type: "file" name: "--input" @@ -166,6 +166,6 @@ info: output: "/home/runner/work/openpipeline/openpipeline/target/nextflow/metadata/join_uns_to_obs" executable: "/home/runner/work/openpipeline/openpipeline/target/nextflow/metadata/join_uns_to_obs/join_uns_to_obs" viash_version: "0.7.5" - git_commit: "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" + git_commit: "a075b9f384e200b357c4c85801062a980ddb3383" git_remote: "https://github.com/openpipelines-bio/openpipeline" - git_tag: "0.12.2-3-g827d483cf7" + git_tag: "0.12.3-3-ga075b9f384" diff --git a/target/nextflow/metadata/join_uns_to_obs/main.nf b/target/nextflow/metadata/join_uns_to_obs/main.nf index 001f201741f..0fe70ce2c0b 100644 --- a/target/nextflow/metadata/join_uns_to_obs/main.nf +++ b/target/nextflow/metadata/join_uns_to_obs/main.nf @@ -1,4 +1,4 @@ -// join_uns_to_obs 0.12.3 +// join_uns_to_obs 0.12.4 // // This wrapper script is auto-generated by viash 0.7.5 and is thus a derivative // work thereof. 
This software comes with ABSOLUTELY NO WARRANTY from Data
@@ -24,7 +24,7 @@ thisConfig = processConfig(jsonSlurper.parseText('''{
 "functionality" : {
 "name" : "join_uns_to_obs",
 "namespace" : "metadata",
- "version" : "0.12.3",
+ "version" : "0.12.4",
 "arguments" : [
 {
 "type" : "file",
@@ -232,9 +232,9 @@ thisConfig = processConfig(jsonSlurper.parseText('''{
 "platform" : "nextflow",
 "output" : "/home/runner/work/openpipeline/openpipeline/target/nextflow/metadata/join_uns_to_obs",
 "viash_version" : "0.7.5",
- "git_commit" : "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f",
+ "git_commit" : "a075b9f384e200b357c4c85801062a980ddb3383",
 "git_remote" : "https://github.com/openpipelines-bio/openpipeline",
- "git_tag" : "0.12.2-3-g827d483cf7"
+ "git_tag" : "0.12.3-3-ga075b9f384"
 }
 }'''))
diff --git a/target/nextflow/metadata/join_uns_to_obs/nextflow.config b/target/nextflow/metadata/join_uns_to_obs/nextflow.config
index 18335f85f2a..d5b1cd18a90 100644
--- a/target/nextflow/metadata/join_uns_to_obs/nextflow.config
+++ b/target/nextflow/metadata/join_uns_to_obs/nextflow.config
@@ -2,7 +2,7 @@ manifest {
 name = 'join_uns_to_obs'
 mainScript = 'main.nf'
 nextflowVersion = '!>=20.12.1-edge'
- version = '0.12.3'
+ version = '0.12.4'
 description = 'Join a data frame of length 1 (1 row index value) in .uns containing metadata to the .obs of a mudata file.'
 }
diff --git a/target/nextflow/metadata/join_uns_to_obs/nextflow_schema.json b/target/nextflow/metadata/join_uns_to_obs/nextflow_schema.json
index 87ee9413166..1885dec4c65 100644
--- a/target/nextflow/metadata/join_uns_to_obs/nextflow_schema.json
+++ b/target/nextflow/metadata/join_uns_to_obs/nextflow_schema.json
@@ -1,79 +1,114 @@
 {
- "$schema": "http://json-schema.org/draft-07/schema",
- "title": "join_uns_to_obs",
- "description": "Join a data frame of length 1 (1 row index value) in .uns containing metadata to the .obs of a mudata file.",
+"$schema": "http://json-schema.org/draft-07/schema",
+"title": "join_uns_to_obs",
+"description": "Join a data frame of length 1 (1 row index value) in .uns containing metadata to the .obs of a mudata file.",
+"type": "object",
+"definitions": {
+
+
+
+ "arguments" : {
+ "title": "Arguments",
 "type": "object",
- "definitions": {
- "arguments" : {
- "title": "Arguments",
- "type": "object",
- "description": "No description",
- "properties": {
-
- "input": {
- "type": "string",
- "description": "Type: `file`, required, example: `input.h5mu`. Input h5mu file",
- "help_text": "Type: `file`, required, example: `input.h5mu`. Input h5mu file"
- },
-
- "modality": {
- "type": "string",
- "description": "Type: `string`, default: `rna`. ",
- "help_text": "Type: `string`, default: `rna`. ",
- "default": "rna"
- },
-
- "uns_key": {
- "type": "string",
- "description": "Type: `string`, required. ",
- "help_text": "Type: `string`, required. "
- },
-
- "output": {
- "type": "string",
- "description": "Type: `file`, required, default: `$id.$key.output.h5mu`, example: `output.h5mu`. Output h5mu file",
- "help_text": "Type: `file`, required, default: `$id.$key.output.h5mu`, example: `output.h5mu`. Output h5mu file.",
- "default": "$id.$key.output.h5mu"
- },
-
- "output_compression": {
- "type": "string",
- "description": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. The compression format to be used on the output h5mu object",
- "help_text": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. 
The compression format to be used on the output h5mu object.", - "enum": ["gzip", "lzf"] + "description": "No description", + "properties": { + + + "input": { + "type": + "string", + "description": "Type: `file`, required, example: `input.h5mu`. Input h5mu file", + "help_text": "Type: `file`, required, example: `input.h5mu`. Input h5mu file" - } - - } - }, - "nextflow input-output arguments" : { - "title": "Nextflow input-output arguments", - "type": "object", - "description": "Input/output parameters for Nextflow itself. Please note that both publishDir and publish_dir are supported but at least one has to be configured.", - "properties": { - - "publish_dir": { - "type": "string", - "description": "Type: `string`, required, example: `output/`. Path to an output directory", - "help_text": "Type: `string`, required, example: `output/`. Path to an output directory." - }, - - "param_list": { - "type": "string", - "description": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel", - "help_text": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel. A `param_list` can either be a list of maps, a csv file, a json file, a yaml file, or simply a yaml blob.\n\n* A list of maps (as-is) where the keys of each map corresponds to the arguments of the pipeline. Example: in a `nextflow.config` file: `param_list: [ [\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027], [\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027] ]`.\n* A csv file should have column names which correspond to the different arguments of this pipeline. Example: `--param_list data.csv` with columns `id,input`.\n* A json or a yaml file should be a list of maps, each of which has keys corresponding to the arguments of the pipeline. Example: `--param_list data.json` with contents `[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]`.\n* A yaml blob can also be passed directly as a string. Example: `--param_list \"[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]\"`.\n\nWhen passing a csv, json or yaml file, relative path names are relativized to the location of the parameter file. No relativation is performed when `param_list` is a list of maps (as-is) or a yaml blob.", - "hidden": true - } - - } - } + } + + + , + "modality": { + "type": + "string", + "description": "Type: `string`, default: `rna`. ", + "help_text": "Type: `string`, default: `rna`. " + , + "default": "rna" + } + + + , + "uns_key": { + "type": + "string", + "description": "Type: `string`, required. ", + "help_text": "Type: `string`, required. " + + } + + + , + "output": { + "type": + "string", + "description": "Type: `file`, required, default: `$id.$key.output.h5mu`, example: `output.h5mu`. Output h5mu file", + "help_text": "Type: `file`, required, default: `$id.$key.output.h5mu`, example: `output.h5mu`. Output h5mu file." + , + "default": "$id.$key.output.h5mu" + } + + + , + "output_compression": { + "type": + "string", + "description": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. The compression format to be used on the output h5mu object", + "help_text": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. 
The compression format to be used on the output h5mu object.", + "enum": ["gzip", "lzf"] + + + } + + +} +}, + + + "nextflow input-output arguments" : { + "title": "Nextflow input-output arguments", + "type": "object", + "description": "Input/output parameters for Nextflow itself. Please note that both publishDir and publish_dir are supported but at least one has to be configured.", + "properties": { + + + "publish_dir": { + "type": + "string", + "description": "Type: `string`, required, example: `output/`. Path to an output directory", + "help_text": "Type: `string`, required, example: `output/`. Path to an output directory." + + } + + + , + "param_list": { + "type": + "string", + "description": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel", + "help_text": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel. A `param_list` can either be a list of maps, a csv file, a json file, a yaml file, or simply a yaml blob.\n\n* A list of maps (as-is) where the keys of each map corresponds to the arguments of the pipeline. Example: in a `nextflow.config` file: `param_list: [ [\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027], [\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027] ]`.\n* A csv file should have column names which correspond to the different arguments of this pipeline. Example: `--param_list data.csv` with columns `id,input`.\n* A json or a yaml file should be a list of maps, each of which has keys corresponding to the arguments of the pipeline. Example: `--param_list data.json` with contents `[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]`.\n* A yaml blob can also be passed directly as a string. Example: `--param_list \"[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]\"`.\n\nWhen passing a csv, json or yaml file, relative path names are relativized to the location of the parameter file. 
No relativation is performed when `param_list` is a list of maps (as-is) or a yaml blob.", + "hidden": true + + } + + +} +} +}, +"allOf": [ + + { + "$ref": "#/definitions/arguments" }, - "allOf": [ - { - "$ref": "#/definitions/arguments" - }, - { - "$ref": "#/definitions/nextflow input-output arguments" - } - ] + + { + "$ref": "#/definitions/nextflow input-output arguments" + } +] } diff --git a/target/nextflow/metadata/move_obsm_to_obs/.config.vsh.yaml b/target/nextflow/metadata/move_obsm_to_obs/.config.vsh.yaml index 99515d2e742..bf0d8a7e2ca 100644 --- a/target/nextflow/metadata/move_obsm_to_obs/.config.vsh.yaml +++ b/target/nextflow/metadata/move_obsm_to_obs/.config.vsh.yaml @@ -1,7 +1,7 @@ functionality: name: "move_obsm_to_obs" namespace: "metadata" - version: "0.12.3" + version: "0.12.4" authors: - name: "Dries Schaumont" roles: @@ -187,6 +187,6 @@ info: output: "/home/runner/work/openpipeline/openpipeline/target/nextflow/metadata/move_obsm_to_obs" executable: "/home/runner/work/openpipeline/openpipeline/target/nextflow/metadata/move_obsm_to_obs/move_obsm_to_obs" viash_version: "0.7.5" - git_commit: "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" + git_commit: "a075b9f384e200b357c4c85801062a980ddb3383" git_remote: "https://github.com/openpipelines-bio/openpipeline" - git_tag: "0.12.2-3-g827d483cf7" + git_tag: "0.12.3-3-ga075b9f384" diff --git a/target/nextflow/metadata/move_obsm_to_obs/main.nf b/target/nextflow/metadata/move_obsm_to_obs/main.nf index aebb7c34820..1f7c0cbb978 100644 --- a/target/nextflow/metadata/move_obsm_to_obs/main.nf +++ b/target/nextflow/metadata/move_obsm_to_obs/main.nf @@ -1,4 +1,4 @@ -// move_obsm_to_obs 0.12.3 +// move_obsm_to_obs 0.12.4 // // This wrapper script is auto-generated by viash 0.7.5 and is thus a derivative // work thereof. This software comes with ABSOLUTELY NO WARRANTY from Data @@ -27,7 +27,7 @@ thisConfig = processConfig(jsonSlurper.parseText('''{ "functionality" : { "name" : "move_obsm_to_obs", "namespace" : "metadata", - "version" : "0.12.3", + "version" : "0.12.4", "authors" : [ { "name" : "Dries Schaumont", @@ -270,9 +270,9 @@ thisConfig = processConfig(jsonSlurper.parseText('''{ "platform" : "nextflow", "output" : "/home/runner/work/openpipeline/openpipeline/target/nextflow/metadata/move_obsm_to_obs", "viash_version" : "0.7.5", - "git_commit" : "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f", + "git_commit" : "a075b9f384e200b357c4c85801062a980ddb3383", "git_remote" : "https://github.com/openpipelines-bio/openpipeline", - "git_tag" : "0.12.2-3-g827d483cf7" + "git_tag" : "0.12.3-3-ga075b9f384" } }''')) diff --git a/target/nextflow/metadata/move_obsm_to_obs/nextflow.config b/target/nextflow/metadata/move_obsm_to_obs/nextflow.config index 54d2a441792..f61d2671f2f 100644 --- a/target/nextflow/metadata/move_obsm_to_obs/nextflow.config +++ b/target/nextflow/metadata/move_obsm_to_obs/nextflow.config @@ -2,7 +2,7 @@ manifest { name = 'move_obsm_to_obs' mainScript = 'main.nf' nextflowVersion = '!>=20.12.1-edge' - version = '0.12.3' + version = '0.12.4' description = 'Move a matrix from .obsm to .obs. 
Newly created columns in .obs will \nbe created from the .obsm key suffixed with an underscore and the name of the columns\nof the specified .obsm matrix.\n' author = 'Dries Schaumont' } diff --git a/target/nextflow/metadata/move_obsm_to_obs/nextflow_schema.json b/target/nextflow/metadata/move_obsm_to_obs/nextflow_schema.json index 2378dbaca3d..221c9a0926f 100644 --- a/target/nextflow/metadata/move_obsm_to_obs/nextflow_schema.json +++ b/target/nextflow/metadata/move_obsm_to_obs/nextflow_schema.json @@ -1,90 +1,128 @@ { - "$schema": "http://json-schema.org/draft-07/schema", - "title": "move_obsm_to_obs", - "description": "Move a matrix from .obsm to .obs. Newly created columns in .obs will \nbe created from the .obsm key suffixed with an underscore and the name of the columns\nof the specified .obsm matrix.\n", +"$schema": "http://json-schema.org/draft-07/schema", +"title": "move_obsm_to_obs", +"description": "Move a matrix from .obsm to .obs. Newly created columns in .obs will \nbe created from the .obsm key suffixed with an underscore and the name of the columns\nof the specified .obsm matrix.\n", +"type": "object", +"definitions": { + + + + "mudata input" : { + "title": "MuData Input", "type": "object", - "definitions": { - "mudata input" : { - "title": "MuData Input", - "type": "object", - "description": "No description", - "properties": { - - "input": { - "type": "string", - "description": "Type: `file`, required, example: `input.h5mu`. Input h5mu file", - "help_text": "Type: `file`, required, example: `input.h5mu`. Input h5mu file" - }, - - "modality": { - "type": "string", - "description": "Type: `string`, default: `rna`. ", - "help_text": "Type: `string`, default: `rna`. ", - "default": "rna" - }, - - "obsm_key": { - "type": "string", - "description": "Type: `string`, required. Key of a data structure to move from `", - "help_text": "Type: `string`, required. Key of a data structure to move from `.obsm` to `.obs`." - } - - } - }, - "mudata output" : { - "title": "MuData Output", - "type": "object", - "description": "No description", - "properties": { - - "output": { - "type": "string", - "description": "Type: `file`, required, default: `$id.$key.output.h5mu`, example: `output.h5mu`. Output h5mu file", - "help_text": "Type: `file`, required, default: `$id.$key.output.h5mu`, example: `output.h5mu`. Output h5mu file.", - "default": "$id.$key.output.h5mu" - }, - - "output_compression": { - "type": "string", - "description": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. The compression format to be used on the output h5mu object", - "help_text": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. The compression format to be used on the output h5mu object.", - "enum": ["gzip", "lzf"] + "description": "No description", + "properties": { + + + "input": { + "type": + "string", + "description": "Type: `file`, required, example: `input.h5mu`. Input h5mu file", + "help_text": "Type: `file`, required, example: `input.h5mu`. Input h5mu file" - } - - } - }, - "nextflow input-output arguments" : { - "title": "Nextflow input-output arguments", - "type": "object", - "description": "Input/output parameters for Nextflow itself. Please note that both publishDir and publish_dir are supported but at least one has to be configured.", - "properties": { - - "publish_dir": { - "type": "string", - "description": "Type: `string`, required, example: `output/`. Path to an output directory", - "help_text": "Type: `string`, required, example: `output/`. Path to an output directory." 
- }, - - "param_list": { - "type": "string", - "description": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel", - "help_text": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel. A `param_list` can either be a list of maps, a csv file, a json file, a yaml file, or simply a yaml blob.\n\n* A list of maps (as-is) where the keys of each map corresponds to the arguments of the pipeline. Example: in a `nextflow.config` file: `param_list: [ [\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027], [\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027] ]`.\n* A csv file should have column names which correspond to the different arguments of this pipeline. Example: `--param_list data.csv` with columns `id,input`.\n* A json or a yaml file should be a list of maps, each of which has keys corresponding to the arguments of the pipeline. Example: `--param_list data.json` with contents `[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]`.\n* A yaml blob can also be passed directly as a string. Example: `--param_list \"[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]\"`.\n\nWhen passing a csv, json or yaml file, relative path names are relativized to the location of the parameter file. No relativation is performed when `param_list` is a list of maps (as-is) or a yaml blob.", - "hidden": true - } - - } - } + } + + + , + "modality": { + "type": + "string", + "description": "Type: `string`, default: `rna`. ", + "help_text": "Type: `string`, default: `rna`. " + , + "default": "rna" + } + + + , + "obsm_key": { + "type": + "string", + "description": "Type: `string`, required. Key of a data structure to move from `", + "help_text": "Type: `string`, required. Key of a data structure to move from `.obsm` to `.obs`." + + } + + +} +}, + + + "mudata output" : { + "title": "MuData Output", + "type": "object", + "description": "No description", + "properties": { + + + "output": { + "type": + "string", + "description": "Type: `file`, required, default: `$id.$key.output.h5mu`, example: `output.h5mu`. Output h5mu file", + "help_text": "Type: `file`, required, default: `$id.$key.output.h5mu`, example: `output.h5mu`. Output h5mu file." + , + "default": "$id.$key.output.h5mu" + } + + + , + "output_compression": { + "type": + "string", + "description": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. The compression format to be used on the output h5mu object", + "help_text": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. The compression format to be used on the output h5mu object.", + "enum": ["gzip", "lzf"] + + + } + + +} +}, + + + "nextflow input-output arguments" : { + "title": "Nextflow input-output arguments", + "type": "object", + "description": "Input/output parameters for Nextflow itself. Please note that both publishDir and publish_dir are supported but at least one has to be configured.", + "properties": { + + + "publish_dir": { + "type": + "string", + "description": "Type: `string`, required, example: `output/`. Path to an output directory", + "help_text": "Type: `string`, required, example: `output/`. Path to an output directory." 
+ + } + + + , + "param_list": { + "type": + "string", + "description": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel", + "help_text": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel. A `param_list` can either be a list of maps, a csv file, a json file, a yaml file, or simply a yaml blob.\n\n* A list of maps (as-is) where the keys of each map corresponds to the arguments of the pipeline. Example: in a `nextflow.config` file: `param_list: [ [\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027], [\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027] ]`.\n* A csv file should have column names which correspond to the different arguments of this pipeline. Example: `--param_list data.csv` with columns `id,input`.\n* A json or a yaml file should be a list of maps, each of which has keys corresponding to the arguments of the pipeline. Example: `--param_list data.json` with contents `[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]`.\n* A yaml blob can also be passed directly as a string. Example: `--param_list \"[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]\"`.\n\nWhen passing a csv, json or yaml file, relative path names are relativized to the location of the parameter file. No relativation is performed when `param_list` is a list of maps (as-is) or a yaml blob.", + "hidden": true + + } + + +} +} +}, +"allOf": [ + + { + "$ref": "#/definitions/mudata input" + }, + + { + "$ref": "#/definitions/mudata output" }, - "allOf": [ - { - "$ref": "#/definitions/mudata input" - }, - { - "$ref": "#/definitions/mudata output" - }, - { - "$ref": "#/definitions/nextflow input-output arguments" - } - ] + + { + "$ref": "#/definitions/nextflow input-output arguments" + } +] } diff --git a/target/nextflow/neighbors/bbknn/.config.vsh.yaml b/target/nextflow/neighbors/bbknn/.config.vsh.yaml index f77d5ae8390..ee202d7dd97 100644 --- a/target/nextflow/neighbors/bbknn/.config.vsh.yaml +++ b/target/nextflow/neighbors/bbknn/.config.vsh.yaml @@ -1,7 +1,7 @@ functionality: name: "bbknn" namespace: "neighbors" - version: "0.12.3" + version: "0.12.4" authors: - name: "Dries De Maeyer" roles: @@ -284,6 +284,6 @@ info: output: "/home/runner/work/openpipeline/openpipeline/target/nextflow/neighbors/bbknn" executable: "/home/runner/work/openpipeline/openpipeline/target/nextflow/neighbors/bbknn/bbknn" viash_version: "0.7.5" - git_commit: "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" + git_commit: "a075b9f384e200b357c4c85801062a980ddb3383" git_remote: "https://github.com/openpipelines-bio/openpipeline" - git_tag: "0.12.2-3-g827d483cf7" + git_tag: "0.12.3-3-ga075b9f384" diff --git a/target/nextflow/neighbors/bbknn/main.nf b/target/nextflow/neighbors/bbknn/main.nf index b93b848a879..d329e12a468 100644 --- a/target/nextflow/neighbors/bbknn/main.nf +++ b/target/nextflow/neighbors/bbknn/main.nf @@ -1,4 +1,4 @@ -// bbknn 0.12.3 +// bbknn 0.12.4 // // This wrapper script is auto-generated by viash 0.7.5 and is thus a derivative // work thereof. 
This software comes with ABSOLUTELY NO WARRANTY from Data @@ -28,7 +28,7 @@ thisConfig = processConfig(jsonSlurper.parseText('''{ "functionality" : { "name" : "bbknn", "namespace" : "neighbors", - "version" : "0.12.3", + "version" : "0.12.4", "authors" : [ { "name" : "Dries De Maeyer", @@ -374,9 +374,9 @@ thisConfig = processConfig(jsonSlurper.parseText('''{ "platform" : "nextflow", "output" : "/home/runner/work/openpipeline/openpipeline/target/nextflow/neighbors/bbknn", "viash_version" : "0.7.5", - "git_commit" : "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f", + "git_commit" : "a075b9f384e200b357c4c85801062a980ddb3383", "git_remote" : "https://github.com/openpipelines-bio/openpipeline", - "git_tag" : "0.12.2-3-g827d483cf7" + "git_tag" : "0.12.3-3-ga075b9f384" } }''')) diff --git a/target/nextflow/neighbors/bbknn/nextflow.config b/target/nextflow/neighbors/bbknn/nextflow.config index fc6fd3a76e2..71c6d999ff1 100644 --- a/target/nextflow/neighbors/bbknn/nextflow.config +++ b/target/nextflow/neighbors/bbknn/nextflow.config @@ -2,7 +2,7 @@ manifest { name = 'bbknn' mainScript = 'main.nf' nextflowVersion = '!>=20.12.1-edge' - version = '0.12.3' + version = '0.12.4' description = 'BBKNN network generation\n' author = 'Dries De Maeyer, Dries Schaumont' } diff --git a/target/nextflow/neighbors/bbknn/nextflow_schema.json b/target/nextflow/neighbors/bbknn/nextflow_schema.json index d0bf3dedb42..e8f17e69f40 100644 --- a/target/nextflow/neighbors/bbknn/nextflow_schema.json +++ b/target/nextflow/neighbors/bbknn/nextflow_schema.json @@ -1,128 +1,191 @@ { - "$schema": "http://json-schema.org/draft-07/schema", - "title": "bbknn", - "description": "BBKNN network generation\n", +"$schema": "http://json-schema.org/draft-07/schema", +"title": "bbknn", +"description": "BBKNN network generation\n", +"type": "object", +"definitions": { + + + + "arguments" : { + "title": "Arguments", "type": "object", - "definitions": { - "arguments" : { - "title": "Arguments", - "type": "object", - "description": "No description", - "properties": { - - "input": { - "type": "string", - "description": "Type: `file`, required. Input h5mu file", - "help_text": "Type: `file`, required. Input h5mu file" - }, - - "modality": { - "type": "string", - "description": "Type: `string`, default: `rna`. ", - "help_text": "Type: `string`, default: `rna`. ", - "default": "rna" - }, - - "obsm_input": { - "type": "string", - "description": "Type: `string`, default: `X_pca`. The dimensionality reduction in `", - "help_text": "Type: `string`, default: `X_pca`. The dimensionality reduction in `.obsm` to use for neighbour detection. Defaults to X_pca.", - "default": "X_pca" - }, - - "obs_batch": { - "type": "string", - "description": "Type: `string`, default: `batch`. ", - "help_text": "Type: `string`, default: `batch`. .obs column name discriminating between your batches.", - "default": "batch" - }, - - "output": { - "type": "string", - "description": "Type: `file`, required, default: `$id.$key.output.h5mu`, example: `output.h5mu`. Output ", - "help_text": "Type: `file`, required, default: `$id.$key.output.h5mu`, example: `output.h5mu`. Output .h5mu file.", - "default": "$id.$key.output.h5mu" - }, - - "output_compression": { - "type": "string", - "description": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. The compression format to be used on the output h5mu object", - "help_text": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. 
The compression format to be used on the output h5mu object.", - "enum": ["gzip", "lzf"] + "description": "No description", + "properties": { + + + "input": { + "type": + "string", + "description": "Type: `file`, required. Input h5mu file", + "help_text": "Type: `file`, required. Input h5mu file" - }, - - "uns_output": { - "type": "string", - "description": "Type: `string`, default: `neighbors`. Mandatory ", - "help_text": "Type: `string`, default: `neighbors`. Mandatory .uns slot to store various neighbor output objects.", - "default": "neighbors" - }, - - "obsp_distances": { - "type": "string", - "description": "Type: `string`, default: `distances`. In which ", - "help_text": "Type: `string`, default: `distances`. In which .obsp slot to store the distance matrix between the resulting neighbors.", - "default": "distances" - }, - - "obsp_connectivities": { - "type": "string", - "description": "Type: `string`, default: `connectivities`. In which ", - "help_text": "Type: `string`, default: `connectivities`. In which .obsp slot to store the connectivities matrix between the resulting neighbors.", - "default": "connectivities" - }, - - "n_neighbors_within_batch": { - "type": "integer", - "description": "Type: `integer`, default: `3`. How many top neighbours to report for each batch; total number of neighbours in the initial k-nearest-neighbours computation will be this number times the number of batches", - "help_text": "Type: `integer`, default: `3`. How many top neighbours to report for each batch; total number of neighbours in the initial k-nearest-neighbours computation will be this number times the number of batches.", - "default": "3" - }, - - "n_pcs": { - "type": "integer", - "description": "Type: `integer`, default: `50`. How many dimensions (in case of PCA, principal components) to use in the analysis", - "help_text": "Type: `integer`, default: `50`. How many dimensions (in case of PCA, principal components) to use in the analysis.", - "default": "50" - }, - - "n_trim": { - "type": "integer", - "description": "Type: `integer`. Trim the neighbours of each cell to these many top connectivities", - "help_text": "Type: `integer`. Trim the neighbours of each cell to these many top connectivities. May help with population independence and improve the tidiness of clustering. The lower the value the more independent the individual populations, at the cost of more conserved batch effect. If `None` (default), sets the parameter value automatically to 10 times `neighbors_within_batch` times the number of batches. Set to 0 to skip." - } - - } - }, - "nextflow input-output arguments" : { - "title": "Nextflow input-output arguments", - "type": "object", - "description": "Input/output parameters for Nextflow itself. Please note that both publishDir and publish_dir are supported but at least one has to be configured.", - "properties": { - - "publish_dir": { - "type": "string", - "description": "Type: `string`, required, example: `output/`. Path to an output directory", - "help_text": "Type: `string`, required, example: `output/`. Path to an output directory." - }, - - "param_list": { - "type": "string", - "description": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel", - "help_text": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel. 
A `param_list` can either be a list of maps, a csv file, a json file, a yaml file, or simply a yaml blob.\n\n* A list of maps (as-is) where the keys of each map corresponds to the arguments of the pipeline. Example: in a `nextflow.config` file: `param_list: [ [\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027], [\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027] ]`.\n* A csv file should have column names which correspond to the different arguments of this pipeline. Example: `--param_list data.csv` with columns `id,input`.\n* A json or a yaml file should be a list of maps, each of which has keys corresponding to the arguments of the pipeline. Example: `--param_list data.json` with contents `[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]`.\n* A yaml blob can also be passed directly as a string. Example: `--param_list \"[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]\"`.\n\nWhen passing a csv, json or yaml file, relative path names are relativized to the location of the parameter file. No relativation is performed when `param_list` is a list of maps (as-is) or a yaml blob.", - "hidden": true - } - - } - } + } + + + , + "modality": { + "type": + "string", + "description": "Type: `string`, default: `rna`. ", + "help_text": "Type: `string`, default: `rna`. " + , + "default": "rna" + } + + + , + "obsm_input": { + "type": + "string", + "description": "Type: `string`, default: `X_pca`. The dimensionality reduction in `", + "help_text": "Type: `string`, default: `X_pca`. The dimensionality reduction in `.obsm` to use for neighbour detection. Defaults to X_pca." + , + "default": "X_pca" + } + + + , + "obs_batch": { + "type": + "string", + "description": "Type: `string`, default: `batch`. ", + "help_text": "Type: `string`, default: `batch`. .obs column name discriminating between your batches." + , + "default": "batch" + } + + + , + "output": { + "type": + "string", + "description": "Type: `file`, required, default: `$id.$key.output.h5mu`, example: `output.h5mu`. Output ", + "help_text": "Type: `file`, required, default: `$id.$key.output.h5mu`, example: `output.h5mu`. Output .h5mu file." + , + "default": "$id.$key.output.h5mu" + } + + + , + "output_compression": { + "type": + "string", + "description": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. The compression format to be used on the output h5mu object", + "help_text": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. The compression format to be used on the output h5mu object.", + "enum": ["gzip", "lzf"] + + + } + + + , + "uns_output": { + "type": + "string", + "description": "Type: `string`, default: `neighbors`. Mandatory ", + "help_text": "Type: `string`, default: `neighbors`. Mandatory .uns slot to store various neighbor output objects." + , + "default": "neighbors" + } + + + , + "obsp_distances": { + "type": + "string", + "description": "Type: `string`, default: `distances`. In which ", + "help_text": "Type: `string`, default: `distances`. In which .obsp slot to store the distance matrix between the resulting neighbors." + , + "default": "distances" + } + + + , + "obsp_connectivities": { + "type": + "string", + "description": "Type: `string`, default: `connectivities`. In which ", + "help_text": "Type: `string`, default: `connectivities`. 
In which .obsp slot to store the connectivities matrix between the resulting neighbors." + , + "default": "connectivities" + } + + + , + "n_neighbors_within_batch": { + "type": + "integer", + "description": "Type: `integer`, default: `3`. How many top neighbours to report for each batch; total number of neighbours in the initial k-nearest-neighbours computation will be this number times the number of batches", + "help_text": "Type: `integer`, default: `3`. How many top neighbours to report for each batch; total number of neighbours in the initial k-nearest-neighbours computation will be this number times the number of batches." + , + "default": "3" + } + + + , + "n_pcs": { + "type": + "integer", + "description": "Type: `integer`, default: `50`. How many dimensions (in case of PCA, principal components) to use in the analysis", + "help_text": "Type: `integer`, default: `50`. How many dimensions (in case of PCA, principal components) to use in the analysis." + , + "default": "50" + } + + + , + "n_trim": { + "type": + "integer", + "description": "Type: `integer`. Trim the neighbours of each cell to these many top connectivities", + "help_text": "Type: `integer`. Trim the neighbours of each cell to these many top connectivities. May help with population independence and improve the tidiness of clustering. The lower the value the more independent the individual populations, at the cost of more conserved batch effect. If `None` (default), sets the parameter value automatically to 10 times `neighbors_within_batch` times the number of batches. Set to 0 to skip." + + } + + +} +}, + + + "nextflow input-output arguments" : { + "title": "Nextflow input-output arguments", + "type": "object", + "description": "Input/output parameters for Nextflow itself. Please note that both publishDir and publish_dir are supported but at least one has to be configured.", + "properties": { + + + "publish_dir": { + "type": + "string", + "description": "Type: `string`, required, example: `output/`. Path to an output directory", + "help_text": "Type: `string`, required, example: `output/`. Path to an output directory." + + } + + + , + "param_list": { + "type": + "string", + "description": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel", + "help_text": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel. A `param_list` can either be a list of maps, a csv file, a json file, a yaml file, or simply a yaml blob.\n\n* A list of maps (as-is) where the keys of each map corresponds to the arguments of the pipeline. Example: in a `nextflow.config` file: `param_list: [ [\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027], [\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027] ]`.\n* A csv file should have column names which correspond to the different arguments of this pipeline. Example: `--param_list data.csv` with columns `id,input`.\n* A json or a yaml file should be a list of maps, each of which has keys corresponding to the arguments of the pipeline. Example: `--param_list data.json` with contents `[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]`.\n* A yaml blob can also be passed directly as a string. 
Example: `--param_list \"[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]\"`.\n\nWhen passing a csv, json or yaml file, relative path names are relativized to the location of the parameter file. No relativation is performed when `param_list` is a list of maps (as-is) or a yaml blob.", + "hidden": true + + } + + +} +} +}, +"allOf": [ + + { + "$ref": "#/definitions/arguments" }, - "allOf": [ - { - "$ref": "#/definitions/arguments" - }, - { - "$ref": "#/definitions/nextflow input-output arguments" - } - ] + + { + "$ref": "#/definitions/nextflow input-output arguments" + } +] } diff --git a/target/nextflow/neighbors/find_neighbors/.config.vsh.yaml b/target/nextflow/neighbors/find_neighbors/.config.vsh.yaml index 5a5a678ec3a..72154030a36 100644 --- a/target/nextflow/neighbors/find_neighbors/.config.vsh.yaml +++ b/target/nextflow/neighbors/find_neighbors/.config.vsh.yaml @@ -1,7 +1,7 @@ functionality: name: "find_neighbors" namespace: "neighbors" - version: "0.12.3" + version: "0.12.4" authors: - name: "Dries De Maeyer" roles: @@ -304,6 +304,6 @@ info: output: "/home/runner/work/openpipeline/openpipeline/target/nextflow/neighbors/find_neighbors" executable: "/home/runner/work/openpipeline/openpipeline/target/nextflow/neighbors/find_neighbors/find_neighbors" viash_version: "0.7.5" - git_commit: "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" + git_commit: "a075b9f384e200b357c4c85801062a980ddb3383" git_remote: "https://github.com/openpipelines-bio/openpipeline" - git_tag: "0.12.2-3-g827d483cf7" + git_tag: "0.12.3-3-ga075b9f384" diff --git a/target/nextflow/neighbors/find_neighbors/main.nf b/target/nextflow/neighbors/find_neighbors/main.nf index 413261c9451..6fe2d2e84d2 100644 --- a/target/nextflow/neighbors/find_neighbors/main.nf +++ b/target/nextflow/neighbors/find_neighbors/main.nf @@ -1,4 +1,4 @@ -// find_neighbors 0.12.3 +// find_neighbors 0.12.4 // // This wrapper script is auto-generated by viash 0.7.5 and is thus a derivative // work thereof. 
This software comes with ABSOLUTELY NO WARRANTY from Data @@ -28,7 +28,7 @@ thisConfig = processConfig(jsonSlurper.parseText('''{ "functionality" : { "name" : "find_neighbors", "namespace" : "neighbors", - "version" : "0.12.3", + "version" : "0.12.4", "authors" : [ { "name" : "Dries De Maeyer", @@ -399,9 +399,9 @@ thisConfig = processConfig(jsonSlurper.parseText('''{ "platform" : "nextflow", "output" : "/home/runner/work/openpipeline/openpipeline/target/nextflow/neighbors/find_neighbors", "viash_version" : "0.7.5", - "git_commit" : "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f", + "git_commit" : "a075b9f384e200b357c4c85801062a980ddb3383", "git_remote" : "https://github.com/openpipelines-bio/openpipeline", - "git_tag" : "0.12.2-3-g827d483cf7" + "git_tag" : "0.12.3-3-ga075b9f384" } }''')) diff --git a/target/nextflow/neighbors/find_neighbors/nextflow.config b/target/nextflow/neighbors/find_neighbors/nextflow.config index 18042a2871e..cc69889dff8 100644 --- a/target/nextflow/neighbors/find_neighbors/nextflow.config +++ b/target/nextflow/neighbors/find_neighbors/nextflow.config @@ -2,7 +2,7 @@ manifest { name = 'find_neighbors' mainScript = 'main.nf' nextflowVersion = '!>=20.12.1-edge' - version = '0.12.3' + version = '0.12.4' description = 'Compute a neighborhood graph of observations [McInnes18].\n\nThe neighbor search efficiency of this heavily relies on UMAP [McInnes18], which also provides a method for estimating connectivities of data points - the connectivity of the manifold (method==\'umap\'). If method==\'gauss\', connectivities are computed according to [Coifman05], in the adaption of [Haghverdi16].\n' author = 'Dries De Maeyer, Robrecht Cannoodt' } diff --git a/target/nextflow/neighbors/find_neighbors/nextflow_schema.json b/target/nextflow/neighbors/find_neighbors/nextflow_schema.json index d88b61109ca..abc7b48efa6 100644 --- a/target/nextflow/neighbors/find_neighbors/nextflow_schema.json +++ b/target/nextflow/neighbors/find_neighbors/nextflow_schema.json @@ -1,124 +1,183 @@ { - "$schema": "http://json-schema.org/draft-07/schema", - "title": "find_neighbors", - "description": "Compute a neighborhood graph of observations [McInnes18].\n\nThe neighbor search efficiency of this heavily relies on UMAP [McInnes18], which also provides a method for estimating connectivities of data points - the connectivity of the manifold (method==\u0027umap\u0027). If method==\u0027gauss\u0027, connectivities are computed according to [Coifman05], in the adaption of [Haghverdi16].\n", +"$schema": "http://json-schema.org/draft-07/schema", +"title": "find_neighbors", +"description": "Compute a neighborhood graph of observations [McInnes18].\n\nThe neighbor search efficiency of this heavily relies on UMAP [McInnes18], which also provides a method for estimating connectivities of data points - the connectivity of the manifold (method==\u0027umap\u0027). If method==\u0027gauss\u0027, connectivities are computed according to [Coifman05], in the adaption of [Haghverdi16].\n", +"type": "object", +"definitions": { + + + + "arguments" : { + "title": "Arguments", "type": "object", - "definitions": { - "arguments" : { - "title": "Arguments", - "type": "object", - "description": "No description", - "properties": { - - "input": { - "type": "string", - "description": "Type: `file`, required, example: `input.h5mu`. Input h5mu file", - "help_text": "Type: `file`, required, example: `input.h5mu`. Input h5mu file" - }, - - "modality": { - "type": "string", - "description": "Type: `string`, default: `rna`. 
", - "help_text": "Type: `string`, default: `rna`. ", - "default": "rna" - }, - - "obsm_input": { - "type": "string", - "description": "Type: `string`, default: `X_pca`. Which ", - "help_text": "Type: `string`, default: `X_pca`. Which .obsm slot to use as a starting PCA embedding.", - "default": "X_pca" - }, - - "output": { - "type": "string", - "description": "Type: `file`, default: `$id.$key.output.h5mu`, example: `output.h5mu`. Output h5mu file containing the found neighbors", - "help_text": "Type: `file`, default: `$id.$key.output.h5mu`, example: `output.h5mu`. Output h5mu file containing the found neighbors.", - "default": "$id.$key.output.h5mu" - }, - - "output_compression": { - "type": "string", - "description": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. The compression format to be used on the output h5mu object", - "help_text": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. The compression format to be used on the output h5mu object.", - "enum": ["gzip", "lzf"] + "description": "No description", + "properties": { + + + "input": { + "type": + "string", + "description": "Type: `file`, required, example: `input.h5mu`. Input h5mu file", + "help_text": "Type: `file`, required, example: `input.h5mu`. Input h5mu file" - }, - - "uns_output": { - "type": "string", - "description": "Type: `string`, default: `neighbors`. Mandatory ", - "help_text": "Type: `string`, default: `neighbors`. Mandatory .uns slot to store various neighbor output objects.", - "default": "neighbors" - }, - - "obsp_distances": { - "type": "string", - "description": "Type: `string`, default: `distances`. In which ", - "help_text": "Type: `string`, default: `distances`. In which .obsp slot to store the distance matrix between the resulting neighbors.", - "default": "distances" - }, - - "obsp_connectivities": { - "type": "string", - "description": "Type: `string`, default: `connectivities`. In which ", - "help_text": "Type: `string`, default: `connectivities`. In which .obsp slot to store the connectivities matrix between the resulting neighbors.", - "default": "connectivities" - }, - - "metric": { - "type": "string", - "description": "Type: `string`, default: `euclidean`, choices: ``cityblock`, `cosine`, `euclidean`, `l1`, `l2`, `manhattan`, `braycurtis`, `canberra`, `chebyshev`, `correlation`, `dice`, `hamming`, `jaccard`, `kulsinski`, `mahalanobis`, `minkowski`, `rogerstanimoto`, `russellrao`, `seuclidean`, `sokalmichener`, `sokalsneath`, `sqeuclidean`, `yule``. The distance metric to be used in the generation of the nearest neighborhood network", - "help_text": "Type: `string`, default: `euclidean`, choices: ``cityblock`, `cosine`, `euclidean`, `l1`, `l2`, `manhattan`, `braycurtis`, `canberra`, `chebyshev`, `correlation`, `dice`, `hamming`, `jaccard`, `kulsinski`, `mahalanobis`, `minkowski`, `rogerstanimoto`, `russellrao`, `seuclidean`, `sokalmichener`, `sokalsneath`, `sqeuclidean`, `yule``. The distance metric to be used in the generation of the nearest neighborhood network.", - "enum": ["cityblock", "cosine", "euclidean", "l1", "l2", "manhattan", "braycurtis", "canberra", "chebyshev", "correlation", "dice", "hamming", "jaccard", "kulsinski", "mahalanobis", "minkowski", "rogerstanimoto", "russellrao", "seuclidean", "sokalmichener", "sokalsneath", "sqeuclidean", "yule"] + } + + + , + "modality": { + "type": + "string", + "description": "Type: `string`, default: `rna`. ", + "help_text": "Type: `string`, default: `rna`. 
" , - "default": "euclidean" - }, - - "num_neighbors": { - "type": "integer", - "description": "Type: `integer`, default: `15`. The size of local neighborhood (in terms of number of neighboring data points) used for manifold approximation", - "help_text": "Type: `integer`, default: `15`. The size of local neighborhood (in terms of number of neighboring data points) used for manifold approximation. Larger values result in more global views of the manifold, while smaller values result in more local data being preserved. In general values should be in the range 2 to 100. If knn is True, number of nearest neighbors to be searched. If knn is False, a Gaussian kernel width is set to the distance of the n_neighbors neighbor.", - "default": "15" - }, - - "seed": { - "type": "integer", - "description": "Type: `integer`, default: `0`. A random seed", - "help_text": "Type: `integer`, default: `0`. A random seed.", - "default": "0" - } - - } - }, - "nextflow input-output arguments" : { - "title": "Nextflow input-output arguments", - "type": "object", - "description": "Input/output parameters for Nextflow itself. Please note that both publishDir and publish_dir are supported but at least one has to be configured.", - "properties": { - - "publish_dir": { - "type": "string", - "description": "Type: `string`, required, example: `output/`. Path to an output directory", - "help_text": "Type: `string`, required, example: `output/`. Path to an output directory." - }, - - "param_list": { - "type": "string", - "description": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel", - "help_text": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel. A `param_list` can either be a list of maps, a csv file, a json file, a yaml file, or simply a yaml blob.\n\n* A list of maps (as-is) where the keys of each map corresponds to the arguments of the pipeline. Example: in a `nextflow.config` file: `param_list: [ [\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027], [\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027] ]`.\n* A csv file should have column names which correspond to the different arguments of this pipeline. Example: `--param_list data.csv` with columns `id,input`.\n* A json or a yaml file should be a list of maps, each of which has keys corresponding to the arguments of the pipeline. Example: `--param_list data.json` with contents `[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]`.\n* A yaml blob can also be passed directly as a string. Example: `--param_list \"[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]\"`.\n\nWhen passing a csv, json or yaml file, relative path names are relativized to the location of the parameter file. No relativation is performed when `param_list` is a list of maps (as-is) or a yaml blob.", - "hidden": true - } - - } - } + "default": "rna" + } + + + , + "obsm_input": { + "type": + "string", + "description": "Type: `string`, default: `X_pca`. Which ", + "help_text": "Type: `string`, default: `X_pca`. Which .obsm slot to use as a starting PCA embedding." 
+ , + "default": "X_pca" + } + + + , + "output": { + "type": + "string", + "description": "Type: `file`, default: `$id.$key.output.h5mu`, example: `output.h5mu`. Output h5mu file containing the found neighbors", + "help_text": "Type: `file`, default: `$id.$key.output.h5mu`, example: `output.h5mu`. Output h5mu file containing the found neighbors." + , + "default": "$id.$key.output.h5mu" + } + + + , + "output_compression": { + "type": + "string", + "description": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. The compression format to be used on the output h5mu object", + "help_text": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. The compression format to be used on the output h5mu object.", + "enum": ["gzip", "lzf"] + + + } + + + , + "uns_output": { + "type": + "string", + "description": "Type: `string`, default: `neighbors`. Mandatory ", + "help_text": "Type: `string`, default: `neighbors`. Mandatory .uns slot to store various neighbor output objects." + , + "default": "neighbors" + } + + + , + "obsp_distances": { + "type": + "string", + "description": "Type: `string`, default: `distances`. In which ", + "help_text": "Type: `string`, default: `distances`. In which .obsp slot to store the distance matrix between the resulting neighbors." + , + "default": "distances" + } + + + , + "obsp_connectivities": { + "type": + "string", + "description": "Type: `string`, default: `connectivities`. In which ", + "help_text": "Type: `string`, default: `connectivities`. In which .obsp slot to store the connectivities matrix between the resulting neighbors." + , + "default": "connectivities" + } + + + , + "metric": { + "type": + "string", + "description": "Type: `string`, default: `euclidean`, choices: ``cityblock`, `cosine`, `euclidean`, `l1`, `l2`, `manhattan`, `braycurtis`, `canberra`, `chebyshev`, `correlation`, `dice`, `hamming`, `jaccard`, `kulsinski`, `mahalanobis`, `minkowski`, `rogerstanimoto`, `russellrao`, `seuclidean`, `sokalmichener`, `sokalsneath`, `sqeuclidean`, `yule``. The distance metric to be used in the generation of the nearest neighborhood network", + "help_text": "Type: `string`, default: `euclidean`, choices: ``cityblock`, `cosine`, `euclidean`, `l1`, `l2`, `manhattan`, `braycurtis`, `canberra`, `chebyshev`, `correlation`, `dice`, `hamming`, `jaccard`, `kulsinski`, `mahalanobis`, `minkowski`, `rogerstanimoto`, `russellrao`, `seuclidean`, `sokalmichener`, `sokalsneath`, `sqeuclidean`, `yule``. The distance metric to be used in the generation of the nearest neighborhood network.", + "enum": ["cityblock", "cosine", "euclidean", "l1", "l2", "manhattan", "braycurtis", "canberra", "chebyshev", "correlation", "dice", "hamming", "jaccard", "kulsinski", "mahalanobis", "minkowski", "rogerstanimoto", "russellrao", "seuclidean", "sokalmichener", "sokalsneath", "sqeuclidean", "yule"] + + , + "default": "euclidean" + } + + + , + "num_neighbors": { + "type": + "integer", + "description": "Type: `integer`, default: `15`. The size of local neighborhood (in terms of number of neighboring data points) used for manifold approximation", + "help_text": "Type: `integer`, default: `15`. The size of local neighborhood (in terms of number of neighboring data points) used for manifold approximation. Larger values result in more global views of the manifold, while smaller values result in more local data being preserved. In general values should be in the range 2 to 100. If knn is True, number of nearest neighbors to be searched. 
If knn is False, a Gaussian kernel width is set to the distance of the n_neighbors neighbor." + , + "default": "15" + } + + + , + "seed": { + "type": + "integer", + "description": "Type: `integer`, default: `0`. A random seed", + "help_text": "Type: `integer`, default: `0`. A random seed." + , + "default": "0" + } + + +} +}, + + + "nextflow input-output arguments" : { + "title": "Nextflow input-output arguments", + "type": "object", + "description": "Input/output parameters for Nextflow itself. Please note that both publishDir and publish_dir are supported but at least one has to be configured.", + "properties": { + + + "publish_dir": { + "type": + "string", + "description": "Type: `string`, required, example: `output/`. Path to an output directory", + "help_text": "Type: `string`, required, example: `output/`. Path to an output directory." + + } + + + , + "param_list": { + "type": + "string", + "description": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel", + "help_text": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel. A `param_list` can either be a list of maps, a csv file, a json file, a yaml file, or simply a yaml blob.\n\n* A list of maps (as-is) where the keys of each map corresponds to the arguments of the pipeline. Example: in a `nextflow.config` file: `param_list: [ [\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027], [\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027] ]`.\n* A csv file should have column names which correspond to the different arguments of this pipeline. Example: `--param_list data.csv` with columns `id,input`.\n* A json or a yaml file should be a list of maps, each of which has keys corresponding to the arguments of the pipeline. Example: `--param_list data.json` with contents `[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]`.\n* A yaml blob can also be passed directly as a string. Example: `--param_list \"[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]\"`.\n\nWhen passing a csv, json or yaml file, relative path names are relativized to the location of the parameter file. 
No relativation is performed when `param_list` is a list of maps (as-is) or a yaml blob.", + "hidden": true + + } + + +} +} +}, +"allOf": [ + + { + "$ref": "#/definitions/arguments" }, - "allOf": [ - { - "$ref": "#/definitions/arguments" - }, - { - "$ref": "#/definitions/nextflow input-output arguments" - } - ] + + { + "$ref": "#/definitions/nextflow input-output arguments" + } +] } diff --git a/target/nextflow/process_10xh5/filter_10xh5/.config.vsh.yaml b/target/nextflow/process_10xh5/filter_10xh5/.config.vsh.yaml index 5a0d1dbef6d..812f4ffb213 100644 --- a/target/nextflow/process_10xh5/filter_10xh5/.config.vsh.yaml +++ b/target/nextflow/process_10xh5/filter_10xh5/.config.vsh.yaml @@ -1,7 +1,7 @@ functionality: name: "filter_10xh5" namespace: "process_10xh5" - version: "0.12.3" + version: "0.12.4" authors: - name: "Robrecht Cannoodt" roles: @@ -190,6 +190,6 @@ info: output: "/home/runner/work/openpipeline/openpipeline/target/nextflow/process_10xh5/filter_10xh5" executable: "/home/runner/work/openpipeline/openpipeline/target/nextflow/process_10xh5/filter_10xh5/filter_10xh5" viash_version: "0.7.5" - git_commit: "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" + git_commit: "a075b9f384e200b357c4c85801062a980ddb3383" git_remote: "https://github.com/openpipelines-bio/openpipeline" - git_tag: "0.12.2-3-g827d483cf7" + git_tag: "0.12.3-3-ga075b9f384" diff --git a/target/nextflow/process_10xh5/filter_10xh5/main.nf b/target/nextflow/process_10xh5/filter_10xh5/main.nf index faff379d179..593bc46b812 100644 --- a/target/nextflow/process_10xh5/filter_10xh5/main.nf +++ b/target/nextflow/process_10xh5/filter_10xh5/main.nf @@ -1,4 +1,4 @@ -// filter_10xh5 0.12.3 +// filter_10xh5 0.12.4 // // This wrapper script is auto-generated by viash 0.7.5 and is thus a derivative // work thereof. 
This software comes with ABSOLUTELY NO WARRANTY from Data @@ -27,7 +27,7 @@ thisConfig = processConfig(jsonSlurper.parseText('''{ "functionality" : { "name" : "filter_10xh5", "namespace" : "process_10xh5", - "version" : "0.12.3", + "version" : "0.12.4", "authors" : [ { "name" : "Robrecht Cannoodt", @@ -267,9 +267,9 @@ thisConfig = processConfig(jsonSlurper.parseText('''{ "platform" : "nextflow", "output" : "/home/runner/work/openpipeline/openpipeline/target/nextflow/process_10xh5/filter_10xh5", "viash_version" : "0.7.5", - "git_commit" : "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f", + "git_commit" : "a075b9f384e200b357c4c85801062a980ddb3383", "git_remote" : "https://github.com/openpipelines-bio/openpipeline", - "git_tag" : "0.12.2-3-g827d483cf7" + "git_tag" : "0.12.3-3-ga075b9f384" } }''')) diff --git a/target/nextflow/process_10xh5/filter_10xh5/nextflow.config b/target/nextflow/process_10xh5/filter_10xh5/nextflow.config index a1318b935b8..08ba7c0cadd 100644 --- a/target/nextflow/process_10xh5/filter_10xh5/nextflow.config +++ b/target/nextflow/process_10xh5/filter_10xh5/nextflow.config @@ -2,7 +2,7 @@ manifest { name = 'filter_10xh5' mainScript = 'main.nf' nextflowVersion = '!>=20.12.1-edge' - version = '0.12.3' + version = '0.12.4' description = 'Filter a 10x h5 dataset.\n' author = 'Robrecht Cannoodt' } diff --git a/target/nextflow/process_10xh5/filter_10xh5/nextflow_schema.json b/target/nextflow/process_10xh5/filter_10xh5/nextflow_schema.json index 30490b5589a..3bce03774d3 100644 --- a/target/nextflow/process_10xh5/filter_10xh5/nextflow_schema.json +++ b/target/nextflow/process_10xh5/filter_10xh5/nextflow_schema.json @@ -1,85 +1,124 @@ { - "$schema": "http://json-schema.org/draft-07/schema", - "title": "filter_10xh5", - "description": "Filter a 10x h5 dataset.\n", +"$schema": "http://json-schema.org/draft-07/schema", +"title": "filter_10xh5", +"description": "Filter a 10x h5 dataset.\n", +"type": "object", +"definitions": { + + + + "arguments" : { + "title": "Arguments", "type": "object", - "definitions": { - "arguments" : { - "title": "Arguments", - "type": "object", - "description": "No description", - "properties": { - - "input": { - "type": "string", - "description": "Type: `file`, required, example: `pbmc_1k_protein_v3_raw_feature_bc_matrix.h5`. An h5 file from the 10x genomics website", - "help_text": "Type: `file`, required, example: `pbmc_1k_protein_v3_raw_feature_bc_matrix.h5`. An h5 file from the 10x genomics website." - }, - - "output": { - "type": "string", - "description": "Type: `file`, required, default: `$id.$key.output.h5`, example: `pbmc_1k_protein_v3_raw_feature_bc_matrix_filtered.h5`. Output h5 file", - "help_text": "Type: `file`, required, default: `$id.$key.output.h5`, example: `pbmc_1k_protein_v3_raw_feature_bc_matrix_filtered.h5`. Output h5 file.", - "default": "$id.$key.output.h5" - }, - - "min_library_size": { - "type": "integer", - "description": "Type: `integer`, default: `0`. Minimum library size", - "help_text": "Type: `integer`, default: `0`. Minimum library size.", - "default": "0" - }, - - "min_cells_per_gene": { - "type": "integer", - "description": "Type: `integer`, default: `0`. Minimum number of cells per gene", - "help_text": "Type: `integer`, default: `0`. Minimum number of cells per gene.", - "default": "0" - }, - - "keep_feature_types": { - "type": "string", - "description": "Type: List of `string`, example: `Antibody Capture`, multiple_sep: `\":\"`. 
Specify which feature types will never be filtered out", - "help_text": "Type: List of `string`, example: `Antibody Capture`, multiple_sep: `\":\"`. Specify which feature types will never be filtered out" - }, - - "verbose": { - "type": "boolean", - "description": "Type: `boolean_true`, default: `false`. Increase verbosity", - "help_text": "Type: `boolean_true`, default: `false`. Increase verbosity", - "default": "False" - } - - } - }, - "nextflow input-output arguments" : { - "title": "Nextflow input-output arguments", - "type": "object", - "description": "Input/output parameters for Nextflow itself. Please note that both publishDir and publish_dir are supported but at least one has to be configured.", - "properties": { - - "publish_dir": { - "type": "string", - "description": "Type: `string`, required, example: `output/`. Path to an output directory", - "help_text": "Type: `string`, required, example: `output/`. Path to an output directory." - }, - - "param_list": { - "type": "string", - "description": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel", - "help_text": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel. A `param_list` can either be a list of maps, a csv file, a json file, a yaml file, or simply a yaml blob.\n\n* A list of maps (as-is) where the keys of each map corresponds to the arguments of the pipeline. Example: in a `nextflow.config` file: `param_list: [ [\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027], [\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027] ]`.\n* A csv file should have column names which correspond to the different arguments of this pipeline. Example: `--param_list data.csv` with columns `id,input`.\n* A json or a yaml file should be a list of maps, each of which has keys corresponding to the arguments of the pipeline. Example: `--param_list data.json` with contents `[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]`.\n* A yaml blob can also be passed directly as a string. Example: `--param_list \"[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]\"`.\n\nWhen passing a csv, json or yaml file, relative path names are relativized to the location of the parameter file. No relativation is performed when `param_list` is a list of maps (as-is) or a yaml blob.", - "hidden": true - } - - } - } + "description": "No description", + "properties": { + + + "input": { + "type": + "string", + "description": "Type: `file`, required, example: `pbmc_1k_protein_v3_raw_feature_bc_matrix.h5`. An h5 file from the 10x genomics website", + "help_text": "Type: `file`, required, example: `pbmc_1k_protein_v3_raw_feature_bc_matrix.h5`. An h5 file from the 10x genomics website." + + } + + + , + "output": { + "type": + "string", + "description": "Type: `file`, required, default: `$id.$key.output.h5`, example: `pbmc_1k_protein_v3_raw_feature_bc_matrix_filtered.h5`. Output h5 file", + "help_text": "Type: `file`, required, default: `$id.$key.output.h5`, example: `pbmc_1k_protein_v3_raw_feature_bc_matrix_filtered.h5`. Output h5 file." + , + "default": "$id.$key.output.h5" + } + + + , + "min_library_size": { + "type": + "integer", + "description": "Type: `integer`, default: `0`. 
Minimum library size", + "help_text": "Type: `integer`, default: `0`. Minimum library size." + , + "default": "0" + } + + + , + "min_cells_per_gene": { + "type": + "integer", + "description": "Type: `integer`, default: `0`. Minimum number of cells per gene", + "help_text": "Type: `integer`, default: `0`. Minimum number of cells per gene." + , + "default": "0" + } + + + , + "keep_feature_types": { + "type": + "string", + "description": "Type: List of `string`, example: `Antibody Capture`, multiple_sep: `\":\"`. Specify which feature types will never be filtered out", + "help_text": "Type: List of `string`, example: `Antibody Capture`, multiple_sep: `\":\"`. Specify which feature types will never be filtered out" + + } + + + , + "verbose": { + "type": + "boolean", + "description": "Type: `boolean_true`, default: `false`. Increase verbosity", + "help_text": "Type: `boolean_true`, default: `false`. Increase verbosity" + , + "default": "False" + } + + +} +}, + + + "nextflow input-output arguments" : { + "title": "Nextflow input-output arguments", + "type": "object", + "description": "Input/output parameters for Nextflow itself. Please note that both publishDir and publish_dir are supported but at least one has to be configured.", + "properties": { + + + "publish_dir": { + "type": + "string", + "description": "Type: `string`, required, example: `output/`. Path to an output directory", + "help_text": "Type: `string`, required, example: `output/`. Path to an output directory." + + } + + + , + "param_list": { + "type": + "string", + "description": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel", + "help_text": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel. A `param_list` can either be a list of maps, a csv file, a json file, a yaml file, or simply a yaml blob.\n\n* A list of maps (as-is) where the keys of each map corresponds to the arguments of the pipeline. Example: in a `nextflow.config` file: `param_list: [ [\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027], [\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027] ]`.\n* A csv file should have column names which correspond to the different arguments of this pipeline. Example: `--param_list data.csv` with columns `id,input`.\n* A json or a yaml file should be a list of maps, each of which has keys corresponding to the arguments of the pipeline. Example: `--param_list data.json` with contents `[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]`.\n* A yaml blob can also be passed directly as a string. Example: `--param_list \"[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]\"`.\n\nWhen passing a csv, json or yaml file, relative path names are relativized to the location of the parameter file. 
No relativation is performed when `param_list` is a list of maps (as-is) or a yaml blob.", + "hidden": true + + } + + +} +} +}, +"allOf": [ + + { + "$ref": "#/definitions/arguments" }, - "allOf": [ - { - "$ref": "#/definitions/arguments" - }, - { - "$ref": "#/definitions/nextflow input-output arguments" - } - ] + + { + "$ref": "#/definitions/nextflow input-output arguments" + } +] } diff --git a/target/nextflow/qc/calculate_qc_metrics/.config.vsh.yaml b/target/nextflow/qc/calculate_qc_metrics/.config.vsh.yaml index ff0ff847a5a..8cb769d646f 100644 --- a/target/nextflow/qc/calculate_qc_metrics/.config.vsh.yaml +++ b/target/nextflow/qc/calculate_qc_metrics/.config.vsh.yaml @@ -1,7 +1,7 @@ functionality: name: "calculate_qc_metrics" namespace: "qc" - version: "0.12.3" + version: "0.12.4" authors: - name: "Dries Schaumont" roles: @@ -230,6 +230,6 @@ info: output: "/home/runner/work/openpipeline/openpipeline/target/nextflow/qc/calculate_qc_metrics" executable: "/home/runner/work/openpipeline/openpipeline/target/nextflow/qc/calculate_qc_metrics/calculate_qc_metrics" viash_version: "0.7.5" - git_commit: "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" + git_commit: "a075b9f384e200b357c4c85801062a980ddb3383" git_remote: "https://github.com/openpipelines-bio/openpipeline" - git_tag: "0.12.2-3-g827d483cf7" + git_tag: "0.12.3-3-ga075b9f384" diff --git a/target/nextflow/qc/calculate_qc_metrics/main.nf b/target/nextflow/qc/calculate_qc_metrics/main.nf index b884cef0609..b5074e7c89b 100644 --- a/target/nextflow/qc/calculate_qc_metrics/main.nf +++ b/target/nextflow/qc/calculate_qc_metrics/main.nf @@ -1,4 +1,4 @@ -// calculate_qc_metrics 0.12.3 +// calculate_qc_metrics 0.12.4 // // This wrapper script is auto-generated by viash 0.7.5 and is thus a derivative // work thereof. 
This software comes with ABSOLUTELY NO WARRANTY from Data @@ -27,7 +27,7 @@ thisConfig = processConfig(jsonSlurper.parseText('''{ "functionality" : { "name" : "calculate_qc_metrics", "namespace" : "qc", - "version" : "0.12.3", + "version" : "0.12.4", "authors" : [ { "name" : "Dries Schaumont", @@ -306,9 +306,9 @@ thisConfig = processConfig(jsonSlurper.parseText('''{ "platform" : "nextflow", "output" : "/home/runner/work/openpipeline/openpipeline/target/nextflow/qc/calculate_qc_metrics", "viash_version" : "0.7.5", - "git_commit" : "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f", + "git_commit" : "a075b9f384e200b357c4c85801062a980ddb3383", "git_remote" : "https://github.com/openpipelines-bio/openpipeline", - "git_tag" : "0.12.2-3-g827d483cf7" + "git_tag" : "0.12.3-3-ga075b9f384" } }''')) diff --git a/target/nextflow/qc/calculate_qc_metrics/nextflow.config b/target/nextflow/qc/calculate_qc_metrics/nextflow.config index b81c428a5af..7a79e470d77 100644 --- a/target/nextflow/qc/calculate_qc_metrics/nextflow.config +++ b/target/nextflow/qc/calculate_qc_metrics/nextflow.config @@ -2,7 +2,7 @@ manifest { name = 'calculate_qc_metrics' mainScript = 'main.nf' nextflowVersion = '!>=20.12.1-edge' - version = '0.12.3' + version = '0.12.4' description = 'Add basic quality control metrics to an .h5mu file.\n\nThe metrics are comparable to what scanpy.pp.calculate_qc_metrics output,\nalthough they have slightly different names:\n\nVar metrics (name in this component -> name in scanpy):\n - pct_dropout -> pct_dropout_by_{expr_type}\n - num_nonzero_obs -> n_cells_by_{expr_type}\n - obs_mean -> mean_{expr_type}\n - total_counts -> total_{expr_type}\n\nObs metrics:\n - num_nonzero_vars -> n_genes_by_{expr_type}\n - pct_{var_qc_metrics} -> pct_{expr_type}_{qc_var}\n - total_counts_{var_qc_metrics} -> total_{expr_type}_{qc_var}\n - pct_of_counts_in_top_{top_n_vars}_vars -> pct_{expr_type}_in_top_{n}_{var_type}\n - total_counts -> total_{expr_type}\n \n' author = 'Dries Schaumont' } diff --git a/target/nextflow/qc/calculate_qc_metrics/nextflow_schema.json b/target/nextflow/qc/calculate_qc_metrics/nextflow_schema.json index 4dd04b7ec2b..f034d5ca2ca 100644 --- a/target/nextflow/qc/calculate_qc_metrics/nextflow_schema.json +++ b/target/nextflow/qc/calculate_qc_metrics/nextflow_schema.json @@ -1,108 +1,158 @@ { - "$schema": "http://json-schema.org/draft-07/schema", - "title": "calculate_qc_metrics", - "description": "Add basic quality control metrics to an .h5mu file.\n\nThe metrics are comparable to what scanpy.pp.calculate_qc_metrics output,\nalthough they have slightly different names:\n\nVar metrics (name in this component -\u003e name in scanpy):\n - pct_dropout -\u003e pct_dropout_by_{expr_type}\n - num_nonzero_obs -\u003e n_cells_by_{expr_type}\n - obs_mean -\u003e mean_{expr_type}\n - total_counts -\u003e total_{expr_type}\n\nObs metrics:\n - num_nonzero_vars -\u003e n_genes_by_{expr_type}\n - pct_{var_qc_metrics} -\u003e pct_{expr_type}_{qc_var}\n - total_counts_{var_qc_metrics} -\u003e total_{expr_type}_{qc_var}\n - pct_of_counts_in_top_{top_n_vars}_vars -\u003e pct_{expr_type}_in_top_{n}_{var_type}\n - total_counts -\u003e total_{expr_type}\n \n", +"$schema": "http://json-schema.org/draft-07/schema", +"title": "calculate_qc_metrics", +"description": "Add basic quality control metrics to an .h5mu file.\n\nThe metrics are comparable to what scanpy.pp.calculate_qc_metrics output,\nalthough they have slightly different names:\n\nVar metrics (name in this component -\u003e name in scanpy):\n - pct_dropout -\u003e 
pct_dropout_by_{expr_type}\n - num_nonzero_obs -\u003e n_cells_by_{expr_type}\n - obs_mean -\u003e mean_{expr_type}\n - total_counts -\u003e total_{expr_type}\n\nObs metrics:\n - num_nonzero_vars -\u003e n_genes_by_{expr_type}\n - pct_{var_qc_metrics} -\u003e pct_{expr_type}_{qc_var}\n - total_counts_{var_qc_metrics} -\u003e total_{expr_type}_{qc_var}\n - pct_of_counts_in_top_{top_n_vars}_vars -\u003e pct_{expr_type}_in_top_{n}_{var_type}\n - total_counts -\u003e total_{expr_type}\n \n", +"type": "object", +"definitions": { + + + + "inputs" : { + "title": "Inputs", "type": "object", - "definitions": { - "inputs" : { - "title": "Inputs", - "type": "object", - "description": "No description", - "properties": { - - "input": { - "type": "string", - "description": "Type: `file`, required, example: `input.h5mu`. Input h5mu file", - "help_text": "Type: `file`, required, example: `input.h5mu`. Input h5mu file" - }, - - "modality": { - "type": "string", - "description": "Type: `string`, default: `rna`. ", - "help_text": "Type: `string`, default: `rna`. ", - "default": "rna" - }, - - "layer": { - "type": "string", - "description": "Type: `string`, example: `raw_counts`. ", - "help_text": "Type: `string`, example: `raw_counts`. " - }, - - "var_qc_metrics": { - "type": "string", - "description": "Type: List of `string`, example: `ercc,highly_variable,mitochondrial`, multiple_sep: `\",\"`. Keys to select a boolean (containing only True or False) column from ", - "help_text": "Type: List of `string`, example: `ercc,highly_variable,mitochondrial`, multiple_sep: `\",\"`. Keys to select a boolean (containing only True or False) column from .var.\nFor each cell, calculate the proportion of total values for genes which are labeled \u0027True\u0027, \ncompared to the total sum of the values for all genes.\n" - }, - - "var_qc_metrics_fill_na_value": { - "type": "boolean", - "description": "Type: `boolean`. Fill any \u0027NA\u0027 values found in the columns specified with --var_qc_metrics to \u0027True\u0027 or \u0027False\u0027", - "help_text": "Type: `boolean`. Fill any \u0027NA\u0027 values found in the columns specified with --var_qc_metrics to \u0027True\u0027 or \u0027False\u0027.\nas False.\n" - }, - - "top_n_vars": { - "type": "string", - "description": "Type: List of `integer`, multiple_sep: `\",\"`. Number of top vars to be used to calculate cumulative proportions", - "help_text": "Type: List of `integer`, multiple_sep: `\",\"`. Number of top vars to be used to calculate cumulative proportions.\nIf not specified, proportions are not calculated. `--top_n_vars 20,50` finds\ncumulative proportion to the 20th and 50th most expressed vars.\n" - } - - } - }, - "outputs" : { - "title": "Outputs", - "type": "object", - "description": "No description", - "properties": { - - "output": { - "type": "string", - "description": "Type: `file`, default: `$id.$key.output.h5mu`, example: `output.h5mu`. Output h5mu file", - "help_text": "Type: `file`, default: `$id.$key.output.h5mu`, example: `output.h5mu`. Output h5mu file.", - "default": "$id.$key.output.h5mu" - }, - - "output_compression": { - "type": "string", - "description": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. The compression format to be used on the output h5mu object", - "help_text": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. 
The compression format to be used on the output h5mu object.", - "enum": ["gzip", "lzf"] + "description": "No description", + "properties": { + + + "input": { + "type": + "string", + "description": "Type: `file`, required, example: `input.h5mu`. Input h5mu file", + "help_text": "Type: `file`, required, example: `input.h5mu`. Input h5mu file" - } - - } - }, - "nextflow input-output arguments" : { - "title": "Nextflow input-output arguments", - "type": "object", - "description": "Input/output parameters for Nextflow itself. Please note that both publishDir and publish_dir are supported but at least one has to be configured.", - "properties": { - - "publish_dir": { - "type": "string", - "description": "Type: `string`, required, example: `output/`. Path to an output directory", - "help_text": "Type: `string`, required, example: `output/`. Path to an output directory." - }, - - "param_list": { - "type": "string", - "description": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel", - "help_text": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel. A `param_list` can either be a list of maps, a csv file, a json file, a yaml file, or simply a yaml blob.\n\n* A list of maps (as-is) where the keys of each map corresponds to the arguments of the pipeline. Example: in a `nextflow.config` file: `param_list: [ [\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027], [\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027] ]`.\n* A csv file should have column names which correspond to the different arguments of this pipeline. Example: `--param_list data.csv` with columns `id,input`.\n* A json or a yaml file should be a list of maps, each of which has keys corresponding to the arguments of the pipeline. Example: `--param_list data.json` with contents `[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]`.\n* A yaml blob can also be passed directly as a string. Example: `--param_list \"[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]\"`.\n\nWhen passing a csv, json or yaml file, relative path names are relativized to the location of the parameter file. No relativation is performed when `param_list` is a list of maps (as-is) or a yaml blob.", - "hidden": true - } - - } - } + } + + + , + "modality": { + "type": + "string", + "description": "Type: `string`, default: `rna`. ", + "help_text": "Type: `string`, default: `rna`. " + , + "default": "rna" + } + + + , + "layer": { + "type": + "string", + "description": "Type: `string`, example: `raw_counts`. ", + "help_text": "Type: `string`, example: `raw_counts`. " + + } + + + , + "var_qc_metrics": { + "type": + "string", + "description": "Type: List of `string`, example: `ercc,highly_variable,mitochondrial`, multiple_sep: `\",\"`. Keys to select a boolean (containing only True or False) column from ", + "help_text": "Type: List of `string`, example: `ercc,highly_variable,mitochondrial`, multiple_sep: `\",\"`. 
Keys to select a boolean (containing only True or False) column from .var.\nFor each cell, calculate the proportion of total values for genes which are labeled \u0027True\u0027, \ncompared to the total sum of the values for all genes.\n" + + } + + + , + "var_qc_metrics_fill_na_value": { + "type": + "boolean", + "description": "Type: `boolean`. Fill any \u0027NA\u0027 values found in the columns specified with --var_qc_metrics to \u0027True\u0027 or \u0027False\u0027", + "help_text": "Type: `boolean`. Fill any \u0027NA\u0027 values found in the columns specified with --var_qc_metrics to \u0027True\u0027 or \u0027False\u0027.\nas False.\n" + + } + + + , + "top_n_vars": { + "type": + "string", + "description": "Type: List of `integer`, multiple_sep: `\",\"`. Number of top vars to be used to calculate cumulative proportions", + "help_text": "Type: List of `integer`, multiple_sep: `\",\"`. Number of top vars to be used to calculate cumulative proportions.\nIf not specified, proportions are not calculated. `--top_n_vars 20,50` finds\ncumulative proportion to the 20th and 50th most expressed vars.\n" + + } + + +} +}, + + + "outputs" : { + "title": "Outputs", + "type": "object", + "description": "No description", + "properties": { + + + "output": { + "type": + "string", + "description": "Type: `file`, default: `$id.$key.output.h5mu`, example: `output.h5mu`. Output h5mu file", + "help_text": "Type: `file`, default: `$id.$key.output.h5mu`, example: `output.h5mu`. Output h5mu file." + , + "default": "$id.$key.output.h5mu" + } + + + , + "output_compression": { + "type": + "string", + "description": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. The compression format to be used on the output h5mu object", + "help_text": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. The compression format to be used on the output h5mu object.", + "enum": ["gzip", "lzf"] + + + } + + +} +}, + + + "nextflow input-output arguments" : { + "title": "Nextflow input-output arguments", + "type": "object", + "description": "Input/output parameters for Nextflow itself. Please note that both publishDir and publish_dir are supported but at least one has to be configured.", + "properties": { + + + "publish_dir": { + "type": + "string", + "description": "Type: `string`, required, example: `output/`. Path to an output directory", + "help_text": "Type: `string`, required, example: `output/`. Path to an output directory." + + } + + + , + "param_list": { + "type": + "string", + "description": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel", + "help_text": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel. A `param_list` can either be a list of maps, a csv file, a json file, a yaml file, or simply a yaml blob.\n\n* A list of maps (as-is) where the keys of each map corresponds to the arguments of the pipeline. Example: in a `nextflow.config` file: `param_list: [ [\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027], [\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027] ]`.\n* A csv file should have column names which correspond to the different arguments of this pipeline. Example: `--param_list data.csv` with columns `id,input`.\n* A json or a yaml file should be a list of maps, each of which has keys corresponding to the arguments of the pipeline. 
Example: `--param_list data.json` with contents `[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]`.\n* A yaml blob can also be passed directly as a string. Example: `--param_list \"[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]\"`.\n\nWhen passing a csv, json or yaml file, relative path names are relativized to the location of the parameter file. No relativation is performed when `param_list` is a list of maps (as-is) or a yaml blob.", + "hidden": true + + } + + +} +} +}, +"allOf": [ + + { + "$ref": "#/definitions/inputs" + }, + + { + "$ref": "#/definitions/outputs" }, - "allOf": [ - { - "$ref": "#/definitions/inputs" - }, - { - "$ref": "#/definitions/outputs" - }, - { - "$ref": "#/definitions/nextflow input-output arguments" - } - ] + + { + "$ref": "#/definitions/nextflow input-output arguments" + } +] } diff --git a/target/nextflow/qc/fastqc/.config.vsh.yaml b/target/nextflow/qc/fastqc/.config.vsh.yaml index cb068c0f230..d04e4308891 100644 --- a/target/nextflow/qc/fastqc/.config.vsh.yaml +++ b/target/nextflow/qc/fastqc/.config.vsh.yaml @@ -1,7 +1,7 @@ functionality: name: "fastqc" namespace: "qc" - version: "0.12.3" + version: "0.12.4" arguments: - type: "string" name: "--mode" @@ -151,6 +151,6 @@ info: output: "/home/runner/work/openpipeline/openpipeline/target/nextflow/qc/fastqc" executable: "/home/runner/work/openpipeline/openpipeline/target/nextflow/qc/fastqc/fastqc" viash_version: "0.7.5" - git_commit: "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" + git_commit: "a075b9f384e200b357c4c85801062a980ddb3383" git_remote: "https://github.com/openpipelines-bio/openpipeline" - git_tag: "0.12.2-3-g827d483cf7" + git_tag: "0.12.3-3-ga075b9f384" diff --git a/target/nextflow/qc/fastqc/main.nf b/target/nextflow/qc/fastqc/main.nf index 0902adc5c64..2294d984ab1 100644 --- a/target/nextflow/qc/fastqc/main.nf +++ b/target/nextflow/qc/fastqc/main.nf @@ -1,4 +1,4 @@ -// fastqc 0.12.3 +// fastqc 0.12.4 // // This wrapper script is auto-generated by viash 0.7.5 and is thus a derivative // work thereof. This software comes with ABSOLUTELY NO WARRANTY from Data @@ -24,7 +24,7 @@ thisConfig = processConfig(jsonSlurper.parseText('''{ "functionality" : { "name" : "fastqc", "namespace" : "qc", - "version" : "0.12.3", + "version" : "0.12.4", "arguments" : [ { "type" : "string", @@ -208,9 +208,9 @@ thisConfig = processConfig(jsonSlurper.parseText('''{ "platform" : "nextflow", "output" : "/home/runner/work/openpipeline/openpipeline/target/nextflow/qc/fastqc", "viash_version" : "0.7.5", - "git_commit" : "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f", + "git_commit" : "a075b9f384e200b357c4c85801062a980ddb3383", "git_remote" : "https://github.com/openpipelines-bio/openpipeline", - "git_tag" : "0.12.2-3-g827d483cf7" + "git_tag" : "0.12.3-3-ga075b9f384" } }''')) diff --git a/target/nextflow/qc/fastqc/nextflow.config b/target/nextflow/qc/fastqc/nextflow.config index a04bfa01236..f4090c86c99 100644 --- a/target/nextflow/qc/fastqc/nextflow.config +++ b/target/nextflow/qc/fastqc/nextflow.config @@ -2,7 +2,7 @@ manifest { name = 'fastqc' mainScript = 'main.nf' nextflowVersion = '!>=20.12.1-edge' - version = '0.12.3' + version = '0.12.4' description = 'Fastqc component, please see https://www.bioinformatics.babraham.ac.uk/projects/fastqc/. 
This component can take one or more files (by means of shell globbing) or a complete directory.\n' } diff --git a/target/nextflow/qc/fastqc/nextflow_schema.json b/target/nextflow/qc/fastqc/nextflow_schema.json index dbec0923f26..a282d4e8632 100644 --- a/target/nextflow/qc/fastqc/nextflow_schema.json +++ b/target/nextflow/qc/fastqc/nextflow_schema.json @@ -1,73 +1,104 @@ { - "$schema": "http://json-schema.org/draft-07/schema", - "title": "fastqc", - "description": "Fastqc component, please see https://www.bioinformatics.babraham.ac.uk/projects/fastqc/. This component can take one or more files (by means of shell globbing) or a complete directory.\n", +"$schema": "http://json-schema.org/draft-07/schema", +"title": "fastqc", +"description": "Fastqc component, please see https://www.bioinformatics.babraham.ac.uk/projects/fastqc/. This component can take one or more files (by means of shell globbing) or a complete directory.\n", +"type": "object", +"definitions": { + + + + "arguments" : { + "title": "Arguments", "type": "object", - "definitions": { - "arguments" : { - "title": "Arguments", - "type": "object", - "description": "No description", - "properties": { - - "mode": { - "type": "string", - "description": "Type: `string`, default: `files`, choices: ``files`, `dir``. The mode in which the component works", - "help_text": "Type: `string`, default: `files`, choices: ``files`, `dir``. The mode in which the component works. Can be either files or dir.", - "enum": ["files", "dir"] + "description": "No description", + "properties": { + + + "mode": { + "type": + "string", + "description": "Type: `string`, default: `files`, choices: ``files`, `dir``. The mode in which the component works", + "help_text": "Type: `string`, default: `files`, choices: ``files`, `dir``. The mode in which the component works. Can be either files or dir.", + "enum": ["files", "dir"] + , - "default": "files" - }, - - "input": { - "type": "string", - "description": "Type: `file`, required, example: `fastq_dir/`. Directory containing input fastq files", - "help_text": "Type: `file`, required, example: `fastq_dir/`. Directory containing input fastq files." - }, - - "output": { - "type": "string", - "description": "Type: `file`, required, default: `$id.$key.output.output`, example: `qc/`. Output directory to write reports to", - "help_text": "Type: `file`, required, default: `$id.$key.output.output`, example: `qc/`. Output directory to write reports to.", - "default": "$id.$key.output.output" - }, - - "threads": { - "type": "integer", - "description": "Type: `integer`. Specifies the number of files which can be processed simultaneously", - "help_text": "Type: `integer`. Specifies the number of files which can be processed simultaneously. Each thread will be allocated 250MB of\nmemory.\n" - } - - } - }, - "nextflow input-output arguments" : { - "title": "Nextflow input-output arguments", - "type": "object", - "description": "Input/output parameters for Nextflow itself. Please note that both publishDir and publish_dir are supported but at least one has to be configured.", - "properties": { - - "publish_dir": { - "type": "string", - "description": "Type: `string`, required, example: `output/`. Path to an output directory", - "help_text": "Type: `string`, required, example: `output/`. Path to an output directory." - }, - - "param_list": { - "type": "string", - "description": "Type: `string`, example: `my_params.yaml`. 
Allows inputting multiple parameter sets to initialise a Nextflow channel", - "help_text": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel. A `param_list` can either be a list of maps, a csv file, a json file, a yaml file, or simply a yaml blob.\n\n* A list of maps (as-is) where the keys of each map corresponds to the arguments of the pipeline. Example: in a `nextflow.config` file: `param_list: [ [\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027], [\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027] ]`.\n* A csv file should have column names which correspond to the different arguments of this pipeline. Example: `--param_list data.csv` with columns `id,input`.\n* A json or a yaml file should be a list of maps, each of which has keys corresponding to the arguments of the pipeline. Example: `--param_list data.json` with contents `[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]`.\n* A yaml blob can also be passed directly as a string. Example: `--param_list \"[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]\"`.\n\nWhen passing a csv, json or yaml file, relative path names are relativized to the location of the parameter file. No relativation is performed when `param_list` is a list of maps (as-is) or a yaml blob.", - "hidden": true - } - - } - } + "default": "files" + } + + + , + "input": { + "type": + "string", + "description": "Type: `file`, required, example: `fastq_dir/`. Directory containing input fastq files", + "help_text": "Type: `file`, required, example: `fastq_dir/`. Directory containing input fastq files." + + } + + + , + "output": { + "type": + "string", + "description": "Type: `file`, required, default: `$id.$key.output.output`, example: `qc/`. Output directory to write reports to", + "help_text": "Type: `file`, required, default: `$id.$key.output.output`, example: `qc/`. Output directory to write reports to." + , + "default": "$id.$key.output.output" + } + + + , + "threads": { + "type": + "integer", + "description": "Type: `integer`. Specifies the number of files which can be processed simultaneously", + "help_text": "Type: `integer`. Specifies the number of files which can be processed simultaneously. Each thread will be allocated 250MB of\nmemory.\n" + + } + + +} +}, + + + "nextflow input-output arguments" : { + "title": "Nextflow input-output arguments", + "type": "object", + "description": "Input/output parameters for Nextflow itself. Please note that both publishDir and publish_dir are supported but at least one has to be configured.", + "properties": { + + + "publish_dir": { + "type": + "string", + "description": "Type: `string`, required, example: `output/`. Path to an output directory", + "help_text": "Type: `string`, required, example: `output/`. Path to an output directory." + + } + + + , + "param_list": { + "type": + "string", + "description": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel", + "help_text": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel. 
A `param_list` can either be a list of maps, a csv file, a json file, a yaml file, or simply a yaml blob.\n\n* A list of maps (as-is) where the keys of each map corresponds to the arguments of the pipeline. Example: in a `nextflow.config` file: `param_list: [ [\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027], [\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027] ]`.\n* A csv file should have column names which correspond to the different arguments of this pipeline. Example: `--param_list data.csv` with columns `id,input`.\n* A json or a yaml file should be a list of maps, each of which has keys corresponding to the arguments of the pipeline. Example: `--param_list data.json` with contents `[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]`.\n* A yaml blob can also be passed directly as a string. Example: `--param_list \"[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]\"`.\n\nWhen passing a csv, json or yaml file, relative path names are relativized to the location of the parameter file. No relativation is performed when `param_list` is a list of maps (as-is) or a yaml blob.", + "hidden": true + + } + + +} +} +}, +"allOf": [ + + { + "$ref": "#/definitions/arguments" }, - "allOf": [ - { - "$ref": "#/definitions/arguments" - }, - { - "$ref": "#/definitions/nextflow input-output arguments" - } - ] + + { + "$ref": "#/definitions/nextflow input-output arguments" + } +] } diff --git a/target/nextflow/qc/multiqc/.config.vsh.yaml b/target/nextflow/qc/multiqc/.config.vsh.yaml index 99c382ce3a6..236a878b939 100644 --- a/target/nextflow/qc/multiqc/.config.vsh.yaml +++ b/target/nextflow/qc/multiqc/.config.vsh.yaml @@ -1,7 +1,7 @@ functionality: name: "multiqc" namespace: "qc" - version: "0.12.3" + version: "0.12.4" arguments: - type: "file" name: "--input" @@ -135,6 +135,6 @@ info: output: "/home/runner/work/openpipeline/openpipeline/target/nextflow/qc/multiqc" executable: "/home/runner/work/openpipeline/openpipeline/target/nextflow/qc/multiqc/multiqc" viash_version: "0.7.5" - git_commit: "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" + git_commit: "a075b9f384e200b357c4c85801062a980ddb3383" git_remote: "https://github.com/openpipelines-bio/openpipeline" - git_tag: "0.12.2-3-g827d483cf7" + git_tag: "0.12.3-3-ga075b9f384" diff --git a/target/nextflow/qc/multiqc/main.nf b/target/nextflow/qc/multiqc/main.nf index e4b0858e85f..8475dc0c71d 100644 --- a/target/nextflow/qc/multiqc/main.nf +++ b/target/nextflow/qc/multiqc/main.nf @@ -1,4 +1,4 @@ -// multiqc 0.12.3 +// multiqc 0.12.4 // // This wrapper script is auto-generated by viash 0.7.5 and is thus a derivative // work thereof. 
This software comes with ABSOLUTELY NO WARRANTY from Data @@ -24,7 +24,7 @@ thisConfig = processConfig(jsonSlurper.parseText('''{ "functionality" : { "name" : "multiqc", "namespace" : "qc", - "version" : "0.12.3", + "version" : "0.12.4", "arguments" : [ { "type" : "file", @@ -193,9 +193,9 @@ thisConfig = processConfig(jsonSlurper.parseText('''{ "platform" : "nextflow", "output" : "/home/runner/work/openpipeline/openpipeline/target/nextflow/qc/multiqc", "viash_version" : "0.7.5", - "git_commit" : "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f", + "git_commit" : "a075b9f384e200b357c4c85801062a980ddb3383", "git_remote" : "https://github.com/openpipelines-bio/openpipeline", - "git_tag" : "0.12.2-3-g827d483cf7" + "git_tag" : "0.12.3-3-ga075b9f384" } }''')) diff --git a/target/nextflow/qc/multiqc/nextflow.config b/target/nextflow/qc/multiqc/nextflow.config index 92a640398ff..85ce6d27df4 100644 --- a/target/nextflow/qc/multiqc/nextflow.config +++ b/target/nextflow/qc/multiqc/nextflow.config @@ -2,7 +2,7 @@ manifest { name = 'multiqc' mainScript = 'main.nf' nextflowVersion = '!>=20.12.1-edge' - version = '0.12.3' + version = '0.12.4' description = 'MultiQC aggregates results from bioinformatics analyses across many samples into a single report.\nIt searches a given directory for analysis logs and compiles a HTML report. It\'s a general use tool, perfect for summarising the output from numerous bioinformatics tools.\n' } diff --git a/target/nextflow/qc/multiqc/nextflow_schema.json b/target/nextflow/qc/multiqc/nextflow_schema.json index 5616bd74a8e..0e0dd93540b 100644 --- a/target/nextflow/qc/multiqc/nextflow_schema.json +++ b/target/nextflow/qc/multiqc/nextflow_schema.json @@ -1,58 +1,81 @@ { - "$schema": "http://json-schema.org/draft-07/schema", - "title": "multiqc", - "description": "MultiQC aggregates results from bioinformatics analyses across many samples into a single report.\nIt searches a given directory for analysis logs and compiles a HTML report. It\u0027s a general use tool, perfect for summarising the output from numerous bioinformatics tools.\n", +"$schema": "http://json-schema.org/draft-07/schema", +"title": "multiqc", +"description": "MultiQC aggregates results from bioinformatics analyses across many samples into a single report.\nIt searches a given directory for analysis logs and compiles a HTML report. It\u0027s a general use tool, perfect for summarising the output from numerous bioinformatics tools.\n", +"type": "object", +"definitions": { + + + + "arguments" : { + "title": "Arguments", "type": "object", - "definitions": { - "arguments" : { - "title": "Arguments", - "type": "object", - "description": "No description", - "properties": { - - "input": { - "type": "string", - "description": "Type: List of `file`, required, example: `input.txt`, multiple_sep: `\":\"`. Inputs for MultiQC", - "help_text": "Type: List of `file`, required, example: `input.txt`, multiple_sep: `\":\"`. Inputs for MultiQC." - }, - - "output": { - "type": "string", - "description": "Type: `file`, required, default: `$id.$key.output.output`, example: `report`. Create report in the specified output directory", - "help_text": "Type: `file`, required, default: `$id.$key.output.output`, example: `report`. Create report in the specified output directory.", - "default": "$id.$key.output.output" - } - - } - }, - "nextflow input-output arguments" : { - "title": "Nextflow input-output arguments", - "type": "object", - "description": "Input/output parameters for Nextflow itself. 
Please note that both publishDir and publish_dir are supported but at least one has to be configured.", - "properties": { - - "publish_dir": { - "type": "string", - "description": "Type: `string`, required, example: `output/`. Path to an output directory", - "help_text": "Type: `string`, required, example: `output/`. Path to an output directory." - }, - - "param_list": { - "type": "string", - "description": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel", - "help_text": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel. A `param_list` can either be a list of maps, a csv file, a json file, a yaml file, or simply a yaml blob.\n\n* A list of maps (as-is) where the keys of each map corresponds to the arguments of the pipeline. Example: in a `nextflow.config` file: `param_list: [ [\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027], [\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027] ]`.\n* A csv file should have column names which correspond to the different arguments of this pipeline. Example: `--param_list data.csv` with columns `id,input`.\n* A json or a yaml file should be a list of maps, each of which has keys corresponding to the arguments of the pipeline. Example: `--param_list data.json` with contents `[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]`.\n* A yaml blob can also be passed directly as a string. Example: `--param_list \"[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]\"`.\n\nWhen passing a csv, json or yaml file, relative path names are relativized to the location of the parameter file. No relativation is performed when `param_list` is a list of maps (as-is) or a yaml blob.", - "hidden": true - } - - } - } + "description": "No description", + "properties": { + + + "input": { + "type": + "string", + "description": "Type: List of `file`, required, example: `input.txt`, multiple_sep: `\":\"`. Inputs for MultiQC", + "help_text": "Type: List of `file`, required, example: `input.txt`, multiple_sep: `\":\"`. Inputs for MultiQC." + + } + + + , + "output": { + "type": + "string", + "description": "Type: `file`, required, default: `$id.$key.output.output`, example: `report`. Create report in the specified output directory", + "help_text": "Type: `file`, required, default: `$id.$key.output.output`, example: `report`. Create report in the specified output directory." + , + "default": "$id.$key.output.output" + } + + +} +}, + + + "nextflow input-output arguments" : { + "title": "Nextflow input-output arguments", + "type": "object", + "description": "Input/output parameters for Nextflow itself. Please note that both publishDir and publish_dir are supported but at least one has to be configured.", + "properties": { + + + "publish_dir": { + "type": + "string", + "description": "Type: `string`, required, example: `output/`. Path to an output directory", + "help_text": "Type: `string`, required, example: `output/`. Path to an output directory." + + } + + + , + "param_list": { + "type": + "string", + "description": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel", + "help_text": "Type: `string`, example: `my_params.yaml`. 
Allows inputting multiple parameter sets to initialise a Nextflow channel. A `param_list` can either be a list of maps, a csv file, a json file, a yaml file, or simply a yaml blob.\n\n* A list of maps (as-is) where the keys of each map corresponds to the arguments of the pipeline. Example: in a `nextflow.config` file: `param_list: [ [\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027], [\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027] ]`.\n* A csv file should have column names which correspond to the different arguments of this pipeline. Example: `--param_list data.csv` with columns `id,input`.\n* A json or a yaml file should be a list of maps, each of which has keys corresponding to the arguments of the pipeline. Example: `--param_list data.json` with contents `[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]`.\n* A yaml blob can also be passed directly as a string. Example: `--param_list \"[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]\"`.\n\nWhen passing a csv, json or yaml file, relative path names are relativized to the location of the parameter file. No relativation is performed when `param_list` is a list of maps (as-is) or a yaml blob.", + "hidden": true + + } + + +} +} +}, +"allOf": [ + + { + "$ref": "#/definitions/arguments" }, - "allOf": [ - { - "$ref": "#/definitions/arguments" - }, - { - "$ref": "#/definitions/nextflow input-output arguments" - } - ] + + { + "$ref": "#/definitions/nextflow input-output arguments" + } +] } diff --git a/target/nextflow/query/cellxgene_census/.config.vsh.yaml b/target/nextflow/query/cellxgene_census/.config.vsh.yaml index 7ea63f9e06b..7dccd30c5af 100644 --- a/target/nextflow/query/cellxgene_census/.config.vsh.yaml +++ b/target/nextflow/query/cellxgene_census/.config.vsh.yaml @@ -1,7 +1,7 @@ functionality: name: "cellxgene_census" namespace: "query" - version: "0.12.3" + version: "0.12.4" authors: - name: "Matthias Beyens" info: @@ -255,6 +255,6 @@ info: output: "/home/runner/work/openpipeline/openpipeline/target/nextflow/query/cellxgene_census" executable: "/home/runner/work/openpipeline/openpipeline/target/nextflow/query/cellxgene_census/cellxgene_census" viash_version: "0.7.5" - git_commit: "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" + git_commit: "a075b9f384e200b357c4c85801062a980ddb3383" git_remote: "https://github.com/openpipelines-bio/openpipeline" - git_tag: "0.12.2-3-g827d483cf7" + git_tag: "0.12.3-3-ga075b9f384" diff --git a/target/nextflow/query/cellxgene_census/main.nf b/target/nextflow/query/cellxgene_census/main.nf index 0ad390f627a..dc6eeb5c733 100644 --- a/target/nextflow/query/cellxgene_census/main.nf +++ b/target/nextflow/query/cellxgene_census/main.nf @@ -1,4 +1,4 @@ -// cellxgene_census 0.12.3 +// cellxgene_census 0.12.4 // // This wrapper script is auto-generated by viash 0.7.5 and is thus a derivative // work thereof. 
This software comes with ABSOLUTELY NO WARRANTY from Data @@ -28,7 +28,7 @@ thisConfig = processConfig(jsonSlurper.parseText('''{ "functionality" : { "name" : "cellxgene_census", "namespace" : "query", - "version" : "0.12.3", + "version" : "0.12.4", "authors" : [ { "name" : "Matthias Beyens", @@ -353,9 +353,9 @@ thisConfig = processConfig(jsonSlurper.parseText('''{ "platform" : "nextflow", "output" : "/home/runner/work/openpipeline/openpipeline/target/nextflow/query/cellxgene_census", "viash_version" : "0.7.5", - "git_commit" : "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f", + "git_commit" : "a075b9f384e200b357c4c85801062a980ddb3383", "git_remote" : "https://github.com/openpipelines-bio/openpipeline", - "git_tag" : "0.12.2-3-g827d483cf7" + "git_tag" : "0.12.3-3-ga075b9f384" } }''')) diff --git a/target/nextflow/query/cellxgene_census/nextflow.config b/target/nextflow/query/cellxgene_census/nextflow.config index a0e0d0280f1..8b1b584f334 100644 --- a/target/nextflow/query/cellxgene_census/nextflow.config +++ b/target/nextflow/query/cellxgene_census/nextflow.config @@ -2,7 +2,7 @@ manifest { name = 'cellxgene_census' mainScript = 'main.nf' nextflowVersion = '!>=20.12.1-edge' - version = '0.12.3' + version = '0.12.4' description = 'Query CellxGene Census or user-specified TileDBSoma object, and eventually fetch cell and gene metadata or/and expression counts.' author = 'Matthias Beyens, Dries De Maeyer' } diff --git a/target/nextflow/query/cellxgene_census/nextflow_schema.json b/target/nextflow/query/cellxgene_census/nextflow_schema.json index bdee1313566..40eef071b53 100644 --- a/target/nextflow/query/cellxgene_census/nextflow_schema.json +++ b/target/nextflow/query/cellxgene_census/nextflow_schema.json @@ -1,130 +1,187 @@ { - "$schema": "http://json-schema.org/draft-07/schema", - "title": "cellxgene_census", - "description": "Query CellxGene Census or user-specified TileDBSoma object, and eventually fetch cell and gene metadata or/and expression counts.", +"$schema": "http://json-schema.org/draft-07/schema", +"title": "cellxgene_census", +"description": "Query CellxGene Census or user-specified TileDBSoma object, and eventually fetch cell and gene metadata or/and expression counts.", +"type": "object", +"definitions": { + + + + "inputs" : { + "title": "Inputs", "type": "object", - "definitions": { - "inputs" : { - "title": "Inputs", - "type": "object", - "description": "Arguments related to the input (aka query) dataset.", - "properties": { - - "input_database": { - "type": "string", - "description": "Type: `string`, default: `CellxGene`, example: `s3://`. Full input database S3 prefix URL", - "help_text": "Type: `string`, default: `CellxGene`, example: `s3://`. Full input database S3 prefix URL. Default: CellxGene Census", - "default": "CellxGene" - }, - - "modality": { - "type": "string", - "description": "Type: `string`, default: `rna`. Which modality to store the output in", - "help_text": "Type: `string`, default: `rna`. Which modality to store the output in.", - "default": "rna" - }, - - "cellxgene_release": { - "type": "string", - "description": "Type: `string`, default: `2023-05-15`. CellxGene Census release date", - "help_text": "Type: `string`, default: `2023-05-15`. CellxGene Census release date. 
More information: https://chanzuckerberg.github.io/cellxgene-census/cellxgene_census_docsite_data_release_info.html", - "default": "2023-05-15" - } - - } - }, - "outputs" : { - "title": "Outputs", - "type": "object", - "description": "Output arguments.", - "properties": { - - "output": { - "type": "string", - "description": "Type: `file`, required, default: `$id.$key.output.h5mu`, example: `output.h5mu`. Output h5mu file", - "help_text": "Type: `file`, required, default: `$id.$key.output.h5mu`, example: `output.h5mu`. Output h5mu file.", - "default": "$id.$key.output.h5mu" - }, - - "output_compression": { - "type": "string", - "description": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. ", - "help_text": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. ", - "enum": ["gzip", "lzf"] + "description": "Arguments related to the input (aka query) dataset.", + "properties": { + + + "input_database": { + "type": + "string", + "description": "Type: `string`, default: `CellxGene`, example: `s3://`. Full input database S3 prefix URL", + "help_text": "Type: `string`, default: `CellxGene`, example: `s3://`. Full input database S3 prefix URL. Default: CellxGene Census" + , + "default": "CellxGene" + } + + + , + "modality": { + "type": + "string", + "description": "Type: `string`, default: `rna`. Which modality to store the output in", + "help_text": "Type: `string`, default: `rna`. Which modality to store the output in." + , + "default": "rna" + } + + + , + "cellxgene_release": { + "type": + "string", + "description": "Type: `string`, default: `2023-05-15`. CellxGene Census release date", + "help_text": "Type: `string`, default: `2023-05-15`. CellxGene Census release date. More information: https://chanzuckerberg.github.io/cellxgene-census/cellxgene_census_docsite_data_release_info.html" + , + "default": "2023-05-15" + } + + +} +}, + + + "outputs" : { + "title": "Outputs", + "type": "object", + "description": "Output arguments.", + "properties": { + + + "output": { + "type": + "string", + "description": "Type: `file`, required, default: `$id.$key.output.h5mu`, example: `output.h5mu`. Output h5mu file", + "help_text": "Type: `file`, required, default: `$id.$key.output.h5mu`, example: `output.h5mu`. Output h5mu file." + , + "default": "$id.$key.output.h5mu" + } + + + , + "output_compression": { + "type": + "string", + "description": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. ", + "help_text": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. ", + "enum": ["gzip", "lzf"] + + + } + + +} +}, + + + "query" : { + "title": "Query", + "type": "object", + "description": "Arguments related to the query.", + "properties": { + + + "species": { + "type": + "string", + "description": "Type: `string`, default: `homo_sapiens`, example: `homo_sapiens`, choices: ``homo_sapiens`, `mus_musculus``. Specie(s) of interest", + "help_text": "Type: `string`, default: `homo_sapiens`, example: `homo_sapiens`, choices: ``homo_sapiens`, `mus_musculus``. Specie(s) of interest. If not specified, Homo Sapiens will be queried.", + "enum": ["homo_sapiens", "mus_musculus"] - } - - } - }, - "query" : { - "title": "Query", - "type": "object", - "description": "Arguments related to the query.", - "properties": { - - "species": { - "type": "string", - "description": "Type: `string`, default: `homo_sapiens`, example: `homo_sapiens`, choices: ``homo_sapiens`, `mus_musculus``. 
Specie(s) of interest", - "help_text": "Type: `string`, default: `homo_sapiens`, example: `homo_sapiens`, choices: ``homo_sapiens`, `mus_musculus``. Specie(s) of interest. If not specified, Homo Sapiens will be queried.", - "enum": ["homo_sapiens", "mus_musculus"] , - "default": "homo_sapiens" - }, - - "cell_query": { - "type": "string", - "description": "Type: `string`, example: `is_primary_data == True and cell_type_ontology_term_id in [\u0027CL:0000136\u0027, \u0027CL:1000311\u0027, \u0027CL:0002616\u0027] and suspension_type == \u0027cell\u0027`. The query for selecting the cells as defined by the cellxgene census schema", - "help_text": "Type: `string`, example: `is_primary_data == True and cell_type_ontology_term_id in [\u0027CL:0000136\u0027, \u0027CL:1000311\u0027, \u0027CL:0002616\u0027] and suspension_type == \u0027cell\u0027`. The query for selecting the cells as defined by the cellxgene census schema." - }, - - "cells_filter_columns": { - "type": "string", - "description": "Type: List of `string`, example: `dataset_id:tissue:assay:disease:cell_type`, multiple_sep: `\":\"`. The query for selecting the cells as defined by the cellxgene census schema", - "help_text": "Type: List of `string`, example: `dataset_id:tissue:assay:disease:cell_type`, multiple_sep: `\":\"`. The query for selecting the cells as defined by the cellxgene census schema." - }, - - "min_cells_filter_columns": { - "type": "number", - "description": "Type: `double`, example: `100`. Minimum of amount of summed cells_filter_columns cells", - "help_text": "Type: `double`, example: `100`. Minimum of amount of summed cells_filter_columns cells" - } - - } - }, - "nextflow input-output arguments" : { - "title": "Nextflow input-output arguments", - "type": "object", - "description": "Input/output parameters for Nextflow itself. Please note that both publishDir and publish_dir are supported but at least one has to be configured.", - "properties": { - - "publish_dir": { - "type": "string", - "description": "Type: `string`, required, example: `output/`. Path to an output directory", - "help_text": "Type: `string`, required, example: `output/`. Path to an output directory." - }, - - "param_list": { - "type": "string", - "description": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel", - "help_text": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel. A `param_list` can either be a list of maps, a csv file, a json file, a yaml file, or simply a yaml blob.\n\n* A list of maps (as-is) where the keys of each map corresponds to the arguments of the pipeline. Example: in a `nextflow.config` file: `param_list: [ [\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027], [\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027] ]`.\n* A csv file should have column names which correspond to the different arguments of this pipeline. Example: `--param_list data.csv` with columns `id,input`.\n* A json or a yaml file should be a list of maps, each of which has keys corresponding to the arguments of the pipeline. Example: `--param_list data.json` with contents `[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]`.\n* A yaml blob can also be passed directly as a string. 
Example: `--param_list \"[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]\"`.\n\nWhen passing a csv, json or yaml file, relative path names are relativized to the location of the parameter file. No relativation is performed when `param_list` is a list of maps (as-is) or a yaml blob.", - "hidden": true - } - - } - } + "default": "homo_sapiens" + } + + + , + "cell_query": { + "type": + "string", + "description": "Type: `string`, example: `is_primary_data == True and cell_type_ontology_term_id in [\u0027CL:0000136\u0027, \u0027CL:1000311\u0027, \u0027CL:0002616\u0027] and suspension_type == \u0027cell\u0027`. The query for selecting the cells as defined by the cellxgene census schema", + "help_text": "Type: `string`, example: `is_primary_data == True and cell_type_ontology_term_id in [\u0027CL:0000136\u0027, \u0027CL:1000311\u0027, \u0027CL:0002616\u0027] and suspension_type == \u0027cell\u0027`. The query for selecting the cells as defined by the cellxgene census schema." + + } + + + , + "cells_filter_columns": { + "type": + "string", + "description": "Type: List of `string`, example: `dataset_id:tissue:assay:disease:cell_type`, multiple_sep: `\":\"`. The query for selecting the cells as defined by the cellxgene census schema", + "help_text": "Type: List of `string`, example: `dataset_id:tissue:assay:disease:cell_type`, multiple_sep: `\":\"`. The query for selecting the cells as defined by the cellxgene census schema." + + } + + + , + "min_cells_filter_columns": { + "type": + "number", + "description": "Type: `double`, example: `100`. Minimum of amount of summed cells_filter_columns cells", + "help_text": "Type: `double`, example: `100`. Minimum of amount of summed cells_filter_columns cells" + + } + + +} +}, + + + "nextflow input-output arguments" : { + "title": "Nextflow input-output arguments", + "type": "object", + "description": "Input/output parameters for Nextflow itself. Please note that both publishDir and publish_dir are supported but at least one has to be configured.", + "properties": { + + + "publish_dir": { + "type": + "string", + "description": "Type: `string`, required, example: `output/`. Path to an output directory", + "help_text": "Type: `string`, required, example: `output/`. Path to an output directory." + + } + + + , + "param_list": { + "type": + "string", + "description": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel", + "help_text": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel. A `param_list` can either be a list of maps, a csv file, a json file, a yaml file, or simply a yaml blob.\n\n* A list of maps (as-is) where the keys of each map corresponds to the arguments of the pipeline. Example: in a `nextflow.config` file: `param_list: [ [\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027], [\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027] ]`.\n* A csv file should have column names which correspond to the different arguments of this pipeline. Example: `--param_list data.csv` with columns `id,input`.\n* A json or a yaml file should be a list of maps, each of which has keys corresponding to the arguments of the pipeline. 
Example: `--param_list data.json` with contents `[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]`.\n* A yaml blob can also be passed directly as a string. Example: `--param_list \"[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]\"`.\n\nWhen passing a csv, json or yaml file, relative path names are relativized to the location of the parameter file. No relativation is performed when `param_list` is a list of maps (as-is) or a yaml blob.", + "hidden": true + + } + + +} +} +}, +"allOf": [ + + { + "$ref": "#/definitions/inputs" + }, + + { + "$ref": "#/definitions/outputs" + }, + + { + "$ref": "#/definitions/query" }, - "allOf": [ - { - "$ref": "#/definitions/inputs" - }, - { - "$ref": "#/definitions/outputs" - }, - { - "$ref": "#/definitions/query" - }, - { - "$ref": "#/definitions/nextflow input-output arguments" - } - ] + + { + "$ref": "#/definitions/nextflow input-output arguments" + } +] } diff --git a/target/nextflow/reference/build_bdrhap_reference/.config.vsh.yaml b/target/nextflow/reference/build_bdrhap_reference/.config.vsh.yaml index 7e87683edd5..f9850d9ec6f 100644 --- a/target/nextflow/reference/build_bdrhap_reference/.config.vsh.yaml +++ b/target/nextflow/reference/build_bdrhap_reference/.config.vsh.yaml @@ -1,7 +1,7 @@ functionality: name: "build_bdrhap_reference" namespace: "reference" - version: "0.12.3" + version: "0.12.4" authors: - name: "Angela Oliveira Pisco" roles: @@ -181,6 +181,6 @@ info: output: "/home/runner/work/openpipeline/openpipeline/target/nextflow/reference/build_bdrhap_reference" executable: "/home/runner/work/openpipeline/openpipeline/target/nextflow/reference/build_bdrhap_reference/build_bdrhap_reference" viash_version: "0.7.5" - git_commit: "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" + git_commit: "a075b9f384e200b357c4c85801062a980ddb3383" git_remote: "https://github.com/openpipelines-bio/openpipeline" - git_tag: "0.12.2-3-g827d483cf7" + git_tag: "0.12.3-3-ga075b9f384" diff --git a/target/nextflow/reference/build_bdrhap_reference/main.nf b/target/nextflow/reference/build_bdrhap_reference/main.nf index 0135dd2ac1a..b657f52f956 100644 --- a/target/nextflow/reference/build_bdrhap_reference/main.nf +++ b/target/nextflow/reference/build_bdrhap_reference/main.nf @@ -1,4 +1,4 @@ -// build_bdrhap_reference 0.12.3 +// build_bdrhap_reference 0.12.4 // // This wrapper script is auto-generated by viash 0.7.5 and is thus a derivative // work thereof. 
This software comes with ABSOLUTELY NO WARRANTY from Data @@ -28,7 +28,7 @@ thisConfig = processConfig(jsonSlurper.parseText('''{ "functionality" : { "name" : "build_bdrhap_reference", "namespace" : "reference", - "version" : "0.12.3", + "version" : "0.12.4", "authors" : [ { "name" : "Angela Oliveira Pisco", @@ -267,9 +267,9 @@ thisConfig = processConfig(jsonSlurper.parseText('''{ "platform" : "nextflow", "output" : "/home/runner/work/openpipeline/openpipeline/target/nextflow/reference/build_bdrhap_reference", "viash_version" : "0.7.5", - "git_commit" : "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f", + "git_commit" : "a075b9f384e200b357c4c85801062a980ddb3383", "git_remote" : "https://github.com/openpipelines-bio/openpipeline", - "git_tag" : "0.12.2-3-g827d483cf7" + "git_tag" : "0.12.3-3-ga075b9f384" } }''')) diff --git a/target/nextflow/reference/build_bdrhap_reference/nextflow.config b/target/nextflow/reference/build_bdrhap_reference/nextflow.config index 833454546b5..0aa24e8d240 100644 --- a/target/nextflow/reference/build_bdrhap_reference/nextflow.config +++ b/target/nextflow/reference/build_bdrhap_reference/nextflow.config @@ -2,7 +2,7 @@ manifest { name = 'build_bdrhap_reference' mainScript = 'main.nf' nextflowVersion = '!>=20.12.1-edge' - version = '0.12.3' + version = '0.12.4' description = 'Compile a reference into a STAR index compatible with the BD Rhapsody pipeline.' author = 'Angela Oliveira Pisco, Robrecht Cannoodt' } diff --git a/target/nextflow/reference/build_bdrhap_reference/nextflow_schema.json b/target/nextflow/reference/build_bdrhap_reference/nextflow_schema.json index e3f0578f9ac..c1aa06c35ea 100644 --- a/target/nextflow/reference/build_bdrhap_reference/nextflow_schema.json +++ b/target/nextflow/reference/build_bdrhap_reference/nextflow_schema.json @@ -1,64 +1,91 @@ { - "$schema": "http://json-schema.org/draft-07/schema", - "title": "build_bdrhap_reference", - "description": "Compile a reference into a STAR index compatible with the BD Rhapsody pipeline.", +"$schema": "http://json-schema.org/draft-07/schema", +"title": "build_bdrhap_reference", +"description": "Compile a reference into a STAR index compatible with the BD Rhapsody pipeline.", +"type": "object", +"definitions": { + + + + "arguments" : { + "title": "Arguments", "type": "object", - "definitions": { - "arguments" : { - "title": "Arguments", - "type": "object", - "description": "No description", - "properties": { - - "genome_fasta": { - "type": "string", - "description": "Type: `file`, required, example: `genome_sequence.fa.gz`. Reference genome fasta", - "help_text": "Type: `file`, required, example: `genome_sequence.fa.gz`. Reference genome fasta." - }, - - "transcriptome_gtf": { - "type": "string", - "description": "Type: `file`, required, example: `transcriptome_annotation.gtf.gz`. Reference transcriptome annotation", - "help_text": "Type: `file`, required, example: `transcriptome_annotation.gtf.gz`. Reference transcriptome annotation." - }, - - "output": { - "type": "string", - "description": "Type: `file`, required, default: `$id.$key.output.gz`, example: `star_index.tar.gz`. Star index", - "help_text": "Type: `file`, required, default: `$id.$key.output.gz`, example: `star_index.tar.gz`. Star index", - "default": "$id.$key.output.gz" - } - - } - }, - "nextflow input-output arguments" : { - "title": "Nextflow input-output arguments", - "type": "object", - "description": "Input/output parameters for Nextflow itself. 
Please note that both publishDir and publish_dir are supported but at least one has to be configured.", - "properties": { - - "publish_dir": { - "type": "string", - "description": "Type: `string`, required, example: `output/`. Path to an output directory", - "help_text": "Type: `string`, required, example: `output/`. Path to an output directory." - }, - - "param_list": { - "type": "string", - "description": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel", - "help_text": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel. A `param_list` can either be a list of maps, a csv file, a json file, a yaml file, or simply a yaml blob.\n\n* A list of maps (as-is) where the keys of each map corresponds to the arguments of the pipeline. Example: in a `nextflow.config` file: `param_list: [ [\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027], [\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027] ]`.\n* A csv file should have column names which correspond to the different arguments of this pipeline. Example: `--param_list data.csv` with columns `id,input`.\n* A json or a yaml file should be a list of maps, each of which has keys corresponding to the arguments of the pipeline. Example: `--param_list data.json` with contents `[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]`.\n* A yaml blob can also be passed directly as a string. Example: `--param_list \"[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]\"`.\n\nWhen passing a csv, json or yaml file, relative path names are relativized to the location of the parameter file. No relativation is performed when `param_list` is a list of maps (as-is) or a yaml blob.", - "hidden": true - } - - } - } + "description": "No description", + "properties": { + + + "genome_fasta": { + "type": + "string", + "description": "Type: `file`, required, example: `genome_sequence.fa.gz`. Reference genome fasta", + "help_text": "Type: `file`, required, example: `genome_sequence.fa.gz`. Reference genome fasta." + + } + + + , + "transcriptome_gtf": { + "type": + "string", + "description": "Type: `file`, required, example: `transcriptome_annotation.gtf.gz`. Reference transcriptome annotation", + "help_text": "Type: `file`, required, example: `transcriptome_annotation.gtf.gz`. Reference transcriptome annotation." + + } + + + , + "output": { + "type": + "string", + "description": "Type: `file`, required, default: `$id.$key.output.gz`, example: `star_index.tar.gz`. Star index", + "help_text": "Type: `file`, required, default: `$id.$key.output.gz`, example: `star_index.tar.gz`. Star index" + , + "default": "$id.$key.output.gz" + } + + +} +}, + + + "nextflow input-output arguments" : { + "title": "Nextflow input-output arguments", + "type": "object", + "description": "Input/output parameters for Nextflow itself. Please note that both publishDir and publish_dir are supported but at least one has to be configured.", + "properties": { + + + "publish_dir": { + "type": + "string", + "description": "Type: `string`, required, example: `output/`. Path to an output directory", + "help_text": "Type: `string`, required, example: `output/`. Path to an output directory." 
+ + } + + + , + "param_list": { + "type": + "string", + "description": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel", + "help_text": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel. A `param_list` can either be a list of maps, a csv file, a json file, a yaml file, or simply a yaml blob.\n\n* A list of maps (as-is) where the keys of each map corresponds to the arguments of the pipeline. Example: in a `nextflow.config` file: `param_list: [ [\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027], [\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027] ]`.\n* A csv file should have column names which correspond to the different arguments of this pipeline. Example: `--param_list data.csv` with columns `id,input`.\n* A json or a yaml file should be a list of maps, each of which has keys corresponding to the arguments of the pipeline. Example: `--param_list data.json` with contents `[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]`.\n* A yaml blob can also be passed directly as a string. Example: `--param_list \"[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]\"`.\n\nWhen passing a csv, json or yaml file, relative path names are relativized to the location of the parameter file. No relativation is performed when `param_list` is a list of maps (as-is) or a yaml blob.", + "hidden": true + + } + + +} +} +}, +"allOf": [ + + { + "$ref": "#/definitions/arguments" }, - "allOf": [ - { - "$ref": "#/definitions/arguments" - }, - { - "$ref": "#/definitions/nextflow input-output arguments" - } - ] + + { + "$ref": "#/definitions/nextflow input-output arguments" + } +] } diff --git a/target/nextflow/reference/build_cellranger_reference/.config.vsh.yaml b/target/nextflow/reference/build_cellranger_reference/.config.vsh.yaml index c37de090499..4fb149e6340 100644 --- a/target/nextflow/reference/build_cellranger_reference/.config.vsh.yaml +++ b/target/nextflow/reference/build_cellranger_reference/.config.vsh.yaml @@ -1,7 +1,7 @@ functionality: name: "build_cellranger_reference" namespace: "reference" - version: "0.12.3" + version: "0.12.4" authors: - name: "Angela Oliveira Pisco" roles: @@ -182,6 +182,6 @@ info: output: "/home/runner/work/openpipeline/openpipeline/target/nextflow/reference/build_cellranger_reference" executable: "/home/runner/work/openpipeline/openpipeline/target/nextflow/reference/build_cellranger_reference/build_cellranger_reference" viash_version: "0.7.5" - git_commit: "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" + git_commit: "a075b9f384e200b357c4c85801062a980ddb3383" git_remote: "https://github.com/openpipelines-bio/openpipeline" - git_tag: "0.12.2-3-g827d483cf7" + git_tag: "0.12.3-3-ga075b9f384" diff --git a/target/nextflow/reference/build_cellranger_reference/main.nf b/target/nextflow/reference/build_cellranger_reference/main.nf index 50b7ce7c806..b56982f534e 100644 --- a/target/nextflow/reference/build_cellranger_reference/main.nf +++ b/target/nextflow/reference/build_cellranger_reference/main.nf @@ -1,4 +1,4 @@ -// build_cellranger_reference 0.12.3 +// build_cellranger_reference 0.12.4 // // This wrapper script is auto-generated by viash 0.7.5 and is thus a derivative // work thereof. 
This software comes with ABSOLUTELY NO WARRANTY from Data @@ -28,7 +28,7 @@ thisConfig = processConfig(jsonSlurper.parseText('''{ "functionality" : { "name" : "build_cellranger_reference", "namespace" : "reference", - "version" : "0.12.3", + "version" : "0.12.4", "authors" : [ { "name" : "Angela Oliveira Pisco", @@ -268,9 +268,9 @@ thisConfig = processConfig(jsonSlurper.parseText('''{ "platform" : "nextflow", "output" : "/home/runner/work/openpipeline/openpipeline/target/nextflow/reference/build_cellranger_reference", "viash_version" : "0.7.5", - "git_commit" : "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f", + "git_commit" : "a075b9f384e200b357c4c85801062a980ddb3383", "git_remote" : "https://github.com/openpipelines-bio/openpipeline", - "git_tag" : "0.12.2-3-g827d483cf7" + "git_tag" : "0.12.3-3-ga075b9f384" } }''')) diff --git a/target/nextflow/reference/build_cellranger_reference/nextflow.config b/target/nextflow/reference/build_cellranger_reference/nextflow.config index 3fd391dcb12..9418dcf3275 100644 --- a/target/nextflow/reference/build_cellranger_reference/nextflow.config +++ b/target/nextflow/reference/build_cellranger_reference/nextflow.config @@ -2,7 +2,7 @@ manifest { name = 'build_cellranger_reference' mainScript = 'main.nf' nextflowVersion = '!>=20.12.1-edge' - version = '0.12.3' + version = '0.12.4' description = 'Build a Cell Ranger-compatible reference folder from user-supplied genome FASTA and gene GTF files. Creates a new folder named after the genome.' author = 'Angela Oliveira Pisco, Robrecht Cannoodt' } diff --git a/target/nextflow/reference/build_cellranger_reference/nextflow_schema.json b/target/nextflow/reference/build_cellranger_reference/nextflow_schema.json index 2dfc3c92efc..1641ad1b08c 100644 --- a/target/nextflow/reference/build_cellranger_reference/nextflow_schema.json +++ b/target/nextflow/reference/build_cellranger_reference/nextflow_schema.json @@ -1,64 +1,91 @@ { - "$schema": "http://json-schema.org/draft-07/schema", - "title": "build_cellranger_reference", - "description": "Build a Cell Ranger-compatible reference folder from user-supplied genome FASTA and gene GTF files. Creates a new folder named after the genome.", +"$schema": "http://json-schema.org/draft-07/schema", +"title": "build_cellranger_reference", +"description": "Build a Cell Ranger-compatible reference folder from user-supplied genome FASTA and gene GTF files. Creates a new folder named after the genome.", +"type": "object", +"definitions": { + + + + "arguments" : { + "title": "Arguments", "type": "object", - "definitions": { - "arguments" : { - "title": "Arguments", - "type": "object", - "description": "No description", - "properties": { - - "genome_fasta": { - "type": "string", - "description": "Type: `file`, required, example: `genome_sequence.fa.gz`. Reference genome fasta", - "help_text": "Type: `file`, required, example: `genome_sequence.fa.gz`. Reference genome fasta." - }, - - "transcriptome_gtf": { - "type": "string", - "description": "Type: `file`, required, example: `transcriptome_annotation.gtf.gz`. Reference transcriptome annotation", - "help_text": "Type: `file`, required, example: `transcriptome_annotation.gtf.gz`. Reference transcriptome annotation." - }, - - "output": { - "type": "string", - "description": "Type: `file`, required, default: `$id.$key.output.output`, example: `cellranger_reference`. Output folder", - "help_text": "Type: `file`, required, default: `$id.$key.output.output`, example: `cellranger_reference`. 
Output folder", - "default": "$id.$key.output.output" - } - - } - }, - "nextflow input-output arguments" : { - "title": "Nextflow input-output arguments", - "type": "object", - "description": "Input/output parameters for Nextflow itself. Please note that both publishDir and publish_dir are supported but at least one has to be configured.", - "properties": { - - "publish_dir": { - "type": "string", - "description": "Type: `string`, required, example: `output/`. Path to an output directory", - "help_text": "Type: `string`, required, example: `output/`. Path to an output directory." - }, - - "param_list": { - "type": "string", - "description": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel", - "help_text": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel. A `param_list` can either be a list of maps, a csv file, a json file, a yaml file, or simply a yaml blob.\n\n* A list of maps (as-is) where the keys of each map corresponds to the arguments of the pipeline. Example: in a `nextflow.config` file: `param_list: [ [\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027], [\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027] ]`.\n* A csv file should have column names which correspond to the different arguments of this pipeline. Example: `--param_list data.csv` with columns `id,input`.\n* A json or a yaml file should be a list of maps, each of which has keys corresponding to the arguments of the pipeline. Example: `--param_list data.json` with contents `[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]`.\n* A yaml blob can also be passed directly as a string. Example: `--param_list \"[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]\"`.\n\nWhen passing a csv, json or yaml file, relative path names are relativized to the location of the parameter file. No relativation is performed when `param_list` is a list of maps (as-is) or a yaml blob.", - "hidden": true - } - - } - } + "description": "No description", + "properties": { + + + "genome_fasta": { + "type": + "string", + "description": "Type: `file`, required, example: `genome_sequence.fa.gz`. Reference genome fasta", + "help_text": "Type: `file`, required, example: `genome_sequence.fa.gz`. Reference genome fasta." + + } + + + , + "transcriptome_gtf": { + "type": + "string", + "description": "Type: `file`, required, example: `transcriptome_annotation.gtf.gz`. Reference transcriptome annotation", + "help_text": "Type: `file`, required, example: `transcriptome_annotation.gtf.gz`. Reference transcriptome annotation." + + } + + + , + "output": { + "type": + "string", + "description": "Type: `file`, required, default: `$id.$key.output.output`, example: `cellranger_reference`. Output folder", + "help_text": "Type: `file`, required, default: `$id.$key.output.output`, example: `cellranger_reference`. Output folder" + , + "default": "$id.$key.output.output" + } + + +} +}, + + + "nextflow input-output arguments" : { + "title": "Nextflow input-output arguments", + "type": "object", + "description": "Input/output parameters for Nextflow itself. 
Please note that both publishDir and publish_dir are supported but at least one has to be configured.", + "properties": { + + + "publish_dir": { + "type": + "string", + "description": "Type: `string`, required, example: `output/`. Path to an output directory", + "help_text": "Type: `string`, required, example: `output/`. Path to an output directory." + + } + + + , + "param_list": { + "type": + "string", + "description": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel", + "help_text": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel. A `param_list` can either be a list of maps, a csv file, a json file, a yaml file, or simply a yaml blob.\n\n* A list of maps (as-is) where the keys of each map corresponds to the arguments of the pipeline. Example: in a `nextflow.config` file: `param_list: [ [\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027], [\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027] ]`.\n* A csv file should have column names which correspond to the different arguments of this pipeline. Example: `--param_list data.csv` with columns `id,input`.\n* A json or a yaml file should be a list of maps, each of which has keys corresponding to the arguments of the pipeline. Example: `--param_list data.json` with contents `[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]`.\n* A yaml blob can also be passed directly as a string. Example: `--param_list \"[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]\"`.\n\nWhen passing a csv, json or yaml file, relative path names are relativized to the location of the parameter file. 
No relativation is performed when `param_list` is a list of maps (as-is) or a yaml blob.", + "hidden": true + + } + + +} +} +}, +"allOf": [ + + { + "$ref": "#/definitions/arguments" }, - "allOf": [ - { - "$ref": "#/definitions/arguments" - }, - { - "$ref": "#/definitions/nextflow input-output arguments" - } - ] + + { + "$ref": "#/definitions/nextflow input-output arguments" + } +] } diff --git a/target/nextflow/reference/make_reference/.config.vsh.yaml b/target/nextflow/reference/make_reference/.config.vsh.yaml index ffa4fde4afc..8923203c593 100644 --- a/target/nextflow/reference/make_reference/.config.vsh.yaml +++ b/target/nextflow/reference/make_reference/.config.vsh.yaml @@ -1,7 +1,7 @@ functionality: name: "make_reference" namespace: "reference" - version: "0.12.3" + version: "0.12.4" authors: - name: "Angela Oliveira Pisco" roles: @@ -207,6 +207,6 @@ info: output: "/home/runner/work/openpipeline/openpipeline/target/nextflow/reference/make_reference" executable: "/home/runner/work/openpipeline/openpipeline/target/nextflow/reference/make_reference/make_reference" viash_version: "0.7.5" - git_commit: "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" + git_commit: "a075b9f384e200b357c4c85801062a980ddb3383" git_remote: "https://github.com/openpipelines-bio/openpipeline" - git_tag: "0.12.2-3-g827d483cf7" + git_tag: "0.12.3-3-ga075b9f384" diff --git a/target/nextflow/reference/make_reference/main.nf b/target/nextflow/reference/make_reference/main.nf index d915b934e2c..84c4e0164ed 100644 --- a/target/nextflow/reference/make_reference/main.nf +++ b/target/nextflow/reference/make_reference/main.nf @@ -1,4 +1,4 @@ -// make_reference 0.12.3 +// make_reference 0.12.4 // // This wrapper script is auto-generated by viash 0.7.5 and is thus a derivative // work thereof. 
This software comes with ABSOLUTELY NO WARRANTY from Data @@ -28,7 +28,7 @@ thisConfig = processConfig(jsonSlurper.parseText('''{ "functionality" : { "name" : "make_reference", "namespace" : "reference", - "version" : "0.12.3", + "version" : "0.12.4", "authors" : [ { "name" : "Angela Oliveira Pisco", @@ -286,9 +286,9 @@ thisConfig = processConfig(jsonSlurper.parseText('''{ "platform" : "nextflow", "output" : "/home/runner/work/openpipeline/openpipeline/target/nextflow/reference/make_reference", "viash_version" : "0.7.5", - "git_commit" : "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f", + "git_commit" : "a075b9f384e200b357c4c85801062a980ddb3383", "git_remote" : "https://github.com/openpipelines-bio/openpipeline", - "git_tag" : "0.12.2-3-g827d483cf7" + "git_tag" : "0.12.3-3-ga075b9f384" } }''')) diff --git a/target/nextflow/reference/make_reference/nextflow.config b/target/nextflow/reference/make_reference/nextflow.config index e525a0ce00d..efc17f8d31b 100644 --- a/target/nextflow/reference/make_reference/nextflow.config +++ b/target/nextflow/reference/make_reference/nextflow.config @@ -2,7 +2,7 @@ manifest { name = 'make_reference' mainScript = 'main.nf' nextflowVersion = '!>=20.12.1-edge' - version = '0.12.3' + version = '0.12.4' description = 'Preprocess and build a transcriptome reference.\n\nExample input files are:\n - `genome_fasta`: https://ftp.ebi.ac.uk/pub/databases/gencode/Gencode_human/release_41/GRCh38.primary_assembly.genome.fa.gz\n - `transcriptome_gtf`: https://ftp.ebi.ac.uk/pub/databases/gencode/Gencode_human/release_41/gencode.v41.annotation.gtf.gz\n - `ercc`: https://assets.thermofisher.com/TFS-Assets/LSG/manuals/ERCC92.zip\n' author = 'Angela Oliveira Pisco, Robrecht Cannoodt' } diff --git a/target/nextflow/reference/make_reference/nextflow_schema.json b/target/nextflow/reference/make_reference/nextflow_schema.json index b960a369176..f0e22993ef6 100644 --- a/target/nextflow/reference/make_reference/nextflow_schema.json +++ b/target/nextflow/reference/make_reference/nextflow_schema.json @@ -1,83 +1,122 @@ { - "$schema": "http://json-schema.org/draft-07/schema", - "title": "make_reference", - "description": "Preprocess and build a transcriptome reference.\n\nExample input files are:\n - `genome_fasta`: https://ftp.ebi.ac.uk/pub/databases/gencode/Gencode_human/release_41/GRCh38.primary_assembly.genome.fa.gz\n - `transcriptome_gtf`: https://ftp.ebi.ac.uk/pub/databases/gencode/Gencode_human/release_41/gencode.v41.annotation.gtf.gz\n - `ercc`: https://assets.thermofisher.com/TFS-Assets/LSG/manuals/ERCC92.zip\n", +"$schema": "http://json-schema.org/draft-07/schema", +"title": "make_reference", +"description": "Preprocess and build a transcriptome reference.\n\nExample input files are:\n - `genome_fasta`: https://ftp.ebi.ac.uk/pub/databases/gencode/Gencode_human/release_41/GRCh38.primary_assembly.genome.fa.gz\n - `transcriptome_gtf`: https://ftp.ebi.ac.uk/pub/databases/gencode/Gencode_human/release_41/gencode.v41.annotation.gtf.gz\n - `ercc`: https://assets.thermofisher.com/TFS-Assets/LSG/manuals/ERCC92.zip\n", +"type": "object", +"definitions": { + + + + "arguments" : { + "title": "Arguments", "type": "object", - "definitions": { - "arguments" : { - "title": "Arguments", - "type": "object", - "description": "No description", - "properties": { - - "genome_fasta": { - "type": "string", - "description": "Type: `file`, required, example: `genome_fasta.fa.gz`. Reference genome fasta", - "help_text": "Type: `file`, required, example: `genome_fasta.fa.gz`. Reference genome fasta. 
Example: " - }, - - "transcriptome_gtf": { - "type": "string", - "description": "Type: `file`, required, example: `transcriptome.gtf.gz`. Reference transcriptome annotation", - "help_text": "Type: `file`, required, example: `transcriptome.gtf.gz`. Reference transcriptome annotation." - }, - - "ercc": { - "type": "string", - "description": "Type: `file`, example: `ercc.zip`. ERCC sequence and annotation file", - "help_text": "Type: `file`, example: `ercc.zip`. ERCC sequence and annotation file." - }, - - "subset_regex": { - "type": "string", - "description": "Type: `string`, example: `(ERCC-00002|chr1)`. Will subset the reference chromosomes using the given regex", - "help_text": "Type: `string`, example: `(ERCC-00002|chr1)`. Will subset the reference chromosomes using the given regex." - }, - - "output_fasta": { - "type": "string", - "description": "Type: `file`, required, default: `$id.$key.output_fasta.gz`, example: `genome_sequence.fa.gz`. Output genome sequence fasta", - "help_text": "Type: `file`, required, default: `$id.$key.output_fasta.gz`, example: `genome_sequence.fa.gz`. Output genome sequence fasta.", - "default": "$id.$key.output_fasta.gz" - }, - - "output_gtf": { - "type": "string", - "description": "Type: `file`, required, default: `$id.$key.output_gtf.gz`, example: `transcriptome_annotation.gtf.gz`. Output transcriptome annotation gtf", - "help_text": "Type: `file`, required, default: `$id.$key.output_gtf.gz`, example: `transcriptome_annotation.gtf.gz`. Output transcriptome annotation gtf.", - "default": "$id.$key.output_gtf.gz" - } - - } - }, - "nextflow input-output arguments" : { - "title": "Nextflow input-output arguments", - "type": "object", - "description": "Input/output parameters for Nextflow itself. Please note that both publishDir and publish_dir are supported but at least one has to be configured.", - "properties": { - - "publish_dir": { - "type": "string", - "description": "Type: `string`, required, example: `output/`. Path to an output directory", - "help_text": "Type: `string`, required, example: `output/`. Path to an output directory." - }, - - "param_list": { - "type": "string", - "description": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel", - "help_text": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel. A `param_list` can either be a list of maps, a csv file, a json file, a yaml file, or simply a yaml blob.\n\n* A list of maps (as-is) where the keys of each map corresponds to the arguments of the pipeline. Example: in a `nextflow.config` file: `param_list: [ [\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027], [\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027] ]`.\n* A csv file should have column names which correspond to the different arguments of this pipeline. Example: `--param_list data.csv` with columns `id,input`.\n* A json or a yaml file should be a list of maps, each of which has keys corresponding to the arguments of the pipeline. Example: `--param_list data.json` with contents `[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]`.\n* A yaml blob can also be passed directly as a string. 
Example: `--param_list \"[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]\"`.\n\nWhen passing a csv, json or yaml file, relative path names are relativized to the location of the parameter file. No relativation is performed when `param_list` is a list of maps (as-is) or a yaml blob.", - "hidden": true - } - - } - } + "description": "No description", + "properties": { + + + "genome_fasta": { + "type": + "string", + "description": "Type: `file`, required, example: `genome_fasta.fa.gz`. Reference genome fasta", + "help_text": "Type: `file`, required, example: `genome_fasta.fa.gz`. Reference genome fasta. Example: " + + } + + + , + "transcriptome_gtf": { + "type": + "string", + "description": "Type: `file`, required, example: `transcriptome.gtf.gz`. Reference transcriptome annotation", + "help_text": "Type: `file`, required, example: `transcriptome.gtf.gz`. Reference transcriptome annotation." + + } + + + , + "ercc": { + "type": + "string", + "description": "Type: `file`, example: `ercc.zip`. ERCC sequence and annotation file", + "help_text": "Type: `file`, example: `ercc.zip`. ERCC sequence and annotation file." + + } + + + , + "subset_regex": { + "type": + "string", + "description": "Type: `string`, example: `(ERCC-00002|chr1)`. Will subset the reference chromosomes using the given regex", + "help_text": "Type: `string`, example: `(ERCC-00002|chr1)`. Will subset the reference chromosomes using the given regex." + + } + + + , + "output_fasta": { + "type": + "string", + "description": "Type: `file`, required, default: `$id.$key.output_fasta.gz`, example: `genome_sequence.fa.gz`. Output genome sequence fasta", + "help_text": "Type: `file`, required, default: `$id.$key.output_fasta.gz`, example: `genome_sequence.fa.gz`. Output genome sequence fasta." + , + "default": "$id.$key.output_fasta.gz" + } + + + , + "output_gtf": { + "type": + "string", + "description": "Type: `file`, required, default: `$id.$key.output_gtf.gz`, example: `transcriptome_annotation.gtf.gz`. Output transcriptome annotation gtf", + "help_text": "Type: `file`, required, default: `$id.$key.output_gtf.gz`, example: `transcriptome_annotation.gtf.gz`. Output transcriptome annotation gtf." + , + "default": "$id.$key.output_gtf.gz" + } + + +} +}, + + + "nextflow input-output arguments" : { + "title": "Nextflow input-output arguments", + "type": "object", + "description": "Input/output parameters for Nextflow itself. Please note that both publishDir and publish_dir are supported but at least one has to be configured.", + "properties": { + + + "publish_dir": { + "type": + "string", + "description": "Type: `string`, required, example: `output/`. Path to an output directory", + "help_text": "Type: `string`, required, example: `output/`. Path to an output directory." + + } + + + , + "param_list": { + "type": + "string", + "description": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel", + "help_text": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel. A `param_list` can either be a list of maps, a csv file, a json file, a yaml file, or simply a yaml blob.\n\n* A list of maps (as-is) where the keys of each map corresponds to the arguments of the pipeline. 
Example: in a `nextflow.config` file: `param_list: [ [\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027], [\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027] ]`.\n* A csv file should have column names which correspond to the different arguments of this pipeline. Example: `--param_list data.csv` with columns `id,input`.\n* A json or a yaml file should be a list of maps, each of which has keys corresponding to the arguments of the pipeline. Example: `--param_list data.json` with contents `[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]`.\n* A yaml blob can also be passed directly as a string. Example: `--param_list \"[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]\"`.\n\nWhen passing a csv, json or yaml file, relative path names are relativized to the location of the parameter file. No relativation is performed when `param_list` is a list of maps (as-is) or a yaml blob.", + "hidden": true + + } + + +} +} +}, +"allOf": [ + + { + "$ref": "#/definitions/arguments" }, - "allOf": [ - { - "$ref": "#/definitions/arguments" - }, - { - "$ref": "#/definitions/nextflow input-output arguments" - } - ] + + { + "$ref": "#/definitions/nextflow input-output arguments" + } +] } diff --git a/target/nextflow/report/mermaid/.config.vsh.yaml b/target/nextflow/report/mermaid/.config.vsh.yaml index 8f07f821e35..29c0e241526 100644 --- a/target/nextflow/report/mermaid/.config.vsh.yaml +++ b/target/nextflow/report/mermaid/.config.vsh.yaml @@ -1,7 +1,7 @@ functionality: name: "mermaid" namespace: "report" - version: "0.12.3" + version: "0.12.4" authors: - name: "Dries De Maeyer" roles: @@ -180,6 +180,6 @@ info: output: "/home/runner/work/openpipeline/openpipeline/target/nextflow/report/mermaid" executable: "/home/runner/work/openpipeline/openpipeline/target/nextflow/report/mermaid/mermaid" viash_version: "0.7.5" - git_commit: "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" + git_commit: "a075b9f384e200b357c4c85801062a980ddb3383" git_remote: "https://github.com/openpipelines-bio/openpipeline" - git_tag: "0.12.2-3-g827d483cf7" + git_tag: "0.12.3-3-ga075b9f384" diff --git a/target/nextflow/report/mermaid/main.nf b/target/nextflow/report/mermaid/main.nf index 81597f72fb1..b06fc6a7aab 100644 --- a/target/nextflow/report/mermaid/main.nf +++ b/target/nextflow/report/mermaid/main.nf @@ -1,4 +1,4 @@ -// mermaid 0.12.3 +// mermaid 0.12.4 // // This wrapper script is auto-generated by viash 0.7.5 and is thus a derivative // work thereof. 
This software comes with ABSOLUTELY NO WARRANTY from Data @@ -27,7 +27,7 @@ thisConfig = processConfig(jsonSlurper.parseText('''{ "functionality" : { "name" : "mermaid", "namespace" : "report", - "version" : "0.12.3", + "version" : "0.12.4", "authors" : [ { "name" : "Dries De Maeyer", @@ -254,9 +254,9 @@ thisConfig = processConfig(jsonSlurper.parseText('''{ "platform" : "nextflow", "output" : "/home/runner/work/openpipeline/openpipeline/target/nextflow/report/mermaid", "viash_version" : "0.7.5", - "git_commit" : "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f", + "git_commit" : "a075b9f384e200b357c4c85801062a980ddb3383", "git_remote" : "https://github.com/openpipelines-bio/openpipeline", - "git_tag" : "0.12.2-3-g827d483cf7" + "git_tag" : "0.12.3-3-ga075b9f384" } }''')) diff --git a/target/nextflow/report/mermaid/nextflow.config b/target/nextflow/report/mermaid/nextflow.config index a2788a9845e..d95905ffb16 100644 --- a/target/nextflow/report/mermaid/nextflow.config +++ b/target/nextflow/report/mermaid/nextflow.config @@ -2,7 +2,7 @@ manifest { name = 'mermaid' mainScript = 'main.nf' nextflowVersion = '!>=20.12.1-edge' - version = '0.12.3' + version = '0.12.4' description = 'Generates a network from mermaid code.\n' author = 'Dries De Maeyer' } diff --git a/target/nextflow/report/mermaid/nextflow_schema.json b/target/nextflow/report/mermaid/nextflow_schema.json index 9117514aa29..3ccef175850 100644 --- a/target/nextflow/report/mermaid/nextflow_schema.json +++ b/target/nextflow/report/mermaid/nextflow_schema.json @@ -1,87 +1,126 @@ { - "$schema": "http://json-schema.org/draft-07/schema", - "title": "mermaid", - "description": "Generates a network from mermaid code.\n", +"$schema": "http://json-schema.org/draft-07/schema", +"title": "mermaid", +"description": "Generates a network from mermaid code.\n", +"type": "object", +"definitions": { + + + + "arguments" : { + "title": "Arguments", "type": "object", - "definitions": { - "arguments" : { - "title": "Arguments", - "type": "object", - "description": "No description", - "properties": { - - "input": { - "type": "string", - "description": "Type: `file`, required. Input directory", - "help_text": "Type: `file`, required. Input directory" - }, - - "output": { - "type": "string", - "description": "Type: `file`, required, default: `$id.$key.output.output`. Generated network as output", - "help_text": "Type: `file`, required, default: `$id.$key.output.output`. Generated network as output.", - "default": "$id.$key.output.output" - }, - - "output_format": { - "type": "string", - "description": "Type: `string`, choices: ``svg`, `png`, `pdf``. Output format for the generated image", - "help_text": "Type: `string`, choices: ``svg`, `png`, `pdf``. Output format for the generated image. By default will be inferred from the extension \nof the file specified with --output.\n", - "enum": ["svg", "png", "pdf"] + "description": "No description", + "properties": { + + + "input": { + "type": + "string", + "description": "Type: `file`, required. Input directory", + "help_text": "Type: `file`, required. Input directory" - }, - - "width": { - "type": "integer", - "description": "Type: `integer`, default: `800`. Width of the page", - "help_text": "Type: `integer`, default: `800`. Width of the page", - "default": "800" - }, - - "height": { - "type": "integer", - "description": "Type: `integer`, default: `600`. Height of the page", - "help_text": "Type: `integer`, default: `600`. 
Height of the page", - "default": "600" - }, - - "background_color": { - "type": "string", - "description": "Type: `string`, default: `white`, example: `#F0F0F0`. Background color for pngs/svgs (not pdfs)", - "help_text": "Type: `string`, default: `white`, example: `#F0F0F0`. Background color for pngs/svgs (not pdfs)", - "default": "white" - } - - } - }, - "nextflow input-output arguments" : { - "title": "Nextflow input-output arguments", - "type": "object", - "description": "Input/output parameters for Nextflow itself. Please note that both publishDir and publish_dir are supported but at least one has to be configured.", - "properties": { - - "publish_dir": { - "type": "string", - "description": "Type: `string`, required, example: `output/`. Path to an output directory", - "help_text": "Type: `string`, required, example: `output/`. Path to an output directory." - }, - - "param_list": { - "type": "string", - "description": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel", - "help_text": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel. A `param_list` can either be a list of maps, a csv file, a json file, a yaml file, or simply a yaml blob.\n\n* A list of maps (as-is) where the keys of each map corresponds to the arguments of the pipeline. Example: in a `nextflow.config` file: `param_list: [ [\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027], [\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027] ]`.\n* A csv file should have column names which correspond to the different arguments of this pipeline. Example: `--param_list data.csv` with columns `id,input`.\n* A json or a yaml file should be a list of maps, each of which has keys corresponding to the arguments of the pipeline. Example: `--param_list data.json` with contents `[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]`.\n* A yaml blob can also be passed directly as a string. Example: `--param_list \"[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]\"`.\n\nWhen passing a csv, json or yaml file, relative path names are relativized to the location of the parameter file. No relativation is performed when `param_list` is a list of maps (as-is) or a yaml blob.", - "hidden": true - } - - } - } + } + + + , + "output": { + "type": + "string", + "description": "Type: `file`, required, default: `$id.$key.output.output`. Generated network as output", + "help_text": "Type: `file`, required, default: `$id.$key.output.output`. Generated network as output." + , + "default": "$id.$key.output.output" + } + + + , + "output_format": { + "type": + "string", + "description": "Type: `string`, choices: ``svg`, `png`, `pdf``. Output format for the generated image", + "help_text": "Type: `string`, choices: ``svg`, `png`, `pdf``. Output format for the generated image. By default will be inferred from the extension \nof the file specified with --output.\n", + "enum": ["svg", "png", "pdf"] + + + } + + + , + "width": { + "type": + "integer", + "description": "Type: `integer`, default: `800`. Width of the page", + "help_text": "Type: `integer`, default: `800`. 
Width of the page" + , + "default": "800" + } + + + , + "height": { + "type": + "integer", + "description": "Type: `integer`, default: `600`. Height of the page", + "help_text": "Type: `integer`, default: `600`. Height of the page" + , + "default": "600" + } + + + , + "background_color": { + "type": + "string", + "description": "Type: `string`, default: `white`, example: `#F0F0F0`. Background color for pngs/svgs (not pdfs)", + "help_text": "Type: `string`, default: `white`, example: `#F0F0F0`. Background color for pngs/svgs (not pdfs)" + , + "default": "white" + } + + +} +}, + + + "nextflow input-output arguments" : { + "title": "Nextflow input-output arguments", + "type": "object", + "description": "Input/output parameters for Nextflow itself. Please note that both publishDir and publish_dir are supported but at least one has to be configured.", + "properties": { + + + "publish_dir": { + "type": + "string", + "description": "Type: `string`, required, example: `output/`. Path to an output directory", + "help_text": "Type: `string`, required, example: `output/`. Path to an output directory." + + } + + + , + "param_list": { + "type": + "string", + "description": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel", + "help_text": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel. A `param_list` can either be a list of maps, a csv file, a json file, a yaml file, or simply a yaml blob.\n\n* A list of maps (as-is) where the keys of each map corresponds to the arguments of the pipeline. Example: in a `nextflow.config` file: `param_list: [ [\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027], [\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027] ]`.\n* A csv file should have column names which correspond to the different arguments of this pipeline. Example: `--param_list data.csv` with columns `id,input`.\n* A json or a yaml file should be a list of maps, each of which has keys corresponding to the arguments of the pipeline. Example: `--param_list data.json` with contents `[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]`.\n* A yaml blob can also be passed directly as a string. Example: `--param_list \"[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]\"`.\n\nWhen passing a csv, json or yaml file, relative path names are relativized to the location of the parameter file. 
No relativation is performed when `param_list` is a list of maps (as-is) or a yaml blob.", + "hidden": true + + } + + +} +} +}, +"allOf": [ + + { + "$ref": "#/definitions/arguments" }, - "allOf": [ - { - "$ref": "#/definitions/arguments" - }, - { - "$ref": "#/definitions/nextflow input-output arguments" - } - ] + + { + "$ref": "#/definitions/nextflow input-output arguments" + } +] } diff --git a/target/nextflow/transfer/publish/.config.vsh.yaml b/target/nextflow/transfer/publish/.config.vsh.yaml index 94bb9755a43..099a2122b8d 100644 --- a/target/nextflow/transfer/publish/.config.vsh.yaml +++ b/target/nextflow/transfer/publish/.config.vsh.yaml @@ -1,7 +1,7 @@ functionality: name: "publish" namespace: "transfer" - version: "0.12.3" + version: "0.12.4" authors: - name: "Toni Verbeiren" roles: @@ -120,6 +120,6 @@ info: output: "/home/runner/work/openpipeline/openpipeline/target/nextflow/transfer/publish" executable: "/home/runner/work/openpipeline/openpipeline/target/nextflow/transfer/publish/publish" viash_version: "0.7.5" - git_commit: "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" + git_commit: "a075b9f384e200b357c4c85801062a980ddb3383" git_remote: "https://github.com/openpipelines-bio/openpipeline" - git_tag: "0.12.2-3-g827d483cf7" + git_tag: "0.12.3-3-ga075b9f384" diff --git a/target/nextflow/transfer/publish/main.nf b/target/nextflow/transfer/publish/main.nf index 354f635ef87..8caa1b903b7 100644 --- a/target/nextflow/transfer/publish/main.nf +++ b/target/nextflow/transfer/publish/main.nf @@ -1,4 +1,4 @@ -// publish 0.12.3 +// publish 0.12.4 // // This wrapper script is auto-generated by viash 0.7.5 and is thus a derivative // work thereof. This software comes with ABSOLUTELY NO WARRANTY from Data @@ -27,7 +27,7 @@ thisConfig = processConfig(jsonSlurper.parseText('''{ "functionality" : { "name" : "publish", "namespace" : "transfer", - "version" : "0.12.3", + "version" : "0.12.4", "authors" : [ { "name" : "Toni Verbeiren", @@ -176,9 +176,9 @@ thisConfig = processConfig(jsonSlurper.parseText('''{ "platform" : "nextflow", "output" : "/home/runner/work/openpipeline/openpipeline/target/nextflow/transfer/publish", "viash_version" : "0.7.5", - "git_commit" : "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f", + "git_commit" : "a075b9f384e200b357c4c85801062a980ddb3383", "git_remote" : "https://github.com/openpipelines-bio/openpipeline", - "git_tag" : "0.12.2-3-g827d483cf7" + "git_tag" : "0.12.3-3-ga075b9f384" } }''')) diff --git a/target/nextflow/transfer/publish/nextflow.config b/target/nextflow/transfer/publish/nextflow.config index e5d69ba26da..e90f5f6cc05 100644 --- a/target/nextflow/transfer/publish/nextflow.config +++ b/target/nextflow/transfer/publish/nextflow.config @@ -2,7 +2,7 @@ manifest { name = 'publish' mainScript = 'main.nf' nextflowVersion = '!>=20.12.1-edge' - version = '0.12.3' + version = '0.12.4' description = 'Publish an artifact and optionally rename with parameters' author = 'Toni Verbeiren' } diff --git a/target/nextflow/transfer/publish/nextflow_schema.json b/target/nextflow/transfer/publish/nextflow_schema.json index 697c42bed79..22417228cd8 100644 --- a/target/nextflow/transfer/publish/nextflow_schema.json +++ b/target/nextflow/transfer/publish/nextflow_schema.json @@ -1,58 +1,81 @@ { - "$schema": "http://json-schema.org/draft-07/schema", - "title": "publish", - "description": "Publish an artifact and optionally rename with parameters", +"$schema": "http://json-schema.org/draft-07/schema", +"title": "publish", +"description": "Publish an artifact and optionally rename with 
parameters", +"type": "object", +"definitions": { + + + + "arguments" : { + "title": "Arguments", "type": "object", - "definitions": { - "arguments" : { - "title": "Arguments", - "type": "object", - "description": "No description", - "properties": { - - "input": { - "type": "string", - "description": "Type: `file`, required. Input filename", - "help_text": "Type: `file`, required. Input filename" - }, - - "output": { - "type": "string", - "description": "Type: `file`, required, default: `$id.$key.output.output`. Output filename", - "help_text": "Type: `file`, required, default: `$id.$key.output.output`. Output filename", - "default": "$id.$key.output.output" - } - - } - }, - "nextflow input-output arguments" : { - "title": "Nextflow input-output arguments", - "type": "object", - "description": "Input/output parameters for Nextflow itself. Please note that both publishDir and publish_dir are supported but at least one has to be configured.", - "properties": { - - "publish_dir": { - "type": "string", - "description": "Type: `string`, required, example: `output/`. Path to an output directory", - "help_text": "Type: `string`, required, example: `output/`. Path to an output directory." - }, - - "param_list": { - "type": "string", - "description": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel", - "help_text": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel. A `param_list` can either be a list of maps, a csv file, a json file, a yaml file, or simply a yaml blob.\n\n* A list of maps (as-is) where the keys of each map corresponds to the arguments of the pipeline. Example: in a `nextflow.config` file: `param_list: [ [\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027], [\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027] ]`.\n* A csv file should have column names which correspond to the different arguments of this pipeline. Example: `--param_list data.csv` with columns `id,input`.\n* A json or a yaml file should be a list of maps, each of which has keys corresponding to the arguments of the pipeline. Example: `--param_list data.json` with contents `[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]`.\n* A yaml blob can also be passed directly as a string. Example: `--param_list \"[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]\"`.\n\nWhen passing a csv, json or yaml file, relative path names are relativized to the location of the parameter file. No relativation is performed when `param_list` is a list of maps (as-is) or a yaml blob.", - "hidden": true - } - - } - } + "description": "No description", + "properties": { + + + "input": { + "type": + "string", + "description": "Type: `file`, required. Input filename", + "help_text": "Type: `file`, required. Input filename" + + } + + + , + "output": { + "type": + "string", + "description": "Type: `file`, required, default: `$id.$key.output.output`. Output filename", + "help_text": "Type: `file`, required, default: `$id.$key.output.output`. 
Output filename" + , + "default": "$id.$key.output.output" + } + + +} +}, + + + "nextflow input-output arguments" : { + "title": "Nextflow input-output arguments", + "type": "object", + "description": "Input/output parameters for Nextflow itself. Please note that both publishDir and publish_dir are supported but at least one has to be configured.", + "properties": { + + + "publish_dir": { + "type": + "string", + "description": "Type: `string`, required, example: `output/`. Path to an output directory", + "help_text": "Type: `string`, required, example: `output/`. Path to an output directory." + + } + + + , + "param_list": { + "type": + "string", + "description": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel", + "help_text": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel. A `param_list` can either be a list of maps, a csv file, a json file, a yaml file, or simply a yaml blob.\n\n* A list of maps (as-is) where the keys of each map corresponds to the arguments of the pipeline. Example: in a `nextflow.config` file: `param_list: [ [\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027], [\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027] ]`.\n* A csv file should have column names which correspond to the different arguments of this pipeline. Example: `--param_list data.csv` with columns `id,input`.\n* A json or a yaml file should be a list of maps, each of which has keys corresponding to the arguments of the pipeline. Example: `--param_list data.json` with contents `[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]`.\n* A yaml blob can also be passed directly as a string. Example: `--param_list \"[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]\"`.\n\nWhen passing a csv, json or yaml file, relative path names are relativized to the location of the parameter file. 
No relativation is performed when `param_list` is a list of maps (as-is) or a yaml blob.", + "hidden": true + + } + + +} +} +}, +"allOf": [ + + { + "$ref": "#/definitions/arguments" }, - "allOf": [ - { - "$ref": "#/definitions/arguments" - }, - { - "$ref": "#/definitions/nextflow input-output arguments" - } - ] + + { + "$ref": "#/definitions/nextflow input-output arguments" + } +] } diff --git a/target/nextflow/transform/clr/.config.vsh.yaml b/target/nextflow/transform/clr/.config.vsh.yaml index 7d983156f1f..d13d5d690e7 100644 --- a/target/nextflow/transform/clr/.config.vsh.yaml +++ b/target/nextflow/transform/clr/.config.vsh.yaml @@ -1,7 +1,7 @@ functionality: name: "clr" namespace: "transform" - version: "0.12.3" + version: "0.12.4" authors: - name: "Dries Schaumont" roles: @@ -183,6 +183,6 @@ info: output: "/home/runner/work/openpipeline/openpipeline/target/nextflow/transform/clr" executable: "/home/runner/work/openpipeline/openpipeline/target/nextflow/transform/clr/clr" viash_version: "0.7.5" - git_commit: "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" + git_commit: "a075b9f384e200b357c4c85801062a980ddb3383" git_remote: "https://github.com/openpipelines-bio/openpipeline" - git_tag: "0.12.2-3-g827d483cf7" + git_tag: "0.12.3-3-ga075b9f384" diff --git a/target/nextflow/transform/clr/main.nf b/target/nextflow/transform/clr/main.nf index fbacb5d71a7..270b8588416 100644 --- a/target/nextflow/transform/clr/main.nf +++ b/target/nextflow/transform/clr/main.nf @@ -1,4 +1,4 @@ -// clr 0.12.3 +// clr 0.12.4 // // This wrapper script is auto-generated by viash 0.7.5 and is thus a derivative // work thereof. This software comes with ABSOLUTELY NO WARRANTY from Data @@ -27,7 +27,7 @@ thisConfig = processConfig(jsonSlurper.parseText('''{ "functionality" : { "name" : "clr", "namespace" : "transform", - "version" : "0.12.3", + "version" : "0.12.4", "authors" : [ { "name" : "Dries Schaumont", @@ -262,9 +262,9 @@ thisConfig = processConfig(jsonSlurper.parseText('''{ "platform" : "nextflow", "output" : "/home/runner/work/openpipeline/openpipeline/target/nextflow/transform/clr", "viash_version" : "0.7.5", - "git_commit" : "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f", + "git_commit" : "a075b9f384e200b357c4c85801062a980ddb3383", "git_remote" : "https://github.com/openpipelines-bio/openpipeline", - "git_tag" : "0.12.2-3-g827d483cf7" + "git_tag" : "0.12.3-3-ga075b9f384" } }''')) diff --git a/target/nextflow/transform/clr/nextflow.config b/target/nextflow/transform/clr/nextflow.config index 611553cd2c8..cc6a5a3cd21 100644 --- a/target/nextflow/transform/clr/nextflow.config +++ b/target/nextflow/transform/clr/nextflow.config @@ -2,7 +2,7 @@ manifest { name = 'clr' mainScript = 'main.nf' nextflowVersion = '!>=20.12.1-edge' - version = '0.12.3' + version = '0.12.4' description = 'Perform CLR normalization on CITE-seq data (Stoeckius et al., 2017).\n' author = 'Dries Schaumont' } diff --git a/target/nextflow/transform/clr/nextflow_schema.json b/target/nextflow/transform/clr/nextflow_schema.json index 68adcf8557d..043ac6b8144 100644 --- a/target/nextflow/transform/clr/nextflow_schema.json +++ b/target/nextflow/transform/clr/nextflow_schema.json @@ -1,79 +1,114 @@ { - "$schema": "http://json-schema.org/draft-07/schema", - "title": "clr", - "description": "Perform CLR normalization on CITE-seq data (Stoeckius et al., 2017).\n", +"$schema": "http://json-schema.org/draft-07/schema", +"title": "clr", +"description": "Perform CLR normalization on CITE-seq data (Stoeckius et al., 2017).\n", +"type": "object", +"definitions": { + 
+ + + "arguments" : { + "title": "Arguments", "type": "object", - "definitions": { - "arguments" : { - "title": "Arguments", - "type": "object", - "description": "No description", - "properties": { - - "input": { - "type": "string", - "description": "Type: `file`, required, example: `input.h5mu`. Input h5mu file", - "help_text": "Type: `file`, required, example: `input.h5mu`. Input h5mu file" - }, - - "modality": { - "type": "string", - "description": "Type: `string`, default: `prot`. ", - "help_text": "Type: `string`, default: `prot`. ", - "default": "prot" - }, - - "output": { - "type": "string", - "description": "Type: `file`, required, default: `$id.$key.output.h5mu`. Output h5mu file", - "help_text": "Type: `file`, required, default: `$id.$key.output.h5mu`. Output h5mu file.", - "default": "$id.$key.output.h5mu" - }, - - "output_compression": { - "type": "string", - "description": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. The compression format to be used on the output h5mu object", - "help_text": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. The compression format to be used on the output h5mu object.", - "enum": ["gzip", "lzf"] + "description": "No description", + "properties": { + + + "input": { + "type": + "string", + "description": "Type: `file`, required, example: `input.h5mu`. Input h5mu file", + "help_text": "Type: `file`, required, example: `input.h5mu`. Input h5mu file" - }, - - "output_layer": { - "type": "string", - "description": "Type: `string`. Output layer to use", - "help_text": "Type: `string`. Output layer to use. By default, use X." - } - - } - }, - "nextflow input-output arguments" : { - "title": "Nextflow input-output arguments", - "type": "object", - "description": "Input/output parameters for Nextflow itself. Please note that both publishDir and publish_dir are supported but at least one has to be configured.", - "properties": { - - "publish_dir": { - "type": "string", - "description": "Type: `string`, required, example: `output/`. Path to an output directory", - "help_text": "Type: `string`, required, example: `output/`. Path to an output directory." - }, - - "param_list": { - "type": "string", - "description": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel", - "help_text": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel. A `param_list` can either be a list of maps, a csv file, a json file, a yaml file, or simply a yaml blob.\n\n* A list of maps (as-is) where the keys of each map corresponds to the arguments of the pipeline. Example: in a `nextflow.config` file: `param_list: [ [\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027], [\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027] ]`.\n* A csv file should have column names which correspond to the different arguments of this pipeline. Example: `--param_list data.csv` with columns `id,input`.\n* A json or a yaml file should be a list of maps, each of which has keys corresponding to the arguments of the pipeline. Example: `--param_list data.json` with contents `[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]`.\n* A yaml blob can also be passed directly as a string. 
Example: `--param_list \"[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]\"`.\n\nWhen passing a csv, json or yaml file, relative path names are relativized to the location of the parameter file. No relativation is performed when `param_list` is a list of maps (as-is) or a yaml blob.", - "hidden": true - } - - } - } + } + + + , + "modality": { + "type": + "string", + "description": "Type: `string`, default: `prot`. ", + "help_text": "Type: `string`, default: `prot`. " + , + "default": "prot" + } + + + , + "output": { + "type": + "string", + "description": "Type: `file`, required, default: `$id.$key.output.h5mu`. Output h5mu file", + "help_text": "Type: `file`, required, default: `$id.$key.output.h5mu`. Output h5mu file." + , + "default": "$id.$key.output.h5mu" + } + + + , + "output_compression": { + "type": + "string", + "description": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. The compression format to be used on the output h5mu object", + "help_text": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. The compression format to be used on the output h5mu object.", + "enum": ["gzip", "lzf"] + + + } + + + , + "output_layer": { + "type": + "string", + "description": "Type: `string`. Output layer to use", + "help_text": "Type: `string`. Output layer to use. By default, use X." + + } + + +} +}, + + + "nextflow input-output arguments" : { + "title": "Nextflow input-output arguments", + "type": "object", + "description": "Input/output parameters for Nextflow itself. Please note that both publishDir and publish_dir are supported but at least one has to be configured.", + "properties": { + + + "publish_dir": { + "type": + "string", + "description": "Type: `string`, required, example: `output/`. Path to an output directory", + "help_text": "Type: `string`, required, example: `output/`. Path to an output directory." + + } + + + , + "param_list": { + "type": + "string", + "description": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel", + "help_text": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel. A `param_list` can either be a list of maps, a csv file, a json file, a yaml file, or simply a yaml blob.\n\n* A list of maps (as-is) where the keys of each map corresponds to the arguments of the pipeline. Example: in a `nextflow.config` file: `param_list: [ [\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027], [\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027] ]`.\n* A csv file should have column names which correspond to the different arguments of this pipeline. Example: `--param_list data.csv` with columns `id,input`.\n* A json or a yaml file should be a list of maps, each of which has keys corresponding to the arguments of the pipeline. Example: `--param_list data.json` with contents `[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]`.\n* A yaml blob can also be passed directly as a string. Example: `--param_list \"[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]\"`.\n\nWhen passing a csv, json or yaml file, relative path names are relativized to the location of the parameter file. 
No relativation is performed when `param_list` is a list of maps (as-is) or a yaml blob.", + "hidden": true + + } + + +} +} +}, +"allOf": [ + + { + "$ref": "#/definitions/arguments" }, - "allOf": [ - { - "$ref": "#/definitions/arguments" - }, - { - "$ref": "#/definitions/nextflow input-output arguments" - } - ] + + { + "$ref": "#/definitions/nextflow input-output arguments" + } +] } diff --git a/target/nextflow/transform/delete_layer/.config.vsh.yaml b/target/nextflow/transform/delete_layer/.config.vsh.yaml index c637b0deb48..5b6705e0e51 100644 --- a/target/nextflow/transform/delete_layer/.config.vsh.yaml +++ b/target/nextflow/transform/delete_layer/.config.vsh.yaml @@ -1,7 +1,7 @@ functionality: name: "delete_layer" namespace: "transform" - version: "0.12.3" + version: "0.12.4" authors: - name: "Dries Schaumont" roles: @@ -191,6 +191,6 @@ info: output: "/home/runner/work/openpipeline/openpipeline/target/nextflow/transform/delete_layer" executable: "/home/runner/work/openpipeline/openpipeline/target/nextflow/transform/delete_layer/delete_layer" viash_version: "0.7.5" - git_commit: "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" + git_commit: "a075b9f384e200b357c4c85801062a980ddb3383" git_remote: "https://github.com/openpipelines-bio/openpipeline" - git_tag: "0.12.2-3-g827d483cf7" + git_tag: "0.12.3-3-ga075b9f384" diff --git a/target/nextflow/transform/delete_layer/main.nf b/target/nextflow/transform/delete_layer/main.nf index bdd4cbb2e5b..a359b2ebc80 100644 --- a/target/nextflow/transform/delete_layer/main.nf +++ b/target/nextflow/transform/delete_layer/main.nf @@ -1,4 +1,4 @@ -// delete_layer 0.12.3 +// delete_layer 0.12.4 // // This wrapper script is auto-generated by viash 0.7.5 and is thus a derivative // work thereof. This software comes with ABSOLUTELY NO WARRANTY from Data @@ -27,7 +27,7 @@ thisConfig = processConfig(jsonSlurper.parseText('''{ "functionality" : { "name" : "delete_layer", "namespace" : "transform", - "version" : "0.12.3", + "version" : "0.12.4", "authors" : [ { "name" : "Dries Schaumont", @@ -277,9 +277,9 @@ thisConfig = processConfig(jsonSlurper.parseText('''{ "platform" : "nextflow", "output" : "/home/runner/work/openpipeline/openpipeline/target/nextflow/transform/delete_layer", "viash_version" : "0.7.5", - "git_commit" : "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f", + "git_commit" : "a075b9f384e200b357c4c85801062a980ddb3383", "git_remote" : "https://github.com/openpipelines-bio/openpipeline", - "git_tag" : "0.12.2-3-g827d483cf7" + "git_tag" : "0.12.3-3-ga075b9f384" } }''')) diff --git a/target/nextflow/transform/delete_layer/nextflow.config b/target/nextflow/transform/delete_layer/nextflow.config index 340a7bf206e..974fb132735 100644 --- a/target/nextflow/transform/delete_layer/nextflow.config +++ b/target/nextflow/transform/delete_layer/nextflow.config @@ -2,7 +2,7 @@ manifest { name = 'delete_layer' mainScript = 'main.nf' nextflowVersion = '!>=20.12.1-edge' - version = '0.12.3' + version = '0.12.4' description = 'Delete an anndata layer from one or more modalities.\n' author = 'Dries Schaumont' } diff --git a/target/nextflow/transform/delete_layer/nextflow_schema.json b/target/nextflow/transform/delete_layer/nextflow_schema.json index 222b694010d..09e743fd401 100644 --- a/target/nextflow/transform/delete_layer/nextflow_schema.json +++ b/target/nextflow/transform/delete_layer/nextflow_schema.json @@ -1,86 +1,125 @@ { - "$schema": "http://json-schema.org/draft-07/schema", - "title": "delete_layer", - "description": "Delete an anndata layer from one or more 
modalities.\n", +"$schema": "http://json-schema.org/draft-07/schema", +"title": "delete_layer", +"description": "Delete an anndata layer from one or more modalities.\n", +"type": "object", +"definitions": { + + + + "arguments" : { + "title": "Arguments", "type": "object", - "definitions": { - "arguments" : { - "title": "Arguments", - "type": "object", - "description": "No description", - "properties": { - - "input": { - "type": "string", - "description": "Type: `file`, required, example: `input.h5mu`. Input h5mu file", - "help_text": "Type: `file`, required, example: `input.h5mu`. Input h5mu file" - }, - - "modality": { - "type": "string", - "description": "Type: `string`, default: `rna`. ", - "help_text": "Type: `string`, default: `rna`. ", - "default": "rna" - }, - - "layer": { - "type": "string", - "description": "Type: List of `string`, required, multiple_sep: `\":\"`. Input layer to remove", - "help_text": "Type: List of `string`, required, multiple_sep: `\":\"`. Input layer to remove" - }, - - "output": { - "type": "string", - "description": "Type: `file`, required, default: `$id.$key.output.h5mu`. Output h5mu file", - "help_text": "Type: `file`, required, default: `$id.$key.output.h5mu`. Output h5mu file.", - "default": "$id.$key.output.h5mu" - }, - - "output_compression": { - "type": "string", - "description": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. The compression format to be used on the output h5mu object", - "help_text": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. The compression format to be used on the output h5mu object.", - "enum": ["gzip", "lzf"] + "description": "No description", + "properties": { + + + "input": { + "type": + "string", + "description": "Type: `file`, required, example: `input.h5mu`. Input h5mu file", + "help_text": "Type: `file`, required, example: `input.h5mu`. Input h5mu file" - }, - - "missing_ok": { - "type": "boolean", - "description": "Type: `boolean_true`, default: `false`. Do not raise an error if the layer does not exist for all modalities", - "help_text": "Type: `boolean_true`, default: `false`. Do not raise an error if the layer does not exist for all modalities.", - "default": "False" - } - - } - }, - "nextflow input-output arguments" : { - "title": "Nextflow input-output arguments", - "type": "object", - "description": "Input/output parameters for Nextflow itself. Please note that both publishDir and publish_dir are supported but at least one has to be configured.", - "properties": { - - "publish_dir": { - "type": "string", - "description": "Type: `string`, required, example: `output/`. Path to an output directory", - "help_text": "Type: `string`, required, example: `output/`. Path to an output directory." - }, - - "param_list": { - "type": "string", - "description": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel", - "help_text": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel. A `param_list` can either be a list of maps, a csv file, a json file, a yaml file, or simply a yaml blob.\n\n* A list of maps (as-is) where the keys of each map corresponds to the arguments of the pipeline. 
Example: in a `nextflow.config` file: `param_list: [ [\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027], [\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027] ]`.\n* A csv file should have column names which correspond to the different arguments of this pipeline. Example: `--param_list data.csv` with columns `id,input`.\n* A json or a yaml file should be a list of maps, each of which has keys corresponding to the arguments of the pipeline. Example: `--param_list data.json` with contents `[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]`.\n* A yaml blob can also be passed directly as a string. Example: `--param_list \"[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]\"`.\n\nWhen passing a csv, json or yaml file, relative path names are relativized to the location of the parameter file. No relativation is performed when `param_list` is a list of maps (as-is) or a yaml blob.", - "hidden": true - } - - } - } + } + + + , + "modality": { + "type": + "string", + "description": "Type: `string`, default: `rna`. ", + "help_text": "Type: `string`, default: `rna`. " + , + "default": "rna" + } + + + , + "layer": { + "type": + "string", + "description": "Type: List of `string`, required, multiple_sep: `\":\"`. Input layer to remove", + "help_text": "Type: List of `string`, required, multiple_sep: `\":\"`. Input layer to remove" + + } + + + , + "output": { + "type": + "string", + "description": "Type: `file`, required, default: `$id.$key.output.h5mu`. Output h5mu file", + "help_text": "Type: `file`, required, default: `$id.$key.output.h5mu`. Output h5mu file." + , + "default": "$id.$key.output.h5mu" + } + + + , + "output_compression": { + "type": + "string", + "description": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. The compression format to be used on the output h5mu object", + "help_text": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. The compression format to be used on the output h5mu object.", + "enum": ["gzip", "lzf"] + + + } + + + , + "missing_ok": { + "type": + "boolean", + "description": "Type: `boolean_true`, default: `false`. Do not raise an error if the layer does not exist for all modalities", + "help_text": "Type: `boolean_true`, default: `false`. Do not raise an error if the layer does not exist for all modalities." + , + "default": "False" + } + + +} +}, + + + "nextflow input-output arguments" : { + "title": "Nextflow input-output arguments", + "type": "object", + "description": "Input/output parameters for Nextflow itself. Please note that both publishDir and publish_dir are supported but at least one has to be configured.", + "properties": { + + + "publish_dir": { + "type": + "string", + "description": "Type: `string`, required, example: `output/`. Path to an output directory", + "help_text": "Type: `string`, required, example: `output/`. Path to an output directory." + + } + + + , + "param_list": { + "type": + "string", + "description": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel", + "help_text": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel. 
A `param_list` can either be a list of maps, a csv file, a json file, a yaml file, or simply a yaml blob.\n\n* A list of maps (as-is) where the keys of each map corresponds to the arguments of the pipeline. Example: in a `nextflow.config` file: `param_list: [ [\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027], [\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027] ]`.\n* A csv file should have column names which correspond to the different arguments of this pipeline. Example: `--param_list data.csv` with columns `id,input`.\n* A json or a yaml file should be a list of maps, each of which has keys corresponding to the arguments of the pipeline. Example: `--param_list data.json` with contents `[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]`.\n* A yaml blob can also be passed directly as a string. Example: `--param_list \"[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]\"`.\n\nWhen passing a csv, json or yaml file, relative path names are relativized to the location of the parameter file. No relativation is performed when `param_list` is a list of maps (as-is) or a yaml blob.", + "hidden": true + + } + + +} +} +}, +"allOf": [ + + { + "$ref": "#/definitions/arguments" }, - "allOf": [ - { - "$ref": "#/definitions/arguments" - }, - { - "$ref": "#/definitions/nextflow input-output arguments" - } - ] + + { + "$ref": "#/definitions/nextflow input-output arguments" + } +] } diff --git a/target/nextflow/transform/log1p/.config.vsh.yaml b/target/nextflow/transform/log1p/.config.vsh.yaml index 6dfef8173a4..735bf74a31b 100644 --- a/target/nextflow/transform/log1p/.config.vsh.yaml +++ b/target/nextflow/transform/log1p/.config.vsh.yaml @@ -1,7 +1,7 @@ functionality: name: "log1p" namespace: "transform" - version: "0.12.3" + version: "0.12.4" authors: - name: "Dries De Maeyer" roles: @@ -220,6 +220,6 @@ info: output: "/home/runner/work/openpipeline/openpipeline/target/nextflow/transform/log1p" executable: "/home/runner/work/openpipeline/openpipeline/target/nextflow/transform/log1p/log1p" viash_version: "0.7.5" - git_commit: "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" + git_commit: "a075b9f384e200b357c4c85801062a980ddb3383" git_remote: "https://github.com/openpipelines-bio/openpipeline" - git_tag: "0.12.2-3-g827d483cf7" + git_tag: "0.12.3-3-ga075b9f384" diff --git a/target/nextflow/transform/log1p/main.nf b/target/nextflow/transform/log1p/main.nf index 018e33275f8..8200de683ba 100644 --- a/target/nextflow/transform/log1p/main.nf +++ b/target/nextflow/transform/log1p/main.nf @@ -1,4 +1,4 @@ -// log1p 0.12.3 +// log1p 0.12.4 // // This wrapper script is auto-generated by viash 0.7.5 and is thus a derivative // work thereof. 
This software comes with ABSOLUTELY NO WARRANTY from Data @@ -28,7 +28,7 @@ thisConfig = processConfig(jsonSlurper.parseText('''{ "functionality" : { "name" : "log1p", "namespace" : "transform", - "version" : "0.12.3", + "version" : "0.12.4", "authors" : [ { "name" : "Dries De Maeyer", @@ -315,9 +315,9 @@ thisConfig = processConfig(jsonSlurper.parseText('''{ "platform" : "nextflow", "output" : "/home/runner/work/openpipeline/openpipeline/target/nextflow/transform/log1p", "viash_version" : "0.7.5", - "git_commit" : "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f", + "git_commit" : "a075b9f384e200b357c4c85801062a980ddb3383", "git_remote" : "https://github.com/openpipelines-bio/openpipeline", - "git_tag" : "0.12.2-3-g827d483cf7" + "git_tag" : "0.12.3-3-ga075b9f384" } }''')) @@ -326,6 +326,7 @@ tempscript=".viash_script.sh" cat > "$tempscript" << VIASHMAIN import scanpy as sc import mudata as mu +import anndata as ad import sys ## VIASH START @@ -382,12 +383,24 @@ mdata.var_names_make_unique() mod = par["modality"] logger.info("Performing log transformation on modality %s", mod) data = mdata.mod[mod] -new_layer = sc.pp.log1p(data, - base=par["base"], - copy=True if par['output_layer'] else False) -if new_layer: - data.layers[par['output_layer']] = new_layer.X - data.uns['log1p'] = new_layer.uns['log1p'] + +# Make our own copy with not a lot of data +# this avoid excessive memory usage and accidental overwrites +input_layer = data.layers[par["input_layer"]] \\\\ + if par["input_layer"] else data.X +data_for_scanpy = ad.AnnData(X=input_layer.copy()) +sc.pp.log1p(data_for_scanpy, + base=par["base"], + layer=None, # use X + copy=False) # allow overwrites in the copy that was made + +# Scanpy will overwrite the input layer. +# So fetch input layer from the copy and use it to populate the output slot +if par["output_layer"]: + data.layers[par["output_layer"]] = data_for_scanpy.X +else: + data.X = data_for_scanpy.X +data.uns['log1p'] = data_for_scanpy.uns['log1p'].copy() logger.info("Writing to file %s", par["output"]) mdata.write_h5mu(filename=par["output"], compression=par["output_compression"]) diff --git a/target/nextflow/transform/log1p/nextflow.config b/target/nextflow/transform/log1p/nextflow.config index 5993fe87207..968128e5b75 100644 --- a/target/nextflow/transform/log1p/nextflow.config +++ b/target/nextflow/transform/log1p/nextflow.config @@ -2,7 +2,7 @@ manifest { name = 'log1p' mainScript = 'main.nf' nextflowVersion = '!>=20.12.1-edge' - version = '0.12.3' + version = '0.12.4' description = 'Logarithmize the data matrix. Computes X = log(X + 1), where log denotes the natural logarithm unless a different base is given.\n' author = 'Dries De Maeyer, Robrecht Cannoodt' } diff --git a/target/nextflow/transform/log1p/nextflow_schema.json b/target/nextflow/transform/log1p/nextflow_schema.json index 0e2fde9b00d..b05824b7fbb 100644 --- a/target/nextflow/transform/log1p/nextflow_schema.json +++ b/target/nextflow/transform/log1p/nextflow_schema.json @@ -1,91 +1,134 @@ { - "$schema": "http://json-schema.org/draft-07/schema", - "title": "log1p", - "description": "Logarithmize the data matrix. Computes X = log(X + 1), where log denotes the natural logarithm unless a different base is given.\n", +"$schema": "http://json-schema.org/draft-07/schema", +"title": "log1p", +"description": "Logarithmize the data matrix. 
Computes X = log(X + 1), where log denotes the natural logarithm unless a different base is given.\n", +"type": "object", +"definitions": { + + + + "arguments" : { + "title": "Arguments", "type": "object", - "definitions": { - "arguments" : { - "title": "Arguments", - "type": "object", - "description": "No description", - "properties": { - - "input": { - "type": "string", - "description": "Type: `file`, required, example: `input.h5mu`. Input h5mu file", - "help_text": "Type: `file`, required, example: `input.h5mu`. Input h5mu file" - }, - - "modality": { - "type": "string", - "description": "Type: `string`, default: `rna`. ", - "help_text": "Type: `string`, default: `rna`. ", - "default": "rna" - }, - - "input_layer": { - "type": "string", - "description": "Type: `string`. Input layer to use", - "help_text": "Type: `string`. Input layer to use. If None, X is normalized" - }, - - "output_layer": { - "type": "string", - "description": "Type: `string`. Output layer to use", - "help_text": "Type: `string`. Output layer to use. By default, use X." - }, - - "output": { - "type": "string", - "description": "Type: `file`, required, default: `$id.$key.output.h5mu`. Output h5mu file", - "help_text": "Type: `file`, required, default: `$id.$key.output.h5mu`. Output h5mu file.", - "default": "$id.$key.output.h5mu" - }, - - "output_compression": { - "type": "string", - "description": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. The compression format to be used on the output h5mu object", - "help_text": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. The compression format to be used on the output h5mu object.", - "enum": ["gzip", "lzf"] + "description": "No description", + "properties": { + + + "input": { + "type": + "string", + "description": "Type: `file`, required, example: `input.h5mu`. Input h5mu file", + "help_text": "Type: `file`, required, example: `input.h5mu`. Input h5mu file" - }, - - "base": { - "type": "number", - "description": "Type: `double`, example: `2`. ", - "help_text": "Type: `double`, example: `2`. " - } - - } - }, - "nextflow input-output arguments" : { - "title": "Nextflow input-output arguments", - "type": "object", - "description": "Input/output parameters for Nextflow itself. Please note that both publishDir and publish_dir are supported but at least one has to be configured.", - "properties": { - - "publish_dir": { - "type": "string", - "description": "Type: `string`, required, example: `output/`. Path to an output directory", - "help_text": "Type: `string`, required, example: `output/`. Path to an output directory." - }, - - "param_list": { - "type": "string", - "description": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel", - "help_text": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel. A `param_list` can either be a list of maps, a csv file, a json file, a yaml file, or simply a yaml blob.\n\n* A list of maps (as-is) where the keys of each map corresponds to the arguments of the pipeline. Example: in a `nextflow.config` file: `param_list: [ [\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027], [\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027] ]`.\n* A csv file should have column names which correspond to the different arguments of this pipeline. 
Example: `--param_list data.csv` with columns `id,input`.\n* A json or a yaml file should be a list of maps, each of which has keys corresponding to the arguments of the pipeline. Example: `--param_list data.json` with contents `[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]`.\n* A yaml blob can also be passed directly as a string. Example: `--param_list \"[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]\"`.\n\nWhen passing a csv, json or yaml file, relative path names are relativized to the location of the parameter file. No relativation is performed when `param_list` is a list of maps (as-is) or a yaml blob.", - "hidden": true - } - - } - } + } + + + , + "modality": { + "type": + "string", + "description": "Type: `string`, default: `rna`. ", + "help_text": "Type: `string`, default: `rna`. " + , + "default": "rna" + } + + + , + "input_layer": { + "type": + "string", + "description": "Type: `string`. Input layer to use", + "help_text": "Type: `string`. Input layer to use. If None, X is normalized" + + } + + + , + "output_layer": { + "type": + "string", + "description": "Type: `string`. Output layer to use", + "help_text": "Type: `string`. Output layer to use. By default, use X." + + } + + + , + "output": { + "type": + "string", + "description": "Type: `file`, required, default: `$id.$key.output.h5mu`. Output h5mu file", + "help_text": "Type: `file`, required, default: `$id.$key.output.h5mu`. Output h5mu file." + , + "default": "$id.$key.output.h5mu" + } + + + , + "output_compression": { + "type": + "string", + "description": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. The compression format to be used on the output h5mu object", + "help_text": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. The compression format to be used on the output h5mu object.", + "enum": ["gzip", "lzf"] + + + } + + + , + "base": { + "type": + "number", + "description": "Type: `double`, example: `2`. ", + "help_text": "Type: `double`, example: `2`. " + + } + + +} +}, + + + "nextflow input-output arguments" : { + "title": "Nextflow input-output arguments", + "type": "object", + "description": "Input/output parameters for Nextflow itself. Please note that both publishDir and publish_dir are supported but at least one has to be configured.", + "properties": { + + + "publish_dir": { + "type": + "string", + "description": "Type: `string`, required, example: `output/`. Path to an output directory", + "help_text": "Type: `string`, required, example: `output/`. Path to an output directory." + + } + + + , + "param_list": { + "type": + "string", + "description": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel", + "help_text": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel. A `param_list` can either be a list of maps, a csv file, a json file, a yaml file, or simply a yaml blob.\n\n* A list of maps (as-is) where the keys of each map corresponds to the arguments of the pipeline. 
Example: in a `nextflow.config` file: `param_list: [ [\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027], [\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027] ]`.\n* A csv file should have column names which correspond to the different arguments of this pipeline. Example: `--param_list data.csv` with columns `id,input`.\n* A json or a yaml file should be a list of maps, each of which has keys corresponding to the arguments of the pipeline. Example: `--param_list data.json` with contents `[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]`.\n* A yaml blob can also be passed directly as a string. Example: `--param_list \"[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]\"`.\n\nWhen passing a csv, json or yaml file, relative path names are relativized to the location of the parameter file. No relativation is performed when `param_list` is a list of maps (as-is) or a yaml blob.", + "hidden": true + + } + + +} +} +}, +"allOf": [ + + { + "$ref": "#/definitions/arguments" }, - "allOf": [ - { - "$ref": "#/definitions/arguments" - }, - { - "$ref": "#/definitions/nextflow input-output arguments" - } - ] + + { + "$ref": "#/definitions/nextflow input-output arguments" + } +] } diff --git a/target/nextflow/transform/normalize_total/.config.vsh.yaml b/target/nextflow/transform/normalize_total/.config.vsh.yaml index f23f99b8927..9f6bb962f90 100644 --- a/target/nextflow/transform/normalize_total/.config.vsh.yaml +++ b/target/nextflow/transform/normalize_total/.config.vsh.yaml @@ -1,7 +1,7 @@ functionality: name: "normalize_total" namespace: "transform" - version: "0.12.3" + version: "0.12.4" authors: - name: "Dries De Maeyer" roles: @@ -237,6 +237,6 @@ info: output: "/home/runner/work/openpipeline/openpipeline/target/nextflow/transform/normalize_total" executable: "/home/runner/work/openpipeline/openpipeline/target/nextflow/transform/normalize_total/normalize_total" viash_version: "0.7.5" - git_commit: "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" + git_commit: "a075b9f384e200b357c4c85801062a980ddb3383" git_remote: "https://github.com/openpipelines-bio/openpipeline" - git_tag: "0.12.2-3-g827d483cf7" + git_tag: "0.12.3-3-ga075b9f384" diff --git a/target/nextflow/transform/normalize_total/main.nf b/target/nextflow/transform/normalize_total/main.nf index 3b2ce549a7b..5f3c0d1859c 100644 --- a/target/nextflow/transform/normalize_total/main.nf +++ b/target/nextflow/transform/normalize_total/main.nf @@ -1,4 +1,4 @@ -// normalize_total 0.12.3 +// normalize_total 0.12.4 // // This wrapper script is auto-generated by viash 0.7.5 and is thus a derivative // work thereof. 
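The `param_list` help text repeated in the schemas above accepts a list of maps, a CSV/JSON/YAML file, or a YAML blob. As a hedged illustration, the snippet below builds such a YAML file from Python; it assumes PyYAML is available, and the ids, file names, and the example invocation in the trailing comment are placeholders rather than a prescribed command line.

```python
# Sketch: write a param_list YAML file for a batch run; ids and paths are placeholders.
import yaml  # assumes PyYAML is installed

param_list = [
    {"id": "foo", "input": "foo.h5mu"},
    {"id": "bar", "input": "bar.h5mu"},
]

with open("my_params.yaml", "w") as handle:
    yaml.safe_dump(param_list, handle)

# One possible invocation (illustration only; adjust to your setup):
#   nextflow run . -main-script target/nextflow/transform/log1p/main.nf \
#     --param_list my_params.yaml --publish_dir output/
```

As the help text notes, relative paths inside such a file are resolved relative to the parameter file's location, not the working directory.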
This software comes with ABSOLUTELY NO WARRANTY from Data @@ -28,7 +28,7 @@ thisConfig = processConfig(jsonSlurper.parseText('''{ "functionality" : { "name" : "normalize_total", "namespace" : "transform", - "version" : "0.12.3", + "version" : "0.12.4", "authors" : [ { "name" : "Dries De Maeyer", @@ -324,9 +324,9 @@ thisConfig = processConfig(jsonSlurper.parseText('''{ "platform" : "nextflow", "output" : "/home/runner/work/openpipeline/openpipeline/target/nextflow/transform/normalize_total", "viash_version" : "0.7.5", - "git_commit" : "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f", + "git_commit" : "a075b9f384e200b357c4c85801062a980ddb3383", "git_remote" : "https://github.com/openpipelines-bio/openpipeline", - "git_tag" : "0.12.2-3-g827d483cf7" + "git_tag" : "0.12.3-3-ga075b9f384" } }''')) diff --git a/target/nextflow/transform/normalize_total/nextflow.config b/target/nextflow/transform/normalize_total/nextflow.config index b6d6189bb28..204717adda0 100644 --- a/target/nextflow/transform/normalize_total/nextflow.config +++ b/target/nextflow/transform/normalize_total/nextflow.config @@ -2,7 +2,7 @@ manifest { name = 'normalize_total' mainScript = 'main.nf' nextflowVersion = '!>=20.12.1-edge' - version = '0.12.3' + version = '0.12.4' description = 'Normalize counts per cell.\n\nNormalize each cell by total counts over all genes, so that every cell has the same total count after normalization. If choosing target_sum=1e6, this is CPM normalization.\n\nIf exclude_highly_expressed=True, very highly expressed genes are excluded from the computation of the normalization factor (size factor) for each cell. This is meaningful as these can strongly influence the resulting normalized values for all other genes [Weinreb17].\n' author = 'Dries De Maeyer, Robrecht Cannoodt' } diff --git a/target/nextflow/transform/normalize_total/nextflow_schema.json b/target/nextflow/transform/normalize_total/nextflow_schema.json index bce523cd7f3..9a1389e85dc 100644 --- a/target/nextflow/transform/normalize_total/nextflow_schema.json +++ b/target/nextflow/transform/normalize_total/nextflow_schema.json @@ -1,99 +1,146 @@ { - "$schema": "http://json-schema.org/draft-07/schema", - "title": "normalize_total", - "description": "Normalize counts per cell.\n\nNormalize each cell by total counts over all genes, so that every cell has the same total count after normalization. If choosing target_sum=1e6, this is CPM normalization.\n\nIf exclude_highly_expressed=True, very highly expressed genes are excluded from the computation of the normalization factor (size factor) for each cell. This is meaningful as these can strongly influence the resulting normalized values for all other genes [Weinreb17].\n", +"$schema": "http://json-schema.org/draft-07/schema", +"title": "normalize_total", +"description": "Normalize counts per cell.\n\nNormalize each cell by total counts over all genes, so that every cell has the same total count after normalization. If choosing target_sum=1e6, this is CPM normalization.\n\nIf exclude_highly_expressed=True, very highly expressed genes are excluded from the computation of the normalization factor (size factor) for each cell. 
This is meaningful as these can strongly influence the resulting normalized values for all other genes [Weinreb17].\n", +"type": "object", +"definitions": { + + + + "arguments" : { + "title": "Arguments", "type": "object", - "definitions": { - "arguments" : { - "title": "Arguments", - "type": "object", - "description": "No description", - "properties": { - - "input": { - "type": "string", - "description": "Type: `file`, required, example: `input.h5mu`. Input h5mu file", - "help_text": "Type: `file`, required, example: `input.h5mu`. Input h5mu file" - }, - - "modality": { - "type": "string", - "description": "Type: `string`, default: `rna`. ", - "help_text": "Type: `string`, default: `rna`. ", - "default": "rna" - }, - - "input_layer": { - "type": "string", - "description": "Type: `string`. Input layer to use", - "help_text": "Type: `string`. Input layer to use. By default, X is normalized" - }, - - "output": { - "type": "string", - "description": "Type: `file`, required, default: `$id.$key.output.h5mu`. Output h5mu file", - "help_text": "Type: `file`, required, default: `$id.$key.output.h5mu`. Output h5mu file.", - "default": "$id.$key.output.h5mu" - }, - - "output_compression": { - "type": "string", - "description": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. The compression format to be used on the output h5mu object", - "help_text": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. The compression format to be used on the output h5mu object.", - "enum": ["gzip", "lzf"] + "description": "No description", + "properties": { + + + "input": { + "type": + "string", + "description": "Type: `file`, required, example: `input.h5mu`. Input h5mu file", + "help_text": "Type: `file`, required, example: `input.h5mu`. Input h5mu file" - }, - - "output_layer": { - "type": "string", - "description": "Type: `string`. Output layer to use", - "help_text": "Type: `string`. Output layer to use. By default, use X." - }, - - "target_sum": { - "type": "integer", - "description": "Type: `integer`, default: `10000`. If None, after normalization, each observation (cell) has a total count equal to the median of total counts for observations (cells) before normalization", - "help_text": "Type: `integer`, default: `10000`. If None, after normalization, each observation (cell) has a total count equal to the median of total counts for observations (cells) before normalization.", - "default": "10000" - }, - - "exclude_highly_expressed": { - "type": "boolean", - "description": "Type: `boolean_true`, default: `false`. Exclude (very) highly expressed genes for the computation of the normalization factor (size factor) for each cell", - "help_text": "Type: `boolean_true`, default: `false`. Exclude (very) highly expressed genes for the computation of the normalization factor (size factor) for each cell. A gene is considered highly expressed, if it has more than max_fraction of the total counts in at least one cell. The not-excluded genes will sum up to target_sum.", - "default": "False" - } - - } - }, - "nextflow input-output arguments" : { - "title": "Nextflow input-output arguments", - "type": "object", - "description": "Input/output parameters for Nextflow itself. Please note that both publishDir and publish_dir are supported but at least one has to be configured.", - "properties": { - - "publish_dir": { - "type": "string", - "description": "Type: `string`, required, example: `output/`. Path to an output directory", - "help_text": "Type: `string`, required, example: `output/`. 
Path to an output directory." - }, - - "param_list": { - "type": "string", - "description": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel", - "help_text": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel. A `param_list` can either be a list of maps, a csv file, a json file, a yaml file, or simply a yaml blob.\n\n* A list of maps (as-is) where the keys of each map corresponds to the arguments of the pipeline. Example: in a `nextflow.config` file: `param_list: [ [\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027], [\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027] ]`.\n* A csv file should have column names which correspond to the different arguments of this pipeline. Example: `--param_list data.csv` with columns `id,input`.\n* A json or a yaml file should be a list of maps, each of which has keys corresponding to the arguments of the pipeline. Example: `--param_list data.json` with contents `[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]`.\n* A yaml blob can also be passed directly as a string. Example: `--param_list \"[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]\"`.\n\nWhen passing a csv, json or yaml file, relative path names are relativized to the location of the parameter file. No relativation is performed when `param_list` is a list of maps (as-is) or a yaml blob.", - "hidden": true - } - - } - } + } + + + , + "modality": { + "type": + "string", + "description": "Type: `string`, default: `rna`. ", + "help_text": "Type: `string`, default: `rna`. " + , + "default": "rna" + } + + + , + "input_layer": { + "type": + "string", + "description": "Type: `string`. Input layer to use", + "help_text": "Type: `string`. Input layer to use. By default, X is normalized" + + } + + + , + "output": { + "type": + "string", + "description": "Type: `file`, required, default: `$id.$key.output.h5mu`. Output h5mu file", + "help_text": "Type: `file`, required, default: `$id.$key.output.h5mu`. Output h5mu file." + , + "default": "$id.$key.output.h5mu" + } + + + , + "output_compression": { + "type": + "string", + "description": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. The compression format to be used on the output h5mu object", + "help_text": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. The compression format to be used on the output h5mu object.", + "enum": ["gzip", "lzf"] + + + } + + + , + "output_layer": { + "type": + "string", + "description": "Type: `string`. Output layer to use", + "help_text": "Type: `string`. Output layer to use. By default, use X." + + } + + + , + "target_sum": { + "type": + "integer", + "description": "Type: `integer`, default: `10000`. If None, after normalization, each observation (cell) has a total count equal to the median of total counts for observations (cells) before normalization", + "help_text": "Type: `integer`, default: `10000`. If None, after normalization, each observation (cell) has a total count equal to the median of total counts for observations (cells) before normalization." + , + "default": "10000" + } + + + , + "exclude_highly_expressed": { + "type": + "boolean", + "description": "Type: `boolean_true`, default: `false`. 
Exclude (very) highly expressed genes for the computation of the normalization factor (size factor) for each cell", + "help_text": "Type: `boolean_true`, default: `false`. Exclude (very) highly expressed genes for the computation of the normalization factor (size factor) for each cell. A gene is considered highly expressed, if it has more than max_fraction of the total counts in at least one cell. The not-excluded genes will sum up to target_sum." + , + "default": "False" + } + + +} +}, + + + "nextflow input-output arguments" : { + "title": "Nextflow input-output arguments", + "type": "object", + "description": "Input/output parameters for Nextflow itself. Please note that both publishDir and publish_dir are supported but at least one has to be configured.", + "properties": { + + + "publish_dir": { + "type": + "string", + "description": "Type: `string`, required, example: `output/`. Path to an output directory", + "help_text": "Type: `string`, required, example: `output/`. Path to an output directory." + + } + + + , + "param_list": { + "type": + "string", + "description": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel", + "help_text": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel. A `param_list` can either be a list of maps, a csv file, a json file, a yaml file, or simply a yaml blob.\n\n* A list of maps (as-is) where the keys of each map corresponds to the arguments of the pipeline. Example: in a `nextflow.config` file: `param_list: [ [\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027], [\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027] ]`.\n* A csv file should have column names which correspond to the different arguments of this pipeline. Example: `--param_list data.csv` with columns `id,input`.\n* A json or a yaml file should be a list of maps, each of which has keys corresponding to the arguments of the pipeline. Example: `--param_list data.json` with contents `[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]`.\n* A yaml blob can also be passed directly as a string. Example: `--param_list \"[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]\"`.\n\nWhen passing a csv, json or yaml file, relative path names are relativized to the location of the parameter file. 
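The `normalize_total` schema above exposes `target_sum` and `exclude_highly_expressed`, which correspond to the options of scanpy's `normalize_total`. Below is a rough equivalent as a direct scanpy call on one modality; it is a sketch under placeholder paths, not the component's actual code (the component additionally handles `input_layer`/`output_layer`).

```python
# Rough scanpy equivalent of the normalize_total options documented above; paths are placeholders.
import mudata as mu
import scanpy as sc

mdata = mu.read_h5mu("input.h5mu")  # placeholder path
rna = mdata.mod["rna"]

sc.pp.normalize_total(
    rna,
    target_sum=1e4,                  # corresponds to `--target_sum 10000`
    exclude_highly_expressed=False,  # corresponds to `--exclude_highly_expressed`
)

mdata.write_h5mu("output.h5mu")      # placeholder path
```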
No relativation is performed when `param_list` is a list of maps (as-is) or a yaml blob.", + "hidden": true + + } + + +} +} +}, +"allOf": [ + + { + "$ref": "#/definitions/arguments" }, - "allOf": [ - { - "$ref": "#/definitions/arguments" - }, - { - "$ref": "#/definitions/nextflow input-output arguments" - } - ] + + { + "$ref": "#/definitions/nextflow input-output arguments" + } +] } diff --git a/target/nextflow/transform/regress_out/.config.vsh.yaml b/target/nextflow/transform/regress_out/.config.vsh.yaml index 9bffc20ea66..4fb5f0809e3 100644 --- a/target/nextflow/transform/regress_out/.config.vsh.yaml +++ b/target/nextflow/transform/regress_out/.config.vsh.yaml @@ -1,7 +1,7 @@ functionality: name: "regress_out" namespace: "transform" - version: "0.12.3" + version: "0.12.4" authors: - name: "Robrecht Cannoodt" roles: @@ -190,6 +190,6 @@ info: output: "/home/runner/work/openpipeline/openpipeline/target/nextflow/transform/regress_out" executable: "/home/runner/work/openpipeline/openpipeline/target/nextflow/transform/regress_out/regress_out" viash_version: "0.7.5" - git_commit: "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" + git_commit: "a075b9f384e200b357c4c85801062a980ddb3383" git_remote: "https://github.com/openpipelines-bio/openpipeline" - git_tag: "0.12.2-3-g827d483cf7" + git_tag: "0.12.3-3-ga075b9f384" diff --git a/target/nextflow/transform/regress_out/main.nf b/target/nextflow/transform/regress_out/main.nf index 53de04a888f..f8fe1b348e9 100644 --- a/target/nextflow/transform/regress_out/main.nf +++ b/target/nextflow/transform/regress_out/main.nf @@ -1,4 +1,4 @@ -// regress_out 0.12.3 +// regress_out 0.12.4 // // This wrapper script is auto-generated by viash 0.7.5 and is thus a derivative // work thereof. This software comes with ABSOLUTELY NO WARRANTY from Data @@ -27,7 +27,7 @@ thisConfig = processConfig(jsonSlurper.parseText('''{ "functionality" : { "name" : "regress_out", "namespace" : "transform", - "version" : "0.12.3", + "version" : "0.12.4", "authors" : [ { "name" : "Robrecht Cannoodt", @@ -270,9 +270,9 @@ thisConfig = processConfig(jsonSlurper.parseText('''{ "platform" : "nextflow", "output" : "/home/runner/work/openpipeline/openpipeline/target/nextflow/transform/regress_out", "viash_version" : "0.7.5", - "git_commit" : "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f", + "git_commit" : "a075b9f384e200b357c4c85801062a980ddb3383", "git_remote" : "https://github.com/openpipelines-bio/openpipeline", - "git_tag" : "0.12.2-3-g827d483cf7" + "git_tag" : "0.12.3-3-ga075b9f384" } }''')) diff --git a/target/nextflow/transform/regress_out/nextflow.config b/target/nextflow/transform/regress_out/nextflow.config index 36073fc822e..d23086c2fbb 100644 --- a/target/nextflow/transform/regress_out/nextflow.config +++ b/target/nextflow/transform/regress_out/nextflow.config @@ -2,7 +2,7 @@ manifest { name = 'regress_out' mainScript = 'main.nf' nextflowVersion = '!>=20.12.1-edge' - version = '0.12.3' + version = '0.12.4' description = 'Regress out (mostly) unwanted sources of variation.\nUses simple linear regression. This is inspired by Seurat\'s regressOut function in R [Satija15]. 
\nNote that this function tends to overcorrect in certain circumstances as described in issue theislab/scanpy#526.\nSee https://github.com/theislab/scanpy/issues/526.\n' author = 'Robrecht Cannoodt' } diff --git a/target/nextflow/transform/regress_out/nextflow_schema.json b/target/nextflow/transform/regress_out/nextflow_schema.json index 394303af4ac..5bc7d2c6cac 100644 --- a/target/nextflow/transform/regress_out/nextflow_schema.json +++ b/target/nextflow/transform/regress_out/nextflow_schema.json @@ -1,79 +1,114 @@ { - "$schema": "http://json-schema.org/draft-07/schema", - "title": "regress_out", - "description": "Regress out (mostly) unwanted sources of variation.\nUses simple linear regression. This is inspired by Seurat\u0027s regressOut function in R [Satija15]. \nNote that this function tends to overcorrect in certain circumstances as described in issue theislab/scanpy#526.\nSee https://github.com/theislab/scanpy/issues/526.\n", +"$schema": "http://json-schema.org/draft-07/schema", +"title": "regress_out", +"description": "Regress out (mostly) unwanted sources of variation.\nUses simple linear regression. This is inspired by Seurat\u0027s regressOut function in R [Satija15]. \nNote that this function tends to overcorrect in certain circumstances as described in issue theislab/scanpy#526.\nSee https://github.com/theislab/scanpy/issues/526.\n", +"type": "object", +"definitions": { + + + + "arguments" : { + "title": "Arguments", "type": "object", - "definitions": { - "arguments" : { - "title": "Arguments", - "type": "object", - "description": "No description", - "properties": { - - "input": { - "type": "string", - "description": "Type: `file`, required, example: `input.h5mu`. Input h5mu file", - "help_text": "Type: `file`, required, example: `input.h5mu`. Input h5mu file" - }, - - "output": { - "type": "string", - "description": "Type: `file`, required, default: `$id.$key.output.h5mu`. Output h5mu file", - "help_text": "Type: `file`, required, default: `$id.$key.output.h5mu`. Output h5mu file.", - "default": "$id.$key.output.h5mu" - }, - - "output_compression": { - "type": "string", - "description": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. The compression format to be used on the output h5mu object", - "help_text": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. The compression format to be used on the output h5mu object.", - "enum": ["gzip", "lzf"] + "description": "No description", + "properties": { + + + "input": { + "type": + "string", + "description": "Type: `file`, required, example: `input.h5mu`. Input h5mu file", + "help_text": "Type: `file`, required, example: `input.h5mu`. Input h5mu file" - }, - - "modality": { - "type": "string", - "description": "Type: `string`, default: `rna`. Which modality (one or more) to run this component on", - "help_text": "Type: `string`, default: `rna`. Which modality (one or more) to run this component on.", - "default": "rna" - }, - - "obs_keys": { - "type": "string", - "description": "Type: List of `string`, multiple_sep: `\":\"`. Which ", - "help_text": "Type: List of `string`, multiple_sep: `\":\"`. Which .obs keys to regress on." - } - - } - }, - "nextflow input-output arguments" : { - "title": "Nextflow input-output arguments", - "type": "object", - "description": "Input/output parameters for Nextflow itself. 
Please note that both publishDir and publish_dir are supported but at least one has to be configured.", - "properties": { - - "publish_dir": { - "type": "string", - "description": "Type: `string`, required, example: `output/`. Path to an output directory", - "help_text": "Type: `string`, required, example: `output/`. Path to an output directory." - }, - - "param_list": { - "type": "string", - "description": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel", - "help_text": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel. A `param_list` can either be a list of maps, a csv file, a json file, a yaml file, or simply a yaml blob.\n\n* A list of maps (as-is) where the keys of each map corresponds to the arguments of the pipeline. Example: in a `nextflow.config` file: `param_list: [ [\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027], [\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027] ]`.\n* A csv file should have column names which correspond to the different arguments of this pipeline. Example: `--param_list data.csv` with columns `id,input`.\n* A json or a yaml file should be a list of maps, each of which has keys corresponding to the arguments of the pipeline. Example: `--param_list data.json` with contents `[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]`.\n* A yaml blob can also be passed directly as a string. Example: `--param_list \"[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]\"`.\n\nWhen passing a csv, json or yaml file, relative path names are relativized to the location of the parameter file. No relativation is performed when `param_list` is a list of maps (as-is) or a yaml blob.", - "hidden": true - } - - } - } + } + + + , + "output": { + "type": + "string", + "description": "Type: `file`, required, default: `$id.$key.output.h5mu`. Output h5mu file", + "help_text": "Type: `file`, required, default: `$id.$key.output.h5mu`. Output h5mu file." + , + "default": "$id.$key.output.h5mu" + } + + + , + "output_compression": { + "type": + "string", + "description": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. The compression format to be used on the output h5mu object", + "help_text": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. The compression format to be used on the output h5mu object.", + "enum": ["gzip", "lzf"] + + + } + + + , + "modality": { + "type": + "string", + "description": "Type: `string`, default: `rna`. Which modality (one or more) to run this component on", + "help_text": "Type: `string`, default: `rna`. Which modality (one or more) to run this component on." + , + "default": "rna" + } + + + , + "obs_keys": { + "type": + "string", + "description": "Type: List of `string`, multiple_sep: `\":\"`. Which ", + "help_text": "Type: List of `string`, multiple_sep: `\":\"`. Which .obs keys to regress on." + + } + + +} +}, + + + "nextflow input-output arguments" : { + "title": "Nextflow input-output arguments", + "type": "object", + "description": "Input/output parameters for Nextflow itself. 
Please note that both publishDir and publish_dir are supported but at least one has to be configured.", + "properties": { + + + "publish_dir": { + "type": + "string", + "description": "Type: `string`, required, example: `output/`. Path to an output directory", + "help_text": "Type: `string`, required, example: `output/`. Path to an output directory." + + } + + + , + "param_list": { + "type": + "string", + "description": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel", + "help_text": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel. A `param_list` can either be a list of maps, a csv file, a json file, a yaml file, or simply a yaml blob.\n\n* A list of maps (as-is) where the keys of each map corresponds to the arguments of the pipeline. Example: in a `nextflow.config` file: `param_list: [ [\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027], [\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027] ]`.\n* A csv file should have column names which correspond to the different arguments of this pipeline. Example: `--param_list data.csv` with columns `id,input`.\n* A json or a yaml file should be a list of maps, each of which has keys corresponding to the arguments of the pipeline. Example: `--param_list data.json` with contents `[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]`.\n* A yaml blob can also be passed directly as a string. Example: `--param_list \"[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]\"`.\n\nWhen passing a csv, json or yaml file, relative path names are relativized to the location of the parameter file. 
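The `regress_out` schema above takes a colon-separated `obs_keys` list, which maps onto the `keys` argument of scanpy's `regress_out`. The sketch below shows that mapping; the `.obs` column names and file paths are placeholders and must exist in your data.

```python
# Hedged sketch of what the regress_out options above correspond to in scanpy;
# obs column names and paths are placeholders.
import mudata as mu
import scanpy as sc

mdata = mu.read_h5mu("input.h5mu")  # placeholder path
rna = mdata.mod["rna"]

# `--obs_keys total_counts:pct_counts_mt` would translate to:
obs_keys = "total_counts:pct_counts_mt".split(":")
sc.pp.regress_out(rna, keys=obs_keys)

mdata.write_h5mu("output.h5mu")     # placeholder path
```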
No relativation is performed when `param_list` is a list of maps (as-is) or a yaml blob.", + "hidden": true + + } + + +} +} +}, +"allOf": [ + + { + "$ref": "#/definitions/arguments" }, - "allOf": [ - { - "$ref": "#/definitions/arguments" - }, - { - "$ref": "#/definitions/nextflow input-output arguments" - } - ] + + { + "$ref": "#/definitions/nextflow input-output arguments" + } +] } diff --git a/target/nextflow/transform/scale/.config.vsh.yaml b/target/nextflow/transform/scale/.config.vsh.yaml index 5a8ea4ffe27..28fd067b84c 100644 --- a/target/nextflow/transform/scale/.config.vsh.yaml +++ b/target/nextflow/transform/scale/.config.vsh.yaml @@ -1,7 +1,7 @@ functionality: name: "scale" namespace: "transform" - version: "0.12.3" + version: "0.12.4" authors: - name: "Dries Schaumont" roles: @@ -200,6 +200,6 @@ info: output: "/home/runner/work/openpipeline/openpipeline/target/nextflow/transform/scale" executable: "/home/runner/work/openpipeline/openpipeline/target/nextflow/transform/scale/scale" viash_version: "0.7.5" - git_commit: "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" + git_commit: "a075b9f384e200b357c4c85801062a980ddb3383" git_remote: "https://github.com/openpipelines-bio/openpipeline" - git_tag: "0.12.2-3-g827d483cf7" + git_tag: "0.12.3-3-ga075b9f384" diff --git a/target/nextflow/transform/scale/main.nf b/target/nextflow/transform/scale/main.nf index c731506a950..868ca4bd15b 100644 --- a/target/nextflow/transform/scale/main.nf +++ b/target/nextflow/transform/scale/main.nf @@ -1,4 +1,4 @@ -// scale 0.12.3 +// scale 0.12.4 // // This wrapper script is auto-generated by viash 0.7.5 and is thus a derivative // work thereof. This software comes with ABSOLUTELY NO WARRANTY from Data @@ -27,7 +27,7 @@ thisConfig = processConfig(jsonSlurper.parseText('''{ "functionality" : { "name" : "scale", "namespace" : "transform", - "version" : "0.12.3", + "version" : "0.12.4", "authors" : [ { "name" : "Dries Schaumont", @@ -285,9 +285,9 @@ thisConfig = processConfig(jsonSlurper.parseText('''{ "platform" : "nextflow", "output" : "/home/runner/work/openpipeline/openpipeline/target/nextflow/transform/scale", "viash_version" : "0.7.5", - "git_commit" : "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f", + "git_commit" : "a075b9f384e200b357c4c85801062a980ddb3383", "git_remote" : "https://github.com/openpipelines-bio/openpipeline", - "git_tag" : "0.12.2-3-g827d483cf7" + "git_tag" : "0.12.3-3-ga075b9f384" } }''')) diff --git a/target/nextflow/transform/scale/nextflow.config b/target/nextflow/transform/scale/nextflow.config index 600f084181c..74828936a99 100644 --- a/target/nextflow/transform/scale/nextflow.config +++ b/target/nextflow/transform/scale/nextflow.config @@ -2,7 +2,7 @@ manifest { name = 'scale' mainScript = 'main.nf' nextflowVersion = '!>=20.12.1-edge' - version = '0.12.3' + version = '0.12.4' description = 'Scale data to unit variance and zero mean.\n' author = 'Dries Schaumont' } diff --git a/target/nextflow/transform/scale/nextflow_schema.json b/target/nextflow/transform/scale/nextflow_schema.json index c7c845010eb..ef05bb5baf0 100644 --- a/target/nextflow/transform/scale/nextflow_schema.json +++ b/target/nextflow/transform/scale/nextflow_schema.json @@ -1,86 +1,125 @@ { - "$schema": "http://json-schema.org/draft-07/schema", - "title": "scale", - "description": "Scale data to unit variance and zero mean.\n", +"$schema": "http://json-schema.org/draft-07/schema", +"title": "scale", +"description": "Scale data to unit variance and zero mean.\n", +"type": "object", +"definitions": { + + + + "arguments" : { + 
"title": "Arguments", "type": "object", - "definitions": { - "arguments" : { - "title": "Arguments", - "type": "object", - "description": "No description", - "properties": { - - "input": { - "type": "string", - "description": "Type: `file`, required, example: `input.h5mu`. Input h5mu file", - "help_text": "Type: `file`, required, example: `input.h5mu`. Input h5mu file." - }, - - "modality": { - "type": "string", - "description": "Type: `string`, default: `rna`. List of modalities to process", - "help_text": "Type: `string`, default: `rna`. List of modalities to process.", - "default": "rna" - }, - - "max_value": { - "type": "number", - "description": "Type: `double`. Clip (truncate) to this value after scaling", - "help_text": "Type: `double`. Clip (truncate) to this value after scaling. Does not clip by default." - }, - - "zero_center": { - "type": "boolean", - "description": "Type: `boolean`, default: `true`. If False, omit zero-centering variables, which allows to handle sparse input efficiently", - "help_text": "Type: `boolean`, default: `true`. If False, omit zero-centering variables, which allows to handle sparse input efficiently.", - "default": "True" - }, - - "output": { - "type": "string", - "description": "Type: `file`, required, default: `$id.$key.output.h5mu`. Output h5mu file", - "help_text": "Type: `file`, required, default: `$id.$key.output.h5mu`. Output h5mu file.", - "default": "$id.$key.output.h5mu" - }, - - "output_compression": { - "type": "string", - "description": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. The compression format to be used on the output h5mu object", - "help_text": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. The compression format to be used on the output h5mu object.", - "enum": ["gzip", "lzf"] + "description": "No description", + "properties": { + + + "input": { + "type": + "string", + "description": "Type: `file`, required, example: `input.h5mu`. Input h5mu file", + "help_text": "Type: `file`, required, example: `input.h5mu`. Input h5mu file." - } - - } - }, - "nextflow input-output arguments" : { - "title": "Nextflow input-output arguments", - "type": "object", - "description": "Input/output parameters for Nextflow itself. Please note that both publishDir and publish_dir are supported but at least one has to be configured.", - "properties": { - - "publish_dir": { - "type": "string", - "description": "Type: `string`, required, example: `output/`. Path to an output directory", - "help_text": "Type: `string`, required, example: `output/`. Path to an output directory." - }, - - "param_list": { - "type": "string", - "description": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel", - "help_text": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel. A `param_list` can either be a list of maps, a csv file, a json file, a yaml file, or simply a yaml blob.\n\n* A list of maps (as-is) where the keys of each map corresponds to the arguments of the pipeline. Example: in a `nextflow.config` file: `param_list: [ [\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027], [\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027] ]`.\n* A csv file should have column names which correspond to the different arguments of this pipeline. 
Example: `--param_list data.csv` with columns `id,input`.\n* A json or a yaml file should be a list of maps, each of which has keys corresponding to the arguments of the pipeline. Example: `--param_list data.json` with contents `[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]`.\n* A yaml blob can also be passed directly as a string. Example: `--param_list \"[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]\"`.\n\nWhen passing a csv, json or yaml file, relative path names are relativized to the location of the parameter file. No relativation is performed when `param_list` is a list of maps (as-is) or a yaml blob.", - "hidden": true - } - - } - } + } + + + , + "modality": { + "type": + "string", + "description": "Type: `string`, default: `rna`. List of modalities to process", + "help_text": "Type: `string`, default: `rna`. List of modalities to process." + , + "default": "rna" + } + + + , + "max_value": { + "type": + "number", + "description": "Type: `double`. Clip (truncate) to this value after scaling", + "help_text": "Type: `double`. Clip (truncate) to this value after scaling. Does not clip by default." + + } + + + , + "zero_center": { + "type": + "boolean", + "description": "Type: `boolean`, default: `true`. If False, omit zero-centering variables, which allows to handle sparse input efficiently", + "help_text": "Type: `boolean`, default: `true`. If False, omit zero-centering variables, which allows to handle sparse input efficiently." + , + "default": "True" + } + + + , + "output": { + "type": + "string", + "description": "Type: `file`, required, default: `$id.$key.output.h5mu`. Output h5mu file", + "help_text": "Type: `file`, required, default: `$id.$key.output.h5mu`. Output h5mu file." + , + "default": "$id.$key.output.h5mu" + } + + + , + "output_compression": { + "type": + "string", + "description": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. The compression format to be used on the output h5mu object", + "help_text": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. The compression format to be used on the output h5mu object.", + "enum": ["gzip", "lzf"] + + + } + + +} +}, + + + "nextflow input-output arguments" : { + "title": "Nextflow input-output arguments", + "type": "object", + "description": "Input/output parameters for Nextflow itself. Please note that both publishDir and publish_dir are supported but at least one has to be configured.", + "properties": { + + + "publish_dir": { + "type": + "string", + "description": "Type: `string`, required, example: `output/`. Path to an output directory", + "help_text": "Type: `string`, required, example: `output/`. Path to an output directory." + + } + + + , + "param_list": { + "type": + "string", + "description": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel", + "help_text": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel. A `param_list` can either be a list of maps, a csv file, a json file, a yaml file, or simply a yaml blob.\n\n* A list of maps (as-is) where the keys of each map corresponds to the arguments of the pipeline. 
Example: in a `nextflow.config` file: `param_list: [ [\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027], [\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027] ]`.\n* A csv file should have column names which correspond to the different arguments of this pipeline. Example: `--param_list data.csv` with columns `id,input`.\n* A json or a yaml file should be a list of maps, each of which has keys corresponding to the arguments of the pipeline. Example: `--param_list data.json` with contents `[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]`.\n* A yaml blob can also be passed directly as a string. Example: `--param_list \"[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]\"`.\n\nWhen passing a csv, json or yaml file, relative path names are relativized to the location of the parameter file. No relativation is performed when `param_list` is a list of maps (as-is) or a yaml blob.", + "hidden": true + + } + + +} +} +}, +"allOf": [ + + { + "$ref": "#/definitions/arguments" }, - "allOf": [ - { - "$ref": "#/definitions/arguments" - }, - { - "$ref": "#/definitions/nextflow input-output arguments" - } - ] + + { + "$ref": "#/definitions/nextflow input-output arguments" + } +] } diff --git a/target/nextflow/velocity/scvelo/.config.vsh.yaml b/target/nextflow/velocity/scvelo/.config.vsh.yaml index 1032a08d4d1..315757ebed7 100644 --- a/target/nextflow/velocity/scvelo/.config.vsh.yaml +++ b/target/nextflow/velocity/scvelo/.config.vsh.yaml @@ -1,7 +1,7 @@ functionality: name: "scvelo" namespace: "velocity" - version: "0.12.3" + version: "0.12.4" authors: - name: "Dries Schaumont" roles: @@ -271,6 +271,6 @@ info: output: "/home/runner/work/openpipeline/openpipeline/target/nextflow/velocity/scvelo" executable: "/home/runner/work/openpipeline/openpipeline/target/nextflow/velocity/scvelo/scvelo" viash_version: "0.7.5" - git_commit: "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" + git_commit: "a075b9f384e200b357c4c85801062a980ddb3383" git_remote: "https://github.com/openpipelines-bio/openpipeline" - git_tag: "0.12.2-3-g827d483cf7" + git_tag: "0.12.3-3-ga075b9f384" diff --git a/target/nextflow/velocity/scvelo/main.nf b/target/nextflow/velocity/scvelo/main.nf index 2f2a020d322..d02caa1fdfe 100644 --- a/target/nextflow/velocity/scvelo/main.nf +++ b/target/nextflow/velocity/scvelo/main.nf @@ -1,4 +1,4 @@ -// scvelo 0.12.3 +// scvelo 0.12.4 // // This wrapper script is auto-generated by viash 0.7.5 and is thus a derivative // work thereof. 
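The `scale` schema above exposes `zero_center` and `max_value`, matching the corresponding arguments of scanpy's `scale`. A minimal sketch of that mapping follows; the paths and the clipping value are placeholders, not component defaults.

```python
# Rough scanpy equivalent of the scale options documented above; paths are placeholders.
import mudata as mu
import scanpy as sc

mdata = mu.read_h5mu("input.h5mu")  # placeholder path
rna = mdata.mod["rna"]

# `--zero_center true --max_value 10` would roughly correspond to:
sc.pp.scale(rna, zero_center=True, max_value=10)

mdata.write_h5mu("output.h5mu")     # placeholder path
```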
This software comes with ABSOLUTELY NO WARRANTY from Data @@ -27,7 +27,7 @@ thisConfig = processConfig(jsonSlurper.parseText('''{ "functionality" : { "name" : "scvelo", "namespace" : "velocity", - "version" : "0.12.3", + "version" : "0.12.4", "authors" : [ { "name" : "Dries Schaumont", @@ -365,9 +365,9 @@ thisConfig = processConfig(jsonSlurper.parseText('''{ "platform" : "nextflow", "output" : "/home/runner/work/openpipeline/openpipeline/target/nextflow/velocity/scvelo", "viash_version" : "0.7.5", - "git_commit" : "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f", + "git_commit" : "a075b9f384e200b357c4c85801062a980ddb3383", "git_remote" : "https://github.com/openpipelines-bio/openpipeline", - "git_tag" : "0.12.2-3-g827d483cf7" + "git_tag" : "0.12.3-3-ga075b9f384" } }''')) diff --git a/target/nextflow/velocity/scvelo/nextflow.config b/target/nextflow/velocity/scvelo/nextflow.config index 957ad6a2c86..7d8abc5310a 100644 --- a/target/nextflow/velocity/scvelo/nextflow.config +++ b/target/nextflow/velocity/scvelo/nextflow.config @@ -2,7 +2,7 @@ manifest { name = 'scvelo' mainScript = 'main.nf' nextflowVersion = '!>=20.12.1-edge' - version = '0.12.3' + version = '0.12.4' author = 'Dries Schaumont' } diff --git a/target/nextflow/velocity/scvelo/nextflow_schema.json b/target/nextflow/velocity/scvelo/nextflow_schema.json index 07df84b903b..d6881694a4f 100644 --- a/target/nextflow/velocity/scvelo/nextflow_schema.json +++ b/target/nextflow/velocity/scvelo/nextflow_schema.json @@ -1,161 +1,237 @@ { - "$schema": "http://json-schema.org/draft-07/schema", - "title": "scvelo", +"$schema": "http://json-schema.org/draft-07/schema", +"title": "scvelo", +"description": "No description", +"type": "object", +"definitions": { + + + + "inputs" : { + "title": "Inputs", + "type": "object", "description": "No description", + "properties": { + + + "input": { + "type": + "string", + "description": "Type: `file`, required. Velocyto loom file", + "help_text": "Type: `file`, required. Velocyto loom file." + + } + + +} +}, + + + "outputs" : { + "title": "Outputs", + "type": "object", + "description": "No description", + "properties": { + + + "output": { + "type": + "string", + "description": "Type: `file`, required, default: `$id.$key.output.output`. Output directory", + "help_text": "Type: `file`, required, default: `$id.$key.output.output`. Output directory. If it does not exist, will be created." + , + "default": "$id.$key.output.output" + } + + + , + "output_compression": { + "type": + "string", + "description": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. The compression format to be used on the output h5mu object", + "help_text": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. The compression format to be used on the output h5mu object.", + "enum": ["gzip", "lzf"] + + + } + + +} +}, + + + "filtering and normalization" : { + "title": "Filtering and normalization", + "type": "object", + "description": "Arguments for filtering, normalization an log transform (see scvelo.pp.filter_and_normalize function)", + "properties": { + + + "min_counts": { + "type": + "integer", + "description": "Type: `integer`. Minimum number of counts required for a gene to pass filtering (spliced)", + "help_text": "Type: `integer`. Minimum number of counts required for a gene to pass filtering (spliced)." + + } + + + , + "min_counts_u": { + "type": + "integer", + "description": "Type: `integer`. Minimum number of counts required for a gene to pass filtering (unspliced)", + "help_text": "Type: `integer`. 
Minimum number of counts required for a gene to pass filtering (unspliced)." + + } + + + , + "min_cells": { + "type": + "integer", + "description": "Type: `integer`. Minimum number of cells expressed required to pass filtering (spliced)", + "help_text": "Type: `integer`. Minimum number of cells expressed required to pass filtering (spliced)." + + } + + + , + "min_cells_u": { + "type": + "integer", + "description": "Type: `integer`. Minimum number of cells expressed required to pass filtering (unspliced)", + "help_text": "Type: `integer`. Minimum number of cells expressed required to pass filtering (unspliced)." + + } + + + , + "min_shared_counts": { + "type": + "integer", + "description": "Type: `integer`. Minimum number of counts (both unspliced and spliced) required for a gene", + "help_text": "Type: `integer`. Minimum number of counts (both unspliced and spliced) required for a gene." + + } + + + , + "min_shared_cells": { + "type": + "integer", + "description": "Type: `integer`. Minimum number of cells required to be expressed (both unspliced and spliced)", + "help_text": "Type: `integer`. Minimum number of cells required to be expressed (both unspliced and spliced)." + + } + + + , + "n_top_genes": { + "type": + "integer", + "description": "Type: `integer`. Number of genes to keep", + "help_text": "Type: `integer`. Number of genes to keep." + + } + + + , + "log_transform": { + "type": + "boolean", + "description": "Type: `boolean`, default: `true`. Do not log transform counts", + "help_text": "Type: `boolean`, default: `true`. Do not log transform counts." + , + "default": "True" + } + + +} +}, + + + "fitting parameters" : { + "title": "Fitting parameters", + "type": "object", + "description": "Arguments for fitting the data", + "properties": { + + + "n_principal_components": { + "type": + "integer", + "description": "Type: `integer`. Number of principal components to use for calculating moments", + "help_text": "Type: `integer`. Number of principal components to use for calculating moments." + + } + + + , + "n_neighbors": { + "type": + "integer", + "description": "Type: `integer`, default: `30`. Number of neighbors to use", + "help_text": "Type: `integer`, default: `30`. Number of neighbors to use. First/second-order moments are computed for each\ncell across its nearest neighbors, where the neighbor graph is obtained from\neuclidean distances in PCA space.\n" + , + "default": "30" + } + + +} +}, + + + "nextflow input-output arguments" : { + "title": "Nextflow input-output arguments", "type": "object", - "definitions": { - "inputs" : { - "title": "Inputs", - "type": "object", - "description": "No description", - "properties": { - - "input": { - "type": "string", - "description": "Type: `file`, required. Velocyto loom file", - "help_text": "Type: `file`, required. Velocyto loom file." - } - - } - }, - "outputs" : { - "title": "Outputs", - "type": "object", - "description": "No description", - "properties": { - - "output": { - "type": "string", - "description": "Type: `file`, required, default: `$id.$key.output.output`. Output directory", - "help_text": "Type: `file`, required, default: `$id.$key.output.output`. Output directory. If it does not exist, will be created.", - "default": "$id.$key.output.output" - }, - - "output_compression": { - "type": "string", - "description": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. The compression format to be used on the output h5mu object", - "help_text": "Type: `string`, example: `gzip`, choices: ``gzip`, `lzf``. 
The compression format to be used on the output h5mu object.", - "enum": ["gzip", "lzf"] + "description": "Input/output parameters for Nextflow itself. Please note that both publishDir and publish_dir are supported but at least one has to be configured.", + "properties": { + + + "publish_dir": { + "type": + "string", + "description": "Type: `string`, required, example: `output/`. Path to an output directory", + "help_text": "Type: `string`, required, example: `output/`. Path to an output directory." - } - - } - }, - "filtering and normalization" : { - "title": "Filtering and normalization", - "type": "object", - "description": "Arguments for filtering, normalization an log transform (see scvelo.pp.filter_and_normalize function)", - "properties": { - - "min_counts": { - "type": "integer", - "description": "Type: `integer`. Minimum number of counts required for a gene to pass filtering (spliced)", - "help_text": "Type: `integer`. Minimum number of counts required for a gene to pass filtering (spliced)." - }, - - "min_counts_u": { - "type": "integer", - "description": "Type: `integer`. Minimum number of counts required for a gene to pass filtering (unspliced)", - "help_text": "Type: `integer`. Minimum number of counts required for a gene to pass filtering (unspliced)." - }, - - "min_cells": { - "type": "integer", - "description": "Type: `integer`. Minimum number of cells expressed required to pass filtering (spliced)", - "help_text": "Type: `integer`. Minimum number of cells expressed required to pass filtering (spliced)." - }, - - "min_cells_u": { - "type": "integer", - "description": "Type: `integer`. Minimum number of cells expressed required to pass filtering (unspliced)", - "help_text": "Type: `integer`. Minimum number of cells expressed required to pass filtering (unspliced)." - }, - - "min_shared_counts": { - "type": "integer", - "description": "Type: `integer`. Minimum number of counts (both unspliced and spliced) required for a gene", - "help_text": "Type: `integer`. Minimum number of counts (both unspliced and spliced) required for a gene." - }, - - "min_shared_cells": { - "type": "integer", - "description": "Type: `integer`. Minimum number of cells required to be expressed (both unspliced and spliced)", - "help_text": "Type: `integer`. Minimum number of cells required to be expressed (both unspliced and spliced)." - }, - - "n_top_genes": { - "type": "integer", - "description": "Type: `integer`. Number of genes to keep", - "help_text": "Type: `integer`. Number of genes to keep." - }, - - "log_transform": { - "type": "boolean", - "description": "Type: `boolean`, default: `true`. Do not log transform counts", - "help_text": "Type: `boolean`, default: `true`. Do not log transform counts.", - "default": "True" - } - - } - }, - "fitting parameters" : { - "title": "Fitting parameters", - "type": "object", - "description": "Arguments for fitting the data", - "properties": { - - "n_principal_components": { - "type": "integer", - "description": "Type: `integer`. Number of principal components to use for calculating moments", - "help_text": "Type: `integer`. Number of principal components to use for calculating moments." - }, - - "n_neighbors": { - "type": "integer", - "description": "Type: `integer`, default: `30`. Number of neighbors to use", - "help_text": "Type: `integer`, default: `30`. Number of neighbors to use. 
First/second-order moments are computed for each\ncell across its nearest neighbors, where the neighbor graph is obtained from\neuclidean distances in PCA space.\n", - "default": "30" - } - - } - }, - "nextflow input-output arguments" : { - "title": "Nextflow input-output arguments", - "type": "object", - "description": "Input/output parameters for Nextflow itself. Please note that both publishDir and publish_dir are supported but at least one has to be configured.", - "properties": { - - "publish_dir": { - "type": "string", - "description": "Type: `string`, required, example: `output/`. Path to an output directory", - "help_text": "Type: `string`, required, example: `output/`. Path to an output directory." - }, - - "param_list": { - "type": "string", - "description": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel", - "help_text": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel. A `param_list` can either be a list of maps, a csv file, a json file, a yaml file, or simply a yaml blob.\n\n* A list of maps (as-is) where the keys of each map corresponds to the arguments of the pipeline. Example: in a `nextflow.config` file: `param_list: [ [\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027], [\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027] ]`.\n* A csv file should have column names which correspond to the different arguments of this pipeline. Example: `--param_list data.csv` with columns `id,input`.\n* A json or a yaml file should be a list of maps, each of which has keys corresponding to the arguments of the pipeline. Example: `--param_list data.json` with contents `[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]`.\n* A yaml blob can also be passed directly as a string. Example: `--param_list \"[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]\"`.\n\nWhen passing a csv, json or yaml file, relative path names are relativized to the location of the parameter file. No relativation is performed when `param_list` is a list of maps (as-is) or a yaml blob.", - "hidden": true - } - - } - } + } + + + , + "param_list": { + "type": + "string", + "description": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel", + "help_text": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel. A `param_list` can either be a list of maps, a csv file, a json file, a yaml file, or simply a yaml blob.\n\n* A list of maps (as-is) where the keys of each map corresponds to the arguments of the pipeline. Example: in a `nextflow.config` file: `param_list: [ [\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027], [\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027] ]`.\n* A csv file should have column names which correspond to the different arguments of this pipeline. Example: `--param_list data.csv` with columns `id,input`.\n* A json or a yaml file should be a list of maps, each of which has keys corresponding to the arguments of the pipeline. 
Example: `--param_list data.json` with contents `[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]`.\n* A yaml blob can also be passed directly as a string. Example: `--param_list \"[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]\"`.\n\nWhen passing a csv, json or yaml file, relative path names are relativized to the location of the parameter file. No relativation is performed when `param_list` is a list of maps (as-is) or a yaml blob.", + "hidden": true + + } + + +} +} +}, +"allOf": [ + + { + "$ref": "#/definitions/inputs" + }, + + { + "$ref": "#/definitions/outputs" + }, + + { + "$ref": "#/definitions/filtering and normalization" + }, + + { + "$ref": "#/definitions/fitting parameters" }, - "allOf": [ - { - "$ref": "#/definitions/inputs" - }, - { - "$ref": "#/definitions/outputs" - }, - { - "$ref": "#/definitions/filtering and normalization" - }, - { - "$ref": "#/definitions/fitting parameters" - }, - { - "$ref": "#/definitions/nextflow input-output arguments" - } - ] + + { + "$ref": "#/definitions/nextflow input-output arguments" + } +] } diff --git a/target/nextflow/velocity/velocyto/.config.vsh.yaml b/target/nextflow/velocity/velocyto/.config.vsh.yaml index 4ebb609e299..aa9b3619f52 100644 --- a/target/nextflow/velocity/velocyto/.config.vsh.yaml +++ b/target/nextflow/velocity/velocyto/.config.vsh.yaml @@ -1,7 +1,7 @@ functionality: name: "velocyto" namespace: "velocity" - version: "0.12.3" + version: "0.12.4" authors: - name: "Robrecht Cannoodt" roles: @@ -220,6 +220,6 @@ info: output: "/home/runner/work/openpipeline/openpipeline/target/nextflow/velocity/velocyto" executable: "/home/runner/work/openpipeline/openpipeline/target/nextflow/velocity/velocyto/velocyto" viash_version: "0.7.5" - git_commit: "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f" + git_commit: "a075b9f384e200b357c4c85801062a980ddb3383" git_remote: "https://github.com/openpipelines-bio/openpipeline" - git_tag: "0.12.2-3-g827d483cf7" + git_tag: "0.12.3-3-ga075b9f384" diff --git a/target/nextflow/velocity/velocyto/main.nf b/target/nextflow/velocity/velocyto/main.nf index 26248b02f97..8d114d7bb1b 100644 --- a/target/nextflow/velocity/velocyto/main.nf +++ b/target/nextflow/velocity/velocyto/main.nf @@ -1,4 +1,4 @@ -// velocyto 0.12.3 +// velocyto 0.12.4 // // This wrapper script is auto-generated by viash 0.7.5 and is thus a derivative // work thereof. 
This software comes with ABSOLUTELY NO WARRANTY from Data @@ -27,7 +27,7 @@ thisConfig = processConfig(jsonSlurper.parseText('''{ "functionality" : { "name" : "velocyto", "namespace" : "velocity", - "version" : "0.12.3", + "version" : "0.12.4", "authors" : [ { "name" : "Robrecht Cannoodt", @@ -314,9 +314,9 @@ thisConfig = processConfig(jsonSlurper.parseText('''{ "platform" : "nextflow", "output" : "/home/runner/work/openpipeline/openpipeline/target/nextflow/velocity/velocyto", "viash_version" : "0.7.5", - "git_commit" : "827d483cf7d8844f3a3745b724f1d9cdeb3c7a2f", + "git_commit" : "a075b9f384e200b357c4c85801062a980ddb3383", "git_remote" : "https://github.com/openpipelines-bio/openpipeline", - "git_tag" : "0.12.2-3-g827d483cf7" + "git_tag" : "0.12.3-3-ga075b9f384" } }''')) diff --git a/target/nextflow/velocity/velocyto/nextflow.config b/target/nextflow/velocity/velocyto/nextflow.config index dee9b3b802e..7ef917db6d8 100644 --- a/target/nextflow/velocity/velocyto/nextflow.config +++ b/target/nextflow/velocity/velocyto/nextflow.config @@ -2,7 +2,7 @@ manifest { name = 'velocyto' mainScript = 'main.nf' nextflowVersion = '!>=20.12.1-edge' - version = '0.12.3' + version = '0.12.4' description = 'Runs the velocity analysis on a BAM file, outputting a loom file.' author = 'Robrecht Cannoodt' } diff --git a/target/nextflow/velocity/velocyto/nextflow_schema.json b/target/nextflow/velocity/velocyto/nextflow_schema.json index 9cadeb25581..ea06a74c7c3 100644 --- a/target/nextflow/velocity/velocyto/nextflow_schema.json +++ b/target/nextflow/velocity/velocyto/nextflow_schema.json @@ -1,86 +1,125 @@ { - "$schema": "http://json-schema.org/draft-07/schema", - "title": "velocyto", - "description": "Runs the velocity analysis on a BAM file, outputting a loom file.", +"$schema": "http://json-schema.org/draft-07/schema", +"title": "velocyto", +"description": "Runs the velocity analysis on a BAM file, outputting a loom file.", +"type": "object", +"definitions": { + + + + "arguments" : { + "title": "Arguments", "type": "object", - "definitions": { - "arguments" : { - "title": "Arguments", - "type": "object", - "description": "No description", - "properties": { - - "input": { - "type": "string", - "description": "Type: `file`, required. Path to BAM file", - "help_text": "Type: `file`, required. Path to BAM file" - }, - - "transcriptome": { - "type": "string", - "description": "Type: `file`, required. Path to GTF file", - "help_text": "Type: `file`, required. Path to GTF file" - }, - - "barcode": { - "type": "string", - "description": "Type: `file`. Valid barcodes file, to filter the bam", - "help_text": "Type: `file`. Valid barcodes file, to filter the bam. If --bcfile is not specified all the cell barcodes will be included.\nCell barcodes should be specified in the bcfile as the \u0027CB\u0027 tag for each read\n" - }, - - "without_umi": { - "type": "boolean", - "description": "Type: `boolean_true`, default: `false`. foo", - "help_text": "Type: `boolean_true`, default: `false`. foo", - "default": "False" - }, - - "output": { - "type": "string", - "description": "Type: `file`, required, default: `$id.$key.output.output`. Velocyto loom file", - "help_text": "Type: `file`, required, default: `$id.$key.output.output`. Velocyto loom file", - "default": "$id.$key.output.output" - }, - - "logic": { - "type": "string", - "description": "Type: `string`, default: `Default`, choices: ``Default`, `Permissive10X`, `Intermediate10X`, `ValidatedIntrons10X`, `Stricter10X`, `ObservedSpanning10X`, `Discordant10X`, `SmartSeq2``. 
The logic to use for the filtering", - "help_text": "Type: `string`, default: `Default`, choices: ``Default`, `Permissive10X`, `Intermediate10X`, `ValidatedIntrons10X`, `Stricter10X`, `ObservedSpanning10X`, `Discordant10X`, `SmartSeq2``. The logic to use for the filtering.", - "enum": ["Default", "Permissive10X", "Intermediate10X", "ValidatedIntrons10X", "Stricter10X", "ObservedSpanning10X", "Discordant10X", "SmartSeq2"] + "description": "No description", + "properties": { + + + "input": { + "type": + "string", + "description": "Type: `file`, required. Path to BAM file", + "help_text": "Type: `file`, required. Path to BAM file" + + } + + + , + "transcriptome": { + "type": + "string", + "description": "Type: `file`, required. Path to GTF file", + "help_text": "Type: `file`, required. Path to GTF file" + + } + + + , + "barcode": { + "type": + "string", + "description": "Type: `file`. Valid barcodes file, to filter the bam", + "help_text": "Type: `file`. Valid barcodes file, to filter the bam. If --bcfile is not specified all the cell barcodes will be included.\nCell barcodes should be specified in the bcfile as the \u0027CB\u0027 tag for each read\n" + + } + + + , + "without_umi": { + "type": + "boolean", + "description": "Type: `boolean_true`, default: `false`. foo", + "help_text": "Type: `boolean_true`, default: `false`. foo" , - "default": "Default" - } - - } - }, - "nextflow input-output arguments" : { - "title": "Nextflow input-output arguments", - "type": "object", - "description": "Input/output parameters for Nextflow itself. Please note that both publishDir and publish_dir are supported but at least one has to be configured.", - "properties": { - - "publish_dir": { - "type": "string", - "description": "Type: `string`, required, example: `output/`. Path to an output directory", - "help_text": "Type: `string`, required, example: `output/`. Path to an output directory." - }, - - "param_list": { - "type": "string", - "description": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel", - "help_text": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel. A `param_list` can either be a list of maps, a csv file, a json file, a yaml file, or simply a yaml blob.\n\n* A list of maps (as-is) where the keys of each map corresponds to the arguments of the pipeline. Example: in a `nextflow.config` file: `param_list: [ [\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027], [\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027] ]`.\n* A csv file should have column names which correspond to the different arguments of this pipeline. Example: `--param_list data.csv` with columns `id,input`.\n* A json or a yaml file should be a list of maps, each of which has keys corresponding to the arguments of the pipeline. Example: `--param_list data.json` with contents `[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]`.\n* A yaml blob can also be passed directly as a string. Example: `--param_list \"[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]\"`.\n\nWhen passing a csv, json or yaml file, relative path names are relativized to the location of the parameter file. 
No relativation is performed when `param_list` is a list of maps (as-is) or a yaml blob.", - "hidden": true - } - - } - } + "default": "False" + } + + + , + "output": { + "type": + "string", + "description": "Type: `file`, required, default: `$id.$key.output.output`. Velocyto loom file", + "help_text": "Type: `file`, required, default: `$id.$key.output.output`. Velocyto loom file" + , + "default": "$id.$key.output.output" + } + + + , + "logic": { + "type": + "string", + "description": "Type: `string`, default: `Default`, choices: ``Default`, `Permissive10X`, `Intermediate10X`, `ValidatedIntrons10X`, `Stricter10X`, `ObservedSpanning10X`, `Discordant10X`, `SmartSeq2``. The logic to use for the filtering", + "help_text": "Type: `string`, default: `Default`, choices: ``Default`, `Permissive10X`, `Intermediate10X`, `ValidatedIntrons10X`, `Stricter10X`, `ObservedSpanning10X`, `Discordant10X`, `SmartSeq2``. The logic to use for the filtering.", + "enum": ["Default", "Permissive10X", "Intermediate10X", "ValidatedIntrons10X", "Stricter10X", "ObservedSpanning10X", "Discordant10X", "SmartSeq2"] + + , + "default": "Default" + } + + +} +}, + + + "nextflow input-output arguments" : { + "title": "Nextflow input-output arguments", + "type": "object", + "description": "Input/output parameters for Nextflow itself. Please note that both publishDir and publish_dir are supported but at least one has to be configured.", + "properties": { + + + "publish_dir": { + "type": + "string", + "description": "Type: `string`, required, example: `output/`. Path to an output directory", + "help_text": "Type: `string`, required, example: `output/`. Path to an output directory." + + } + + + , + "param_list": { + "type": + "string", + "description": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel", + "help_text": "Type: `string`, example: `my_params.yaml`. Allows inputting multiple parameter sets to initialise a Nextflow channel. A `param_list` can either be a list of maps, a csv file, a json file, a yaml file, or simply a yaml blob.\n\n* A list of maps (as-is) where the keys of each map corresponds to the arguments of the pipeline. Example: in a `nextflow.config` file: `param_list: [ [\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027], [\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027] ]`.\n* A csv file should have column names which correspond to the different arguments of this pipeline. Example: `--param_list data.csv` with columns `id,input`.\n* A json or a yaml file should be a list of maps, each of which has keys corresponding to the arguments of the pipeline. Example: `--param_list data.json` with contents `[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]`.\n* A yaml blob can also be passed directly as a string. Example: `--param_list \"[ {\u0027id\u0027: \u0027foo\u0027, \u0027input\u0027: \u0027foo.txt\u0027}, {\u0027id\u0027: \u0027bar\u0027, \u0027input\u0027: \u0027bar.txt\u0027} ]\"`.\n\nWhen passing a csv, json or yaml file, relative path names are relativized to the location of the parameter file. 
No relativation is performed when `param_list` is a list of maps (as-is) or a yaml blob.", + "hidden": true + + } + + +} +} +}, +"allOf": [ + + { + "$ref": "#/definitions/arguments" }, - "allOf": [ - { - "$ref": "#/definitions/arguments" - }, - { - "$ref": "#/definitions/nextflow input-output arguments" - } - ] + + { + "$ref": "#/definitions/nextflow input-output arguments" + } +] }
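
The `param_list` help text added in the schemas above describes how multiple parameter sets can be supplied as a list of maps, a CSV, JSON or YAML file, or an inline YAML blob. As a minimal sketch of that format (sample IDs and file names are hypothetical, not taken from this diff), a YAML parameter file for the velocyto component could look like the following, with keys mirroring the arguments declared in its `nextflow_schema.json`:

    # params.yaml -- hypothetical example of a `--param_list` file:
    # a YAML list of maps whose keys match the component arguments.
    - id: sample_a
      input: sample_a.bam
      transcriptome: annotation.gtf
    - id: sample_b
      input: sample_b.bam
      transcriptome: annotation.gtf

Assuming the generated component is run standalone through its `main.nf` (the exact invocation is not part of this diff), the required `--publish_dir` argument from the "Nextflow input-output arguments" group would be passed alongside it; relative paths in `params.yaml` are then resolved against the location of the parameter file, as the help text notes:

    nextflow run target/nextflow/velocity/velocyto/main.nf \
      --param_list params.yaml \
      --publish_dir output/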