diff --git a/renku/data/shacl_shape.json b/renku/data/shacl_shape.json
index f24954b6c8..82dc8fdbc9 100644
--- a/renku/data/shacl_shape.json
+++ b/renku/data/shacl_shape.json
@@ -1468,10 +1468,24 @@
         "maxCount": 1
       },
       {
-        "path": "schema:mapsTo",
-        "sh:class": {
-          "@id": "renku:CommandParameterBase"
-        }
+        "path": "renku:mapsTo",
+        "or": [
+          {
+            "sh:class": {
+              "@id": "renku:CommandParameter"
+            }
+          },
+          {
+            "sh:class": {
+              "@id": "renku:CommandInput"
+            }
+          },
+          {
+            "sh:class": {
+              "@id": "renku:CommandOutput"
+            }
+          }
+        ]
       }
     ]
   },
diff --git a/tests/cli/test_datasets.py b/tests/cli/test_datasets.py
index 0a3999be7f..42c5095a0c 100644
--- a/tests/cli/test_datasets.py
+++ b/tests/cli/test_datasets.py
@@ -473,6 +473,9 @@ def test_multiple_file_to_dataset(tmpdir, runner, project, client, load_dataset_
     result = runner.invoke(cli, ["dataset", "add", "dataset"] + paths, catch_exceptions=False)
     assert 0 == result.exit_code, format_result_exception(result)
 
+    result = runner.invoke(cli, ["graph", "export", "--format", "json-ld", "--strict"])
+    assert 0 == result.exit_code, format_result_exception(result)
+
 
 def test_add_with_relative_path(runner, client, directory_tree, subdirectory):
     """Test adding data with relative path."""
@@ -676,8 +679,7 @@ def test_dataset_add_many(tmpdir, runner, project, client):
 
 
 def test_dataset_file_path_from_subdirectory(runner, client, subdirectory, load_dataset_with_injection):
-    """Test adding a file into a dataset and check path independent
-    of the CWD"""
+    """Test adding a file into a dataset and check path independent of the CWD."""
     # create a dataset
     result = runner.invoke(cli, ["dataset", "create", "dataset"])
     assert 0 == result.exit_code, format_result_exception(result)
@@ -992,7 +994,7 @@ def test_dataset_unlink_file_abort_unlinking(tmpdir, runner, project):
 
 
 def test_dataset_unlink_file(tmpdir, runner, client, subdirectory, load_dataset_with_injection):
-    """Test unlinking of file and check removal from dataset"""
+    """Test unlinking of file and check removal from dataset."""
     # create a dataset
     result = runner.invoke(cli, ["dataset", "create", "my-dataset"])
     assert 0 == result.exit_code, format_result_exception(result)
@@ -1039,6 +1041,9 @@ def test_dataset_rm(runner, client, directory_tree, subdirectory, load_dataset_w
     result = runner.invoke(cli, ["doctor"], catch_exceptions=False)
     assert 0 == result.exit_code, format_result_exception(result)
 
+    result = runner.invoke(cli, ["graph", "export", "--format", "json-ld", "--strict"])
+    assert 0 == result.exit_code, format_result_exception(result)
+
 
 def test_dataset_rm_failure(runner, client):
     """Test errors in removal of a dataset."""
@@ -1126,6 +1131,9 @@ def test_dataset_edit(runner, client, project, dirty, subdirectory, load_dataset
     assert 1 == len(dataset.annotations)
     assert new_metadata == dataset.annotations[0].body
 
+    result = runner.invoke(cli, ["graph", "export", "--format", "json-ld", "--strict"])
+    assert 0 == result.exit_code, format_result_exception(result)
+
 
 @pytest.mark.parametrize("dirty", [False, True])
 def test_dataset_edit_no_change(runner, client, project, dirty):
@@ -1171,6 +1179,7 @@ def test_dataset_provider_resolution_dataverse(doi_responses, uri):
 
 
 def test_dataset_tag(tmpdir, runner, client, subdirectory, get_datasets_provenance_with_injection):
+    """Test that dataset tags can be created."""
     result = runner.invoke(cli, ["dataset", "create", "my-dataset"])
     assert 0 == result.exit_code, format_result_exception(result)
     assert "OK" in result.output
@@ -1200,9 +1209,13 @@ def test_dataset_tag(tmpdir, runner, client, subdirectory, get_datasets_provenan
         all_tags = datasets_provenance.get_all_tags(dataset)
         assert {dataset.id} == {t.dataset_id.value for t in all_tags}
 
+    result = runner.invoke(cli, ["graph", "export", "--format", "json-ld", "--strict"])
+    assert 0 == result.exit_code, format_result_exception(result)
+
 
 @pytest.mark.parametrize("form", ["tabular", "json-ld"])
 def test_dataset_ls_tags(tmpdir, runner, project, client, form, load_dataset_with_injection):
+    """Test listing of dataset tags."""
     result = runner.invoke(cli, ["dataset", "create", "my-dataset"])
     assert 0 == result.exit_code, format_result_exception(result)
     assert "OK" in result.output
@@ -1238,6 +1251,7 @@ def test_dataset_ls_tags(tmpdir, runner, project, client, form, load_dataset_wit
 
 
 def test_dataset_rm_tag(tmpdir, runner, client, subdirectory, load_dataset_with_injection):
+    """Test removing of dataset tags."""
     result = runner.invoke(cli, ["dataset", "create", "my-dataset"])
     assert 0 == result.exit_code, format_result_exception(result)
     assert "OK" in result.output
@@ -1273,8 +1287,12 @@ def test_dataset_rm_tag(tmpdir, runner, client, subdirectory, load_dataset_with_
     assert 2 == result.exit_code
     assert "not found" in result.output
 
+    result = runner.invoke(cli, ["graph", "export", "--format", "json-ld", "--strict"])
+    assert 0 == result.exit_code, format_result_exception(result)
+
 
 def test_dataset_rm_tags_multiple(tmpdir, runner, project, client):
+    """Test removing multiple dataset tags at once."""
     result = runner.invoke(cli, ["dataset", "create", "my-dataset"])
     assert 0 == result.exit_code, format_result_exception(result)
     assert "OK" in result.output
@@ -1300,6 +1318,7 @@ def test_dataset_rm_tags_multiple(tmpdir, runner, project, client):
 
 
 def test_dataset_rm_tags_failure(tmpdir, runner, project, client):
+    """Test removing non-existent dataset tag."""
     result = runner.invoke(cli, ["dataset", "rm-tags", "my-dataset", "1"], catch_exceptions=False)
 
     assert 1 == result.exit_code
@@ -1726,7 +1745,7 @@ def test_external_file_update(runner, client, directory_tree, subdirectory):
     assert current_commit != previous_commit
 
 
-@pytest.mark.skip("renku update doesn't support new database, reenable once it does")
+@pytest.mark.skip("renku update follows symlinks when calculating hashes and doesn't respect external files")
 @pytest.mark.serial
 def test_workflow_with_external_file(runner, client, directory_tree, run, subdirectory, no_lfs_size_limit):
     """Check using external files in workflows."""
@@ -2027,6 +2046,9 @@ def test_datasets_provenance_after_update(runner, client, directory_tree, get_da
 
     assert current_version.identifier != current_version.initial_identifier
 
+    result = runner.invoke(cli, ["graph", "export", "--format", "json-ld", "--strict"])
+    assert 0 == result.exit_code, format_result_exception(result)
+
 
 def test_datasets_provenance_after_adding_tag(
     runner, client, get_datasets_provenance_with_injection, load_dataset_with_injection
diff --git a/tests/cli/test_integration_datasets.py b/tests/cli/test_integration_datasets.py
index 78e2c64955..373a9cff0b 100644
--- a/tests/cli/test_integration_datasets.py
+++ b/tests/cli/test_integration_datasets.py
@@ -97,6 +97,9 @@ def test_dataset_import_real_doi(runner, client, doi, prefix, sleep_after, load_
     assert dataset.date_created is None
     assert dataset.date_published is not None
 
+    result = runner.invoke(cli, ["graph", "export", "--format", "json-ld", "--strict"])
+    assert 0 == result.exit_code, format_result_exception(result)
+
 
 @pytest.mark.parametrize(
     "doi, input",
@@ -143,6 +146,9 @@ def test_dataset_import_real_param(doi, input, runner, project, sleep_after, cli
     result = runner.invoke(cli, ["dataset", "ls"])
     assert 0 == result.exit_code, format_result_exception(result) + str(result.stderr_bytes)
 
+    result = runner.invoke(cli, ["graph", "export", "--format", "json-ld", "--strict"])
+    assert 0 == result.exit_code, format_result_exception(result)
+
 
 @pytest.mark.parametrize(
     "doi", [("10.5281/zenodo.3239984", "n"), ("zenodo.org/record/3239986", "n"), ("10.5281/zenodo.3239982", "n")]
@@ -163,7 +169,7 @@ def test_dataset_import_uri_404(doi, runner, project, sleep_after):
 @retry_failed
 @pytest.mark.vcr
 def test_dataset_import_real_doi_warnings(runner, project, sleep_after):
-    """Test dataset import for existing DOI and dataset"""
+    """Test dataset import for existing DOI and dataset."""
     result = runner.invoke(cli, ["dataset", "import", "10.5281/zenodo.1438326"], input="y")
     assert 0 == result.exit_code, format_result_exception(result) + str(result.stderr_bytes)
     assert "Warning: Newer version found" in result.output
@@ -323,6 +329,9 @@ def test_dataset_import_renku_provider(runner, client, uri, load_dataset_with_in
     canonical_uri = "https://dev.renku.ch/datasets/860f6b5b46364c83b6a9b38ef198bcc0"
     assert dataset.same_as.url["@id"] == canonical_uri
 
+    result = runner.invoke(cli, ["graph", "export", "--format", "json-ld", "--strict"])
+    assert 0 == result.exit_code, format_result_exception(result)
+
 
 @pytest.mark.integration
 @retry_failed
@@ -482,7 +491,7 @@ def test_dataset_import_renku_provider_errors(runner, project, uri, message):
 @pytest.mark.vcr
 @pytest.mark.parametrize("url", ["https://dev.renku.ch/datasets/e3e1beba05594fdd8e4682963cec9fe2"])
 def test_dataset_reimport_renkulab_dataset(runner, project, url):
-    """Test dataset import for existing dataset"""
+    """Test dataset import for existing dataset."""
     assert 0 == runner.invoke(cli, ["dataset", "import", url], input="y").exit_code
 
     result = runner.invoke(cli, ["dataset", "import", url], input="y")
@@ -558,6 +567,9 @@ def test_dataset_export_upload_file(
     assert "Exported to:" in result.output
     assert output in result.output
 
+    result = runner.invoke(cli, ["graph", "export", "--format", "json-ld", "--strict"])
+    assert 0 == result.exit_code, format_result_exception(result)
+
 
 @pytest.mark.integration
 @retry_failed
diff --git a/tests/cli/test_move.py b/tests/cli/test_move.py
index 848e69357d..d46f42f35b 100644
--- a/tests/cli/test_move.py
+++ b/tests/cli/test_move.py
@@ -51,6 +51,9 @@ def test_move(runner, client):
     assert f"{src1} -> {dst1}" in result.output
     assert f"{src2} -> {dst2}" in result.output
 
+    result = runner.invoke(cli, ["graph", "export", "--format", "json-ld", "--strict"])
+    assert 0 == result.exit_code, format_result_exception(result)
+
 
 def test_move_outside_paths(runner, client, directory_tree):
     """Test move from/to outside paths is not possible."""
diff --git a/tests/cli/test_project.py b/tests/cli/test_project.py
index 8de13b17f3..72749561ca 100644
--- a/tests/cli/test_project.py
+++ b/tests/cli/test_project.py
@@ -98,6 +98,9 @@ def test_project_edit(runner, client, subdirectory, client_database_injection_ma
     assert "Renku Version:" in result.output
     assert "Keywords:" in result.output
 
+    result = runner.invoke(cli, ["graph", "export", "--format", "json-ld", "--strict"])
+    assert 0 == result.exit_code, format_result_exception(result)
+
 
 def test_project_edit_no_change(runner, client):
     """Check project metadata editing does not commit when there is no change."""
diff --git a/tests/cli/test_rerun.py b/tests/cli/test_rerun.py
index 54f1c9e212..a2d055f74e 100644
--- a/tests/cli/test_rerun.py
+++ b/tests/cli/test_rerun.py
@@ -66,7 +66,7 @@ def rerun():
         ("𒁃.c", "𒁏.txt"),
     ],
 )
-def test_rerun_with_special_paths(project, renku_cli, provider, source, output):
+def test_rerun_with_special_paths(project, renku_cli, runner, provider, source, output):
     """Test rerun with unicode/whitespace filenames."""
     cwd = Path(project)
     source = cwd / source
@@ -88,6 +88,9 @@ def rerun():
 
     assert content != output.read_text().strip(), "The output should have changed."
 
+    result = runner.invoke(cli, ["graph", "export", "--format", "json-ld", "--strict"])
+    assert 0 == result.exit_code, format_result_exception(result)
+
 
 @pytest.mark.parametrize("provider", available_workflow_providers())
 @pytest.mark.parametrize("source, content", [("input1", "input1 new-input2 old"), ("input2", "input1 old-input2 new")])
@@ -295,6 +298,9 @@ def test_rerun_overridden_outputs_partially(project, renku_cli, runner):
     assert "r2" in result.output
     assert "r3" not in result.output
 
+    result = runner.invoke(cli, ["graph", "export", "--format", "json-ld", "--strict"])
+    assert 0 == result.exit_code, format_result_exception(result)
+
 
 def test_rerun_multiple_paths_common_output(project, renku_cli, runner):
     """Test when multiple paths generate the same output only the most recent path will be rerun."""
diff --git a/tests/cli/test_rollback.py b/tests/cli/test_rollback.py
index 83fd3e0064..39f2ba1548 100644
--- a/tests/cli/test_rollback.py
+++ b/tests/cli/test_rollback.py
@@ -83,6 +83,9 @@ def test_rollback(client, runner, project):
     result = runner.invoke(cli, ["rollback"], input="7\ny")
     assert 0 == result.exit_code, format_result_exception(result)
 
+    result = runner.invoke(cli, ["graph", "export", "--format", "json-ld", "--strict"])
+    assert 0 == result.exit_code, format_result_exception(result)
+
     result = runner.invoke(cli, ["workflow", "ls"])
     assert 0 == result.exit_code, format_result_exception(result)
     assert 2 == len(result.output.splitlines())
diff --git a/tests/cli/test_run.py b/tests/cli/test_run.py
index 3b1c6550f0..255775eafe 100644
--- a/tests/cli/test_run.py
+++ b/tests/cli/test_run.py
@@ -95,7 +95,7 @@ def test_run_external_command_file(runner, client, project, run_shell, client_da
         assert plan.command.endswith("/echo")
 
 
-def test_run_metadata(renku_cli, client, client_database_injection_manager):
+def test_run_metadata(renku_cli, runner, client, client_database_injection_manager):
     """Test run with workflow metadata."""
     exit_code, activity = renku_cli(
         "run", "--name", "run-1", "--description", "first run", "--keyword", "key1", "--keyword", "key2", "touch", "foo"
@@ -114,6 +114,9 @@ def test_run_metadata(renku_cli, client, client_database_injection_manager):
         assert "first run" == plan.description
         assert {"key1", "key2"} == set(plan.keywords)
 
+    result = runner.invoke(cli, ["graph", "export", "--format", "json-ld", "--strict"])
+    assert 0 == result.exit_code, format_result_exception(result)
+
 
 @pytest.mark.parametrize(
     "command, name",
diff --git a/tests/cli/test_template.py b/tests/cli/test_template.py
index 30b8cc68d7..15e2494557 100644
--- a/tests/cli/test_template.py
+++ b/tests/cli/test_template.py
@@ -146,6 +146,9 @@ def test_template_set(runner, client, client_database_injection_manager):
         assert __template_version__ == client.project.template_version
         assert __template_version__ == client.project.template_ref
 
+    result = runner.invoke(cli, ["graph", "export", "--format", "json-ld", "--strict"])
+    assert 0 == result.exit_code, format_result_exception(result)
+
 
 def test_template_set_overwrites_modified(runner, client, client_database_injection_manager):
     """Test setting a new template in a project overwrite modified files."""
@@ -230,6 +233,9 @@ def test_template_update(runner, client, client_database_injection_manager):
     assert 0 == result.exit_code, format_result_exception(result)
     assert "Template is up-to-date" in result.output
 
+    result = runner.invoke(cli, ["graph", "export", "--format", "json-ld", "--strict"])
+    assert 0 == result.exit_code, format_result_exception(result)
+
 
 def test_template_update_latest_version(runner, client):
     """Test updating template that is the latest version."""
diff --git a/tests/cli/test_update.py b/tests/cli/test_update.py
index 913af3549d..618e9bf934 100644
--- a/tests/cli/test_update.py
+++ b/tests/cli/test_update.py
@@ -64,6 +64,9 @@ def test_update(runner, client, renku_cli, client_database_injection_manager, pr
         # NOTE: No ActivityCollection is created if update include only one activity
         assert [] == activity_collections
 
+    result = runner.invoke(cli, ["graph", "export", "--format", "json-ld", "--strict"])
+    assert 0 == result.exit_code, format_result_exception(result)
+
 
 @pytest.mark.parametrize("provider", available_workflow_providers())
 def test_update_multiple_steps(runner, client, renku_cli, client_database_injection_manager, provider):
diff --git a/tests/cli/test_workflow.py b/tests/cli/test_workflow.py
index 538a209a0f..103707680b 100644
--- a/tests/cli/test_workflow.py
+++ b/tests/cli/test_workflow.py
@@ -146,6 +146,9 @@ def test_workflow_compose(runner, project, run_shell, client):
         assert composite_plan.mappings[1].default_value == "other_output.csv"
         assert composite_plan.mappings[1].description == "the final output file produced"
 
+    result = runner.invoke(cli, ["graph", "export", "--format", "json-ld", "--strict"])
+    assert 0 == result.exit_code, format_result_exception(result)
+
 
 def test_workflow_compose_from_paths(runner, project, run_shell, client):
     """Test renku workflow compose with input/output paths."""
@@ -322,7 +325,7 @@ def test_workflow_show(runner, project, run_shell, client):
 
 
 def test_workflow_remove_command(runner, project):
-    """test workflow remove with builder."""
+    """Test workflow remove with builder."""
     workflow_name = "test_workflow"
 
     result = runner.invoke(cli, ["workflow", "remove", workflow_name])
@@ -336,7 +339,7 @@ def test_workflow_remove_command(runner, project):
 
 
 def test_workflow_export_command(runner, project):
-    """test workflow export with builder."""
+    """Test workflow export with builder."""
     result = runner.invoke(cli, ["run", "--success-code", "0", "--no-output", "--name", "run1", "touch", "data.csv"])
     assert 0 == result.exit_code, format_result_exception(result)
 
@@ -466,6 +469,9 @@ def _get_plan_id(output):
     assert len(edited_composite_plan.mappings) == 1
     assert edited_composite_plan.mappings[0].mapped_parameters[0].name == "param1"
 
+    result = runner.invoke(cli, ["graph", "export", "--format", "json-ld", "--strict"])
+    assert 0 == result.exit_code, format_result_exception(result)
+
 
 def test_workflow_show_outputs_with_directory(runner, client, run):
     """Output files in directory are not shown as separate outputs."""
@@ -517,7 +523,7 @@ def test_workflow_show_outputs_with_directory(runner, client, run):
     ],
 )
 def test_workflow_execute_command(runner, run_shell, project, capsys, client, provider, yaml, workflows, parameters):
-    """test workflow execute."""
+    """Test workflow execute."""
 
     for wf in workflows:
--name {wf[0]} -- {wf[1]}") @@ -600,6 +606,9 @@ def _flatten_dict(obj, key_string=""): for o in outputs: assert Path(o).resolve().exists() + result = runner.invoke(cli, ["graph", "export", "--format", "json-ld", "--strict"]) + assert 0 == result.exit_code, format_result_exception(result) + @pytest.mark.parametrize("provider", available_workflow_providers()) def test_workflow_execute_command_with_api_parameter_set(runner, run_shell, project, capsys, client, provider): @@ -626,6 +635,9 @@ def test_workflow_execute_command_with_api_parameter_set(runner, run_shell, proj assert "goodbye\n" == output.read_text() + result = runner.invoke(cli, ["graph", "export", "--format", "json-ld", "--strict"]) + assert 0 == result.exit_code, format_result_exception(result) + @pytest.mark.parametrize("provider", available_workflow_providers()) def test_workflow_execute_command_with_api_input_set(runner, run_shell, project, capsys, client, provider): @@ -658,6 +670,9 @@ def test_workflow_execute_command_with_api_input_set(runner, run_shell, project, assert "my other input string\n" == output.read_text() + result = runner.invoke(cli, ["graph", "export", "--format", "json-ld", "--strict"]) + assert 0 == result.exit_code, format_result_exception(result) + @pytest.mark.parametrize("provider", available_workflow_providers()) def test_workflow_execute_command_with_api_output_set(runner, run_shell, project, capsys, client, provider): @@ -687,6 +702,9 @@ def test_workflow_execute_command_with_api_output_set(runner, run_shell, project assert "test" == other_output.read_text() + result = runner.invoke(cli, ["graph", "export", "--format", "json-ld", "--strict"]) + assert 0 == result.exit_code, format_result_exception(result) + def test_workflow_execute_command_with_api_duplicate_output(runner, run_shell, project, capsys, client): """Test executing a workflow with duplicate output with differing path.""" @@ -960,6 +978,9 @@ def test_workflow_compose_execute(runner, project, run_shell, client): assert "xyz\n" == Path("output4").read_text() + result = runner.invoke(cli, ["graph", "export", "--format", "json-ld", "--strict"]) + assert 0 == result.exit_code, format_result_exception(result) + @pytest.mark.parametrize("provider", available_workflow_providers()) @pytest.mark.parametrize( @@ -1042,6 +1063,9 @@ def test_workflow_iterate(runner, run_shell, client, workflow, parameters, provi for o in outputs: assert Path(o).resolve().exists() + result = runner.invoke(cli, ["graph", "export", "--format", "json-ld", "--strict"]) + assert 0 == result.exit_code, format_result_exception(result) + def test_workflow_cycle_detection(run_shell, project, capsys, client): """Test creating a cycle is not possible with renku run or workflow execute.""" @@ -1077,7 +1101,7 @@ def test_workflow_cycle_detection(run_shell, project, capsys, client): @pytest.mark.skipif(sys.platform == "darwin", reason="GitHub macOS image doesn't include Docker") def test_workflow_execute_docker_toil(runner, client, run_shell, caplog): - """test workflow execute using docker with the toil provider.""" + """Test workflow execute using docker with the toil provider.""" caplog.set_level(logging.INFO) write_and_commit_file(client.repository, "input", "first line\nsecond line") @@ -1097,7 +1121,7 @@ def test_workflow_execute_docker_toil(runner, client, run_shell, caplog): def test_workflow_execute_docker_toil_stderr(runner, client, run_shell): - """test workflow execute using docker with the toil provider and stderr redirection.""" + """Test workflow execute using docker 
-    """test workflow execute using docker with the toil provider and stderr redirection."""
+    """Test workflow execute using docker with the toil provider and stderr redirection."""
     write_and_commit_file(client.repository, "input", "first line\nsecond line")
 
     output = client.path / "output"