fix(core): fix SHACL shape for MappingParameter and add SHACL checks to more tests #2811

Merged: 1 commit, Apr 6, 2022
22 changes: 18 additions & 4 deletions renku/data/shacl_shape.json
@@ -1468,10 +1468,24 @@
           "maxCount": 1
         },
         {
-          "path": "schema:mapsTo",
-          "sh:class": {
-            "@id": "renku:CommandParameterBase"
-          }
+          "path": "renku:mapsTo",
+          "or": [
+            {
+              "sh:class": {
+                "@id": "renku:CommandParameter"
+              }
+            },
+            {
+              "sh:class": {
+                "@id": "renku:CommandInput"
+              }
+            },
+            {
+              "sh:class": {
+                "@id": "renku:CommandOutput"
+              }
+            }
+          ]
         }
       ]
     },
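Judging from the diff, the old shape constrained the object of the (mis-prefixed) schema:mapsTo property to the abstract class renku:CommandParameterBase, while the exported JSON-LD presumably types these nodes with the concrete subclasses only, so strict validation failed. The fix corrects the predicate namespace to renku:mapsTo and accepts any of the three concrete classes via an "or" (SHACL sh:or). Below is a minimal, self-contained sketch of the same constraint pattern, validated with pyshacl; the renku: prefix IRI and the MappingParameter term are illustrative assumptions, not taken verbatim from the Renku ontology.

```python
# Minimal sketch of the sh:or pattern used above, validated with pyshacl.
# The renku: prefix IRI and the MappingParameter term are assumptions for
# illustration; only the constraint structure mirrors the shape in this PR.
from pyshacl import validate
from rdflib import Graph

SHAPES = """
@prefix sh:    <http://www.w3.org/ns/shacl#> .
@prefix renku: <https://example.org/renku#> .

renku:MappingParameterShape a sh:NodeShape ;
    sh:targetClass renku:MappingParameter ;
    sh:property [
        sh:path renku:mapsTo ;
        sh:or ( [ sh:class renku:CommandParameter ]
                [ sh:class renku:CommandInput ]
                [ sh:class renku:CommandOutput ] ) ;
    ] .
"""

DATA = """
@prefix renku: <https://example.org/renku#> .

# Conforms: renku:mapsTo points at a concrete subclass, not the abstract base.
<urn:example:mapping1> a renku:MappingParameter ;
    renku:mapsTo <urn:example:input1> .

<urn:example:input1> a renku:CommandInput .
"""

conforms, _, report = validate(
    Graph().parse(data=DATA, format="turtle"),
    shacl_graph=Graph().parse(data=SHAPES, format="turtle"),
)
print(conforms)  # True; retyping input1 as an unlisted class would fail
print(report)
```

With the old single sh:class constraint, this data would only conform if the subclass relationship were also present in the data graph, which is exactly the kind of gap the strict graph-export checks added below are meant to catch.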
30 changes: 26 additions & 4 deletions tests/cli/test_datasets.py
@@ -473,6 +473,9 @@ def test_multiple_file_to_dataset(tmpdir, runner, project, client, load_dataset_
     result = runner.invoke(cli, ["dataset", "add", "dataset"] + paths, catch_exceptions=False)
     assert 0 == result.exit_code, format_result_exception(result)
 
+    result = runner.invoke(cli, ["graph", "export", "--format", "json-ld", "--strict"])
+    assert 0 == result.exit_code, format_result_exception(result)
+
 
 def test_add_with_relative_path(runner, client, directory_tree, subdirectory):
     """Test adding data with relative path."""
@@ -676,8 +679,7 @@ def test_dataset_add_many(tmpdir, runner, project, client):
 
 
 def test_dataset_file_path_from_subdirectory(runner, client, subdirectory, load_dataset_with_injection):
-    """Test adding a file into a dataset and check path independent
-    of the CWD"""
+    """Test adding a file into a dataset and check path independent of the CWD."""
     # create a dataset
     result = runner.invoke(cli, ["dataset", "create", "dataset"])
     assert 0 == result.exit_code, format_result_exception(result)
@@ -992,7 +994,7 @@ def test_dataset_unlink_file_abort_unlinking(tmpdir, runner, project):
 
 
 def test_dataset_unlink_file(tmpdir, runner, client, subdirectory, load_dataset_with_injection):
-    """Test unlinking of file and check removal from dataset"""
+    """Test unlinking of file and check removal from dataset."""
     # create a dataset
     result = runner.invoke(cli, ["dataset", "create", "my-dataset"])
     assert 0 == result.exit_code, format_result_exception(result)
@@ -1039,6 +1041,9 @@ def test_dataset_rm(runner, client, directory_tree, subdirectory, load_dataset_w
     result = runner.invoke(cli, ["doctor"], catch_exceptions=False)
     assert 0 == result.exit_code, format_result_exception(result)
 
+    result = runner.invoke(cli, ["graph", "export", "--format", "json-ld", "--strict"])
+    assert 0 == result.exit_code, format_result_exception(result)
+
 
 def test_dataset_rm_failure(runner, client):
     """Test errors in removal of a dataset."""
@@ -1126,6 +1131,9 @@ def test_dataset_edit(runner, client, project, dirty, subdirectory, load_dataset
     assert 1 == len(dataset.annotations)
     assert new_metadata == dataset.annotations[0].body
 
+    result = runner.invoke(cli, ["graph", "export", "--format", "json-ld", "--strict"])
+    assert 0 == result.exit_code, format_result_exception(result)
+
 
 @pytest.mark.parametrize("dirty", [False, True])
 def test_dataset_edit_no_change(runner, client, project, dirty):
@@ -1171,6 +1179,7 @@ def test_dataset_provider_resolution_dataverse(doi_responses, uri):
 
 
 def test_dataset_tag(tmpdir, runner, client, subdirectory, get_datasets_provenance_with_injection):
+    """Test that dataset tags can be created."""
     result = runner.invoke(cli, ["dataset", "create", "my-dataset"])
     assert 0 == result.exit_code, format_result_exception(result)
     assert "OK" in result.output
@@ -1200,9 +1209,13 @@ def test_dataset_tag(tmpdir, runner, client, subdirectory, get_datasets_provenan
     all_tags = datasets_provenance.get_all_tags(dataset)
     assert {dataset.id} == {t.dataset_id.value for t in all_tags}
 
+    result = runner.invoke(cli, ["graph", "export", "--format", "json-ld", "--strict"])
+    assert 0 == result.exit_code, format_result_exception(result)
+
 
 @pytest.mark.parametrize("form", ["tabular", "json-ld"])
 def test_dataset_ls_tags(tmpdir, runner, project, client, form, load_dataset_with_injection):
+    """Test listing of dataset tags."""
     result = runner.invoke(cli, ["dataset", "create", "my-dataset"])
     assert 0 == result.exit_code, format_result_exception(result)
     assert "OK" in result.output
@@ -1238,6 +1251,7 @@ def test_dataset_ls_tags(tmpdir, runner, project, client, form, load_dataset_wit
 
 
 def test_dataset_rm_tag(tmpdir, runner, client, subdirectory, load_dataset_with_injection):
+    """Test removing of dataset tags."""
     result = runner.invoke(cli, ["dataset", "create", "my-dataset"])
     assert 0 == result.exit_code, format_result_exception(result)
     assert "OK" in result.output
@@ -1273,8 +1287,12 @@ def test_dataset_rm_tag(tmpdir, runner, client, subdirectory, load_dataset_with_
     assert 2 == result.exit_code
     assert "not found" in result.output
 
+    result = runner.invoke(cli, ["graph", "export", "--format", "json-ld", "--strict"])
+    assert 0 == result.exit_code, format_result_exception(result)
+
 
 def test_dataset_rm_tags_multiple(tmpdir, runner, project, client):
+    """Test removing multiple dataset tags at once."""
     result = runner.invoke(cli, ["dataset", "create", "my-dataset"])
     assert 0 == result.exit_code, format_result_exception(result)
     assert "OK" in result.output
@@ -1300,6 +1318,7 @@ def test_dataset_rm_tags_multiple(tmpdir, runner, project, client):
 
 
 def test_dataset_rm_tags_failure(tmpdir, runner, project, client):
+    """Test removing non-existent dataset tag."""
     result = runner.invoke(cli, ["dataset", "rm-tags", "my-dataset", "1"], catch_exceptions=False)
 
     assert 1 == result.exit_code
@@ -1726,7 +1745,7 @@ def test_external_file_update(runner, client, directory_tree, subdirectory):
     assert current_commit != previous_commit
 
 
-@pytest.mark.skip("renku update doesn't support new database, reenable once it does")
+@pytest.mark.skip("renku update follows symlinks when calculating hashes and doesn't respect external files")
 @pytest.mark.serial
 def test_workflow_with_external_file(runner, client, directory_tree, run, subdirectory, no_lfs_size_limit):
     """Check using external files in workflows."""
@@ -2027,6 +2046,9 @@ def test_datasets_provenance_after_update(runner, client, directory_tree, get_da
 
     assert current_version.identifier != current_version.initial_identifier
 
+    result = runner.invoke(cli, ["graph", "export", "--format", "json-ld", "--strict"])
+    assert 0 == result.exit_code, format_result_exception(result)
+
 
 def test_datasets_provenance_after_adding_tag(
     runner, client, get_datasets_provenance_with_injection, load_dataset_with_injection
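Every test touched above, and in the remaining files of this PR, gains the same two-line epilogue: re-exporting the project graph as JSON-LD with --strict, which fails the test if the generated metadata violates the SHACL shapes. A hypothetical helper could express the pattern once; it is not part of this PR, and the import paths are assumptions inferred from how these test modules reference cli and format_result_exception.

```python
# Hypothetical helper (not part of this PR) capturing the repeated check.
# Import paths are assumptions inferred from the test modules above.
from renku.cli import cli
from tests.utils import format_result_exception


def assert_graph_validates(runner):
    """Export the graph with --strict so SHACL violations fail the test."""
    result = runner.invoke(cli, ["graph", "export", "--format", "json-ld", "--strict"])
    assert 0 == result.exit_code, format_result_exception(result)
```

Each test body would then end with assert_graph_validates(runner) instead of repeating the invocation.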
16 changes: 14 additions & 2 deletions tests/cli/test_integration_datasets.py
@@ -97,6 +97,9 @@ def test_dataset_import_real_doi(runner, client, doi, prefix, sleep_after, load_
     assert dataset.date_created is None
     assert dataset.date_published is not None
 
+    result = runner.invoke(cli, ["graph", "export", "--format", "json-ld", "--strict"])
+    assert 0 == result.exit_code, format_result_exception(result)
+
 
 @pytest.mark.parametrize(
     "doi, input",
@@ -143,6 +146,9 @@ def test_dataset_import_real_param(doi, input, runner, project, sleep_after, cli
     result = runner.invoke(cli, ["dataset", "ls"])
     assert 0 == result.exit_code, format_result_exception(result) + str(result.stderr_bytes)
 
+    result = runner.invoke(cli, ["graph", "export", "--format", "json-ld", "--strict"])
+    assert 0 == result.exit_code, format_result_exception(result)
+
 
 @pytest.mark.parametrize(
     "doi", [("10.5281/zenodo.3239984", "n"), ("zenodo.org/record/3239986", "n"), ("10.5281/zenodo.3239982", "n")]
@@ -163,7 +169,7 @@ def test_dataset_import_uri_404(doi, runner, project, sleep_after):
 @retry_failed
 @pytest.mark.vcr
 def test_dataset_import_real_doi_warnings(runner, project, sleep_after):
-    """Test dataset import for existing DOI and dataset"""
+    """Test dataset import for existing DOI and dataset."""
     result = runner.invoke(cli, ["dataset", "import", "10.5281/zenodo.1438326"], input="y")
     assert 0 == result.exit_code, format_result_exception(result) + str(result.stderr_bytes)
     assert "Warning: Newer version found" in result.output
@@ -323,6 +329,9 @@ def test_dataset_import_renku_provider(runner, client, uri, load_dataset_with_in
     canonical_uri = "https://dev.renku.ch/datasets/860f6b5b46364c83b6a9b38ef198bcc0"
     assert dataset.same_as.url["@id"] == canonical_uri
 
+    result = runner.invoke(cli, ["graph", "export", "--format", "json-ld", "--strict"])
+    assert 0 == result.exit_code, format_result_exception(result)
+
 
 @pytest.mark.integration
 @retry_failed
@@ -482,7 +491,7 @@ def test_dataset_import_renku_provider_errors(runner, project, uri, message):
 @pytest.mark.vcr
 @pytest.mark.parametrize("url", ["https://dev.renku.ch/datasets/e3e1beba05594fdd8e4682963cec9fe2"])
 def test_dataset_reimport_renkulab_dataset(runner, project, url):
-    """Test dataset import for existing dataset"""
+    """Test dataset import for existing dataset."""
     assert 0 == runner.invoke(cli, ["dataset", "import", url], input="y").exit_code
 
     result = runner.invoke(cli, ["dataset", "import", url], input="y")
@@ -558,6 +567,9 @@ def test_dataset_export_upload_file(
     assert "Exported to:" in result.output
     assert output in result.output
 
+    result = runner.invoke(cli, ["graph", "export", "--format", "json-ld", "--strict"])
+    assert 0 == result.exit_code, format_result_exception(result)
+
 
 @pytest.mark.integration
 @retry_failed
3 changes: 3 additions & 0 deletions tests/cli/test_move.py
@@ -51,6 +51,9 @@ def test_move(runner, client):
     assert f"{src1} -> {dst1}" in result.output
     assert f"{src2} -> {dst2}" in result.output
 
+    result = runner.invoke(cli, ["graph", "export", "--format", "json-ld", "--strict"])
+    assert 0 == result.exit_code, format_result_exception(result)
+
 
 def test_move_outside_paths(runner, client, directory_tree):
     """Test move from/to outside paths is not possible."""
3 changes: 3 additions & 0 deletions tests/cli/test_project.py
@@ -98,6 +98,9 @@ def test_project_edit(runner, client, subdirectory, client_database_injection_ma
     assert "Renku Version:" in result.output
     assert "Keywords:" in result.output
 
+    result = runner.invoke(cli, ["graph", "export", "--format", "json-ld", "--strict"])
+    assert 0 == result.exit_code, format_result_exception(result)
+
 
 def test_project_edit_no_change(runner, client):
     """Check project metadata editing does not commit when there is no change."""
8 changes: 7 additions & 1 deletion tests/cli/test_rerun.py
@@ -66,7 +66,7 @@ def rerun():
         ("𒁃.c", "𒁏.txt"),
     ],
 )
-def test_rerun_with_special_paths(project, renku_cli, provider, source, output):
+def test_rerun_with_special_paths(project, renku_cli, runner, provider, source, output):
     """Test rerun with unicode/whitespace filenames."""
     cwd = Path(project)
     source = cwd / source
@@ -88,6 +88,9 @@ def rerun():
 
     assert content != output.read_text().strip(), "The output should have changed."
 
+    result = runner.invoke(cli, ["graph", "export", "--format", "json-ld", "--strict"])
+    assert 0 == result.exit_code, format_result_exception(result)
+
 
 @pytest.mark.parametrize("provider", available_workflow_providers())
 @pytest.mark.parametrize("source, content", [("input1", "input1 new-input2 old"), ("input2", "input1 old-input2 new")])
@@ -295,6 +298,9 @@ def test_rerun_overridden_outputs_partially(project, renku_cli, runner):
     assert "r2" in result.output
     assert "r3" not in result.output
 
+    result = runner.invoke(cli, ["graph", "export", "--format", "json-ld", "--strict"])
+    assert 0 == result.exit_code, format_result_exception(result)
+
 
 def test_rerun_multiple_paths_common_output(project, renku_cli, runner):
     """Test when multiple paths generate the same output only the most recent path will be rerun."""
3 changes: 3 additions & 0 deletions tests/cli/test_rollback.py
@@ -83,6 +83,9 @@ def test_rollback(client, runner, project):
     result = runner.invoke(cli, ["rollback"], input="7\ny")
     assert 0 == result.exit_code, format_result_exception(result)
 
+    result = runner.invoke(cli, ["graph", "export", "--format", "json-ld", "--strict"])
+    assert 0 == result.exit_code, format_result_exception(result)
+
     result = runner.invoke(cli, ["workflow", "ls"])
     assert 0 == result.exit_code, format_result_exception(result)
     assert 2 == len(result.output.splitlines())
5 changes: 4 additions & 1 deletion tests/cli/test_run.py
@@ -95,7 +95,7 @@ def test_run_external_command_file(runner, client, project, run_shell, client_da
     assert plan.command.endswith("/echo")
 
 
-def test_run_metadata(renku_cli, client, client_database_injection_manager):
+def test_run_metadata(renku_cli, runner, client, client_database_injection_manager):
     """Test run with workflow metadata."""
     exit_code, activity = renku_cli(
         "run", "--name", "run-1", "--description", "first run", "--keyword", "key1", "--keyword", "key2", "touch", "foo"
@@ -114,6 +114,9 @@ def test_run_metadata(renku_cli, client, client_database_injection_manager):
     assert "first run" == plan.description
     assert {"key1", "key2"} == set(plan.keywords)
 
+    result = runner.invoke(cli, ["graph", "export", "--format", "json-ld", "--strict"])
+    assert 0 == result.exit_code, format_result_exception(result)
+
 
 @pytest.mark.parametrize(
     "command, name",
6 changes: 6 additions & 0 deletions tests/cli/test_template.py
@@ -146,6 +146,9 @@ def test_template_set(runner, client, client_database_injection_manager):
     assert __template_version__ == client.project.template_version
     assert __template_version__ == client.project.template_ref
 
+    result = runner.invoke(cli, ["graph", "export", "--format", "json-ld", "--strict"])
+    assert 0 == result.exit_code, format_result_exception(result)
+
 
 def test_template_set_overwrites_modified(runner, client, client_database_injection_manager):
     """Test setting a new template in a project overwrite modified files."""
@@ -230,6 +233,9 @@ def test_template_update(runner, client, client_database_injection_manager):
     assert 0 == result.exit_code, format_result_exception(result)
     assert "Template is up-to-date" in result.output
 
+    result = runner.invoke(cli, ["graph", "export", "--format", "json-ld", "--strict"])
+    assert 0 == result.exit_code, format_result_exception(result)
+
 
 def test_template_update_latest_version(runner, client):
     """Test updating template that is the latest version."""
3 changes: 3 additions & 0 deletions tests/cli/test_update.py
@@ -64,6 +64,9 @@ def test_update(runner, client, renku_cli, client_database_injection_manager, pr
     # NOTE: No ActivityCollection is created if update include only one activity
     assert [] == activity_collections
 
+    result = runner.invoke(cli, ["graph", "export", "--format", "json-ld", "--strict"])
+    assert 0 == result.exit_code, format_result_exception(result)
+
 
 @pytest.mark.parametrize("provider", available_workflow_providers())
 def test_update_multiple_steps(runner, client, renku_cli, client_database_injection_manager, provider):