Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 2 additions & 2 deletions src/schema/provenance_schema.yaml
Original file line number Diff line number Diff line change
Expand Up @@ -197,6 +197,7 @@ ENTITIES:
transient: true
generated: true
description: "The datasets that are contained in the collection."
# A few time-consuming properties (with read triggers) of each dataset are excluded
on_read_trigger: get_collection_datasets

############################################# Dataset #############################################
Expand Down Expand Up @@ -766,11 +767,10 @@ ENTITIES:
description: 'List of datasets to remove from an Upload. Provide as a json array of the dataset uuids like: ["232934234234234234234270c0ea6c51d604a850558ef2247d0b4", "230948203482234234234a57bfe9c056d08a0f8e6cd612baa3bfa"]'
# Use after_update_trigger instead of before_update_trigger since we are not updating this property
after_update_trigger: unlink_datasets_from_upload
# Different from the handling of Collection (only returns dataset_uuids)
datasets:
type: list
generated: true # Disallow user input from request json when being created
transient: true
description: "The datasets that are contained in this Upload."
# A few time-consuming read triggers are excluded
# A few time-consuming properties (with read triggers) of each dataset are excluded
on_read_trigger: get_upload_datasets
8 changes: 4 additions & 4 deletions src/schema/schema_triggers.py
Original file line number Diff line number Diff line change
Expand Up @@ -702,9 +702,9 @@ def get_dataset_collections(property_key, normalized_type, user_token, existing_
# Get back the list of collection dicts
collections_list = schema_neo4j_queries.get_dataset_collections(schema_manager.get_neo4j_driver_instance(), existing_data_dict['uuid'])

# Exclude dataset_uuids from each resulting collection
# Exclude datasets from each resulting collection
# We don't want to show too much nested information
properties_to_skip = ['dataset_uuids']
properties_to_skip = ['datasets']
complete_entities_list = schema_manager.get_complete_entities_list(user_token, collections_list, properties_to_skip)

return property_key, schema_manager.normalize_entities_list_for_response(complete_entities_list)
Expand Down Expand Up @@ -740,9 +740,9 @@ def get_dataset_upload(property_key, normalized_type, user_token, existing_data_
upload_dict = schema_neo4j_queries.get_dataset_upload(schema_manager.get_neo4j_driver_instance(), existing_data_dict['uuid'])

if upload_dict:
# Exclude dataset_uuids from each resulting Upload
# Exclude datasets from each resulting Upload
# We don't want to show too much nested information
properties_to_skip = ['dataset_uuids']
properties_to_skip = ['datasets']
complete_upload_dict = schema_manager.get_complete_entity_result(user_token, upload_dict, properties_to_skip)
return_dict = schema_manager.normalize_entity_result_for_response(complete_upload_dict)

Expand Down