Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
35 changes: 1 addition & 34 deletions src/app.py
Original file line number Diff line number Diff line change
Expand Up @@ -769,42 +769,9 @@ def get_entity_provenance(id):
# Normalize the raw provenance nodes based on the yaml schema
normalized_provenance_dict = {
'relationships': raw_provenance_dict['relationships'],
'nodes': []
'nodes': raw_provenance_dict['nodes']
}

for node_dict in raw_provenance_dict['nodes']:
# The schema yaml doesn't handle Lab nodes, just leave it as is
if (node_dict['label'] == 'Entity') and (node_dict['entity_type'] != 'Lab'):
# Generate trigger data
# Skip some of the properties that are time-consuming to generate via triggers:
# direct_ancestor for Sample, and direct_ancestors for Dataset
# Also skip next_revision_uuid and previous_revision_uuid for Dataset to avoid additional
# checks when the target Dataset is public but the revisions are not public
properties_to_skip = [
'direct_ancestors',
'direct_ancestor',
'next_revision_uuid',
'previous_revision_uuid'
]

# We'll need to return all the properties (except the ones to skip from above list)
# including those generated by `on_read_trigger` to have a complete result
# The 'on_read_trigger' doesn't really need a token
complete_entity_dict = schema_manager.get_complete_entity_result(token, node_dict, properties_to_skip)

# Filter out properties not defined or not to be exposed in the schema yaml
normalized_entity_dict = schema_manager.normalize_entity_result_for_response(complete_entity_dict)

# Now the node to be used by provenance is all regulated by the schema
normalized_provenance_dict['nodes'].append(normalized_entity_dict)
elif node_dict['label'] == 'Activity':
# Normalize Activity nodes too
normalized_activity_dict = schema_manager.normalize_activity_result_for_response(node_dict)
normalized_provenance_dict['nodes'].append(normalized_activity_dict)
else:
# Skip Entity Lab nodes
normalized_provenance_dict['nodes'].append(node_dict)

provenance_json = provenance.get_provenance_history(uuid, normalized_provenance_dict, auth_helper_instance)

# Response with the provenance details
Expand Down
2 changes: 1 addition & 1 deletion src/app_neo4j_queries.py
Original file line number Diff line number Diff line change
Expand Up @@ -577,7 +577,7 @@ def get_provenance(neo4j_driver, uuid, depth):
# More info on apoc.path.subgraphAll() procedure: https://neo4j.com/labs/apoc/4.0/graph-querying/expand-subgraph/
query = (f"MATCH (n:Entity) "
f"WHERE n.uuid = '{uuid}' "
f"CALL apoc.path.subgraphAll(n, {{ {max_level_str} relationshipFilter:'<ACTIVITY_INPUT|<ACTIVITY_OUTPUT' }}) "
f"CALL apoc.path.subgraphAll(n, {{ {max_level_str} relationshipFilter:'<ACTIVITY_INPUT|<ACTIVITY_OUTPUT', labelFilter:'-Lab' }}) "
f"YIELD nodes, relationships "
f"WITH [node in nodes | node {{ .*, label:labels(node)[0] }} ] as nodes, "
f"[rel in relationships | rel {{ .*, fromNode: {{ label:labels(startNode(rel))[0], uuid:startNode(rel).uuid }}, toNode: {{ label:labels(endNode(rel))[0], uuid:endNode(rel).uuid }}, rel_data: {{ type: type(rel) }} }} ] as rels "
Expand Down