diff --git a/demo/imdb/imdb_queries.py b/demo/imdb/imdb_queries.py index 002f10050b..c9e7370a9f 100644 --- a/demo/imdb/imdb_queries.py +++ b/demo/imdb/imdb_queries.py @@ -367,6 +367,34 @@ def __init__(self, actors=None, movies=None): ['Tim Blake Nelson']] ) + ################################################################## + ### grand_budapest_hotel_cast_and_their_other_roles + ################################################################## + + self.grand_budapest_hotel_cast_and_their_other_roles = QueryInfo( + + query="""MATCH (a:actor)-[:act]->(h:movie {title: 'The Grand Budapest Hotel'}) + OPTIONAL MATCH (a)-[:act]->(m:movie) WHERE m <> h + RETURN a.name, m.title + ORDER BY a.name, m.title""", + + description='All actors in The Grand Budapest Hotel and their other movies', + reversible=False, + max_run_time_ms=4, + expected_result=[['Adrien Brody', None], + ['Bill Murray', 'The Jungle Book'], + ['F. Murray Abraham', None], + ['Harvey Keitel', 'The Ridiculous 6'], + ['Harvey Keitel', 'Youth'], + ['Jeff Goldblum', 'Independence Day: Resurgence'], + ['Jude Law', 'Spy'], + ['Mathieu Amalric', None], + ['Ralph Fiennes', 'A Bigger Splash'], + ['Ralph Fiennes', 'Spectre'], + ['Willem Dafoe', 'John Wick'], + ['Willem Dafoe', 'The Fault in Our Stars']] + ) + self.queries_info = [ self.number_of_actors_query, self.actors_played_with_nicolas_cage_query, @@ -384,7 +412,8 @@ def __init__(self, actors=None, movies=None): self.eighties_movies_index_scan, self.find_titles_starting_with_american_query, self.same_year_higher_rating_than_huntforthewilderpeople_query, - self.all_actors_named_tim + self.all_actors_named_tim, + self.grand_budapest_hotel_cast_and_their_other_roles ] def queries(self): diff --git a/docs/client_spec.md b/docs/client_spec.md index f8cc246278..6a9e275d9f 100644 --- a/docs/client_spec.md +++ b/docs/client_spec.md @@ -25,9 +25,9 @@ Instructions on how to efficiently convert these IDs in the [Procedure Calls](#p Additionally, two enums are exposed: 
-[ColumnType](https://github.com/RedisGraph/RedisGraph/blob/ff108d7e21061025166a35d29be1a1cb5bac6d55/src/resultset/formatters/resultset_formatter.h#L14-L19) indicates what type of value is held in each column (more formally, that offset into each row of the result set). Each entry in the header row will be a 2-array, with this enum in the first position and the column name string in the second. +[ColumnType](https://github.com/RedisGraph/RedisGraph/blob/ff108d7e21061025166a35d29be1a1cb5bac6d55/src/resultset/formatters/resultset_formatter.h#L14-L19), which as of RedisGraph v2.1.0 will always be `COLUMN_SCALAR`. This enum is retained for backwards compatibility, and may be ignored by the client unless RedisGraph versions older than v2.1.0 must be supported. -[PropertyType](https://github.com/RedisGraph/RedisGraph/blob/ff108d7e21061025166a35d29be1a1cb5bac6d55/src/resultset/formatters/resultset_formatter.h#L21-L28) indicates the data type (such as integer or string) of each returned scalar value. Each scalar values is emitted as a 2-array, with this enum in the first position and the actual value in the second. A column can consist exclusively of scalar values, such as both of the columns created by `RETURN a.value, 'this literal string'`. Each property on a graph entity also has a scalar as its value, so this construction is nested in each value of the properties array when a column contains a node or relationship. +[ValueType](https://github.com/RedisGraph/RedisGraph/blob/ff108d7e21061025166a35d29be1a1cb5bac6d55/src/resultset/formatters/resultset_formatter.h#L21-L28) indicates the data type (such as Node, integer, or string) of each returned value. Each value is emitted as a 2-array, with this enum in the first position and the actual value in the second. Each property on a graph entity also has a scalar as its value, so this construction is nested in each value of the properties array when a column contains a node or relationship. 
## Decoding the result set @@ -69,24 +69,26 @@ Verbose (default): Compact: ```sh 127.0.0.1:6379> GRAPH.QUERY demo "MATCH (a)-[e]->(b) RETURN a, e, b.name" --compact -1) 1) 1) (integer) 2 +1) 1) 1) (integer) 1 2) "a" - 2) 1) (integer) 3 + 2) 1) (integer) 1 2) "e" 3) 1) (integer) 1 2) "b.name" -2) 1) 1) 1) (integer) 0 +2) 1) 1) 1) (integer) 8 2) 1) (integer) 0 - 3) 1) 1) (integer) 0 - 2) (integer) 2 - 3) "Tree" - 2) 1) (integer) 0 - 2) (integer) 0 - 3) (integer) 0 - 4) (integer) 1 - 5) 1) 1) (integer) 1 - 2) (integer) 2 - 3) "Autumn" + 2) 1) (integer) 0 + 3) 1) 1) (integer) 0 + 2) (integer) 2 + 3) "Tree" + 2) 1) (integer) 7 + 2) 1) (integer) 0 + 2) (integer) 0 + 3) (integer) 0 + 4) (integer) 1 + 5) 1) 1) (integer) 1 + 2) (integer) 2 + 3) "Autumn" 3) 1) (integer) 2 2) "Apple" 3) 1) "Query internal execution time: 1.085412 milliseconds" @@ -119,15 +121,15 @@ Rather than introspecting on the query being emitted, the client implementation Our sample query `MATCH (a)-[e]->(b) RETURN a, e, b.name` generated the header: ```sh -1) 1) (integer) 2 +1) 1) (integer) 1 2) "a" -2) 1) (integer) 3 +2) 1) (integer) 1 2) "e" 3) 1) (integer) 1 2) "b.name" ``` The 3 array members correspond, in order, to the 3 entities described in the RETURN clause. Each is emitted as a 2-array: ```sh @@ -135,9 +137,7 @@ Each is emitted as a 2-array: 2) column name (string) ``` -It is the client's responsibility to store [ColumnType enum](https://github.com/RedisGraph/RedisGraph/blob/ff108d7e21061025166a35d29be1a1cb5bac6d55/src/resultset/formatters/resultset_formatter.h#L14-L19). RedisGraph guarantees that this enum may be extended in the future, but the existing values will not be altered. - -In this case, `a` corresponds to a Node column, `e` corresponds to a Relation column, and `b.name` corresponds to a Scalar column. No other column types are currently supported. 
+The first element is the [ColumnType enum](https://github.com/RedisGraph/RedisGraph/blob/master/src/resultset/formatters/resultset_formatter.h#L14-L19), which as of RedisGraph v2.1.0 will always be `COLUMN_SCALAR`. This element is retained for backwards compatibility, and may be ignored by the client unless RedisGraph versions older than v2.1.0 must be supported. ### Reading result rows @@ -145,37 +145,42 @@ The entity representations in this section will closely resemble those found in Our query produced one row of results with 3 columns (as described by the header): ```sh -1) 1) 1) (integer) 0 +1) 1) 1) (integer) 8 2) 1) (integer) 0 - 3) 1) 1) (integer) 0 - 2) (integer) 2 - 3) "Tree" - 2) 1) (integer) 0 - 2) (integer) 0 - 3) (integer) 0 - 4) (integer) 1 - 5) 1) 1) (integer) 1 - 2) (integer) 2 - 3) "Autumn" + 2) 1) (integer) 0 + 3) 1) 1) (integer) 0 + 2) (integer) 2 + 3) "Tree" + 2) 1) (integer) 7 + 2) 1) (integer) 0 + 2) (integer) 0 + 3) (integer) 0 + 4) (integer) 1 + 5) 1) 1) (integer) 1 + 2) (integer) 2 + 3) "Autumn" 3) 1) (integer) 2 2) "Apple" ``` +Each element is emitted as a 2-array - [`ValueType`, value]. + +It is the client's responsibility to store the [ValueType enum](https://github.com/RedisGraph/RedisGraph/blob/master/src/resultset/formatters/resultset_formatter.h#L21-L28). RedisGraph guarantees that this enum may be extended in the future, but the existing values will not be altered. -We know the first column to contain nodes. The node representation contains 3 top-level elements: +The `ValueType` for the first entry is `VALUE_NODE`. The node representation contains 3 top-level elements: 1. The node's internal ID. 2. An array of all label IDs associated with the node (currently, each node can have either 0 or 1 labels, though this restriction may be lifted in the future). -3. An array of all properties the node contains. Properties are represented as 3-arrays - [property key ID, `PropertyType` enum, value]. +3. 
An array of all properties the node contains. Properties are represented as 3-arrays - [property key ID, `ValueType`, value]. ```sh [ Node ID (integer), [label ID (integer) X label count] - [[property key ID (integer), PropertyType (enum), value (scalar)] X property count] + [[property key ID (integer), ValueType (enum), value (scalar)] X property count] ] ``` -The second column contains relations. The relation representation differs from the node representation in two respects: +The `ValueType` for the second entry is `VALUE_EDGE`. The edge representation differs from the node representation in two respects: - Each relation has exactly one type, rather than the 0+ labels a node may have. - A relation is emitted with the IDs of its source and destination nodes. @@ -194,13 +199,11 @@ As such, the complete representation is as follows: type ID (integer), source node ID (integer), destination node ID (integer), - [[property key ID (integer), PropertyType (enum), value (scalar)] X property count] + [[property key ID (integer), ValueType (enum), value (scalar)] X property count] ] ``` -The third column contains a scalar. Each scalar is emitted as a 2-array - [`PropertyType` enum, value]. - -As with ColumnType, it is the client's responsibility to store the [PropertyType enum](https://github.com/RedisGraph/RedisGraph/blob/ff108d7e21061025166a35d29be1a1cb5bac6d55/src/resultset/formatters/resultset_formatter.h#L21-L28). RedisGraph guarantees that this enum may be extended in the future, but the existing values will not be altered. +The `ValueType` for the third entry is `VALUE_STRING`, and the other element in the array is the actual value, "Apple". ### Reading statistics diff --git a/docs/commands.md b/docs/commands.md index 877e0a3e21..9eac3a7817 100644 --- a/docs/commands.md +++ b/docs/commands.md @@ -23,6 +23,7 @@ supported. 
### Query structure - MATCH +- OPTIONAL MATCH - WHERE - RETURN - ORDER BY @@ -128,6 +129,45 @@ The syntactic sugar `(person_a)<-[:KNOWS]->(person_b)` will return the same resu The bracketed edge description can be omitted if all relations should be considered: `(person_a)--(person_b)`. +#### OPTIONAL MATCH + +The OPTIONAL MATCH clause is a MATCH variant that produces null values for elements that do not match successfully, rather than the all-or-nothing logic for patterns in MATCH clauses. + +It can be considered to fill the same role as LEFT/RIGHT JOIN does in SQL, as MATCH entities must be resolved but nodes and edges introduced in OPTIONAL MATCH will be returned as nulls if they cannot be found. + +OPTIONAL MATCH clauses accept the same patterns as standard MATCH clauses, and may similarly be modified by WHERE clauses. + +Multiple MATCH and OPTIONAL MATCH clauses can be chained together, though a mandatory MATCH cannot follow an optional one. + +```sh +GRAPH.QUERY DEMO_GRAPH +"MATCH (p:Person) OPTIONAL MATCH (p)-[w:WORKS_AT]->(c:Company) +WHERE w.start_date > 2016 +RETURN p, w, c" +``` + +All `Person` nodes are returned, as well as any `WORKS_AT` relations and `Company` nodes that can be resolved and satisfy the `start_date` constraint. For each `Person` that does not resolve the optional pattern, the person will be returned as normal and the non-matching elements will be returned as null. + +Cypher is lenient in its handling of null values, so actions like property accesses and function calls on null values will return null values rather than emit errors. + +```sh +GRAPH.QUERY DEMO_GRAPH +"MATCH (p:Person) OPTIONAL MATCH (p)-[w:WORKS_AT]->(c:Company) +RETURN p, w.department, ID(c) as ID" +``` + +In this case, `w.department` and `ID` will be returned if the OPTIONAL MATCH was successful, and will be null otherwise. + +Clauses like SET, CREATE, MERGE, and DELETE will ignore null inputs and perform the expected updates on real inputs. 
One exception to this is that attempting to create a relation with a null endpoint will cause an error: + +```sh +GRAPH.QUERY DEMO_GRAPH +"MATCH (p:Person) OPTIONAL MATCH (p)-[w:WORKS_AT]->(c:Company) +CREATE (c)-[:NEW_RELATION]->(:NEW_NODE)" +``` + +If `c` is null for any record, this query will emit an error. In this case, no changes to the graph are committed, even if some values for `c` were resolved. + #### WHERE This clause is not mandatory, but if you want to filter results, you can specify your predicates here. diff --git a/docs/cypher_support.md b/docs/cypher_support.md index 4369cd48ad..86bd7da2c2 100644 --- a/docs/cypher_support.md +++ b/docs/cypher_support.md @@ -42,11 +42,7 @@ We do not support any of these properties at the type level, meaning nodes and r ## Clauses ### Reading Clauses + MATCH - - **Unsupported:** - -- OPTIONAL MATCH -- MANDATORY MATCH ++ OPTIONAL MATCH ### Projecting Clauses + RETURN diff --git a/src/arithmetic/arithmetic_expression.c b/src/arithmetic/arithmetic_expression.c index 8ac24a8961..5aa554c15e 100644 --- a/src/arithmetic/arithmetic_expression.c +++ b/src/arithmetic/arithmetic_expression.c @@ -344,12 +344,18 @@ static bool _AR_EXP_UpdateEntityIdx(AR_OperandNode *node, const Record r) { static AR_EXP_Result _AR_EXP_EvaluateProperty(AR_ExpNode *node, const Record r, SIValue *result) { RecordEntryType t = Record_GetType(r, node->operand.variadic.entity_alias_idx); - // Property requested on a scalar value. - if(!(t & (REC_TYPE_NODE | REC_TYPE_EDGE))) { + if(t != REC_TYPE_NODE && t != REC_TYPE_EDGE) { + if(t == REC_TYPE_UNKNOWN) { + /* If we attempt to access an unset Record entry as a graph entity + * (due to a scenario like a failed OPTIONAL MATCH), return a null value. */ + *result = SI_NullVal(); + return EVAL_OK; + } + /* Attempted to access a scalar value as a map. * Set an error and invoke the exception handler. 
*/ char *error; - SIValue v = Record_GetScalar(r, node->operand.variadic.entity_alias_idx); + SIValue v = Record_Get(r, node->operand.variadic.entity_alias_idx); asprintf(&error, "Type mismatch: expected a map but was %s", SIType_ToString(SI_TYPE(v))); QueryCtx_SetError(error); // Set the query-level error. return EVAL_ERR; diff --git a/src/arithmetic/entity_funcs/entity_funcs.c b/src/arithmetic/entity_funcs/entity_funcs.c index 0e807374af..788912b1e1 100644 --- a/src/arithmetic/entity_funcs/entity_funcs.c +++ b/src/arithmetic/entity_funcs/entity_funcs.c @@ -15,12 +15,14 @@ /* returns the id of a relationship or node. */ SIValue AR_ID(SIValue *argv, int argc) { + if(SI_TYPE(argv[0]) == T_NULL) return SI_NullVal(); GraphEntity *graph_entity = (GraphEntity *)argv[0].ptrval; return SI_LongVal(ENTITY_GET_ID(graph_entity)); } /* returns a string representations the label of a node. */ SIValue AR_LABELS(SIValue *argv, int argc) { + if(SI_TYPE(argv[0]) == T_NULL) return SI_NullVal(); char *label = ""; Node *node = argv[0].ptrval; GraphContext *gc = QueryCtx_GetGraphCtx(); @@ -32,6 +34,7 @@ SIValue AR_LABELS(SIValue *argv, int argc) { /* returns a string representation of the type of a relation. */ SIValue AR_TYPE(SIValue *argv, int argc) { + if(SI_TYPE(argv[0]) == T_NULL) return SI_NullVal(); char *type = ""; Edge *e = argv[0].ptrval; GraphContext *gc = QueryCtx_GetGraphCtx(); @@ -51,6 +54,7 @@ SIValue AR_EXISTS(SIValue *argv, int argc) { } SIValue _AR_NodeDegree(SIValue *argv, int argc, GRAPH_EDGE_DIR dir) { + if(SI_TYPE(argv[0]) == T_NULL) return SI_NullVal(); Node *n = (Node *)argv[0].ptrval; Edge *edges = array_new(Edge, 0); GraphContext *gc = QueryCtx_GetGraphCtx(); @@ -79,11 +83,13 @@ SIValue _AR_NodeDegree(SIValue *argv, int argc, GRAPH_EDGE_DIR dir) { /* Returns the number of incoming edges for given node. 
*/ SIValue AR_INCOMEDEGREE(SIValue *argv, int argc) { + if(SI_TYPE(argv[0]) == T_NULL) return SI_NullVal(); return _AR_NodeDegree(argv, argc, GRAPH_EDGE_DIR_INCOMING); } /* Returns the number of outgoing edges for given node. */ SIValue AR_OUTGOINGDEGREE(SIValue *argv, int argc) { + if(SI_TYPE(argv[0]) == T_NULL) return SI_NullVal(); return _AR_NodeDegree(argv, argc, GRAPH_EDGE_DIR_OUTGOING); } @@ -92,34 +98,35 @@ void Register_EntityFuncs() { AR_FuncDesc *func_desc; types = array_new(SIType, 1); - types = array_append(types, T_NODE | T_EDGE); + types = array_append(types, T_NULL | T_NODE | T_EDGE); func_desc = AR_FuncDescNew("id", AR_ID, 1, 1, types, false); AR_RegFunc(func_desc); types = array_new(SIType, 1); - types = array_append(types, T_NODE); + types = array_append(types, T_NULL | T_NODE); func_desc = AR_FuncDescNew("labels", AR_LABELS, 1, 1, types, false); AR_RegFunc(func_desc); types = array_new(SIType, 1); - types = array_append(types, T_EDGE); + types = array_append(types, T_NULL | T_EDGE); func_desc = AR_FuncDescNew("type", AR_TYPE, 1, 1, types, false); AR_RegFunc(func_desc); types = array_new(SIType, 1); - types = array_append(types, SI_ALL); + types = array_append(types, T_NULL | SI_ALL); func_desc = AR_FuncDescNew("exists", AR_EXISTS, 1, 1, types, false); AR_RegFunc(func_desc); types = array_new(SIType, 2); - types = array_append(types, T_NODE); + types = array_append(types, T_NULL | T_NODE); types = array_append(types, T_STRING); func_desc = AR_FuncDescNew("indegree", AR_INCOMEDEGREE, 1, VAR_ARG_LEN, types, false); AR_RegFunc(func_desc); - types = array_new(SIType, 1); - types = array_append(types, T_NODE); + types = array_new(SIType, 2); + types = array_append(types, T_NULL | T_NODE); types = array_append(types, T_STRING); func_desc = AR_FuncDescNew("outdegree", AR_OUTGOINGDEGREE, 1, VAR_ARG_LEN, types, false); AR_RegFunc(func_desc); } + diff --git a/src/arithmetic/list_funcs/list_funcs.c b/src/arithmetic/list_funcs/list_funcs.c index 
0073e08330..5eae3abbcf 100644 --- a/src/arithmetic/list_funcs/list_funcs.c +++ b/src/arithmetic/list_funcs/list_funcs.c @@ -25,7 +25,9 @@ SIValue AR_TOLIST(SIValue *argv, int argc) { Invalid index will return null. "RETURN [1, 2, 3][0]" will yield 1. */ SIValue AR_SUBSCRIPT(SIValue *argv, int argc) { - assert(argc == 2 && argv[0].type == T_ARRAY && argv[1].type == T_INT64); + assert(argc == 2); + if(SI_TYPE(argv[0]) == T_NULL || SI_TYPE(argv[1]) == T_NULL) return SI_NullVal(); + assert(SI_TYPE(argv[0]) == T_ARRAY && SI_TYPE(argv[1]) == T_INT64); SIValue list = argv[0]; int32_t index = (int32_t)argv[1].longval; uint32_t arrayLen = SIArray_Length(list); @@ -48,9 +50,13 @@ SIValue AR_SUBSCRIPT(SIValue *argv, int argc) { If one of the indices is null, null will be returnd. "RETURN [1, 2, 3][0..1]" will yield [1, 2] */ SIValue AR_SLICE(SIValue *argv, int argc) { - assert(argc == 3 && argv[0].type == T_ARRAY); - if(argv[0].type == T_NULL || argv[1].type == T_NULL || argv[2].type == T_NULL) return SI_NullVal(); - assert(argv[1].type == T_INT64 && argv[2].type == T_INT64); + assert(argc == 3); + if(SI_TYPE(argv[0]) == T_NULL || + SI_TYPE(argv[1]) == T_NULL || + SI_TYPE(argv[2]) == T_NULL) { + return SI_NullVal(); + } + assert(SI_TYPE(argv[0]) == T_ARRAY && SI_TYPE(argv[1]) == T_INT64 && SI_TYPE(argv[2]) == T_INT64); SIValue array = argv[0]; // get array length @@ -92,7 +98,7 @@ SIValue AR_RANGE(SIValue *argv, int argc) { int64_t end = argv[1].longval; int64_t interval = 1; if(argc == 3) { - assert(argv[2].type == T_INT64); + assert(SI_TYPE(argv[2]) == T_INT64); interval = argv[2].longval; if(interval < 1) { char *error; @@ -114,7 +120,9 @@ SIValue AR_RANGE(SIValue *argv, int argc) { /* Checks if a value is in a given list. 
"RETURN 3 IN [1, 2, 3]" will return true */ SIValue AR_IN(SIValue *argv, int argc) { - assert(argc == 2 && argv[1].type == T_ARRAY); + assert(argc == 2); + if(SI_TYPE(argv[1]) == T_NULL) return SI_NullVal(); + assert(SI_TYPE(argv[1]) == T_ARRAY); SIValue lookupValue = argv[0]; SIValue lookupList = argv[1]; // indicate if there was a null comparison during the array scan @@ -139,11 +147,13 @@ SIValue AR_IN(SIValue *argv, int argc) { SIValue AR_SIZE(SIValue *argv, int argc) { assert(argc == 1); SIValue value = argv[0]; - switch(value.type) { + switch(SI_TYPE(value)) { case T_ARRAY: return SI_LongVal(SIArray_Length(value)); case T_STRING: return SI_LongVal(strlen(value.stringval)); + case T_NULL: + return SI_NullVal(); default: assert(false); } @@ -154,8 +164,8 @@ SIValue AR_SIZE(SIValue *argv, int argc) { SIValue AR_HEAD(SIValue *argv, int argc) { assert(argc == 1); SIValue value = argv[0]; - if(value.type == T_NULL) return SI_NullVal(); - assert(value.type == T_ARRAY); + if(SI_TYPE(value) == T_NULL) return SI_NullVal(); + assert(SI_TYPE(value) == T_ARRAY); uint arrayLen = SIArray_Length(value); if(arrayLen == 0) return SI_NullVal(); return SIArray_Get(value, 0); @@ -166,8 +176,8 @@ SIValue AR_HEAD(SIValue *argv, int argc) { SIValue AR_TAIL(SIValue *argv, int argc) { assert(argc == 1); SIValue value = argv[0]; - if(value.type == T_NULL) return SI_NullVal(); - assert(value.type == T_ARRAY); + if(SI_TYPE(value) == T_NULL) return SI_NullVal(); + assert(SI_TYPE(value) == T_ARRAY); uint arrayLen = SIArray_Length(value); SIValue array = SI_Array(arrayLen); if(arrayLen < 2) return array; @@ -187,13 +197,13 @@ void Register_ListFuncs() { AR_RegFunc(func_desc); types = array_new(SIType, 2); - types = array_append(types, T_ARRAY); + types = array_append(types, T_ARRAY | T_NULL); types = array_append(types, T_INT64 | T_NULL); func_desc = AR_FuncDescNew("subscript", AR_SUBSCRIPT, 2, 2, types, true); AR_RegFunc(func_desc); types = array_new(SIType, 3); - types = 
array_append(types, T_ARRAY); + types = array_append(types, T_ARRAY | T_NULL); types = array_append(types, T_INT64 | T_NULL); types = array_append(types, T_INT64 | T_NULL); func_desc = AR_FuncDescNew("slice", AR_SLICE, 3, 3, types, true); @@ -202,28 +212,29 @@ void Register_ListFuncs() { types = array_new(SIType, 3); types = array_append(types, T_INT64); types = array_append(types, T_INT64); - types = array_append(types, T_INT64 | T_NULL); + types = array_append(types, T_INT64); func_desc = AR_FuncDescNew("range", AR_RANGE, 2, 3, types, true); AR_RegFunc(func_desc); types = array_new(SIType, 2); types = array_append(types, SI_ALL); - types = array_append(types, T_ARRAY); + types = array_append(types, T_ARRAY | T_NULL); func_desc = AR_FuncDescNew("in", AR_IN, 2, 2, types, true); AR_RegFunc(func_desc); types = array_new(SIType, 1); - types = array_append(types, T_ARRAY); + types = array_append(types, T_ARRAY | T_NULL); func_desc = AR_FuncDescNew("size", AR_SIZE, 1, 1, types, true); AR_RegFunc(func_desc); types = array_new(SIType, 1); - types = array_append(types, T_ARRAY); + types = array_append(types, T_ARRAY | T_NULL); func_desc = AR_FuncDescNew("head", AR_HEAD, 1, 1, types, true); AR_RegFunc(func_desc); types = array_new(SIType, 1); - types = array_append(types, T_ARRAY); + types = array_append(types, T_ARRAY | T_NULL); func_desc = AR_FuncDescNew("tail", AR_TAIL, 1, 1, types, true); AR_RegFunc(func_desc); } + diff --git a/src/arithmetic/path_funcs/path_funcs.c b/src/arithmetic/path_funcs/path_funcs.c index 05475fbb0e..8d5a54de11 100644 --- a/src/arithmetic/path_funcs/path_funcs.c +++ b/src/arithmetic/path_funcs/path_funcs.c @@ -23,9 +23,17 @@ SIValue AR_TOPATH(SIValue *argv, int argc) { const cypher_astnode_t *ast_path = argv[0].ptrval; uint nelements = cypher_ast_pattern_path_nelements(ast_path); assert(argc == (nelements + 1)); + SIValue path = SIPathBuilder_New(nelements); for(uint i = 0; i < nelements; i++) { SIValue element = argv[i + 1]; + 
if(SI_TYPE(element) == T_NULL) { + /* If any element of the path does not exist, the entire path is invalid. + * Free it and return a null value. */ + SIValue_Free(path); + return SI_NullVal(); + } + if(i % 2 == 0) { // Nodes are in even position. SIPathBuilder_AppendNode(path, element); @@ -34,7 +42,7 @@ SIValue AR_TOPATH(SIValue *argv, int argc) { const cypher_astnode_t *ast_rel_pattern = cypher_ast_pattern_path_get_element(ast_path, i); bool RTL_pattern = cypher_ast_rel_pattern_get_direction(ast_rel_pattern) == CYPHER_REL_INBOUND; // Element type can be either edge, or path. - if(element.type == T_EDGE) SIPathBuilder_AppendEdge(path, element, RTL_pattern); + if(SI_TYPE(element) == T_EDGE) SIPathBuilder_AppendEdge(path, element, RTL_pattern); // If element is not an edge, it is a path. else { /* Path with 0 edges should not be appended. Their source and destination nodes are the same, @@ -55,14 +63,17 @@ SIValue AR_TOPATH(SIValue *argv, int argc) { } SIValue AR_PATH_NODES(SIValue *argv, int argc) { + if(SI_TYPE(argv[0]) == T_NULL) return SI_NullVal(); return SIPath_Nodes(argv[0]); } SIValue AR_PATH_RELATIONSHIPS(SIValue *argv, int argc) { + if(SI_TYPE(argv[0]) == T_NULL) return SI_NullVal(); return SIPath_Relationships(argv[0]); } SIValue AR_PATH_LENGTH(SIValue *argv, int argc) { + if(SI_TYPE(argv[0]) == T_NULL) return SI_NullVal(); return SI_LongVal(SIPath_Length(argv[0])); } @@ -72,22 +83,23 @@ void Register_PathFuncs() { types = array_new(SIType, 2); types = array_append(types, T_PTR); - types = array_append(types, T_NODE | T_EDGE | T_PATH); + types = array_append(types, T_NULL | T_NODE | T_EDGE | T_PATH); func_desc = AR_FuncDescNew("topath", AR_TOPATH, 1, VAR_ARG_LEN, types, false); AR_RegFunc(func_desc); types = array_new(SIType, 1); - types = array_append(types, T_PATH); + types = array_append(types, T_NULL | T_PATH); func_desc = AR_FuncDescNew("nodes", AR_PATH_NODES, 1, 1, types, false); AR_RegFunc(func_desc); types = array_new(SIType, 1); - types = 
array_append(types, T_PATH); + types = array_append(types, T_NULL | T_PATH); func_desc = AR_FuncDescNew("relationships", AR_PATH_RELATIONSHIPS, 1, 1, types, false); AR_RegFunc(func_desc); types = array_new(SIType, 1); - types = array_append(types, T_PATH); + types = array_append(types, T_NULL | T_PATH); func_desc = AR_FuncDescNew("length", AR_PATH_LENGTH, 1, 1, types, false); AR_RegFunc(func_desc); } + diff --git a/src/ast/ast_build_filter_tree.c b/src/ast/ast_build_filter_tree.c index debf0f1a53..057c649f37 100644 --- a/src/ast/ast_build_filter_tree.c +++ b/src/ast/ast_build_filter_tree.c @@ -319,6 +319,8 @@ FT_FilterNode *AST_BuildFilterTree(AST *ast) { if(match_clauses) { uint match_count = array_len(match_clauses); for(uint i = 0; i < match_count; i ++) { + // Optional match clauses are handled separately. + if(cypher_ast_match_is_optional(match_clauses[i])) continue; const cypher_astnode_t *pattern = cypher_ast_match_get_pattern(match_clauses[i]); _AST_ConvertGraphPatternToFilter(ast, &filter_tree, pattern); const cypher_astnode_t *predicate = cypher_ast_match_get_predicate(match_clauses[i]); @@ -362,3 +364,4 @@ FT_FilterNode *AST_BuildFilterTree(AST *ast) { return filter_tree; } + diff --git a/src/ast/ast_mock.c b/src/ast/ast_mock.c index 2401cc3f76..b0ca80ea65 100644 --- a/src/ast/ast_mock.c +++ b/src/ast/ast_mock.c @@ -8,24 +8,43 @@ #include "../query_ctx.h" #include "../util/rmalloc.h" -AST *AST_MockMatchPattern(AST *master_ast, const cypher_astnode_t *original_path) { +AST *AST_MockMatchClause(AST *master_ast, cypher_astnode_t *node, bool node_is_path) { + /* The AST node and its children must be reused directly, + * as cloning causes annotations (entity names) to be lost. + * TODO consider updating parser to improve this. 
*/ AST *ast = rm_malloc(sizeof(AST)); ast->referenced_entities = master_ast->referenced_entities; ast->anot_ctx_collection = master_ast->anot_ctx_collection; ast->free_root = true; + ast->skip = NULL; ast->limit = NULL; - struct cypher_input_range range = {}; + cypher_astnode_t *pattern; + struct cypher_input_range range = {0}; + const cypher_astnode_t *predicate = NULL; + uint child_count = (node_is_path) ? 1 : cypher_astnode_nchildren(node); + cypher_astnode_t *children[child_count]; - // Reuse the input path directly. We cannot clone, as this causes annotations (entity names) to be lost. - // TODO consider updating parser to improve this. - cypher_astnode_t *path = (cypher_astnode_t *)original_path; + if(node_is_path) { + /* MERGE clauses and path filters are comprised of a single path. + * Construct a pattern node containing just this path. */ + pattern = cypher_ast_pattern(&node, 1, &node, 1, range); + children[0] = pattern; + } else { + /* OPTIONAL MATCH clauses contain a pattern node + * and possibly a predicate node (containing WHERE conditions). */ + assert(cypher_astnode_type(node) == CYPHER_AST_MATCH); + pattern = (cypher_astnode_t *)cypher_ast_match_get_pattern(node); + predicate = cypher_ast_match_get_predicate(node); - // Build a pattern comprised of the input path. - cypher_astnode_t *pattern = cypher_ast_pattern(&path, 1, &path, 1, range); + // Explicitly collect all child nodes from the clause. + for(uint i = 0; i < child_count; i ++) { + children[i] = (cypher_astnode_t *)cypher_astnode_get_child(node, i); + } + } // Build a new match clause that holds this pattern. - cypher_astnode_t *match_clause = cypher_ast_match(false, pattern, NULL, 0, NULL, &pattern, 1, - range); + cypher_astnode_t *match_clause = cypher_ast_match(false, pattern, NULL, 0, predicate, + children, child_count, range); // Build a query node holding this clause. 
ast->root = cypher_ast_query(NULL, 0, &match_clause, 1, &match_clause, 1, range); @@ -35,14 +54,16 @@ AST *AST_MockMatchPattern(AST *master_ast, const cypher_astnode_t *original_path return ast; } -void AST_MockFree(AST *ast) { - /* When freeing the mock AST, we have to be careful to not free the shared path +void AST_MockFree(AST *ast, bool free_pattern) { + /* When freeing the mock AST, we have to be careful to not free any shared node * or its annotations. We'll free every surrounding layer explicitly - the MATCH - * pattern, the MATCH clause, and finally the AST root. */ + * pattern (if we constructed it), the MATCH clause, and finally the AST root. */ const cypher_astnode_t *clause = cypher_ast_query_get_clause(ast->root, 0); assert(cypher_astnode_type(clause) == CYPHER_AST_MATCH); - const cypher_astnode_t *pattern = cypher_ast_match_get_pattern(clause); - cypher_astnode_free((cypher_astnode_t *)pattern); + if(free_pattern) { + const cypher_astnode_t *pattern = cypher_ast_match_get_pattern(clause); + cypher_astnode_free((cypher_astnode_t *)pattern); + } cypher_astnode_free((cypher_astnode_t *)clause); cypher_astnode_free((cypher_astnode_t *)ast->root); rm_free(ast); diff --git a/src/ast/ast_mock.h b/src/ast/ast_mock.h index ae9a325af8..6de3ea065c 100644 --- a/src/ast/ast_mock.h +++ b/src/ast/ast_mock.h @@ -8,9 +8,9 @@ #include "ast.h" -// Build a temporary AST with one MATCH clause that holds the given path. -AST *AST_MockMatchPattern(AST *master_ast, const cypher_astnode_t *original_path); +// Build a temporary AST with one MATCH clause that holds the given path or pattern. +AST *AST_MockMatchClause(AST *master_ast, cypher_astnode_t *node, bool node_is_path); // Free a temporary AST. 
-void AST_MockFree(AST *ast); +void AST_MockFree(AST *ast, bool free_pattern); diff --git a/src/ast/ast_validations.c b/src/ast/ast_validations.c index 598a54ccc7..473e958d0a 100644 --- a/src/ast/ast_validations.c +++ b/src/ast/ast_validations.c @@ -617,13 +617,6 @@ static AST_Validation _Validate_MATCH_Clauses(const AST *ast, char **reason) { uint match_count = array_len(match_clauses); for(uint i = 0; i < match_count; i ++) { const cypher_astnode_t *match_clause = match_clauses[i]; - // We currently do not support optional MATCH. - if(cypher_ast_match_is_optional(match_clause)) { - asprintf(reason, "RedisGraph does not support OPTIONAL MATCH."); - res = AST_INVALID; - goto cleanup; - } - // Validate the pattern described by the MATCH clause res = _ValidatePattern(projections, cypher_ast_match_get_pattern(match_clause), edge_aliases, reason); @@ -1022,12 +1015,14 @@ static AST_Validation _ValidateQuerySequence(const AST *ast, char **reason) { return AST_VALID; } -// In any given query scope, reading clauses (MATCH, UNWIND, and InQueryCall) -// cannot follow updating clauses (CREATE, MERGE, DELETE, SET, REMOVE). -// https://s3.amazonaws.com/artifacts.opencypher.org/railroad/SinglePartQuery.html +/* In any given query scope, reading clauses (MATCH, UNWIND, and InQueryCall) + * cannot follow updating clauses (CREATE, MERGE, DELETE, SET, REMOVE). + * https://s3.amazonaws.com/artifacts.opencypher.org/railroad/SinglePartQuery.html + * Additionally, a MATCH clause cannot follow an OPTIONAL MATCH clause. 
*/ static AST_Validation _ValidateClauseOrder(const AST *ast, char **reason) { uint clause_count = cypher_ast_query_nclauses(ast->root); + bool encountered_optional_match = false; bool encountered_updating_clause = false; for(uint i = 0; i < clause_count; i ++) { const cypher_astnode_t *clause = cypher_ast_query_get_clause(ast->root, i); @@ -1043,6 +1038,17 @@ static AST_Validation _ValidateClauseOrder(const AST *ast, char **reason) { cypher_astnode_typestr(type)); return AST_INVALID; } + + if(type == CYPHER_AST_MATCH) { + // Check whether this match is optional. + bool current_clause_is_optional = cypher_ast_match_is_optional(clause); + // If the current clause is non-optional but we have already encountered an optional match, emit an error. + if(!current_clause_is_optional && encountered_optional_match) { + asprintf(reason, "A WITH clause is required to introduce a MATCH clause after an OPTIONAL MATCH."); + return AST_INVALID; + } + encountered_optional_match |= current_clause_is_optional; + } } return AST_VALID; @@ -1679,3 +1685,4 @@ AST_Validation AST_Validate_QueryParams(RedisModuleCtx *ctx, const cypher_parse_ return AST_VALID; } + diff --git a/src/execution_plan/execution_plan.c b/src/execution_plan/execution_plan.c index 87e21d5908..846f374ba6 100644 --- a/src/execution_plan/execution_plan.c +++ b/src/execution_plan/execution_plan.c @@ -591,6 +591,38 @@ static void _buildMergeOp(GraphContext *gc, AST *ast, ExecutionPlan *plan, array_free(arguments); } +static void _buildOptionalMatchOps(ExecutionPlan *plan, const cypher_astnode_t *clause) { + OpBase *optional = NewOptionalOp(plan); + const char **arguments = NULL; + // The root will be non-null unless the first clause is an OPTIONAL MATCH. + if(plan->root) { + // Collect the variables that are bound at this point. + rax *bound_vars = raxNew(); + // Rather than cloning the record map, collect the bound variables along with their + // parser-generated constant strings. 
+ ExecutionPlan_BoundVariables(plan->root, bound_vars); + // Collect the variable names from bound_vars to populate the Argument op we will build. + arguments = (const char **)raxValues(bound_vars); + raxFree(bound_vars); + + // Create an Apply operator and make it the new root. + OpBase *apply_op = NewApplyOp(plan); + _ExecutionPlan_UpdateRoot(plan, apply_op); + + // Create an Optional op and add it as an Apply child as a right-hand stream. + ExecutionPlan_AddOp(apply_op, optional); + } + + // Build the new Match stream and add it to the Optional stream. + OpBase *match_stream = ExecutionPlan_BuildOpsFromPath(plan, arguments, clause); + ExecutionPlan_AddOp(optional, match_stream); + + // If no root has been set (OPTIONAL was the first clause), set it to the Optional op. + if(!plan->root) _ExecutionPlan_UpdateRoot(plan, optional); + + array_free(arguments); +} + static inline void _buildUpdateOp(ExecutionPlan *plan, const cypher_astnode_t *clause) { EntityUpdateEvalCtx *update_exps = AST_PrepareUpdateOp(clause); OpBase *op = NewUpdateOp(plan, update_exps); @@ -608,6 +640,10 @@ static void _ExecutionPlanSegment_ConvertClause(GraphContext *gc, AST *ast, Exec cypher_astnode_type_t t = cypher_astnode_type(clause); // Because 't' is set using the offsetof() call, it cannot be used in switch statements. if(t == CYPHER_AST_MATCH) { + if(cypher_ast_match_is_optional(clause)) { + _buildOptionalMatchOps(plan, clause); + return; + } // Only add at most one set of traversals per plan. TODO Revisit and improve this logic. 
if(plan->root && ExecutionPlan_LocateOpMatchingType(plan->root, SCAN_OPS, SCAN_OP_COUNT)) { return; diff --git a/src/execution_plan/execution_plan_modify.c b/src/execution_plan/execution_plan_modify.c index 2b7834bd81..7e9479670d 100644 --- a/src/execution_plan/execution_plan_modify.c +++ b/src/execution_plan/execution_plan_modify.c @@ -306,7 +306,7 @@ void ExecutionPlan_BindPlanToOps(ExecutionPlan *plan, OpBase *root) { } OpBase *ExecutionPlan_BuildOpsFromPath(ExecutionPlan *plan, const char **bound_vars, - const cypher_astnode_t *path) { + const cypher_astnode_t *node) { // Initialize an ExecutionPlan that shares this plan's Record mapping. ExecutionPlan *match_stream_plan = ExecutionPlan_NewEmptyExecutionPlan(); match_stream_plan->record_map = plan->record_map; @@ -316,11 +316,19 @@ OpBase *ExecutionPlan_BuildOpsFromPath(ExecutionPlan *plan, const char **bound_v AST *ast = QueryCtx_GetAST(); // Build a temporary AST holding a MATCH clause. - AST *match_stream_ast = AST_MockMatchPattern(ast, path); + cypher_astnode_type_t type = cypher_astnode_type(node); + + /* The AST node we're building a mock MATCH clause for will be a path + * if we're converting a MERGE clause or WHERE filter, and we must build + * and later free a CYPHER_AST_PATTERN node to contain it. + * If instead we're converting an OPTIONAL MATCH, the node is itself a MATCH clause, + * and we will reuse its CYPHER_AST_PATTERN node rather than building a new one. */ + bool node_is_path = (type == CYPHER_AST_PATTERN_PATH || type == CYPHER_AST_NAMED_PATH); + AST *match_stream_ast = AST_MockMatchClause(ast, (cypher_astnode_t *)node, node_is_path); ExecutionPlan_PopulateExecutionPlan(match_stream_plan); - AST_MockFree(match_stream_ast); + AST_MockFree(match_stream_ast, node_is_path); QueryCtx_SetAST(ast); // Reset the AST. // Add filter ops to sub-ExecutionPlan. 
if(match_stream_plan->filter_tree) ExecutionPlan_PlaceFilterOps(match_stream_plan, NULL); diff --git a/src/execution_plan/ops/op.h b/src/execution_plan/ops/op.h index 4d06651fa6..df6d8741aa 100644 --- a/src/execution_plan/ops/op.h +++ b/src/execution_plan/ops/op.h @@ -50,6 +50,7 @@ typedef enum { OpType_ANTI_SEMI_APPLY, OPType_OR_APPLY_MULTIPLEXER, OPType_AND_APPLY_MULTIPLEXER, + OPType_OPTIONAL, } OPType; typedef enum { diff --git a/src/execution_plan/ops/op_all_node_scan.c b/src/execution_plan/ops/op_all_node_scan.c index 1ee1589e93..db883ab72f 100644 --- a/src/execution_plan/ops/op_all_node_scan.c +++ b/src/execution_plan/ops/op_all_node_scan.c @@ -70,8 +70,8 @@ static Record AllNodeScanConsumeFromChild(OpBase *opBase) { Record r = OpBase_CloneRecord(op->child_record); // Populate the Record with the graph entity data. - Node *n = Record_GetNode(r, op->nodeRecIdx); - n->entity = en; + Node n = { .entity = en }; + Record_AddNode(r, op->nodeRecIdx, n); return r; } @@ -83,8 +83,8 @@ static Record AllNodeScanConsume(OpBase *opBase) { if(en == NULL) return NULL; Record r = OpBase_CreateRecord((OpBase *)op); - Node *n = Record_GetNode(r, op->nodeRecIdx); - n->entity = en; + Node n = { .entity = en }; + Record_AddNode(r, op->nodeRecIdx, n); return r; } @@ -113,3 +113,4 @@ static void AllNodeScanFree(OpBase *ctx) { op->child_record = NULL; } } + diff --git a/src/execution_plan/ops/op_apply.c b/src/execution_plan/ops/op_apply.c index 27482e75e9..17c581ab62 100644 --- a/src/execution_plan/ops/op_apply.c +++ b/src/execution_plan/ops/op_apply.c @@ -7,97 +7,91 @@ #include "op_apply.h" /* Forward declarations. 
*/ +static OpResult ApplyInit(OpBase *opBase); static Record ApplyConsume(OpBase *opBase); static OpResult ApplyReset(OpBase *opBase); static void ApplyFree(OpBase *opBase); OpBase *NewApplyOp(const ExecutionPlan *plan) { Apply *op = rm_malloc(sizeof(Apply)); - op->init = true; - op->rhs_idx = 0; - op->lhs_record = NULL; - op->rhs_records = array_new(Record, 32); + op->r = NULL; + op->op_arg = NULL; + op->bound_branch = NULL; + op->rhs_branch = NULL; // Set our Op operations - OpBase_Init((OpBase *)op, OPType_APPLY, "Apply", NULL, ApplyConsume, ApplyReset, NULL, NULL, + OpBase_Init((OpBase *)op, OPType_APPLY, "Apply", ApplyInit, ApplyConsume, ApplyReset, NULL, NULL, ApplyFree, false, plan); return (OpBase *)op; } -static Record ApplyConsume(OpBase *opBase) { - Apply *op = (Apply *)opBase; - - assert(op->op.childCount == 2); +static OpResult ApplyInit(OpBase *opBase) { + assert(opBase->childCount == 2); - Record rhs_record; - - if(op->init) { - // On the first call to ApplyConsume, we'll read the entirety of the - // right-hand stream and buffer its outputs. - op->init = false; + Apply *op = (Apply *)opBase; + /* The op's bound branch and optional match branch have already been built as + * the Apply op's first and second child ops, respectively. */ + op->bound_branch = opBase->children[0]; + op->rhs_branch = opBase->children[1]; - OpBase *rhs = op->op.children[1]; - while((rhs_record = rhs->consume(rhs))) { - op->rhs_records = array_append(op->rhs_records, rhs_record); - } - } + // Locate branch's Argument op tap. 
+ op->op_arg = (Argument *)ExecutionPlan_LocateOp(op->rhs_branch, OPType_ARGUMENT); + assert(op->op_arg); - uint rhs_count = array_len(op->rhs_records); + return OP_OK; +} - OpBase *lhs = op->op.children[0]; - if(op->lhs_record == NULL) { - // No pending data from left-hand stream - op->rhs_idx = 0; - op->lhs_record = lhs->consume(lhs); - } +static Record ApplyConsume(OpBase *opBase) { + Apply *op = (Apply *)opBase; - if(op->lhs_record == NULL) { - // Left-hand stream has been fully depleted - return NULL; - } + while(true) { + if(op->r == NULL) { + // Retrieve a Record from the bound branch if we're not currently holding one. + op->r = OpBase_Consume(op->bound_branch); + if(!op->r) return NULL; // Bound branch and this op are depleted. - // Clone the left-hand record - Record r = OpBase_CloneRecord(op->lhs_record); + // Successfully pulled a new Record, propagate to the top of the RHS branch. + Argument_AddRecord(op->op_arg, OpBase_CloneRecord(op->r)); + } - // No records were produced by the right-hand stream - if(rhs_count == 0) return r; + // Pull a Record from the RHS branch. + Record rhs_record = OpBase_Consume(op->rhs_branch); + + if(rhs_record == NULL) { + /* RHS branch depleted for the current bound Record; + * free it and loop back to retrieve a new one. */ + OpBase_DeleteRecord(op->r); + op->r = NULL; + // Reset the RHS branch. + OpBase_PropagateReset(op->rhs_branch); + continue; + } - rhs_record = op->rhs_records[op->rhs_idx++]; + // Clone the bound Record and merge the RHS Record into it. + Record r = OpBase_CloneRecord(op->r); + Record_Merge(&r, rhs_record); - if(op->rhs_idx == rhs_count) { - // We've joined all data from the right-hand stream with the current - // retrieval from the left-hand stream. - // The next call to ApplyConsume will attempt to pull new data. 
- OpBase_DeleteRecord(op->lhs_record); - op->lhs_record = NULL; - op->rhs_idx = 0; + return r; } - Record_Merge(&r, rhs_record); - - return r; + return NULL; } static OpResult ApplyReset(OpBase *opBase) { Apply *op = (Apply *)opBase; - op->init = true; + if(op->r) { + OpBase_DeleteRecord(op->r); + op->r = NULL; + } return OP_OK; } static void ApplyFree(OpBase *opBase) { Apply *op = (Apply *)opBase; - if(op->lhs_record) { - OpBase_DeleteRecord(op->lhs_record); - op->lhs_record = NULL; - } - if(op->rhs_records) { - uint len = array_len(op->rhs_records); - for(uint i = 0; i < len; i ++) { - OpBase_DeleteRecord(op->rhs_records[i]); - } - array_free(op->rhs_records); - op->rhs_records = NULL; + if(op->r) { + OpBase_DeleteRecord(op->r); + op->r = NULL; } } diff --git a/src/execution_plan/ops/op_apply.h b/src/execution_plan/ops/op_apply.h index e2cf5c8384..afb2347e08 100644 --- a/src/execution_plan/ops/op_apply.h +++ b/src/execution_plan/ops/op_apply.h @@ -7,15 +7,17 @@ #pragma once #include "op.h" +#include "op_argument.h" #include "../execution_plan.h" typedef struct { OpBase op; - bool init; - uint rhs_idx; - Record lhs_record; - Record *rhs_records; + Record r; // Bound branch record. + OpBase *bound_branch; // Bound branch. + OpBase *rhs_branch; // Right-hand branch. + Argument *op_arg; // Right-hand branch tap. } Apply; // OpBase* NewApplyOp(uint *modifies); OpBase *NewApplyOp(const ExecutionPlan *plan); + diff --git a/src/execution_plan/ops/op_cond_var_len_traverse.c b/src/execution_plan/ops/op_cond_var_len_traverse.c index 693c1161ef..fa57cc88be 100644 --- a/src/execution_plan/ops/op_cond_var_len_traverse.c +++ b/src/execution_plan/ops/op_cond_var_len_traverse.c @@ -144,7 +144,7 @@ static Record CondVarLenTraverseConsume(OpBase *opBase) { if(op->edgesIdx >= 0) { // If we're returning a new path from a previously-used Record, // free the previous path to avoid a memory leak. 
- if(reused_record) SIValue_Free(Record_GetScalar(op->r, op->edgesIdx)); + if(reused_record) SIValue_Free(Record_Get(op->r, op->edgesIdx)); // Add new path to Record. Record_AddScalar(op->r, op->edgesIdx, SI_Path(p)); } diff --git a/src/execution_plan/ops/op_conditional_traverse.c b/src/execution_plan/ops/op_conditional_traverse.c index 1ee8d15bc7..8076692f05 100644 --- a/src/execution_plan/ops/op_conditional_traverse.c +++ b/src/execution_plan/ops/op_conditional_traverse.c @@ -199,7 +199,15 @@ static Record CondTraverseConsume(OpBase *opBase) { // Ask child operations for data. for(op->recordsLen = 0; op->recordsLen < op->recordsCap; op->recordsLen++) { Record childRecord = OpBase_Consume(child); + // If the Record is NULL, the child has been depleted. if(!childRecord) break; + if(!Record_GetNode(childRecord, op->srcNodeIdx)) { + /* The child Record may not contain the source node in scenarios like + * a failed OPTIONAL MATCH. In this case, delete the Record and try again. */ + OpBase_DeleteRecord(childRecord); + op->recordsLen--; + continue; + } // Store received record. Record_PersistScalars(childRecord); op->records[op->recordsLen] = childRecord; @@ -213,8 +221,9 @@ static Record CondTraverseConsume(OpBase *opBase) { /* Get node from current column. */ op->r = op->records[src_id]; - Node *destNode = Record_GetNode(op->r, op->destNodeIdx); - Graph_GetNode(op->graph, dest_id, destNode); + Node destNode = {0}; + Graph_GetNode(op->graph, dest_id, &destNode); + Record_AddNode(op->r, op->destNodeIdx, destNode); if(op->setEdge) { _CondTraverse_CollectEdges(op, op->destNodeIdx, op->srcNodeIdx); diff --git a/src/execution_plan/ops/op_create.c b/src/execution_plan/ops/op_create.c index 95a1158b41..65cabb33fb 100644 --- a/src/execution_plan/ops/op_create.c +++ b/src/execution_plan/ops/op_create.c @@ -48,10 +48,14 @@ static void _CreateNodes(OpCreate *op, Record r) { QGNode *n = op->pending.nodes_to_create[i].node; /* Create a new node. 
*/ - Node *newNode = Record_GetNode(r, op->pending.nodes_to_create[i].node_idx); - newNode->entity = NULL; - newNode->label = n->label; - newNode->labelID = n->labelID; + Node newNode = { + .entity = NULL, + .label = n->label, + .labelID = n->labelID + }; + + /* Add new node to Record and save a reference to it. */ + Node *node_ref = Record_AddNode(r, op->pending.nodes_to_create[i].node_idx, newNode); /* Convert query-level properties. */ PropertyMap *map = op->pending.nodes_to_create[i].properties; @@ -59,7 +63,7 @@ static void _CreateNodes(OpCreate *op, Record r) { if(map) converted_properties = ConvertPropertyMap(r, map); /* Save node for later insertion. */ - op->pending.created_nodes = array_append(op->pending.created_nodes, newNode); + op->pending.created_nodes = array_append(op->pending.created_nodes, node_ref); /* Save properties to insert with node. */ op->pending.node_properties = array_append(op->pending.node_properties, converted_properties); @@ -76,12 +80,20 @@ static void _CreateEdges(OpCreate *op, Record r) { /* Retrieve source and dest nodes. */ Node *src_node = Record_GetNode(r, op->pending.edges_to_create[i].src_idx); Node *dest_node = Record_GetNode(r, op->pending.edges_to_create[i].dest_idx); + /* Verify that the endpoints of the new edge resolved properly; fail otherwise. */ + if(!src_node || !dest_node) { + char *error; + asprintf(&error, "Failed to create relationship; endpoint was not found."); + QueryCtx_SetError(error); + QueryCtx_RaiseRuntimeException(); + } /* Create the actual edge. 
*/ - Edge *newEdge = Record_GetEdge(r, op->pending.edges_to_create[i].edge_idx); - if(array_len(e->reltypes) > 0) newEdge->relationship = e->reltypes[0]; - Edge_SetSrcNode(newEdge, src_node); - Edge_SetDestNode(newEdge, dest_node); + Edge newEdge = {0}; + if(array_len(e->reltypes) > 0) newEdge.relationship = e->reltypes[0]; + Edge_SetSrcNode(&newEdge, src_node); + Edge_SetDestNode(&newEdge, dest_node); + Edge *edge_ref = Record_AddEdge(r, op->pending.edges_to_create[i].edge_idx, newEdge); /* Convert query-level properties. */ PropertyMap *map = op->pending.edges_to_create[i].properties; @@ -89,7 +101,7 @@ static void _CreateEdges(OpCreate *op, Record r) { if(map) converted_properties = ConvertPropertyMap(r, map); /* Save edge for later insertion. */ - op->pending.created_edges = array_append(op->pending.created_edges, newEdge); + op->pending.created_edges = array_append(op->pending.created_edges, edge_ref); /* Save properties to insert with node. */ op->pending.edge_properties = array_append(op->pending.edge_properties, converted_properties); diff --git a/src/execution_plan/ops/op_delete.c b/src/execution_plan/ops/op_delete.c index 971218a730..cab793b2d8 100644 --- a/src/execution_plan/ops/op_delete.c +++ b/src/execution_plan/ops/op_delete.c @@ -78,13 +78,16 @@ static Record DeleteConsume(OpBase *opBase) { for(int i = 0; i < op->exp_count; i++) { AR_ExpNode *exp = op->exps[i]; SIValue value = AR_EXP_Evaluate(exp, r); + SIType type = SI_TYPE(value); /* Enqueue entities for deletion. */ - if(SI_TYPE(value) & T_NODE) { + if(type & T_NODE) { Node *n = (Node *)value.ptrval; op->deleted_nodes = array_append(op->deleted_nodes, *n); - } else if(SI_TYPE(value) & T_EDGE) { + } else if(type & T_EDGE) { Edge *e = (Edge *)value.ptrval; op->deleted_edges = array_append(op->deleted_edges, *e); + } else if(type & T_NULL) { + continue; // Ignore null values. } else { /* Expression evaluated to a non-graph entity type * clear pending deletions and raise an exception. 
*/ diff --git a/src/execution_plan/ops/op_expand_into.c b/src/execution_plan/ops/op_expand_into.c index 3c36578862..da26f4f2f7 100644 --- a/src/execution_plan/ops/op_expand_into.c +++ b/src/execution_plan/ops/op_expand_into.c @@ -103,7 +103,7 @@ OpBase *NewExpandIntoOp(const ExecutionPlan *plan, Graph *g, AlgebraicExpression op->recordCount = 0; op->edgeRelationTypes = NULL; op->recordsCap = 0; - op->records = rm_calloc(op->recordsCap, sizeof(Record)); + op->records = NULL; // Set our Op operations OpBase_Init((OpBase *)op, OPType_EXPAND_INTO, "Expand Into", ExpandIntoInit, ExpandIntoConsume, @@ -156,8 +156,6 @@ static Record _handoff(OpExpandInto *op) { // Current record resides at row recordCount. int rowIdx = op->recordCount; op->r = op->records[op->recordCount]; - assert(Record_GetType(op->r, op->srcNodeIdx) == REC_TYPE_NODE); - assert(Record_GetType(op->r, op->destNodeIdx) == REC_TYPE_NODE); srcNode = Record_GetNode(op->r, op->srcNodeIdx); destNode = Record_GetNode(op->r, op->destNodeIdx); srcId = ENTITY_GET_ID(srcNode); @@ -215,6 +213,14 @@ static Record ExpandIntoConsume(OpBase *opBase) { Record childRecord = OpBase_Consume(child); // Did not managed to get new data, break. if(!childRecord) break; + if(!Record_GetNode(childRecord, op->srcNodeIdx) || + !Record_GetNode(childRecord, op->destNodeIdx)) { + /* The child Record may not contain the source node in scenarios like + * a failed OPTIONAL MATCH. In this case, delete the Record and try again. */ + OpBase_DeleteRecord(childRecord); + op->recordCount--; + continue; + } // Store received record. 
op->records[op->recordCount] = childRecord; diff --git a/src/execution_plan/ops/op_index_scan.c b/src/execution_plan/ops/op_index_scan.c index a3c7436f00..dc6c2f5d2e 100644 --- a/src/execution_plan/ops/op_index_scan.c +++ b/src/execution_plan/ops/op_index_scan.c @@ -41,10 +41,11 @@ static OpResult IndexScanInit(OpBase *opBase) { } static inline void _UpdateRecord(IndexScan *op, Record r, EntityID node_id) { - // Get a pointer to a heap allocated node. - Node *n = Record_GetNode(r, op->nodeRecIdx); - // Update node's internal entity pointer. - assert(Graph_GetNode(op->g, node_id, n)); + // Populate the Record with the graph entity data. + Node n = {0}; + assert(Graph_GetNode(op->g, node_id, &n)); + // Get a pointer to the node's allocated space within the Record. + Record_AddNode(r, op->nodeRecIdx, n); } static Record IndexScanConsumeFromChild(OpBase *opBase) { diff --git a/src/execution_plan/ops/op_merge_create.c b/src/execution_plan/ops/op_merge_create.c index 90dbe05918..5f7b38fcd8 100644 --- a/src/execution_plan/ops/op_merge_create.c +++ b/src/execution_plan/ops/op_merge_create.c @@ -93,10 +93,13 @@ static bool _CreateEntities(OpMergeCreate *op, Record r) { QGNode *n = op->pending.nodes_to_create[i].node; /* Create a new node. */ - Node *newNode = Record_GetNode(r, op->pending.nodes_to_create[i].node_idx); - newNode->entity = NULL; - newNode->label = n->label; - newNode->labelID = n->labelID; + Node newNode = { + .entity = NULL, + .label = n->label, + .labelID = n->labelID + }; + /* Add new node to Record and save a reference to it. */ + Node *node_ref = Record_AddNode(r, op->pending.nodes_to_create[i].node_idx, newNode); /* Convert query-level properties. */ PropertyMap *map = op->pending.nodes_to_create[i].properties; @@ -107,7 +110,7 @@ static bool _CreateEntities(OpMergeCreate *op, Record r) { _IncrementalHashEntity(op->hash_state, n->label, converted_properties); /* Save node for later insertion. 
*/ - op->pending.created_nodes = array_append(op->pending.created_nodes, newNode); + op->pending.created_nodes = array_append(op->pending.created_nodes, node_ref); /* Save properties to insert with node. */ op->pending.node_properties = array_append(op->pending.node_properties, converted_properties); @@ -122,11 +125,21 @@ static bool _CreateEntities(OpMergeCreate *op, Record r) { Node *src_node = Record_GetNode(r, op->pending.edges_to_create[i].src_idx); Node *dest_node = Record_GetNode(r, op->pending.edges_to_create[i].dest_idx); + /* Verify that the endpoints of the new edge resolved properly; fail otherwise. */ + if(!src_node || !dest_node) { + char *error; + asprintf(&error, "Failed to create relationship; endpoint was not found."); + QueryCtx_SetError(error); + QueryCtx_RaiseRuntimeException(); + } + /* Create the actual edge. */ - Edge *newEdge = Record_GetEdge(r, op->pending.edges_to_create[i].edge_idx); - if(array_len(e->reltypes) > 0) newEdge->relationship = e->reltypes[0]; - Edge_SetSrcNode(newEdge, src_node); - Edge_SetDestNode(newEdge, dest_node); + Edge newEdge = {0}; + if(array_len(e->reltypes) > 0) newEdge.relationship = e->reltypes[0]; + Edge_SetSrcNode(&newEdge, src_node); + Edge_SetDestNode(&newEdge, dest_node); + + Edge *edge_ref = Record_AddEdge(r, op->pending.edges_to_create[i].edge_idx, newEdge); /* Convert query-level properties. */ PropertyMap *map = op->pending.edges_to_create[i].properties; @@ -153,7 +166,7 @@ static bool _CreateEntities(OpMergeCreate *op, Record r) { } /* Save edge for later insertion. */ - op->pending.created_edges = array_append(op->pending.created_edges, newEdge); + op->pending.created_edges = array_append(op->pending.created_edges, edge_ref); /* Save properties to insert with node. 
*/ op->pending.edge_properties = array_append(op->pending.edge_properties, converted_properties); diff --git a/src/execution_plan/ops/op_node_by_label_scan.c b/src/execution_plan/ops/op_node_by_label_scan.c index 3b01bd7e5f..7604713cce 100644 --- a/src/execution_plan/ops/op_node_by_label_scan.c +++ b/src/execution_plan/ops/op_node_by_label_scan.c @@ -89,10 +89,11 @@ static OpResult NodeByLabelScanInit(OpBase *opBase) { } static inline void _UpdateRecord(NodeByLabelScan *op, Record r, GrB_Index node_id) { - // Get a pointer to the node's allocated space within the Record. - Node *n = Record_GetNode(r, op->nodeRecIdx); // Populate the Record with the graph entity data. - Graph_GetNode(op->g, node_id, n); + Node n = {0}; + Graph_GetNode(op->g, node_id, &n); + // Get a pointer to the node's allocated space within the Record. + Record_AddNode(r, op->nodeRecIdx, n); } static inline void _ResetIterator(NodeByLabelScan *op) { diff --git a/src/execution_plan/ops/op_optional.c b/src/execution_plan/ops/op_optional.c new file mode 100644 index 0000000000..694f619877 --- /dev/null +++ b/src/execution_plan/ops/op_optional.c @@ -0,0 +1,48 @@ +/* + * Copyright 2018-2020 Redis Labs Ltd. and Contributors + * + * This file is available under the Redis Labs Source Available License Agreement + */ + +#include "op_optional.h" + +/* Forward declarations. */ +static Record OptionalConsume(OpBase *opBase); +static OpResult OptionalReset(OpBase *opBase); +static OpBase *OptionalClone(const ExecutionPlan *plan, const OpBase *opBase); + +OpBase *NewOptionalOp(const ExecutionPlan *plan) { + Optional *op = rm_malloc(sizeof(Optional)); + op->emitted_record = false; + + // Set our Op operations + OpBase_Init((OpBase *)op, OPType_OPTIONAL, "Optional", NULL, OptionalConsume, + OptionalReset, NULL, OptionalClone, NULL, false, plan); + + return (OpBase *)op; +} + +static Record OptionalConsume(OpBase *opBase) { + Optional *op = (Optional *)opBase; + // Try to produce a Record from the child op. 
+ Record r = OpBase_Consume(opBase->children[0]); + + // Create an empty Record if the child returned NULL and this op has not yet returned data. + if(!r && !op->emitted_record) r = OpBase_CreateRecord(opBase); + + // Don't produce multiple empty Records. + op->emitted_record = true; + + return r; +} + +static OpResult OptionalReset(OpBase *opBase) { + Optional *op = (Optional *)opBase; + op->emitted_record = false; + return OP_OK; +} + +static inline OpBase *OptionalClone(const ExecutionPlan *plan, const OpBase *opBase) { + return NewOptionalOp(plan); +} + diff --git a/src/execution_plan/ops/op_optional.h b/src/execution_plan/ops/op_optional.h new file mode 100644 index 0000000000..25fe406ddb --- /dev/null +++ b/src/execution_plan/ops/op_optional.h @@ -0,0 +1,56 @@ +/* + * Copyright 2018-2020 Redis Labs Ltd. and Contributors + * + * This file is available under the Redis Labs Source Available License Agreement + */ + +#pragma once + +#include "op.h" +#include "../execution_plan.h" + +/* Optional is an operation that manages the output of its child op tree rather + * than producing or mutating data in itself. + * + * It attempts to pull a Record from its child op tree, which is typically + * comprised of scan and traversal operations that model a MATCH pattern in a query. + * + * If the child successfully produces data, all of its Records are passed to Optional's + * parent without modification. + * If the child fails to produce any data, Optional will instead emit a single empty Record. + * + * This allows for the expression of complex patterns in which not all elements need to be resolved, + * rather than the all-or-nothing matching logic traditionally used to resolve MATCH patterns. 
+ * + * For example, the query: + * MATCH (a:A) OPTIONAL MATCH (a)-[e]->(b) RETURN a, e, b + * Produces the operation tree: + * Results + * | + * Project + * | + * Apply + * / \ + * LabelScan (a) Optional + * \ + * CondTraverse (a)-[e]->(b) + * \ + * Argument (a) + * + * We want to retrieve all 'a' nodes regardless of whether they have an outgoing edge. + * For each 'a' that does have at least one outgoing edge, all matching paths + * will be resolved by Optional's CondTraverse child and returned as they would + * be for a standard MATCH pattern. + * For each 'a' that lacks an outgoing edge, the CondTraverse fails to produce any data, + * and Optional instead emits one empty Record. + * Optional's parent Apply op merges the empty Record with the left-hand Record + * produced by the Label Scan, causing 'a' to be emitted unmodified with NULL entries + * representing the elements of the failed traversal. */ +typedef struct { + OpBase op; + bool emitted_record; // True if this operation has returned at least one Record. +} Optional; + +/* Creates a new Optional operation. */ +OpBase *NewOptionalOp(const ExecutionPlan *plan); + diff --git a/src/execution_plan/ops/op_update.c b/src/execution_plan/ops/op_update.c index 15682c2e62..fcd0aae226 100644 --- a/src/execution_plan/ops/op_update.c +++ b/src/execution_plan/ops/op_update.c @@ -53,7 +53,13 @@ static void _UpdateIndex(EntityUpdateCtx *ctx, GraphContext *gc, Schema *s, SIVa Schema_AddNodeToIndices(s, n, true); } -static void _UpdateNode(OpUpdate *op, EntityUpdateCtx *ctx) { +/* Set a property on a node. For non-NULL values, the property + * will be added or updated if it is already present. + * For NULL values, the property will be deleted if present + * and nothing will be done otherwise. + * Relevant indexes will be updated accordingly. + * Returns 1 if a property was set or deleted. 
*/ +static int _UpdateNode(OpUpdate *op, EntityUpdateCtx *ctx) { /* Retrieve GraphEntity: * Due to Record freeing we can't maintain the original pointer to GraphEntity object, * but only a pointer to an Entity object, @@ -71,7 +77,8 @@ static void _UpdateNode(OpUpdate *op, EntityUpdateCtx *ctx) { SIValue *old_value = GraphEntity_GetProperty((GraphEntity *)node, ctx->attr_id); if(old_value == PROPERTY_NOTFOUND) { - // Add new property. + // Adding a new property; do nothing if its value is NULL. + if(SI_TYPE(ctx->new_value) == T_NULL) return 0; GraphEntity_AddProperty((GraphEntity *)node, ctx->attr_id, ctx->new_value); } else { // Update property. @@ -80,9 +87,15 @@ static void _UpdateNode(OpUpdate *op, EntityUpdateCtx *ctx) { // Update index for node entities. _UpdateIndex(ctx, op->gc, s, old_value, &ctx->new_value); + return 1; } -static void _UpdateEdge(OpUpdate *op, EntityUpdateCtx *ctx) { +/* Set a property on an edge. For non-NULL values, the property + * will be added or updated if it is already present. + * For NULL values, the property will be deleted if present + * and nothing will be done otherwise. + * Returns 1 if a property was set or deleted. */ +static int _UpdateEdge(OpUpdate *op, EntityUpdateCtx *ctx) { /* Retrieve GraphEntity: * Due to Record freeing we can't maintain the original pointer to GraphEntity object, * but only a pointer to an Entity object, @@ -96,16 +109,19 @@ static void _UpdateEdge(OpUpdate *op, EntityUpdateCtx *ctx) { SIValue *old_value = GraphEntity_GetProperty((GraphEntity *)edge, ctx->attr_id); if(old_value == PROPERTY_NOTFOUND) { - // Add new property. + // Adding a new property; do nothing if its value is NULL. + if(SI_TYPE(ctx->new_value) == T_NULL) return 0; GraphEntity_AddProperty((GraphEntity *)edge, ctx->attr_id, ctx->new_value); } else { // Update property. GraphEntity_SetProperty((GraphEntity *)edge, ctx->attr_id, ctx->new_value); } + return 1; } /* Executes delayed updates. 
*/ static void _CommitUpdates(OpUpdate *op) { + uint properties_set = 0; for(uint i = 0; i < op->pending_updates_count; i++) { EntityUpdateCtx *ctx = &op->pending_updates[i]; // Map the attribute key if it has not been encountered before @@ -113,14 +129,14 @@ static void _CommitUpdates(OpUpdate *op) { ctx->attr_id = GraphContext_FindOrAddAttribute(op->gc, ctx->attribute); } if(ctx->entity_type == GETYPE_NODE) { - _UpdateNode(op, ctx); + properties_set += _UpdateNode(op, ctx); } else { - _UpdateEdge(op, ctx); + properties_set += _UpdateEdge(op, ctx); } SIValue_Free(ctx->new_value); } - if(op->stats) op->stats->properties_set += op->pending_updates_count; + if(op->stats) op->stats->properties_set += properties_set; } /* We only cache records if op_update is not the last @@ -181,13 +197,22 @@ static Record UpdateConsume(OpBase *opBase) { * for later execution. */ EntityUpdateEvalCtx *update_expression = op->update_expressions; for(uint i = 0; i < op->update_expressions_count; i++, update_expression++) { - SIValue new_value = SI_CloneValue(AR_EXP_Evaluate(update_expression->exp, r)); - // Make sure we're updating either a node or an edge. + // Get the type of the entity to update. RecordEntryType t = Record_GetType(r, update_expression->record_idx); - assert(t == REC_TYPE_NODE || t == REC_TYPE_EDGE); + // If the expected entity was not found, make no updates but do not error. + if(t == REC_TYPE_UNKNOWN) continue; + // Make sure we're updating either a node or an edge. + if(t != REC_TYPE_NODE && t != REC_TYPE_EDGE) { + char *error; + asprintf(&error, "Update error: alias '%s' did not resolve to a graph entity", + update_expression->alias); + QueryCtx_SetError(error); + QueryCtx_RaiseRuntimeException(); + } GraphEntityType type = (t == REC_TYPE_NODE) ? 
GETYPE_NODE : GETYPE_EDGE; - GraphEntity *entity = Record_GetGraphEntity(r, update_expression->record_idx); + + SIValue new_value = SI_CloneValue(AR_EXP_Evaluate(update_expression->exp, r)); _QueueUpdate(op, entity, type, update_expression->attribute, new_value); } diff --git a/src/execution_plan/ops/op_value_hash_join.c b/src/execution_plan/ops/op_value_hash_join.c index a4b02d66a1..8edb538b8b 100644 --- a/src/execution_plan/ops/op_value_hash_join.c +++ b/src/execution_plan/ops/op_value_hash_join.c @@ -21,8 +21,8 @@ static void ValueHashJoinFree(OpBase *opBase); /* Determins order between two records by inspecting * element stored at postion idx. */ static bool _record_islt(Record l, Record r, uint idx) { - SIValue lv = Record_GetScalar(l, idx); - SIValue rv = Record_GetScalar(r, idx); + SIValue lv = Record_Get(l, idx); + SIValue rv = Record_Get(r, idx); return SIValue_Compare(lv, rv, NULL) < 0; } @@ -39,7 +39,7 @@ static int64_t _binarySearchLeftmost(Record *array, int join_key_idx, SIValue v) int64_t right = recordCount; while(left < right) { pos = (right + left) / 2; - SIValue x = Record_GetScalar(array[pos], join_key_idx); + SIValue x = Record_Get(array[pos], join_key_idx); if(SIValue_Compare(x, v, NULL) < 0) left = pos + 1; else right = pos; } @@ -54,7 +54,7 @@ static int64_t _binarySearchRightmost(Record *array, int64_t array_len, int join int64_t right = array_len; while(left < right) { pos = (right + left) / 2; - SIValue x = Record_GetScalar(array[pos], join_key_idx); + SIValue x = Record_Get(array[pos], join_key_idx); if(SIValue_Compare(v, x, NULL) < 0) right = pos; else left = pos + 1; } @@ -89,7 +89,7 @@ static bool _set_intersection_idx(OpValueHashJoin *op, SIValue v) { // Make sure value was found. Record r = op->cached_records[leftmost_idx]; - SIValue x = Record_GetScalar(r, op->join_value_rec_idx); + SIValue x = Record_Get(r, op->join_value_rec_idx); if(SIValue_Compare(x, v, NULL) != 0) return false; // Value wasn't found. 
/* Value was found diff --git a/src/execution_plan/ops/ops.h b/src/execution_plan/ops/ops.h index fa1bfb530d..14e1b4a717 100644 --- a/src/execution_plan/ops/ops.h +++ b/src/execution_plan/ops/ops.h @@ -36,3 +36,5 @@ #include "op_merge_create.h" #include "op_semi_apply.h" #include "op_apply_multiplexer.h" +#include "op_optional.h" + diff --git a/src/execution_plan/record.c b/src/execution_plan/record.c index 9b7dbae3b9..3231b562c7 100644 --- a/src/execution_plan/record.c +++ b/src/execution_plan/record.c @@ -88,19 +88,32 @@ RecordEntryType Record_GetType(const Record r, int idx) { return r->entries[idx].type; } -SIValue Record_GetScalar(Record r, int idx) { - r->entries[idx].type = REC_TYPE_SCALAR; - return r->entries[idx].value.s; -} - Node *Record_GetNode(const Record r, int idx) { - r->entries[idx].type = REC_TYPE_NODE; - return &(r->entries[idx].value.n); + switch(r->entries[idx].type) { + case REC_TYPE_NODE: + return &(r->entries[idx].value.n); + case REC_TYPE_UNKNOWN: + return NULL; + case REC_TYPE_SCALAR: + // Null scalar values are expected here; otherwise fall through. + if(SIValue_IsNull(r->entries[idx].value.s)) return NULL; + default: + assert("encountered unexpected type in Record; expected Node" && false); + } } Edge *Record_GetEdge(const Record r, int idx) { - r->entries[idx].type = REC_TYPE_EDGE; - return &(r->entries[idx].value.e); + switch(r->entries[idx].type) { + case REC_TYPE_EDGE: + return &(r->entries[idx].value.e); + case REC_TYPE_UNKNOWN: + return NULL; + case REC_TYPE_SCALAR: + // Null scalar values are expected here; otherwise fall through. 
+ if(SIValue_IsNull(r->entries[idx].value.s)) return NULL; + default: + assert("encountered unexpected type in Record; expected Edge" && false); + } } SIValue Record_Get(Record r, int idx) { @@ -111,7 +124,9 @@ SIValue Record_Get(Record r, int idx) { case REC_TYPE_EDGE: return SI_Edge(Record_GetEdge(r, idx)); case REC_TYPE_SCALAR: - return Record_GetScalar(r, idx); + return r->entries[idx].value.s; + case REC_TYPE_UNKNOWN: + return SI_NullVal(); default: assert(false); } @@ -124,10 +139,8 @@ GraphEntity *Record_GetGraphEntity(const Record r, int idx) { return (GraphEntity *)Record_GetNode(r, idx); case REC_TYPE_EDGE: return (GraphEntity *)Record_GetEdge(r, idx); - case REC_TYPE_SCALAR: - return (GraphEntity *)(Record_GetScalar(r, idx).ptrval); default: - assert(false); + assert(false && "encountered unexpected type when trying to retrieve graph entity"); } return NULL; } @@ -147,19 +160,22 @@ void Record_Add(Record r, int idx, SIValue v) { } } -void Record_AddScalar(Record r, int idx, SIValue v) { +SIValue *Record_AddScalar(Record r, int idx, SIValue v) { r->entries[idx].value.s = v; r->entries[idx].type = REC_TYPE_SCALAR; + return &(r->entries[idx].value.s); } -void Record_AddNode(Record r, int idx, Node node) { +Node *Record_AddNode(Record r, int idx, Node node) { r->entries[idx].value.n = node; r->entries[idx].type = REC_TYPE_NODE; + return &(r->entries[idx].value.n); } -void Record_AddEdge(Record r, int idx, Edge edge) { +Edge *Record_AddEdge(Record r, int idx, Edge edge) { r->entries[idx].value.e = edge; r->entries[idx].type = REC_TYPE_EDGE; + return &(r->entries[idx].value.e); } void Record_PersistScalars(Record r) { @@ -218,7 +234,7 @@ unsigned long long Record_Hash64(const Record r) { len = sizeof(id); break; case REC_TYPE_SCALAR: - si = Record_GetScalar(r, i); + si = Record_Get(r, i); switch(si.type) { case T_NULL: data = &_null; diff --git a/src/execution_plan/record.h b/src/execution_plan/record.h index 7f1820dbe0..e6982ee3dc 100644 --- 
a/src/execution_plan/record.h +++ b/src/execution_plan/record.h @@ -61,9 +61,6 @@ int Record_GetEntryIdx(Record r, const char *alias); // Get entry type. RecordEntryType Record_GetType(const Record r, int idx); -// Get a scalar from record at position idx. -SIValue Record_GetScalar(const Record r, int idx); - // Get a node from record at position idx. Node *Record_GetNode(const Record r, int idx); @@ -79,14 +76,14 @@ GraphEntity *Record_GetGraphEntity(const Record r, int idx); // Add a scalar, node, or edge to the record, depending on the SIValue type. void Record_Add(Record r, int idx, SIValue v); -// Add a scalar to record at position idx. -void Record_AddScalar(Record r, int idx, SIValue v); +// Add a scalar to record at position idx and return a reference to it. +SIValue *Record_AddScalar(Record r, int idx, SIValue v); -// Add a node to record at position idx. -void Record_AddNode(Record r, int idx, Node node); +// Add a node to record at position idx and return a reference to it. +Node *Record_AddNode(Record r, int idx, Node node); -// Add an edge to record at position idx. -void Record_AddEdge(Record r, int idx, Edge edge); +// Add an edge to record at position idx and return a reference to it. +Edge *Record_AddEdge(Record r, int idx, Edge edge); // Ensure that all scalar values in record are access-safe. void Record_PersistScalars(Record r); diff --git a/src/graph/query_graph.c b/src/graph/query_graph.c index addcdeb0a3..d9da8e7465 100644 --- a/src/graph/query_graph.c +++ b/src/graph/query_graph.c @@ -155,6 +155,8 @@ QueryGraph *BuildQueryGraph(const GraphContext *gc, const AST *ast) { if(match_clauses) { uint match_count = array_len(match_clauses); for(uint i = 0; i < match_count; i ++) { + // OPTIONAL MATCH clauses are handled separately. 
+ if(cypher_ast_match_is_optional(match_clauses[i])) continue; const cypher_astnode_t *pattern = cypher_ast_match_get_pattern(match_clauses[i]); uint npaths = cypher_ast_pattern_npaths(pattern); for(uint j = 0; j < npaths; j ++) { diff --git a/src/resultset/formatters/resultset_formatter.h b/src/resultset/formatters/resultset_formatter.h index d6ec2d7ea8..3e5ee28895 100644 --- a/src/resultset/formatters/resultset_formatter.h +++ b/src/resultset/formatters/resultset_formatter.h @@ -14,8 +14,8 @@ typedef enum { COLUMN_UNKNOWN = 0, COLUMN_SCALAR = 1, - COLUMN_NODE = 2, - COLUMN_RELATION = 3, + COLUMN_NODE = 2, // Unused, retained for client compatibility. + COLUMN_RELATION = 3, // Unused, retained for client compatibility. } ColumnType; typedef enum { @@ -32,10 +32,12 @@ typedef enum { } ValueType; // Typedef for header formatters. -typedef void (*EmitHeaderFunc)(RedisModuleCtx *ctx, const char **columns, const Record r, uint *col_rec_map); +typedef void (*EmitHeaderFunc)(RedisModuleCtx *ctx, const char **columns, const Record r, + uint *col_rec_map); // Typedef for record formatters. 
-typedef void (*EmitRecordFunc)(RedisModuleCtx *ctx, GraphContext *gc, const Record r, uint numcols, uint *col_rec_map); +typedef void (*EmitRecordFunc)(RedisModuleCtx *ctx, GraphContext *gc, const Record r, uint numcols, + uint *col_rec_map); typedef struct { EmitRecordFunc EmitRecord; diff --git a/src/resultset/formatters/resultset_replycompact.c b/src/resultset/formatters/resultset_replycompact.c index 5bba433d43..bfd5f4e27b 100644 --- a/src/resultset/formatters/resultset_replycompact.c +++ b/src/resultset/formatters/resultset_replycompact.c @@ -229,56 +229,26 @@ void ResultSet_EmitCompactRecord(RedisModuleCtx *ctx, GraphContext *gc, const Re for(uint i = 0; i < numcols; i++) { uint idx = col_rec_map[i]; - switch(Record_GetType(r, idx)) { - case REC_TYPE_NODE: - _ResultSet_CompactReplyWithNode(ctx, gc, Record_GetNode(r, idx)); - break; - case REC_TYPE_EDGE: - _ResultSet_CompactReplyWithEdge(ctx, gc, Record_GetEdge(r, idx)); - break; - default: - RedisModule_ReplyWithArray(ctx, 2); // Reply with array with space for type and value - _ResultSet_CompactReplyWithSIValue(ctx, gc, Record_GetScalar(r, idx)); - } + RedisModule_ReplyWithArray(ctx, 2); // Reply with array with space for type and value + _ResultSet_CompactReplyWithSIValue(ctx, gc, Record_Get(r, idx)); } } -// For every column in the header, emit a 2-array that specifies -// the column alias followed by an enum denoting what type -// (scalar, node, or relation) it holds. +// For every column in the header, emit a 2-array containing the ColumnType enum +// followed by the column alias. 
void ResultSet_ReplyWithCompactHeader(RedisModuleCtx *ctx, const char **columns, const Record r, uint *col_rec_map) { uint columns_len = array_len(columns); RedisModule_ReplyWithArray(ctx, columns_len); for(uint i = 0; i < columns_len; i++) { RedisModule_ReplyWithArray(ctx, 2); - ColumnType t; - // First, emit the column type enum - if(r) { - RecordEntryType entry_type = Record_GetType(r, col_rec_map[i]); - switch(entry_type) { - case REC_TYPE_NODE: - t = COLUMN_NODE; - break; - case REC_TYPE_EDGE: - t = COLUMN_RELATION; - break; - case REC_TYPE_SCALAR: - case REC_TYPE_UNKNOWN: // Treat unknown as scalar. - t = COLUMN_SCALAR; - break; - default: - assert(false); - } - } else { - // If we didn't receive a record, no results will be returned, - // and the column types don't matter. - t = COLUMN_SCALAR; - } - + /* Because the types found in the first Record do not necessarily inform the types + * in subsequent records, we will always set the column type as scalar. */ + ColumnType t = COLUMN_SCALAR; RedisModule_ReplyWithLongLong(ctx, t); // Second, emit the identifier string associated with the column RedisModule_ReplyWithStringBuffer(ctx, columns[i], strlen(columns[i])); } } + diff --git a/src/resultset/formatters/resultset_replyverbose.c b/src/resultset/formatters/resultset_replyverbose.c index f030debf27..715670fd9f 100644 --- a/src/resultset/formatters/resultset_replyverbose.c +++ b/src/resultset/formatters/resultset_replyverbose.c @@ -19,7 +19,6 @@ static void _ResultSet_VerboseReplyWithPath(RedisModuleCtx *ctx, SIValue path); * and NULL values. 
*/ static void _ResultSet_VerboseReplyWithSIValue(RedisModuleCtx *ctx, GraphContext *gc, const SIValue v) { - // Emit the actual value, then the value type (to facilitate client-side parsing) switch(SI_TYPE(v)) { case T_STRING: RedisModule_ReplyWithStringBuffer(ctx, v.stringval, strlen(v.stringval)); @@ -180,7 +179,7 @@ void ResultSet_EmitVerboseRecord(RedisModuleCtx *ctx, GraphContext *gc, const Re _ResultSet_VerboseReplyWithEdge(ctx, gc, Record_GetEdge(r, idx)); break; default: - _ResultSet_VerboseReplyWithSIValue(ctx, gc, Record_GetScalar(r, idx)); + _ResultSet_VerboseReplyWithSIValue(ctx, gc, Record_Get(r, idx)); } } } @@ -195,3 +194,4 @@ void ResultSet_ReplyWithVerboseHeader(RedisModuleCtx *ctx, const char **columns, RedisModule_ReplyWithStringBuffer(ctx, columns[i], strlen(columns[i])); } } + diff --git a/tests/flow/test_function_calls.py b/tests/flow/test_function_calls.py index f7b480385e..916eebed51 100644 --- a/tests/flow/test_function_calls.py +++ b/tests/flow/test_function_calls.py @@ -204,3 +204,41 @@ def test10_modulo_inputs(self): actual_result = graph.query(query) expected_result = [[-0.5]] self.env.assertEquals(actual_result.result_set, expected_result) + + # Aggregate functions should handle null inputs appropriately. + def test11_null_aggregate_function_inputs(self): + # SUM should sum all non-null inputs. + query = """UNWIND [1, NULL, 3] AS a RETURN sum(a)""" + actual_result = graph.query(query) + expected_result = [[4]] + self.env.assertEquals(actual_result.result_set, expected_result) + + # SUM should return 0 given a fully NULL input. + query = """WITH NULL AS a RETURN sum(a)""" + actual_result = graph.query(query) + expected_result = [[0]] + self.env.assertEquals(actual_result.result_set, expected_result) + + # COUNT should count all non-null inputs. 
+ query = """UNWIND [1, NULL, 3] AS a RETURN count(a)""" + actual_result = graph.query(query) + expected_result = [[2]] + self.env.assertEquals(actual_result.result_set, expected_result) + + # COUNT should return 0 given a fully NULL input. + query = """WITH NULL AS a RETURN count(a)""" + actual_result = graph.query(query) + expected_result = [[0]] + self.env.assertEquals(actual_result.result_set, expected_result) + + # COLLECT should ignore null inputs. + query = """UNWIND [1, NULL, 3] AS a RETURN collect(a)""" + actual_result = graph.query(query) + expected_result = [[[1, 3]]] + self.env.assertEquals(actual_result.result_set, expected_result) + + # COLLECT should return an empty array on all null inputs. + query = """WITH NULL AS a RETURN collect(a)""" + actual_result = graph.query(query) + expected_result = [[[]]] + self.env.assertEquals(actual_result.result_set, expected_result) diff --git a/tests/flow/test_list.py b/tests/flow/test_list.py index 007256a93c..1e9c0759a3 100644 --- a/tests/flow/test_list.py +++ b/tests/flow/test_list.py @@ -1,4 +1,5 @@ from RLTest import Env +import redis from redisgraph import Graph, Node, Edge from base import FlowTestsBase @@ -33,3 +34,68 @@ def test02_unwind(self): expected_result = [[0], [1], [2], [3], [4], [5], [6], [7], [8], [9], [10]] self.env.assertEquals(result_set, expected_result) + + # List functions should handle null inputs appropriately. + def test03_null_list_function_inputs(self): + expected_result = [[None]] + + # NULL list argument to subscript. + query = """WITH NULL as list RETURN list[0]""" + actual_result = redis_graph.query(query) + self.env.assertEquals(actual_result.result_set, expected_result) + + # NULL list argument to slice. + query = """WITH NULL as list RETURN list[0..5]""" + actual_result = redis_graph.query(query) + self.env.assertEquals(actual_result.result_set, expected_result) + + # NULL list argument to HEAD. 
+ query = """WITH NULL as list RETURN head(list)""" + actual_result = redis_graph.query(query) + self.env.assertEquals(actual_result.result_set, expected_result) + + # NULL list argument to TAIL. + query = """WITH NULL as list RETURN tail(list)""" + actual_result = redis_graph.query(query) + self.env.assertEquals(actual_result.result_set, expected_result) + + # NULL list argument to IN. + query = """WITH NULL as list RETURN 'val' in list""" + actual_result = redis_graph.query(query) + self.env.assertEquals(actual_result.result_set, expected_result) + + # NULL list argument to SIZE. + query = """WITH NULL as list RETURN size(list)""" + actual_result = redis_graph.query(query) + self.env.assertEquals(actual_result.result_set, expected_result) + + # NULL subscript argument. + query = """WITH ['a'] as list RETURN list[NULL]""" + actual_result = redis_graph.query(query) + self.env.assertEquals(actual_result.result_set, expected_result) + + # NULL IN non-empty list should return NULL. + query = """RETURN NULL in ['val']""" + actual_result = redis_graph.query(query) + expected_result = [[None]] + self.env.assertEquals(actual_result.result_set, expected_result) + + # NULL arguments to slice. + query = """WITH ['a'] as list RETURN list[0..NULL]""" + actual_result = redis_graph.query(query) + self.env.assertEquals(actual_result.result_set, expected_result) + + # NULL range argument should produce an error. + query = """RETURN range(NULL, 5)""" + try: + redis_graph.query(query) + assert(False) + except redis.exceptions.ResponseError: + # Expecting an error. + pass + + # NULL IN empty list should return false. 
+ query = """RETURN NULL in []""" + actual_result = redis_graph.query(query) + expected_result = [[False]] + self.env.assertEquals(actual_result.result_set, expected_result) diff --git a/tests/flow/test_null_handling.py b/tests/flow/test_null_handling.py new file mode 100644 index 0000000000..2fb0f0cbca --- /dev/null +++ b/tests/flow/test_null_handling.py @@ -0,0 +1,88 @@ +import os +import sys +import redis +from RLTest import Env +from redisgraph import Graph, Node, Edge +from base import FlowTestsBase + +redis_graph = None + +class testNullHandlingFlow(FlowTestsBase): + def __init__(self): + self.env = Env() + global redis_graph + redis_con = self.env.getConnection() + redis_graph = Graph("null_handling", redis_con) + self.populate_graph() + + def populate_graph(self): + # Create a single node. + node = Node(label="L", properties={"v": "v1"}) + redis_graph.add_node(node) + redis_graph.flush() + + # Error when attempting to create a relationship with a null endpoint. + def test01_create_null(self): + try: + query = """MATCH (a) OPTIONAL MATCH (a)-[nonexistent_edge]->(nonexistent_node) CREATE (nonexistent_node)-[:E]->(a)""" + redis_graph.query(query) + assert(False) + except redis.exceptions.ResponseError: + # Expecting an error. + pass + + try: + query = """MATCH (a) OPTIONAL MATCH (a)-[nonexistent_edge]->(nonexistent_node) CREATE (a)-[:E]->(nonexistent_node)""" + redis_graph.query(query) + assert(False) + except redis.exceptions.ResponseError: + # Expecting an error. + pass + + # Error when attempting to merge a relationship with a null endpoint. + def test02_merge_null(self): + try: + query = """MATCH (a) OPTIONAL MATCH (a)-[nonexistent_edge]->(nonexistent_node) MERGE (nonexistent_node)-[:E]->(a)""" + redis_graph.query(query) + assert(False) + except redis.exceptions.ResponseError: + # Expecting an error. 
+ pass + + try: + query = """MATCH (a) OPTIONAL MATCH (a)-[nonexistent_edge]->(nonexistent_node) MERGE (a)-[:E]->(nonexistent_node)""" + redis_graph.query(query) + assert(False) + except redis.exceptions.ResponseError: + # Expecting an error. + pass + + # SET should update attributes on non-null entities and ignore null entities. + def test03_set_null(self): + query = """MATCH (a) OPTIONAL MATCH (a)-[nonexistent_edge]->(nonexistent_node) SET a.v2 = true, nonexistent_node.v2 = true, a.v3 = nonexistent_node.v3 RETURN a.v2, nonexistent_node.v2, a.v3""" + actual_result = redis_graph.query(query) + # The property should be set on the real node and ignored on the null entity. + assert(actual_result.properties_set == 1) + expected_result = [[True, None, None]] + self.env.assertEquals(actual_result.result_set, expected_result) + + # DELETE should ignore null entities. + def test04_delete_null(self): + query = """MATCH (a) OPTIONAL MATCH (a)-[nonexistent_edge]->(nonexistent_node) DELETE nonexistent_node""" + actual_result = redis_graph.query(query) + assert(actual_result.nodes_deleted == 0) + + # Functions should handle null inputs appropriately. + def test05_null_function_inputs(self): + query = """MATCH (a) OPTIONAL MATCH (a)-[r]->(b) RETURN type(r), labels(b), b.v * 5""" + actual_result = redis_graph.query(query) + expected_result = [[None, None, None]] + self.env.assertEquals(actual_result.result_set, expected_result) + + # Path functions should handle null inputs appropriately. + def test06_null_named_path_function_inputs(self): + query = """MATCH (a) OPTIONAL MATCH p = (a)-[r]->() RETURN p, length(p), collect(relationships(p))""" + actual_result = redis_graph.query(query) + # The path and function calls on it should return NULL, while collect() returns an empty array. 
+ expected_result = [[None, None, []]] + self.env.assertEquals(actual_result.result_set, expected_result) diff --git a/tests/flow/test_optional_match.py b/tests/flow/test_optional_match.py new file mode 100644 index 0000000000..a579d18386 --- /dev/null +++ b/tests/flow/test_optional_match.py @@ -0,0 +1,217 @@ +import os +import sys +from RLTest import Env +from redisgraph import Graph, Node, Edge +from base import FlowTestsBase + +redis_graph = None +nodes = {} + +class testOptionalFlow(FlowTestsBase): + def __init__(self): + self.env = Env() + global redis_graph + redis_con = self.env.getConnection() + redis_graph = Graph("optional_match", redis_con) + self.populate_graph() + + def populate_graph(self): + global nodes + # Construct a graph with the form: + # (v1)-[:E1]->(v2)-[:E2]->(v3), (v4) + node_props = ['v1', 'v2', 'v3', 'v4'] + + for idx, v in enumerate(node_props): + node = Node(label="L", properties={"v": v}) + nodes[v] = node + redis_graph.add_node(node) + + edge = Edge(nodes['v1'], "E1", nodes['v2']) + redis_graph.add_edge(edge) + + edge = Edge(nodes['v2'], "E2", nodes['v3']) + redis_graph.add_edge(edge) + + redis_graph.flush() + + # Optional MATCH clause that does not interact with the mandatory MATCH. + def test01_disjoint_optional(self): + global redis_graph + query = """MATCH (a {v: 'v1'}) OPTIONAL MATCH (b) RETURN a.v, b.v ORDER BY a.v, b.v""" + actual_result = redis_graph.query(query) + expected_result = [['v1', 'v1'], + ['v1', 'v2'], + ['v1', 'v3'], + ['v1', 'v4']] + self.env.assertEquals(actual_result.result_set, expected_result) + + # Optional MATCH clause that extends the mandatory MATCH pattern and has matches for all results. 
+ def test02_optional_traverse(self): + global redis_graph + query = """MATCH (a) WHERE a.v IN ['v1', 'v2'] OPTIONAL MATCH (a)-[]->(b) RETURN a.v, b.v ORDER BY a.v, b.v""" + actual_result = redis_graph.query(query) + expected_result = [['v1', 'v2'], + ['v2', 'v3']] + self.env.assertEquals(actual_result.result_set, expected_result) + + # Optional MATCH clause that extends the mandatory MATCH pattern and has null results. + def test03_optional_traverse_with_nulls(self): + global redis_graph + query = """MATCH (a) OPTIONAL MATCH (a)-[]->(b) RETURN a.v, b.v ORDER BY a.v, b.v""" + actual_result = redis_graph.query(query) + # (v3) and (v4) have no outgoing edges. + expected_result = [['v1', 'v2'], + ['v2', 'v3'], + ['v3', None], + ['v4', None]] + self.env.assertEquals(actual_result.result_set, expected_result) + + # Optional MATCH clause that extends the mandatory MATCH pattern and has a WHERE clause. + def test04_optional_traverse_with_predicate(self): + global redis_graph + query = """MATCH (a) OPTIONAL MATCH (a)-[]->(b) WHERE b.v = 'v2' RETURN a.v, b.v ORDER BY a.v, b.v""" + actual_result = redis_graph.query(query) + # only (v1) has an outgoing edge to (v2). + expected_result = [['v1', 'v2'], + ['v2', None], + ['v3', None], + ['v4', None]] + self.env.assertEquals(actual_result.result_set, expected_result) + + # Optional MATCH clause with endpoints resolved by the mandatory MATCH pattern. + def test05_optional_expand_into(self): + global redis_graph + query = """MATCH (a)-[]->(b) OPTIONAL MATCH (a)-[e]->(b) RETURN a.v, b.v, TYPE(e) ORDER BY a.v, b.v""" + actual_result = redis_graph.query(query) + expected_result = [['v1', 'v2', 'E1'], + ['v2', 'v3', 'E2']] + self.env.assertEquals(actual_result.result_set, expected_result) + + # The OPTIONAL MATCH exactly repeats the MATCH, producing identical results. 
+ query_without_optional = """MATCH (a)-[e]->(b) RETURN a.v, b.v, TYPE(e) ORDER BY a.v, b.v""" + result_without_optional = redis_graph.query(query_without_optional) + self.env.assertEquals(actual_result.result_set, result_without_optional.result_set) + + # Optional MATCH clause with endpoints resolved by the mandatory MATCH pattern and new filters introduced. + def test06_optional_expand_into_with_reltype(self): + global redis_graph + query = """MATCH (a)-[]->(b) OPTIONAL MATCH (a)-[e:E2]->(b) RETURN a.v, b.v, TYPE(e) ORDER BY a.v, b.v""" + actual_result = redis_graph.query(query) + # Only (v2)-[E2]->(v3) fulfills the constraint of the OPTIONAL MATCH clause. + expected_result = [['v1', 'v2', None], + ['v2', 'v3', 'E2']] + self.env.assertEquals(actual_result.result_set, expected_result) + + # Optional MATCH clause with endpoints resolved by the mandatory MATCH pattern, but no mandatory traversal. + def test07_optional_expand_into_cartesian_product(self): + global redis_graph + query = """MATCH (a {v: 'v1'}), (b) OPTIONAL MATCH (a)-[e]->(b) RETURN a.v, b.v, TYPE(e) ORDER BY a.v, b.v""" + actual_result = redis_graph.query(query) + # All nodes are represented, but (v1)-[E1]->(v2) is the only matching connection. + expected_result = [['v1', 'v1', None], + ['v1', 'v2', 'E1'], + ['v1', 'v3', None], + ['v1', 'v4', None]] + self.env.assertEquals(actual_result.result_set, expected_result) + + # TODO ExpandInto doesn't evaluate bidirectionally properly + # Optional MATCH clause with endpoints resolved by the mandatory MATCH pattern and a bidirectional optional pattern. + # def test08_optional_expand_into_bidirectional(self): + # global redis_graph + # query = """MATCH (a), (b {v: 'v2'}) OPTIONAL MATCH (a)-[e]-(b) RETURN a.v, b.v, TYPE(e) ORDER BY a.v, b.v""" + # actual_result = redis_graph.query(query) + # # All nodes are represented, but only edges with (v2) as an endpoint match. 
+ # expected_result = [['v1', 'v2', 'E1'], + # ['v2', 'v2', None], + # ['v3', 'v2', 'E2'], + # ['v3', 'v2', None]] + # self.env.assertEquals(actual_result.result_set, expected_result) + + # Optional MATCH clause with variable-length traversal and some results match. + def test09_optional_variable_length(self): + global redis_graph + query = """MATCH (a) OPTIONAL MATCH (a)-[*]->(b) RETURN a.v, b.v ORDER BY a.v, b.v""" + actual_result = redis_graph.query(query) + expected_result = [['v1', 'v2'], + ['v1', 'v3'], + ['v2', 'v3'], + ['v3', None], + ['v4', None]] + self.env.assertEquals(actual_result.result_set, expected_result) + + # Optional MATCH clause with variable-length traversal and all results match. + def test10_optional_variable_length_all_matches(self): + global redis_graph + query = """MATCH (a {v: 'v1'}) OPTIONAL MATCH (a)-[*]->(b) RETURN a.v, b.v ORDER BY a.v, b.v""" + actual_result = redis_graph.query(query) + expected_result = [['v1', 'v2'], + ['v1', 'v3']] + self.env.assertEquals(actual_result.result_set, expected_result) + + # Optional MATCH clause with a variable-length traversal that has no matches. + def test11_optional_variable_length_no_matches(self): + global redis_graph + query = """MATCH (a {v: 'v3'}) OPTIONAL MATCH (a)-[*]->(b) RETURN a.v, b.v ORDER BY a.v, b.v""" + actual_result = redis_graph.query(query) + expected_result = [['v3', None]] + self.env.assertEquals(actual_result.result_set, expected_result) + + # Multiple interdependent optional MATCH clauses. 
+ def test12_multiple_optional_traversals(self): + global redis_graph + query = """MATCH (a) OPTIONAL MATCH (a)-[]->(b) OPTIONAL MATCH (b)-[]->(c) RETURN a.v, b.v, c.v ORDER BY a.v, b.v, c.v""" + actual_result = redis_graph.query(query) + expected_result = [['v1', 'v2', 'v3'], + ['v2', 'v3', None], + ['v3', None, None], + ['v4', None, None]] + self.env.assertEquals(actual_result.result_set, expected_result) + + # Multiple interdependent optional MATCH clauses with both directed and bidirectional traversals. + def test13_multiple_optional_multi_directional_traversals(self): + global redis_graph + query = """MATCH (a) OPTIONAL MATCH (a)-[]-(b) OPTIONAL MATCH (b)-[]->(c) RETURN a.v, b.v, c.v ORDER BY a.v, b.v, c.v""" + actual_result = redis_graph.query(query) + expected_result = [['v1', 'v2', 'v3'], + ['v2', 'v1', 'v2'], + ['v2', 'v3', None], + ['v3', 'v2', 'v3'], + ['v4', None, None]] + self.env.assertEquals(actual_result.result_set, expected_result) + + # Multiple interdependent optional MATCH clauses with exclusively bidirectional traversals. + def test14_multiple_optional_bidirectional_traversals(self): + global redis_graph + query = """MATCH (a) OPTIONAL MATCH (a)-[]-(b) OPTIONAL MATCH (b)-[]-(c) RETURN a.v, b.v, c.v ORDER BY a.v, b.v, c.v""" + actual_result = redis_graph.query(query) + expected_result = [['v1', 'v2', 'v1'], + ['v1', 'v2', 'v3'], + ['v2', 'v1', 'v2'], + ['v2', 'v3', 'v2'], + ['v3', 'v2', 'v1'], + ['v3', 'v2', 'v3'], + ['v4', None, None]] + self.env.assertEquals(actual_result.result_set, expected_result) + + # Build a named path in an optional clause. + def test15_optional_named_path(self): + global redis_graph + query = """MATCH (a) OPTIONAL MATCH p = (a)-[]->(b) RETURN length(p) ORDER BY length(p)""" + actual_result = redis_graph.query(query) + # 2 nodes have outgoing edges and 2 do not, so expected 2 paths of length 1 and 2 null results. 
+ expected_result = [[1], + [1], + [None], + [None]] + self.env.assertEquals(actual_result.result_set, expected_result) + + # Return a result set with null values in the first record and non-null values in subsequent records. + def test16_optional_null_first_result(self): + global redis_graph + query = """MATCH (a) OPTIONAL MATCH (a)-[e]->(b) RETURN a, b, TYPE(e) ORDER BY EXISTS(b), a.v, b.v""" + actual_result = redis_graph.query(query) + expected_result = [[nodes['v3'], None, None], + [nodes['v4'], None, None], + [nodes['v1'], nodes['v2'], 'E1'], + [nodes['v2'], nodes['v3'], 'E2']] + self.env.assertEquals(actual_result.result_set, expected_result) diff --git a/tests/flow/test_results.py b/tests/flow/test_results.py index 2bc11838bd..c9ea9fae38 100644 --- a/tests/flow/test_results.py +++ b/tests/flow/test_results.py @@ -101,15 +101,11 @@ def test05_distinct_full_entities(self): def test06_return_all(self): query = """MATCH (a)-[e]->(b) RETURN *""" result = graph.query(query) - - # These definitions are duplicates of the non-public ResultSetColumnTypes values in redisgraph-py - COLUMN_SCALAR = 1 - COLUMN_NODE = 2 - COLUMN_RELATION = 3 - # Validate the header strings and value types + # Validate the header strings of the 3 columns. # NOTE - currently, RETURN * populates values in alphabetical order, but that is subject to later change. 
- expected_header = [[COLUMN_NODE, 'a'], [COLUMN_NODE, 'b'], [COLUMN_RELATION, 'e']] - self.env.assertEqual(result.header, expected_header) + self.env.assertEqual(result.header[0][1], 'a') + self.env.assertEqual(result.header[1][1], 'b') + self.env.assertEqual(result.header[2][1], 'e') # Verify that 3 columns are returned self.env.assertEqual(len(result.result_set[0]), 3) diff --git a/tests/flow/test_with_clause.py b/tests/flow/test_with_clause.py index 69490df6c4..ec4b795fc9 100644 --- a/tests/flow/test_with_clause.py +++ b/tests/flow/test_with_clause.py @@ -172,15 +172,13 @@ def test07_projected_graph_entities(self): query = """MATCH (a)-[e]->(b) WITH a, e, b.b_val AS b_val ORDER BY a.a_val LIMIT 2 RETURN *""" actual_result = redis_graph.query(query) - # These definitions are duplicates of the non-public ResultSetColumnTypes values in redisgraph-py - COLUMN_SCALAR = 1 - COLUMN_NODE = 2 - COLUMN_RELATION = 3 - # Validate the header strings and value types + # Validate the header strings of the 3 columns. # NOTE - currently, RETURN * populates values in alphabetical order, but that is subject to later change. - expected_header = [[COLUMN_NODE, 'a'], [COLUMN_SCALAR, 'b_val'], [COLUMN_RELATION, 'e']] - self.env.assertEqual(actual_result.header, expected_header) - # Verify that 2 rows and 3 columns are returned + self.env.assertEqual(actual_result.header[0][1], 'a') + self.env.assertEqual(actual_result.header[1][1], 'b_val') + self.env.assertEqual(actual_result.header[2][1], 'e') + + # Verify that 2 rows and 3 columns are returned. 
self.env.assertEqual(len(actual_result.result_set), 2) self.env.assertEqual(len(actual_result.result_set[0]), 3) diff --git a/tests/tck/features/AggregationAcceptance.feature b/tests/tck/features/AggregationAcceptance.feature index b7656c3ec9..e5c6b7575b 100644 --- a/tests/tck/features/AggregationAcceptance.feature +++ b/tests/tck/features/AggregationAcceptance.feature @@ -111,7 +111,6 @@ Feature: AggregationAcceptance | (:L) | 2 | And no side effects - @skip Scenario: Sort on aggregate function and normal property Given an empty graph And having executed: @@ -208,7 +207,6 @@ Feature: AggregationAcceptance | () | 1.0 | And no side effects - @skip Scenario: Distinct on unbound node Given an empty graph When executing query: diff --git a/tests/tck/features/DeleteAcceptance.feature b/tests/tck/features/DeleteAcceptance.feature index 6b31a953d3..4975ebf5eb 100644 --- a/tests/tck/features/DeleteAcceptance.feature +++ b/tests/tck/features/DeleteAcceptance.feature @@ -195,7 +195,6 @@ Feature: DeleteAcceptance Then the result should be empty And no side effects -@skip Scenario: Delete optionally matched relationship Given an empty graph And having executed: @@ -212,7 +211,6 @@ Feature: DeleteAcceptance And the side effects should be: | -nodes | 1 | -@skip Scenario: Delete on null node Given an empty graph When executing query: @@ -223,7 +221,6 @@ Feature: DeleteAcceptance Then the result should be empty And no side effects -@skip Scenario: Detach delete on null node Given an empty graph When executing query: @@ -234,7 +231,6 @@ Feature: DeleteAcceptance Then the result should be empty And no side effects -@skip Scenario: Delete on null path Given an empty graph When executing query: diff --git a/tests/tck/features/FunctionsAcceptance.feature b/tests/tck/features/FunctionsAcceptance.feature index e47d5949f3..ea10d96701 100644 --- a/tests/tck/features/FunctionsAcceptance.feature +++ b/tests/tck/features/FunctionsAcceptance.feature @@ -47,7 +47,6 @@ Feature: 
FunctionsAcceptance | 'Nobody' | And no side effects - @skip Scenario: Functions should return null if they get path containing unbound Given any graph When executing query: @@ -383,7 +382,6 @@ Feature: FunctionsAcceptance | 'T1' | 'T2' | And no side effects - @skip Scenario: `type()` on null relationship Given an empty graph And having executed: @@ -401,7 +399,6 @@ Feature: FunctionsAcceptance | null | And no side effects - @skip Scenario: `type()` on mixed null and non-null relationships Given an empty graph And having executed: diff --git a/tests/tck/features/MatchAcceptance.feature b/tests/tck/features/MatchAcceptance.feature index 46b97357b8..6f16069d8f 100644 --- a/tests/tck/features/MatchAcceptance.feature +++ b/tests/tck/features/MatchAcceptance.feature @@ -272,7 +272,6 @@ Feature: MatchAcceptance | (:A {num: 1}) | (:B {num: 2}) | And no side effects -@skip Scenario: Return two subgraphs with bound undirected relationship and optional relationship Given an empty graph And having executed: diff --git a/tests/tck/features/MatchAcceptance2.feature b/tests/tck/features/MatchAcceptance2.feature index baa10e8f9b..cca6f2455d 100644 --- a/tests/tck/features/MatchAcceptance2.feature +++ b/tests/tck/features/MatchAcceptance2.feature @@ -306,7 +306,6 @@ Feature: MatchAcceptance2 | ({name: 'e'}) | And no side effects -@skip Scenario: MATCH with OPTIONAL MATCH in longer pattern Given an empty graph And having executed: @@ -326,7 +325,6 @@ Feature: MatchAcceptance2 | ({name: 'C'}) | And no side effects -@skip Scenario: Optionally matching named paths Given an empty graph And having executed: @@ -347,7 +345,6 @@ Feature: MatchAcceptance2 | ({name: 'C'}) | null | And no side effects -@skip Scenario: Optionally matching named paths with single and variable length patterns Given an empty graph And having executed: @@ -427,7 +424,6 @@ Feature: MatchAcceptance2 | (:B {id: 2}) | And no side effects -@skip Scenario: Do not fail when predicates on optionally matched and missed 
nodes are invalid Given an empty graph And having executed: @@ -539,7 +535,6 @@ Feature: MatchAcceptance2 | b | And no side effects -@skip Scenario: Variable length relationship in OPTIONAL MATCH Given an empty graph And having executed: @@ -633,7 +628,6 @@ Feature: MatchAcceptance2 | <(:B)<-[:T]-(:A)> | And no side effects -@skip Scenario: Simple OPTIONAL MATCH on empty graph Given an empty graph When executing query: @@ -646,7 +640,6 @@ Feature: MatchAcceptance2 | null | And no side effects -@skip Scenario: OPTIONAL MATCH with previously bound nodes Given an empty graph And having executed: @@ -664,7 +657,6 @@ Feature: MatchAcceptance2 | () | null | And no side effects -@skip Scenario: `collect()` filtering nulls Given an empty graph And having executed: @@ -927,7 +919,6 @@ Feature: MatchAcceptance2 | [[:T]] | And no side effects -@skip Scenario: Matching from null nodes should return no results owing to finding no matches Given an empty graph When executing query: @@ -941,7 +932,6 @@ Feature: MatchAcceptance2 | b | And no side effects -@skip Scenario: Matching from null nodes should return no results owing to matches being filtered out Given an empty graph And having executed: @@ -959,7 +949,6 @@ Feature: MatchAcceptance2 | b | And no side effects -@skip Scenario: Optionally matching from null nodes should return null Given an empty graph When executing query: @@ -974,7 +963,6 @@ Feature: MatchAcceptance2 | null | And no side effects -@skip Scenario: OPTIONAL MATCH returns null Given an empty graph When executing query: @@ -1102,7 +1090,6 @@ Feature: MatchAcceptance2 | [:T2 {id: 1}] | And no side effects -@skip Scenario: Matching with LIMIT and optionally matching using a relationship that is already bound Given an empty graph And having executed: @@ -1122,7 +1109,6 @@ Feature: MatchAcceptance2 | (:A) | [:T] | (:B) | And no side effects -@skip Scenario: Matching with LIMIT and optionally matching using a relationship and node that are both already bound Given an 
empty graph And having executed: @@ -1142,7 +1128,6 @@ Feature: MatchAcceptance2 | (:A) | [:T] | (:B) | And no side effects -@skip Scenario: Matching with LIMIT, then matching again using a relationship and node that are both already bound along with an additional predicate Given an empty graph And having executed: @@ -1282,7 +1267,6 @@ Feature: MatchAcceptance2 | first | second | And no side effects -@skip Scenario: Matching and optionally matching with bound nodes in reverse direction Given an empty graph And having executed: @@ -1302,7 +1286,6 @@ Feature: MatchAcceptance2 | (:A) | [:T] | null | And no side effects -@skip Scenario: Matching and optionally matching with unbound nodes and equality predicate in reverse direction Given an empty graph And having executed: @@ -1681,7 +1664,6 @@ Feature: MatchAcceptance2 | null | And no side effects -@skip Scenario: Variable length patterns and nulls Given an empty graph And having executed: diff --git a/tests/tck/features/NullAcceptance.feature b/tests/tck/features/NullAcceptance.feature index 278ba2815e..18c36a7ec1 100755 --- a/tests/tck/features/NullAcceptance.feature +++ b/tests/tck/features/NullAcceptance.feature @@ -47,7 +47,6 @@ Feature: NullAcceptance | false | true | And no side effects -@skip Scenario: Property existence check on optional non-null node Given an empty graph And having executed: @@ -78,7 +77,6 @@ Feature: NullAcceptance | null | And no side effects -@skip Scenario: Ignore null when setting property Given an empty graph When executing query: @@ -162,7 +160,6 @@ Feature: NullAcceptance | null | And no side effects -@skip Scenario: Ignore null when deleting node Given an empty graph When executing query: @@ -176,7 +173,6 @@ Feature: NullAcceptance | null | And no side effects -@skip Scenario: Ignore null when deleting relationship Given an empty graph When executing query: diff --git a/tests/tck/features/NullOperator.feature b/tests/tck/features/NullOperator.feature index 9bcf0a663f..220a547061 
100755 --- a/tests/tck/features/NullOperator.feature +++ b/tests/tck/features/NullOperator.feature @@ -64,7 +64,6 @@ Feature: NullOperator | false | true | And no side effects - @skip Scenario: Property null check on optional non-null node Given an empty graph And having executed: @@ -82,7 +81,6 @@ Feature: NullOperator | true | false | And no side effects - @skip Scenario: Property not null check on optional non-null node Given an empty graph And having executed: @@ -100,7 +98,6 @@ Feature: NullOperator | false | true | And no side effects - @skip Scenario: Property null check on null node Given an empty graph When executing query: @@ -113,7 +110,6 @@ Feature: NullOperator | true | And no side effects - @skip Scenario: Property not null check on null node Given an empty graph When executing query: diff --git a/tests/tck/features/OptionalMatch.feature b/tests/tck/features/OptionalMatch.feature index 4b0780ab36..398105a33e 100644 --- a/tests/tck/features/OptionalMatch.feature +++ b/tests/tck/features/OptionalMatch.feature @@ -30,7 +30,6 @@ Feature: OptionalMatch -@skip Scenario: Satisfies the open world assumption, relationships between same nodes Given an empty graph And having executed: @@ -50,7 +49,6 @@ Feature: OptionalMatch | 1 | false | And no side effects -@skip Scenario: Satisfies the open world assumption, single relationship Given an empty graph And having executed: @@ -69,7 +67,6 @@ Feature: OptionalMatch | 1 | true | And no side effects -@skip Scenario: Satisfies the open world assumption, relationships between different nodes Given an empty graph And having executed: diff --git a/tests/tck/features/OptionalMatchAcceptance.feature b/tests/tck/features/OptionalMatchAcceptance.feature index bd843e01d7..17821f0266 100644 --- a/tests/tck/features/OptionalMatchAcceptance.feature +++ b/tests/tck/features/OptionalMatchAcceptance.feature @@ -42,7 +42,6 @@ Feature: OptionalMatchAcceptance (b)-[:LOOP]->(b) """ -@skip Scenario: Return null when no matches due to 
inline label predicate When executing query: """ @@ -69,7 +68,6 @@ Feature: OptionalMatchAcceptance | null | And no side effects -@skip Scenario: Respect predicates on the OPTIONAL MATCH When executing query: """ @@ -96,7 +94,6 @@ Feature: OptionalMatchAcceptance | null | And no side effects -@skip Scenario: MATCH after OPTIONAL MATCH When executing query: """ @@ -111,7 +108,6 @@ Feature: OptionalMatchAcceptance | d | And no side effects -@skip Scenario: WITH after OPTIONAL MATCH When executing query: """ @@ -125,7 +121,6 @@ Feature: OptionalMatchAcceptance | (:A {num: 42}) | (:B {num: 46}) | And no side effects -@skip Scenario: Named paths in optional matches When executing query: """ @@ -138,7 +133,6 @@ Feature: OptionalMatchAcceptance | null | And no side effects -@skip Scenario: OPTIONAL MATCH and bound nodes When executing query: """ @@ -172,7 +166,6 @@ Feature: OptionalMatchAcceptance | (:Y:Z) | And no side effects -@skip Scenario: Named paths inside optional matches with node predicates When executing query: """ @@ -185,7 +178,6 @@ Feature: OptionalMatchAcceptance | null | And no side effects -@skip Scenario: Variable length optional relationships When executing query: """ @@ -201,7 +193,6 @@ Feature: OptionalMatchAcceptance | (:C) | And no side effects -@skip Scenario: Variable length optional relationships with length predicates When executing query: """ @@ -214,7 +205,6 @@ Feature: OptionalMatchAcceptance | null | And no side effects -@skip Scenario: Optionally matching self-loops When executing query: """ @@ -243,7 +233,6 @@ Feature: OptionalMatchAcceptance | null | And no side effects -@skip Scenario: Variable length optional relationships with bound nodes When executing query: """ @@ -256,7 +245,6 @@ Feature: OptionalMatchAcceptance | (:C) | And no side effects -@skip Scenario: Variable length optional relationships with bound nodes, no matches When executing query: """ @@ -269,7 +257,6 @@ Feature: OptionalMatchAcceptance | null | And no side effects 
-@skip Scenario: Longer pattern with bound nodes When executing query: """ @@ -282,7 +269,6 @@ Feature: OptionalMatchAcceptance | (:A {num: 42}) | And no side effects -@skip Scenario: Longer pattern with bound nodes without matches When executing query: """ @@ -295,7 +281,6 @@ Feature: OptionalMatchAcceptance | null | And no side effects -@skip Scenario: Handling correlated optional matches; first does not match implies second does not match When executing query: """ @@ -309,7 +294,6 @@ Feature: OptionalMatchAcceptance | (:C) | null | And no side effects -@skip Scenario: Handling optional matches between optionally matched entities When executing query: """ @@ -325,7 +309,6 @@ Feature: OptionalMatchAcceptance | null | (:B {num: 46}) | null | And no side effects -@skip Scenario: Handling optional matches between nulls When executing query: """ @@ -340,7 +323,6 @@ Feature: OptionalMatchAcceptance | null | null | null | And no side effects -@skip Scenario: OPTIONAL MATCH and `collect()` And having executed: """ @@ -359,7 +341,6 @@ Feature: OptionalMatchAcceptance | [] | [42, 43, 44] | And no side effects -@skip Scenario: OPTIONAL MATCH and WHERE And having executed: """ @@ -382,7 +363,6 @@ Feature: OptionalMatchAcceptance | (:X {val: 6}) | null | And no side effects -@skip Scenario: OPTIONAL MATCH on two relationships and WHERE And having executed: """ @@ -405,7 +385,6 @@ Feature: OptionalMatchAcceptance | (:X {val: 6}) | null | null | And no side effects -@skip Scenario: Two OPTIONAL MATCH clauses and WHERE And having executed: """ diff --git a/tests/tck/features/ReturnAcceptance.feature b/tests/tck/features/ReturnAcceptance.feature index 28df8a1f57..f72ca01819 100644 --- a/tests/tck/features/ReturnAcceptance.feature +++ b/tests/tck/features/ReturnAcceptance.feature @@ -108,7 +108,6 @@ Feature: ReturnAcceptanceTest | ({name: 'E'}) | And no side effects -@skip Scenario: Start the result from the second row by param Given an empty graph And having executed: @@ -160,7 
+159,6 @@ Feature: ReturnAcceptanceTest | ({name: 'D'}) | And no side effects -@skip Scenario: Get rows in the middle by param Given an empty graph And having executed: diff --git a/tests/tck/features/TernaryLogicAcceptance.feature b/tests/tck/features/TernaryLogicAcceptance.feature index 14e2408718..8e12dee680 100755 --- a/tests/tck/features/TernaryLogicAcceptance.feature +++ b/tests/tck/features/TernaryLogicAcceptance.feature @@ -149,7 +149,6 @@ Feature: TernaryLogicAcceptanceTest | null | false | null | | false | null | null | - @skip Scenario Outline: Using null in IN And parameters are: | name | value | diff --git a/tests/tck/features/WithAcceptance.feature b/tests/tck/features/WithAcceptance.feature index e1c718da1d..5d8d837c4c 100644 --- a/tests/tck/features/WithAcceptance.feature +++ b/tests/tck/features/WithAcceptance.feature @@ -286,7 +286,6 @@ Feature: WithAcceptance | (:A) | [:REL] | (:B) | And no side effects - @skip Scenario: Null handling Given an empty graph When executing query: