Address PR comments and fix failing tests
systay committed Sep 6, 2016
1 parent 265ba20 commit 83272c6
Showing 102 changed files with 404 additions and 410 deletions.
@@ -451,7 +451,7 @@ public State discardAll( BoltStateMachine machine )
 * The FAILED state occurs when a recoverable error is encountered.
 * This might be something like a Cypher SyntaxError or
 * ConstraintViolation. To exit the FAILED state, either a RESET
 * or an ACK_FAILURE must be issued. All queries will be IGNORED
 * until this is done.
 */
FAILED
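The comment above pins down a small contract that is easy to lose in the full state machine: in FAILED, only RESET or ACK_FAILURE moves the connection on, and everything else is ignored. A minimal sketch of that contract (illustrative Java only, not the real BoltStateMachine API):

enum BoltSketchState
{
    READY, FAILED;

    // Illustrative: in FAILED, only RESET or ACK_FAILURE recovers; every
    // other message would receive an IGNORED response.
    BoltSketchState onMessage( String message )
    {
        if ( this == FAILED )
        {
            return "RESET".equals( message ) || "ACK_FAILURE".equals( message ) ? READY : FAILED;
        }
        return this;
    }
}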
@@ -271,7 +271,7 @@ else if ( statement.equalsIgnoreCase( ROLLBACK ) )
else if( spi.isPeriodicCommit( statement ) )
{
    throw new QueryExecutionKernelException( new InvalidSemanticsException(
            "Executing queries that use periodic commit in an " +
            "open transaction is not possible." ) );
}
else
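For context, this guard is what a client hits when sending a periodic-commit query while a transaction is already open. A hedged sketch against the 3.x embedded API (the query text and CSV URL are made up for illustration):

import org.neo4j.graphdb.GraphDatabaseService;
import org.neo4j.graphdb.QueryExecutionException;
import org.neo4j.graphdb.Transaction;

class PeriodicCommitInTxSketch
{
    static void demonstrate( GraphDatabaseService db )
    {
        try ( Transaction tx = db.beginTx() )
        {
            // Rejected: periodic commit must manage its own transactions.
            db.execute( "USING PERIODIC COMMIT 500 " +
                        "LOAD CSV FROM 'file:///data.csv' AS line " +
                        "CREATE (:Row {value: line[0]})" );
            tx.success();
        }
        catch ( QueryExecutionException e )
        {
            // "Executing queries that use periodic commit in an open transaction is not possible."
        }
    }
}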
@@ -518,7 +518,7 @@ public void shouldNotSupportUsingPeriodicCommitInTransaction() throws Exception

// Then
assertThat( recorder.nextResponse(), failedWithStatus( Status.Statement.SemanticError ) );
// "Executing queries that use periodic commit in an open transaction is not possible."
}

@Test
@@ -23,8 +23,8 @@
import org.hamcrest.Matcher;
import org.hamcrest.TypeSafeMatcher;

+import java.util.Collections;
import java.util.HashSet;
+import java.util.Optional;
import java.util.Set;

public final class CommonMatchers
@@ -33,65 +33,76 @@ private CommonMatchers()
{
}

-public static <T> Matcher<? super Iterable<T>> itemsMatchingExactlyOneOf(
-        Matcher<? super T> ... expectedMatchers
-)
-{
-    return new TypeSafeMatcher<Iterable<T>>()
-    {
-        @Override
-        protected boolean matchesSafely( Iterable<T> items )
-        {
-            Set<Matcher<? super T>> matchers = uniqueMatchers();
-
-            for ( T item : items ) {
-                Matcher<? super T> matcherFound = null;
-                for ( Matcher<? super T> matcherConsidered : matchers )
-                {
-                    if ( matcherConsidered.matches( item ) )
-                    {
-                        if ( matcherFound == null )
-                        {
-                            matcherFound = matcherConsidered;
-                        }
-                        else
-                        {
-                            return false;
-                        }
-                    }
-                }
-                if ( matcherFound == null )
-                {
-                    return false;
-                }
-                else
-                {
-                    matchers.remove( matcherFound );
-                }
-            }
-
-            return matchers.isEmpty();
-        }
-
-        @Override
-        public void describeTo( Description description )
-        {
-            Set<Matcher<? super T>> matchers = uniqueMatchers();
-
-            description.appendText( "items that each match exactly one of " );
-            description.appendList( "{ ", ", ", " }", matchers );
-            description.appendText( " and exactly as many items as matchers" );
-        }
-
-        private Set<Matcher<? super T>> uniqueMatchers()
-        {
-            Set<Matcher<? super T>> matchers = new HashSet<>();
-            for ( Matcher<? super T> expectedMatcher : expectedMatchers )
-            {
-                matchers.add( expectedMatcher );
-            }
-            return matchers;
-        }
-    };
-}
+/**
+ * Checks that an iterable of T contains items that each match one and only one of the provided matchers,
+ * and that each matcher matches exactly one and only one of the items from the iterable.
+ */
+@SafeVarargs
+public static <T> Matcher<? super Iterable<T>> matchesOneToOneInAnyOrder( Matcher<? super T>... expectedMatchers )
+{
+    return new MatchesOneToOneInAnyOrder<>( expectedMatchers );
+}
+
+private static class MatchesOneToOneInAnyOrder<T> extends TypeSafeMatcher<Iterable<T>>
+{
+    private final Matcher<? super T>[] expectedMatchers;
+
+    @SafeVarargs
+    MatchesOneToOneInAnyOrder( Matcher<? super T>... expectedMatchers )
+    {
+        this.expectedMatchers = expectedMatchers;
+    }
+
+    @Override
+    protected boolean matchesSafely( Iterable<T> items )
+    {
+        Set<Matcher<? super T>> matchers = uniqueMatchers();
+
+        for ( T item : items )
+        {
+            Matcher<? super T> matcherFound = null;
+            for ( Matcher<? super T> matcherConsidered : matchers )
+            {
+                if ( matcherConsidered.matches( item ) )
+                {
+                    if ( matcherFound == null )
+                    {
+                        matcherFound = matcherConsidered;
+                    }
+                    else
+                    {
+                        return false;
+                    }
+                }
+            }
+            if ( matcherFound == null )
+            {
+                return false;
+            }
+            else
+            {
+                matchers.remove( matcherFound );
+            }
+        }
+
+        return matchers.isEmpty();
+    }
+
+    @Override
+    public void describeTo( Description description )
+    {
+        Set<Matcher<? super T>> matchers = uniqueMatchers();
+
+        description.appendText( "items that each match exactly one of " );
+        description.appendList( "{ ", ", ", " }", matchers );
+        description.appendText( " and exactly as many items as matchers" );
+    }
+
+    private Set<Matcher<? super T>> uniqueMatchers()
+    {
+        Set<Matcher<? super T>> matchers = new HashSet<>();
+        Collections.addAll( matchers, expectedMatchers );
+        return matchers;
+    }
+}
}
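A hypothetical usage of the renamed matcher, to make the one-to-one contract concrete (the import for CommonMatchers is elided, since its package is not shown in this excerpt; assumes JUnit 4 and Hamcrest on the classpath):

import org.junit.Test;

import static java.util.Arrays.asList;
import static org.hamcrest.CoreMatchers.equalTo;
import static org.hamcrest.MatcherAssert.assertThat;

public class MatchesOneToOneInAnyOrderUsage
{
    @Test
    public void pairsItemsAndMatchersOneToOne() throws Exception
    {
        // Passes: each item matches exactly one matcher and each matcher
        // claims exactly one item, regardless of order.
        assertThat( asList( "a", "b" ),
                CommonMatchers.matchesOneToOneInAnyOrder( equalTo( "b" ), equalTo( "a" ) ) );

        // Would fail: "a" matches both matchers at once, and "b" matches neither.
        // assertThat( asList( "a", "b" ),
        //         CommonMatchers.matchesOneToOneInAnyOrder( equalTo( "a" ), equalTo( "a" ) ) );
    }
}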
@@ -20,17 +20,18 @@
package org.neo4j.internal.cypher.acceptance

import java.io.{File, PrintWriter}
-import java.net.{URLConnection, URLStreamHandler, URLStreamHandlerFactory, URL}
+import java.net.{URL, URLConnection, URLStreamHandler, URLStreamHandlerFactory}

import org.neo4j.cypher._
import org.neo4j.cypher.internal.ExecutionEngine
import org.neo4j.cypher.internal.compiler.v3_1.test_helpers.CreateTempFileTestSupport
import org.neo4j.cypher.internal.frontend.v3_1.helpers.StringHelper.RichString
import org.neo4j.cypher.javacompat.internal.GraphDatabaseCypherService
+import org.neo4j.graphdb.config.Configuration
import org.neo4j.graphdb.factory.GraphDatabaseSettings
import org.neo4j.graphdb.security.URLAccessRule
import org.neo4j.test.TestGraphDatabaseFactory
import org.scalatest.BeforeAndAfterAll
-import org.neo4j.graphdb.config.Configuration

class LoadCsvAcceptanceTest
  extends ExecutionEngineFunSuite with BeforeAndAfterAll
@@ -421,7 +422,7 @@ class LoadCsvAcceptanceTest
  result.toList should equal(List(Map("field" -> "something")))
}

test("eager queries should be handled correctly") {
  val urls = csvUrls({
    writer =>
      writer.println("id,title,country,year")
@@ -340,7 +340,7 @@ return p""")

// Not TCK material -- indexes

test("should handle queries that can't be index solved because expressions lack dependencies") {
  // given
  val a = createLabeledNode(Map("property" -> 42), "Label")
  val b = createLabeledNode(Map("property" -> 42), "Label")
@@ -358,7 +358,7 @@ return p""")
  result.toList should equal (List(Map("a" -> a, "b" -> b)))
}

test("should handle queries that can't be index solved because expressions lack dependencies with two disjoint patterns") {
  // given
  val a = createLabeledNode(Map("property" -> 42), "Label")
  val b = createLabeledNode(Map("property" -> 42), "Label")
@@ -378,7 +378,7 @@ return p""")
  ))
}

test("should use the index for property existence queries (with exists) for cost when asked for it") {
  // given
  val n = createLabeledNode(Map("email" -> "me@mine"), "User")
  val m = createLabeledNode(Map("email" -> "you@yours"), "User")
@@ -393,7 +393,7 @@ return p""")
  result.executionPlanDescription().toString should include("NodeIndexScan")
}

test("should use the index for property existence queries (with IS NOT NULL) for cost when asked for it") {
  // given
  val n = createLabeledNode(Map("email" -> "me@mine"), "User")
  val m = createLabeledNode(Map("email" -> "you@yours"), "User")
@@ -420,7 +420,7 @@ return p""")
  Seq(n, m, p)
}

test("should use the index for property existence queries when cardinality prefers it") {
  // given
  val nodes = setupIndexScanTest()

@@ -432,7 +432,7 @@ return p""")
  result.executionPlanDescription().toString should include("NodeIndexScan")
}

test("should not use the index for property existence queries when cardinality does not prefer it") {
  // given
  val nodes = setupIndexScanTest()

@@ -444,7 +444,7 @@ return p""")
  result.executionPlanDescription().toString should include("NodeByLabelScan")
}

test("should not use the index for property existence queries when property value predicate exists") {
  // given
  val nodes = setupIndexScanTest()

@@ -457,7 +457,7 @@ return p""")
  result.executionPlanDescription().toString should not include "NodeIndexScan"
}

test("should use the index for property existence queries for rule when asked for it") {
  // given
  val n = createLabeledNode(Map("email" -> "me@mine"), "User")
  val m = createLabeledNode(Map("email" -> "you@yours"), "User")
@@ -472,7 +472,7 @@ return p""")
  result.executionPlanDescription().toString should include("SchemaIndex")
}

test("should not use the index for property existence queries for rule when not asking for it") {
  // given
  val n = createLabeledNode(Map("email" -> "me@mine"), "User")
  val m = createLabeledNode(Map("email" -> "you@yours"), "User")
@@ -354,7 +354,7 @@ class SemanticErrorAcceptanceTest extends ExecutionEngineFunSuite {
  )
}

test("aggregation inside looping queries is not allowed") {

  val mess = "Can't use aggregating expressions inside of expressions executing over lists"
  executeAndEnsureError(
@@ -118,7 +118,7 @@ class SyntaxExceptionAcceptanceTest extends ExecutionEngineFunSuite {
  )
}

test("handles multiline queries") {
  test(
    """start
      a=node(0),
@@ -104,7 +104,7 @@ class UniqueIndexAcceptanceTest extends ExecutionEngineFunSuite with NewPlannerT
  result shouldNot use("NodeUniqueIndexSeek(Locking)")
}

test("should use locking unique index for merge queries") {
  //GIVEN
  createLabeledNode(Map("name" -> "Andres"), "Person")
  graph.createConstraint("Person", "name")
@@ -117,7 +117,7 @@ class UniqueIndexAcceptanceTest extends ExecutionEngineFunSuite with NewPlannerT
  result should use("NodeUniqueIndexSeek(Locking)")
}

test("should use locking unique index for mixed read write queries") {
  //GIVEN
  createLabeledNode(Map("name" -> "Andres"), "Person")
  graph.createConstraint("Person", "name")
@@ -431,7 +431,7 @@ class UsingAcceptanceTest extends ExecutionEngineFunSuite with NewPlannerTestSup
  result.executionPlanDescription() should includeOnlyOneHashJoinOn("c")
}

test(s"$plannerName should be able to use join hints for queries with var length pattern") {
  val a = createLabeledNode(Map("prop" -> "foo"), "Foo")
  val b = createNode()
  val c = createNode()
@@ -30,7 +30,7 @@ import org.neo4j.cypher.internal.frontend.v3_1.{InputPosition, Rewriter, Scope,
// A prepared query captures all information that has been derived so far as part
// of this processing
//
// Currently there are two types of prepared queries:
//
// - PreparedQuerySyntax (can be constructed without db access)
// - PreparedQuerySemantics (construction may require db access for resolving procedure signatures)
@@ -19,7 +19,7 @@
 */
package org.neo4j.cypher.internal.compiler.v3_1

-import org.neo4j.cypher.internal.frontend.v3_1.{Bounds, Bound}
+import org.neo4j.cypher.internal.frontend.v3_1.{Bound, Bounds}

/*
Seek ranges describe intervals. In practice they are used to summarize all inequalities over the
@@ -175,7 +175,7 @@ final case class RangeLessThan[+V](bounds: Bounds[V]) extends HalfOpenSeekRange[
PrefixRange is used to describe intervals on string values for prefix search.
This is practical for two reasons:
- It directly maps on prefix queries of index implementations
- It removes the need to construct a proper upper bound value for an interval that
  would describe the prefix search (which can be difficult due to unicode issues)
*/
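To see why the second bullet matters, here is a throwaway illustration (plain Java, not compiler code): membership of a prefix range can be tested directly, without ever materialising the exclusive upper bound successor(prefix) that a generic interval would need, and which is hard to construct correctly for arbitrary unicode strings.

class PrefixRangeSketch
{
    // Conceptually tests that value lies in [prefix, successor(prefix))
    // without computing successor(prefix); assumes non-null arguments.
    static boolean inPrefixRange( String value, String prefix )
    {
        return value.startsWith( prefix );
    }
}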
@@ -359,8 +359,8 @@ object ClauseConverters {

/*
When encountering a WITH that is not an event horizon, and we have no optional matches in the current QueryGraph,
we simply continue building on the current PlannerQuery. Our ASTRewriters rewrite queries in such a way that
a lot of queries have these WITH clauses.
Handles: ... WITH * [WHERE <predicate>] ...
*/
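As an illustration of the shape this comment describes (made-up queries, written here as Java strings): a WITH * that only filters is folded into the current PlannerQuery, while an aggregating WITH is an event horizon and takes a different path.

class WithShapeExamples
{
    // Not an event horizon: no aggregation, ordering or paging, so planning
    // simply continues on the current PlannerQuery.
    static final String PLAIN_WITH =
            "MATCH (a)-[r]->(b) WITH * WHERE b.age > 30 MATCH (b)-->(c) RETURN c";

    // An event horizon (aggregation): not handled by this fast path.
    static final String HORIZON_WITH =
            "MATCH (a)-[r]->(b) WITH a, count(b) AS degree WHERE degree > 2 RETURN a";
}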
@@ -467,7 +467,7 @@
val nonHints = items.collect { case Left(item) => item }

if (nonHints.nonEmpty) {
  // all other start queries are delegated to the legacy planner
  throw new CantHandleQueryException()
}
@@ -63,7 +63,7 @@ object StatementConverters {
  case _: UnionDistinct => true
}
val plannedQueries: Seq[PlannerQueryBuilder] = queries.reverseMap(x => toPlannerQueryBuilder(x, semanticTable))
// UNION requires all queries to return the same variables
assert(plannedQueries.nonEmpty)
val returns = plannedQueries.head.returns
assert(plannedQueries.forall(_.returns == returns))
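The assertion encodes a user-visible rule: every UNION branch must return the same variables. A made-up illustration (Cypher shown as Java strings):

class UnionReturnExamples
{
    // Accepted: both branches return the single variable `name`.
    static final String ACCEPTED =
            "MATCH (a:Actor) RETURN a.name AS name UNION MATCH (d:Director) RETURN d.name AS name";

    // Rejected during semantic checking: the branches return different variables.
    static final String REJECTED =
            "MATCH (a:Actor) RETURN a.name UNION MATCH (d:Director) RETURN d.title";
}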
@@ -190,7 +190,7 @@ case class PartiallySolvedQuery(returns: Seq[QueryToken[ReturnColumn]],

def containsAggregation: Boolean = aggregation.nonEmpty || tail.exists(_.containsAggregation)

/* This method is used to rewrite the queries from the end of the query line to the beginning of it */
def rewriteFromTheTail(f: PartiallySolvedQuery => PartiallySolvedQuery): PartiallySolvedQuery =
  f(copy(tail = tail.map(_.rewriteFromTheTail(f))))
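For readers less at home in Scala, the same tail-first traversal in sketch form (hypothetical Java, not from the codebase): the tail is rewritten before f sees the node that owns it, so f runs from the end of the query chain back to the beginning.

import java.util.function.UnaryOperator;

class QueryLink
{
    final QueryLink tail; // null marks the end of the chain

    QueryLink( QueryLink tail )
    {
        this.tail = tail;
    }

    QueryLink rewriteFromTheTail( UnaryOperator<QueryLink> f )
    {
        // Recurse into the tail first, then apply f to the node that now
        // holds the rewritten tail.
        QueryLink rewrittenTail = tail == null ? null : tail.rewriteFromTheTail( f );
        return f.apply( new QueryLink( rewrittenTail ) );
    }
}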
