Skip to content

Commit

Permalink
Adapt periodic commit in REST transactional endpoint
Browse files Browse the repository at this point in the history
  • Loading branch information
fickludd committed Aug 21, 2018
1 parent 2424df8 commit ad4b3f4
Show file tree
Hide file tree
Showing 2 changed files with 13 additions and 9 deletions.
Expand Up @@ -320,9 +320,7 @@ private void executeStatements( StatementDeserializer statements, ExecutionResul
hasPrevious = true; hasPrevious = true;
TransactionalContext tc = txManagerFacade.create( request, queryService, type, loginContext, TransactionalContext tc = txManagerFacade.create( request, queryService, type, loginContext,
statement.statement(), statement.parameters() ); statement.statement(), statement.parameters() );
Result result = safelyExecute( statement, hasPeriodicCommit, tc ); safelyExecute( statement, hasPeriodicCommit, tc, output );
output.statementResult( result, statement.includeStats(), statement.resultDataContents() );
output.notifications( result.getNotifications() );
} }
catch ( KernelException | CypherException | AuthorizationViolationException | catch ( KernelException | CypherException | AuthorizationViolationException |
WriteOperationsNotAllowedException e ) WriteOperationsNotAllowedException e )
Expand Down Expand Up @@ -363,12 +361,16 @@ private void executeStatements( StatementDeserializer statements, ExecutionResul
} }
} }


private Result safelyExecute( Statement statement, boolean hasPeriodicCommit, TransactionalContext tc ) private void safelyExecute( Statement statement,
throws QueryExecutionKernelException boolean hasPeriodicCommit,
TransactionalContext tc,
ExecutionResultSerializer output ) throws QueryExecutionKernelException, IOException
{ {
try try
{ {
return engine.executeQuery( statement.statement(), ValueUtils.asMapValue( statement.parameters() ), tc ); Result result = engine.executeQuery( statement.statement(), ValueUtils.asMapValue( statement.parameters() ), tc );
output.statementResult( result, statement.includeStats(), statement.resultDataContents() );
output.notifications( result.getNotifications() );
} }
finally finally
{ {
Expand Down
Expand Up @@ -315,7 +315,7 @@ public void begin_and_execute_periodic_commit_and_commit() throws Exception
public void begin_and_execute_periodic_commit_that_returns_data_and_commit() throws Exception public void begin_and_execute_periodic_commit_that_returns_data_and_commit() throws Exception
{ {
int nodes = 11; int nodes = 11;
int batch = 2; int batchSize = 2;
ServerTestUtils.withCSVFile( nodes, url -> ServerTestUtils.withCSVFile( nodes, url ->
{ {
long nodesInDatabaseBeforeTransaction = countNodes(); long nodesInDatabaseBeforeTransaction = countNodes();
Expand All @@ -324,7 +324,7 @@ public void begin_and_execute_periodic_commit_that_returns_data_and_commit() thr
// begin and execute and commit // begin and execute and commit
Response response = http.POST( Response response = http.POST(
"db/data/transaction/commit", "db/data/transaction/commit",
quotedJson( "{ 'statements': [ { 'statement': 'USING PERIODIC COMMIT " + batch + " LOAD CSV FROM " + quotedJson( "{ 'statements': [ { 'statement': 'USING PERIODIC COMMIT " + batchSize + " LOAD CSV FROM " +
"\\\"" + url + "\\\" AS line CREATE (n {id: 23}) RETURN n' } ] }" ) "\\\"" + url + "\\\" AS line CREATE (n {id: 23}) RETURN n' } ] }" )
); );
long txIdAfter = resolveDependency( TransactionIdStore.class ).getLastClosedTransactionId(); long txIdAfter = resolveDependency( TransactionIdStore.class ).getLastClosedTransactionId();
Expand All @@ -336,7 +336,9 @@ public void begin_and_execute_periodic_commit_that_returns_data_and_commit() thr
JsonNode columns = response.get( "results" ).get( 0 ).get( "columns" ); JsonNode columns = response.get( "results" ).get( 0 ).get( "columns" );
assertThat( columns.toString(), equalTo( "[\"n\"]" ) ); assertThat( columns.toString(), equalTo( "[\"n\"]" ) );
assertThat( countNodes(), equalTo( nodesInDatabaseBeforeTransaction + nodes ) ); assertThat( countNodes(), equalTo( nodesInDatabaseBeforeTransaction + nodes ) );
assertThat( txIdAfter, equalTo( txIdBefore + ((nodes / batch) + 1) ) ); long nBatches = (nodes / batchSize) + 1;
long expectedTxCount = nBatches + 1; // extra tx that creates the property key token `id`
assertThat( txIdAfter - txIdBefore, equalTo( expectedTxCount ) );
} ); } );
} }


Expand Down

0 comments on commit ad4b3f4

Please sign in to comment.