
Upgrade to Elasticsearch 2.3.3

aelesbao committed May 24, 2016
1 parent ee09b98 commit 77e2690c34d43303d26aef1eecfcad2ef92ac0e8
Showing with 254 additions and 211 deletions.
  1. +4 −0 CHANGES.txt
  2. +5 −5 blackbox/docs/sql/analyzer.txt
  3. +5 −0 blackbox/docs/sql/ddl.txt
  4. +1 −1 blob/src/main/java/io/crate/blob/BlobTransferRequest.java
  5. +3 −3 blob/src/main/java/io/crate/blob/pending_transfer/BlobHeadRequestHandler.java
  6. +1 −1 blob/src/main/java/org/elasticsearch/indices/recovery/BlobRecoverySource.java
  7. +6 −6 blob/src/main/java/org/elasticsearch/indices/recovery/BlobRecoveryTarget.java
  8. +1 −1 build.gradle
  9. +3 −3 client/build.gradle
  10. +3 −1 client/src/main/java/io/crate/client/CrateClient.java
  11. +1 −1 core/src/main/java/io/crate/Version.java
  12. +17 −17 es/build.gradle
  13. +1 −1 es/upstream
  14. +4 −2 sql/src/main/java/io/crate/action/sql/TransportBaseSQLAction.java
  15. +4 −3 sql/src/main/java/io/crate/action/sql/TransportSQLAction.java
  16. +4 −2 sql/src/main/java/io/crate/action/sql/TransportSQLBulkAction.java
  17. +49 −25 sql/src/main/java/io/crate/analyze/CreateAnalyzerStatementAnalyzer.java
  18. +1 −1 sql/src/main/java/io/crate/executor/transport/NodeActionRequestHandler.java
  19. +1 −1 sql/src/main/java/io/crate/executor/transport/ShardRequest.java
  20. +4 −2 sql/src/main/java/io/crate/operation/collect/collectors/CrateDocCollector.java
  21. +4 −13 sql/src/main/java/io/crate/operation/collect/collectors/DummyScorer.java
  22. +2 −2 ...src/main/java/org/elasticsearch/action/admin/indices/create/TransportBulkCreateIndicesAction.java
  23. +6 −4 sql/src/main/java/org/elasticsearch/index/mapper/ArrayFieldType.java
  24. +2 −2 sql/src/test/java/io/crate/executor/transport/NodeFetchResponseTest.java
  25. +6 −5 sql/src/test/java/io/crate/executor/transport/RepositoryServiceTest.java
  26. +5 −6 sql/src/test/java/io/crate/executor/transport/TransportShardUpsertActionTest.java
  27. +2 −2 sql/src/test/java/io/crate/jobs/AbstractExecutionSubContextTest.java
  28. +1 −1 sql/src/test/java/io/crate/lucene/LuceneQueryBuilderTest.java
  29. +5 −3 sql/src/test/java/io/crate/metadata/doc/array/ArrayMapperTest.java
  30. +95 −91 sql/src/test/java/io/crate/planner/AbstractPlannerTest.java
  31. +3 −1 sql/src/test/java/io/crate/planner/TableStatsServiceTest.java
  32. +5 −5 sql/src/test/java/org/apache/lucene/index/AssertingLeafReader.java
@@ -5,6 +5,8 @@ Changes for Crate
Unreleased
==========
+ - Upgraded Elasticsearch to 2.3.3
+
- Updated crate-admin to 0.18.0 which contains following changes:
- Cluster check include now clickable links
@@ -37,6 +39,8 @@ Unreleased
- The default number of shards is calculated dynamically upon the table
creation.
+ - Deprecated camelCase names on `CREATE ANALYZER`.
+
- `+HeapDumpOnOutOfMemoryError` is no longer set by default.
It can still be set using the JAVA_OPTS environment variable.
@@ -253,10 +253,10 @@ max_token_length
.. _edgengram-tokenizer:
-edgeNGram
+edge ngram
---------
-``type='edge_ngram'`` or ``type='edgeNGram'``
+``type='edge_ngram'``
This tokenizer is very similar to :ref:`ngram-tokenizer` but only keeps n-grams which start at
the beginning of a token.
@@ -317,7 +317,7 @@ It divides text at non-letters and converts them to lower case.
ngram
-----
-``type='ngram'`` or ``type='nGram'``
+``type='ngram'``
Parameters
..........
@@ -513,7 +513,7 @@ language
ngram
-----
-``type='ngram'`` or ``type='nGram'``
+``type='ngram'``
Parameters
..........
@@ -530,7 +530,7 @@ max_gram
edge ngram
----------
-``type='edgeNGram'`` or ``type='edge_ngram'``
+``type='edge_ngram'``
Parameters
..........
@@ -689,6 +689,11 @@ Tokenizer and token-filters can be customized in the same way.
Altering analyzers is not supported yet.
+.. note::
+
+ Use of camelCase names on custom analyzers, tokenizers, token filters and
+ char filters is deprecated.
+
.. seealso::
:ref:`ref-create-analyzer` for the syntax reference.
@@ -33,7 +33,7 @@
/**
* Base Request Class for Blob Transfers
*/
-public abstract class BlobTransferRequest<T extends ReplicationRequest>
+public abstract class BlobTransferRequest<T extends ReplicationRequest<T>>
extends ReplicationRequest<T>
implements IPutChunkRequest
{
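The tightened bound on BlobTransferRequest mirrors the self-bounded type parameter that Elasticsearch's ReplicationRequest itself uses, so the subtype has to name itself in the bound for the superclass declaration to type-check. Below is a minimal, self-contained sketch of that self-bounded ("curiously recurring") generic pattern; the class names Request, TransferRequest, and ChunkRequest are purely illustrative and do not come from the Crate or Elasticsearch sources.

// Self-bounded generics: the base class refers to the concrete subtype so that
// fluent methods can return that subtype, and every intermediate subclass must
// forward the same bound, just like BlobTransferRequest<T extends ReplicationRequest<T>>.
abstract class Request<T extends Request<T>> {
    private int timeoutMillis;

    @SuppressWarnings("unchecked")
    public T timeout(int millis) {
        this.timeoutMillis = millis;
        return (T) this;          // safe by convention: T is the concrete subtype
    }
}

// An intermediate abstract layer repeats the recursive bound and passes it up.
abstract class TransferRequest<T extends TransferRequest<T>> extends Request<T> {
}

// A concrete leaf closes the recursion by naming itself as the type argument.
class ChunkRequest extends TransferRequest<ChunkRequest> {
}

class SelfBoundedDemo {
    public static void main(String[] args) {
        // timeout(...) returns ChunkRequest rather than the raw base type.
        ChunkRequest request = new ChunkRequest().timeout(500);
        System.out.println(request);
    }
}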
@@ -66,7 +66,7 @@ public void registerHandler() {
transportService.registerRequestHandler(Actions.PUT_BLOB_HEAD_CHUNK, PutBlobHeadChunkRequest.class, ThreadPool.Names.GENERIC, new PutBlobHeadChunkHandler());
}
- private class GetBlobHeadHandler implements TransportRequestHandler<GetBlobHeadRequest> {
+ private class GetBlobHeadHandler extends TransportRequestHandler<GetBlobHeadRequest> {
/**
* this method is called on the recovery source node
* the target is requesting the head of a file it got a PutReplicaChunkRequest for.
@@ -95,7 +95,7 @@ public void messageReceived(final GetBlobHeadRequest request, TransportChannel c
- class PutBlobHeadChunkHandler implements TransportRequestHandler<PutBlobHeadChunkRequest> {
+ class PutBlobHeadChunkHandler extends TransportRequestHandler<PutBlobHeadChunkRequest> {
/**
* called when the target node in a recovery receives a PutBlobHeadChunkRequest
*/
@@ -108,7 +108,7 @@ public void messageReceived(PutBlobHeadChunkRequest request, TransportChannel ch
}
}
- class GetTransferInfoHandler implements TransportRequestHandler<BlobInfoRequest> {
+ class GetTransferInfoHandler extends TransportRequestHandler<BlobInfoRequest> {
@Override
public void messageReceived(BlobInfoRequest request, TransportChannel channel) throws Exception {
final BlobTransferStatus transferStatus = blobTransferTarget.getActiveTransfer(request.transferId);
@@ -142,7 +142,7 @@ private RecoveryResponse recover(final StartRecoveryRequest request) {
}
}
- class StartRecoveryTransportRequestHandler implements TransportRequestHandler<StartRecoveryRequest> {
+ class StartRecoveryTransportRequestHandler extends TransportRequestHandler<StartRecoveryRequest> {
@Override
public void messageReceived(final StartRecoveryRequest request, final TransportChannel channel) throws Exception {
RecoveryResponse response = recover(request);
@@ -102,7 +102,7 @@ public BlobRecoveryTarget(Settings settings, IndicesLifecycle indicesLifecycle,
transportService.registerRequestHandler(Actions.FINALIZE_RECOVERY, BlobFinalizeRecoveryRequest.class, ThreadPool.Names.GENERIC, new FinalizeRecoveryRequestHandler());
}
- class StartRecoveryRequestHandler implements TransportRequestHandler<BlobStartRecoveryRequest> {
+ class StartRecoveryRequestHandler extends TransportRequestHandler<BlobStartRecoveryRequest> {
@Override
public void messageReceived(BlobStartRecoveryRequest request, TransportChannel channel) throws Exception {
@@ -129,7 +129,7 @@ public void messageReceived(BlobStartRecoveryRequest request, TransportChannel c
}
- class TransferChunkRequestHandler implements TransportRequestHandler<BlobRecoveryChunkRequest> {
+ class TransferChunkRequestHandler extends TransportRequestHandler<BlobRecoveryChunkRequest> {
@Override
public void messageReceived(BlobRecoveryChunkRequest request, TransportChannel channel) throws Exception {
@@ -185,7 +185,7 @@ public void messageReceived(BlobRecoveryChunkRequest request, TransportChannel c
}
- class StartPrefixSyncRequestHandler implements TransportRequestHandler<BlobStartPrefixSyncRequest> {
+ class StartPrefixSyncRequestHandler extends TransportRequestHandler<BlobStartPrefixSyncRequest> {
@Override
public void messageReceived(BlobStartPrefixSyncRequest request, TransportChannel channel) throws Exception {
BlobRecoveryStatus status = onGoingRecoveries.get(request.recoveryId());
@@ -204,7 +204,7 @@ public void messageReceived(BlobStartPrefixSyncRequest request, TransportChannel
}
- private class StartTransferRequestHandler implements TransportRequestHandler<BlobRecoveryStartTransferRequest> {
+ private class StartTransferRequestHandler extends TransportRequestHandler<BlobRecoveryStartTransferRequest> {
@Override
public void messageReceived(BlobRecoveryStartTransferRequest request, TransportChannel channel) throws Exception {
BlobRecoveryStatus status = onGoingRecoveries.get(request.recoveryId());
@@ -251,7 +251,7 @@ public void messageReceived(BlobRecoveryStartTransferRequest request, TransportC
}
}
- private class DeleteFileRequestHandler implements TransportRequestHandler<BlobRecoveryDeleteRequest> {
+ private class DeleteFileRequestHandler extends TransportRequestHandler<BlobRecoveryDeleteRequest> {
@Override
public void messageReceived(BlobRecoveryDeleteRequest request, TransportChannel channel) throws Exception {
BlobRecoveryStatus status = onGoingRecoveries.get(request.recoveryId());
@@ -265,7 +265,7 @@ public void messageReceived(BlobRecoveryDeleteRequest request, TransportChannel
}
}
- private class FinalizeRecoveryRequestHandler implements TransportRequestHandler<BlobFinalizeRecoveryRequest> {
+ private class FinalizeRecoveryRequestHandler extends TransportRequestHandler<BlobFinalizeRecoveryRequest> {
@Override
public void messageReceived(BlobFinalizeRecoveryRequest request, TransportChannel channel) throws Exception {
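All of these handler hunks apply the same mechanical change: in the upgraded Elasticsearch, TransportRequestHandler is an abstract class rather than an interface, so each handler now extends it and overrides messageReceived. The sketch below shows the resulting shape under that assumption; the action name "example/action", the ExampleRequest type, and the empty-response acknowledgement are illustrative and not copied from the Crate sources.

import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportChannel;
import org.elasticsearch.transport.TransportRequest;
import org.elasticsearch.transport.TransportRequestHandler;
import org.elasticsearch.transport.TransportResponse;
import org.elasticsearch.transport.TransportService;

public class ExampleHandlerRegistration {

    // Hypothetical request type; a real request would also override the
    // readFrom/writeTo methods to serialize its fields.
    public static class ExampleRequest extends TransportRequest {
    }

    // The handler now extends the abstract TransportRequestHandler
    // (previously it implemented an interface) and overrides messageReceived.
    static class ExampleHandler extends TransportRequestHandler<ExampleRequest> {
        @Override
        public void messageReceived(ExampleRequest request, TransportChannel channel) throws Exception {
            // ... do the work, then acknowledge the request
            channel.sendResponse(TransportResponse.Empty.INSTANCE);
        }
    }

    // Registration follows the calls visible in the hunks above:
    // action name, request class, executor name, handler instance.
    public static void register(TransportService transportService) {
        transportService.registerRequestHandler(
            "example/action", ExampleRequest.class, ThreadPool.Names.GENERIC, new ExampleHandler());
    }
}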
@@ -8,7 +8,7 @@ buildscript {
}
plugins {
- id 'me.champeau.gradle.jmh' version '0.2.0'
+ id 'me.champeau.gradle.jmh' version '0.2.0'
}
@@ -48,9 +48,9 @@ dependencies {
}
// required by ES and Lucene Version classes - keep up to date with es dependencies
// The version module is required by the DiscoveryNode module
- compile 'org.apache.lucene:lucene-core:5.4.1'
- compile 'org.apache.lucene:lucene-analyzers-common:5.4.1'
- compile 'org.apache.lucene:lucene-suggest:5.4.1'
+ compile 'org.apache.lucene:lucene-core:5.5.0'
+ compile 'org.apache.lucene:lucene-analyzers-common:5.5.0'
+ compile 'org.apache.lucene:lucene-suggest:5.5.0'
compile files(depClasses)
testCompile 'io.crate:crate-testing:0.4.1'
@@ -33,6 +33,7 @@
import org.elasticsearch.common.Nullable;
import org.elasticsearch.common.inject.Injector;
import org.elasticsearch.common.inject.ModulesBuilder;
+import org.elasticsearch.common.io.stream.NamedWriteableRegistry;
import org.elasticsearch.common.logging.ESLogger;
import org.elasticsearch.common.logging.Loggers;
import org.elasticsearch.common.settings.Settings;
@@ -86,6 +87,7 @@ public CrateClient(Settings pSettings, String ... servers) throws
this.settings = builder.build();
threadPool = new ThreadPool(this.settings);
+ NamedWriteableRegistry namedWriteableRegistry = new NamedWriteableRegistry();
ModulesBuilder modules = new ModulesBuilder();
modules.add(new CrateClientModule());
@@ -95,7 +97,7 @@ public CrateClient(Settings pSettings, String ... servers) throws
modules.add(new SettingsModule(this.settings));
modules.add(new ClusterNameModule(this.settings));
- modules.add(new TransportModule(this.settings));
+ modules.add(new TransportModule(this.settings, namedWriteableRegistry));
modules.add(new CircuitBreakerModule(this.settings));
Injector injector = modules.createInjector();
@@ -35,7 +35,7 @@
public static final boolean SNAPSHOT = true;
- public static final Version CURRENT = new Version(550099, SNAPSHOT, org.elasticsearch.Version.V_2_2_2);
+ public static final Version CURRENT = new Version(550099, SNAPSHOT, org.elasticsearch.Version.V_2_3_3);
static {
// safe-guard that we don't release a version with DEBUG_MODE set to true
@@ -31,21 +31,21 @@ dependencies {
compile 'log4j:apache-log4j-extras:1.2.17'
compile 'log4j:log4j:1.2.17'
compile 'net.java.dev.jna:jna:4.1.0'
- compile 'org.apache.lucene:lucene-analyzers-common:5.4.1'
- compile 'org.apache.lucene:lucene-backward-codecs:5.4.1'
- compile 'org.apache.lucene:lucene-core:5.4.1'
- compile 'org.apache.lucene:lucene-expressions:5.4.1'
- compile 'org.apache.lucene:lucene-grouping:5.4.1'
- compile 'org.apache.lucene:lucene-highlighter:5.4.1'
- compile 'org.apache.lucene:lucene-join:5.4.1'
- compile 'org.apache.lucene:lucene-memory:5.4.1'
- compile 'org.apache.lucene:lucene-misc:5.4.1'
- compile 'org.apache.lucene:lucene-queries:5.4.1'
- compile 'org.apache.lucene:lucene-queryparser:5.4.1'
- compile 'org.apache.lucene:lucene-sandbox:5.4.1'
- compile 'org.apache.lucene:lucene-spatial3d:5.4.1'
- compile 'org.apache.lucene:lucene-spatial:5.4.1'
- compile 'org.apache.lucene:lucene-suggest:5.4.1'
+ compile 'org.apache.lucene:lucene-analyzers-common:5.5.0'
+ compile 'org.apache.lucene:lucene-backward-codecs:5.5.0'
+ compile 'org.apache.lucene:lucene-core:5.5.0'
+ compile 'org.apache.lucene:lucene-expressions:5.5.0'
+ compile 'org.apache.lucene:lucene-grouping:5.5.0'
+ compile 'org.apache.lucene:lucene-highlighter:5.5.0'
+ compile 'org.apache.lucene:lucene-join:5.5.0'
+ compile 'org.apache.lucene:lucene-memory:5.5.0'
+ compile 'org.apache.lucene:lucene-misc:5.5.0'
+ compile 'org.apache.lucene:lucene-queries:5.5.0'
+ compile 'org.apache.lucene:lucene-queryparser:5.5.0'
+ compile 'org.apache.lucene:lucene-sandbox:5.5.0'
+ compile 'org.apache.lucene:lucene-spatial3d:5.5.0'
+ compile 'org.apache.lucene:lucene-spatial:5.5.0'
+ compile 'org.apache.lucene:lucene-suggest:5.5.0'
compile 'org.codehaus.groovy:groovy-all:2.4.4:indy'
compile 'org.elasticsearch:securesm:1.0'
compile 'org.hdrhistogram:HdrHistogram:2.1.6'
@@ -71,8 +71,8 @@ dependencies {
testCompile 'junit:junit:4.11'
// If the version here is increased the `AssertingLeafReader` class needs to
// be updated as it is a copy from lucene-test-framework
- testCompile 'org.apache.lucene:lucene-test-framework:5.4.1'
- testCompile 'org.apache.lucene:lucene-codecs:5.4.1'
+ testCompile 'org.apache.lucene:lucene-test-framework:5.5.0'
+ testCompile 'org.apache.lucene:lucene-codecs:5.5.0'
}
Submodule upstream updated 1355 files
@@ -70,6 +70,7 @@
import org.elasticsearch.rest.RestStatus;
import org.elasticsearch.snapshots.InvalidSnapshotNameException;
import org.elasticsearch.snapshots.SnapshotMissingException;
+import org.elasticsearch.tasks.TaskManager;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.NodeDisconnectedException;
@@ -118,8 +119,9 @@ protected TransportBaseSQLAction(ClusterService clusterService,
StatsTables statsTables,
ActionFilters actionFilters,
IndexNameExpressionResolver indexNameExpressionResolver,
- TransportKillJobsNodeAction transportKillJobsNodeAction) {
- super(settings, actionName, threadPool, actionFilters, indexNameExpressionResolver);
+ TransportKillJobsNodeAction transportKillJobsNodeAction,
+ TaskManager taskManager) {
+ super(settings, actionName, threadPool, actionFilters, indexNameExpressionResolver, taskManager);
this.clusterService = clusterService;
this.analyzer = analyzer;
this.planner = planner;
@@ -68,8 +68,9 @@ public TransportSQLAction(
IndexNameExpressionResolver indexNameExpressionResolver,
TransportKillJobsNodeAction transportKillJobsNodeAction) {
super(clusterService, settings, SQLAction.NAME, threadPool,
- analyzer, planner, executor, statsTables, actionFilters,
- indexNameExpressionResolver, transportKillJobsNodeAction);
+ analyzer, planner, executor, statsTables, actionFilters,
+ indexNameExpressionResolver, transportKillJobsNodeAction,
+ transportService.getTaskManager());
transportService.registerRequestHandler(SQLAction.NAME, SQLRequest.class, ThreadPool.Names.SAME, new TransportHandler());
}
@@ -129,7 +130,7 @@ protected SQLResponse createResponseFromResult(String[] outputNames,
);
}
- private class TransportHandler implements TransportRequestHandler<SQLRequest> {
+ private class TransportHandler extends TransportRequestHandler<SQLRequest> {
@Override
public void messageReceived(SQLRequest request, final TransportChannel channel) throws Exception {
ActionListener<SQLResponse> listener = ActionListeners.forwardTo(channel);
@@ -38,6 +38,7 @@
import org.elasticsearch.common.inject.Inject;
import org.elasticsearch.common.inject.Provider;
import org.elasticsearch.common.settings.Settings;
+import org.elasticsearch.tasks.TaskManager;
import org.elasticsearch.threadpool.ThreadPool;
import org.elasticsearch.transport.TransportChannel;
import org.elasticsearch.transport.TransportRequestHandler;
@@ -61,7 +62,8 @@ public TransportSQLBulkAction(ClusterService clusterService,
IndexNameExpressionResolver indexNameExpressionResolver,
TransportKillJobsNodeAction transportKillJobsNodeAction) {
super(clusterService, settings, SQLBulkAction.NAME, threadPool, analyzer,
- planner, executor, statsTables, actionFilters, indexNameExpressionResolver, transportKillJobsNodeAction);
+ planner, executor, statsTables, actionFilters, indexNameExpressionResolver, transportKillJobsNodeAction,
+ transportService.getTaskManager());
transportService.registerRequestHandler(SQLBulkAction.NAME, SQLBulkRequest.class, ThreadPool.Names.SAME, new TransportHandler());
}
@@ -100,7 +102,7 @@ protected SQLBulkResponse createResponseFromResult(String[] outputNames,
outputNames, results, duration, dataTypes, request.includeTypesOnResponse());
}
- private class TransportHandler implements TransportRequestHandler<SQLBulkRequest> {
+ private class TransportHandler extends TransportRequestHandler<SQLBulkRequest> {
@Override
public void messageReceived(SQLBulkRequest request, final TransportChannel channel) throws Exception {
ActionListener<SQLBulkResponse> listener = ActionListeners.forwardTo(channel);