diff --git a/README.textile b/README.textile
index 9d8fba6b45d8b..a657b59a392fe 100644
--- a/README.textile
+++ b/README.textile
@@ -86,7 +86,7 @@ We can also use the JSON query language Elasticsearch provides instead of a quer
curl -XGET 'http://localhost:9200/twitter/tweet/_search?pretty=true' -d '
{
    "query" : {
-        "text" : { "user": "kimchy" }
+        "match" : { "user": "kimchy" }
    }
}'
@@ -206,6 +206,10 @@ The distribution will be created under @target/releases@.
See the "TESTING":TESTING.asciidoc file for more information about running the Elasticsearch test suite.
+h3. Upgrading to Elasticsearch 1.x?
+
+In order to ensure a smooth upgrade process from earlier versions of Elasticsearch (< 1.0.0), it is recommended to perform a full cluster restart. Please see the "Upgrading" section of the "setup reference":http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/setup.html.
+
h1. License
diff --git a/TESTING.asciidoc b/TESTING.asciidoc
index 282c7e080363a..bf66dcfe0f22c 100644
--- a/TESTING.asciidoc
+++ b/TESTING.asciidoc
@@ -186,22 +186,18 @@ mvn test -Dtests.class=org.elasticsearch.test.rest.ElasticsearchRestTests
`ElasticsearchRestTests` is the executable test class that runs all the
yaml suites available within the `rest-api-spec` folder.
-The following are the options supported by the REST tests runner:
+The REST tests support all the options provided by the randomized runner, plus the following:
-* `tests.rest[true|false|host:port]`: determines whether the REST tests need
-to be run and if so whether to rely on an external cluster (providing host
-and port) or fire a test cluster (default). It's possible to provide a
-comma separated list of addresses to send requests in a round-robin fashion.
+* `tests.rest[true|false]`: determines whether the REST tests are run (the default) or skipped.
* `tests.rest.suite`: comma separated paths of the test suites to be run
(by default loaded from /rest-api-spec/test). It is possible to run only a subset
of the tests providing a sub-folder or even a single yaml file (the default
/rest-api-spec/test prefix is optional when files are loaded from classpath)
e.g. -Dtests.rest.suite=index,get,create/10_with_id
-* `tests.rest.section`: regex that allows to filter the test sections that
-are going to be run. If provided, only the section names that match (case
-insensitive) against it will be executed
* `tests.rest.spec`: REST spec path (default /rest-api-spec/api)
-* `tests.iters`: runs multiple iterations
-* `tests.seed`: seed to base the random behaviours on
-* `tests.appendseed[true|false]`: enables adding the seed to each test
-section's description (default false)
+
+Note that the REST tests, like all the integration tests, can be run against an external
+cluster by specifying the `tests.cluster` property, which if present needs to contain a
+comma separated list of nodes to connect to (e.g. localhost:9300). A transport client will
+be created based on that and used for all the before|after test operations, and to extract
+the http addresses of the nodes so that REST requests can be sent to them.
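As an illustrative example (not part of the patch itself), running the REST tests against an external node listening on the default transport port might look like:

[source,sh]
--------------------------------------------------
mvn test -Dtests.class=org.elasticsearch.test.rest.ElasticsearchRestTests -Dtests.cluster=localhost:9300
--------------------------------------------------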
diff --git a/dev-tools/build_release.py b/dev-tools/build_release.py
index 5eabf08c55652..97f895488d64b 100644
--- a/dev-tools/build_release.py
+++ b/dev-tools/build_release.py
@@ -388,7 +388,7 @@ def smoke_test_release(release, files, expected_hash, plugins):
if version['build_hash'].strip() != expected_hash:
raise RuntimeError('HEAD hash does not match expected [%s] but got [%s]' % (expected_hash, version['build_hash']))
print(' Running REST Spec tests against package [%s]' % release_file)
- run_mvn('test -Dtests.rest=%s -Dtests.class=*.*RestTests' % ("127.0.0.1:9200"))
+ run_mvn('test -Dtests.cluster=%s -Dtests.class=*.*RestTests' % ("127.0.0.1:9300"))
print(' Verify if plugins are listed in _nodes')
conn.request('GET', '/_nodes?plugin=true&pretty=true')
res = conn.getresponse()
diff --git a/docs/community/clients.asciidoc b/docs/community/clients.asciidoc
index 307f08228a3c9..d9427bd64b2f7 100644
--- a/docs/community/clients.asciidoc
+++ b/docs/community/clients.asciidoc
@@ -39,15 +39,15 @@ See the {client}/ruby-api/current/index.html[official Elasticsearch Ruby client]
* http://github.com/karmi/tire[Tire]:
Ruby API & DSL, with ActiveRecord/ActiveModel integration.
-* http://github.com/grantr/rubberband[rubberband]:
- Ruby client.
-
* https://github.com/PoseBiz/stretcher[stretcher]:
Ruby client.
* https://github.com/wireframe/elastic_searchable/[elastic_searchable]:
Ruby client + Rails integration.
+* https://github.com/ddnexus/flex[Flex]:
+  Ruby client.
+
[[community-php]]
=== PHP
@@ -62,6 +62,8 @@ See the {client}/php-api/current/index.html[official Elasticsearch PHP client].
* http://github.com/polyfractal/Sherlock[Sherlock]:
PHP client, one-to-one mapping with query DSL, fluid interface.
+* https://github.com/nervetattoo/elasticsearch[elasticsearch]:
+  PHP 5.3 client.
[[community-java]]
=== Java
@@ -184,3 +186,7 @@ See the {client}/javascript-api/current/index.html[official Elasticsearch JavaSc
* https://github.com/jasonfill/ColdFusion-ElasticSearch-Client[ColdFusion-Elasticsearch-Client]
Cold Fusion client for Elasticsearch
+[[community-nodejs]]
+=== NodeJS
+* https://github.com/phillro/node-elasticsearch-client[Node-Elasticsearch-Client]
+  A Node.js client for Elasticsearch
diff --git a/docs/community/misc.asciidoc b/docs/community/misc.asciidoc
index 62c8938c1409c..7ba49c6fdca3d 100644
--- a/docs/community/misc.asciidoc
+++ b/docs/community/misc.asciidoc
@@ -1,15 +1,12 @@
[[misc]]
== Misc
-* https://github.com/electrical/puppet-elasticsearch[Puppet]:
+* https://github.com/elasticsearch/puppet-elasticsearch[Puppet]:
Elasticsearch puppet module.
* http://github.com/elasticsearch/cookbook-elasticsearch[Chef]:
Chef cookbook for Elasticsearch
-* https://github.com/tavisto/elasticsearch-rpms[elasticsearch-rpms]:
- RPMs for elasticsearch.
-
* http://www.github.com/neogenix/daikon[daikon]:
Daikon Elasticsearch CLI
diff --git a/docs/reference/cluster/nodes-info.asciidoc b/docs/reference/cluster/nodes-info.asciidoc
index e6550ea740b96..61d8c1a5a6a1f 100644
--- a/docs/reference/cluster/nodes-info.asciidoc
+++ b/docs/reference/cluster/nodes-info.asciidoc
@@ -40,7 +40,6 @@ plugins per node:
* `site`: `true` if the plugin is a site plugin
* `jvm`: `true` if the plugin is a plugin running in the JVM
* `url`: URL if the plugin is a site plugin
-* `isolation`: whether the plugin is loaded in isolation (`true`) or not (`false`)
The result will look similar to:
diff --git a/docs/reference/cluster/update-settings.asciidoc b/docs/reference/cluster/update-settings.asciidoc
index b8b8657b12fe3..a60b9049e8b8b 100644
--- a/docs/reference/cluster/update-settings.asciidoc
+++ b/docs/reference/cluster/update-settings.asciidoc
@@ -65,22 +65,32 @@ There is a specific list of settings that can be updated, those include:
[float]
===== Balanced Shards
+All these values are relative to one another. The first three are used to
+compose three separate weighting functions into one. The cluster is balanced
+when no allowed action can bring the weights of each node closer together by
+more than the fourth setting. Actions might not be allowed, for instance,
+due to forced awareness or allocation filtering.
`cluster.routing.allocation.balance.shard`::
- Defines the weight factor for shards allocated on a node
- (float). Defaults to `0.45f`.
+ Defines the weight factor for shards allocated on a node
+ (float). Defaults to `0.45f`. Raising this raises the tendency to
+ equalize the number of shards across all nodes in the cluster.
`cluster.routing.allocation.balance.index`::
- Defines a factor to the number of shards per index allocated
- on a specific node (float). Defaults to `0.5f`.
+ Defines a factor to the number of shards per index allocated
+ on a specific node (float). Defaults to `0.5f`. Raising this raises the
+ tendency to equalize the number of shards per index across all nodes in
+ the cluster.
`cluster.routing.allocation.balance.primary`::
- defines a weight factor for the number of primaries of a specific index
- allocated on a node (float). `0.05f`.
+ Defines a weight factor for the number of primaries of a specific index
+     allocated on a node (float). Defaults to `0.05f`. Raising this raises the tendency
+ to equalize the number of primary shards across all nodes in the cluster.
`cluster.routing.allocation.balance.threshold`::
- minimal optimization value of operations that should be performed (non
- negative float). Defaults to `1.0f`.
+ Minimal optimization value of operations that should be performed (non
+ negative float). Defaults to `1.0f`. Raising this will cause the cluster
+ to be less aggressive about optimizing the shard balance.
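As an illustration (the value shown is arbitrary), any of these factors can be changed at runtime through the cluster update settings API described in this section:

[source,sh]
--------------------------------------------------
curl -XPUT localhost:9200/_cluster/settings -d '{
    "transient" : {
        "cluster.routing.allocation.balance.threshold" : 1.5
    }
}'
--------------------------------------------------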
[float]
===== Concurrent Rebalance
diff --git a/docs/reference/index-modules/fielddata.asciidoc b/docs/reference/index-modules/fielddata.asciidoc
index c958dcbc037e2..57afa043fa558 100644
--- a/docs/reference/index-modules/fielddata.asciidoc
+++ b/docs/reference/index-modules/fielddata.asciidoc
@@ -124,6 +124,41 @@ field data format.
`doc_values`::
Computes and stores field data data-structures on disk at indexing time.
+[float]
+==== Global ordinals
+
+coming[1.2.0]
+
+Global ordinals is a data-structure on top of field data that maintains an
+incremental numbering for all the terms in field data in lexicographic order.
+Each term has a unique number, and the number of term 'A' is lower than the
+number of term 'B' if 'A' sorts before 'B'. Global ordinals are only supported on string fields.
+
+Field data on string fields also has ordinals, which is a unique numbering for
+all terms in a particular segment and field. Global ordinals build on top of
+this by providing a mapping between the segment ordinals and the global
+ordinals, the latter being unique across the entire shard.
+
+Global ordinals can improve the execution time of search features that already
+use segment ordinals, such as the terms aggregator. Often these search features
+need to merge per-segment ordinal results into a cross-segment terms result.
+With global ordinals this mapping happens at field data load time instead of
+during each query execution. Search features then only need to resolve the
+actual term when building the (shard) response; during execution itself the
+actual terms are not needed, as the unique numbering that global ordinals
+provide is sufficient, which improves the execution time.
+
+Global ordinals for a specified field are tied to all the segments of a shard
+(Lucene index), whereas field data for a specific field is tied to a single
+segment. For this reason global ordinals need to be rebuilt in their entirety
+once new segments become visible. This one-time cost would be incurred anyway
+without global ordinals, but then it would be incurred on each search execution instead!
+
+The loading time of global ordinals depends on the number of terms in a field, but in general
+it is low, since the source field data has already been loaded. The memory overhead of global
+ordinals is small because they are very efficiently compressed. Eager loading of global
+ordinals can move the loading time from the first search request to the refresh itself.
+
[float]
=== Fielddata loading
@@ -147,6 +182,23 @@ It is possible to force field data to be loaded and cached eagerly through the
}
--------------------------------------------------
+Global ordinals can also be eagerly loaded:
+
+[source,js]
+--------------------------------------------------
+{
+ category: {
+ type: "string",
+ fielddata: {
+ loading: "eager_global_ordinals"
+ }
+ }
+}
+--------------------------------------------------
+
+With the above setting both field data and global ordinals for a specific field
+are eagerly loaded.
+
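For illustration only (the index and type names below are hypothetical; the `category` field is taken from the example above), such a mapping could be applied to an existing index with the put mapping API:

[source,sh]
--------------------------------------------------
curl -XPUT 'localhost:9200/my_index/_mapping/my_type' -d '{
    "my_type" : {
        "properties" : {
            "category" : {
                "type" : "string",
                "fielddata" : {
                    "loading" : "eager_global_ordinals"
                }
            }
        }
    }
}'
--------------------------------------------------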
[float]
==== Disabling field data loading
diff --git a/docs/reference/index-modules/similarity.asciidoc b/docs/reference/index-modules/similarity.asciidoc
index ae9f368f3f143..0c95709a860d1 100644
--- a/docs/reference/index-modules/similarity.asciidoc
+++ b/docs/reference/index-modules/similarity.asciidoc
@@ -121,6 +121,31 @@ based model] . This similarity has the following options:
Type name: `IB`
+[float]
+[[lm_dirichlet]]
+==== LM Dirichlet similarity
+
+http://lucene.apache.org/core/4_7_1/core/org/apache/lucene/search/similarities/LMDirichletSimilarity.html[LM
+Dirichlet similarity]. This similarity has the following options:
+
+[horizontal]
+`mu`:: Defaults to `2000`.
+
+Type name: `LMDirichlet`
+
+[float]
+[[lm_jelinek_mercer]]
+==== LM Jelinek Mercer similarity
+
+http://lucene.apache.org/core/4_7_1/core/org/apache/lucene/search/similarities/LMJelinekMercerSimilarity.html[LM
+Jelinek Mercer similarity]. This similarity has the following options:
+
+[horizontal]
+`lambda`:: The optimal value depends on both the collection and the query. The optimal value is around `0.1`
+for title queries and `0.7` for long queries. Defaults to `0.1`.
+
+Type name: `LMJelinekMercer`
+
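As a sketch (the index, similarity and field names here are hypothetical), a similarity of either type can be registered in the index settings and referenced from a field mapping:

[source,sh]
--------------------------------------------------
curl -XPUT 'localhost:9200/my_index' -d '{
    "settings" : {
        "similarity" : {
            "my_dirichlet" : {
                "type" : "LMDirichlet",
                "mu" : 1000
            }
        }
    },
    "mappings" : {
        "doc" : {
            "properties" : {
                "title" : { "type" : "string", "similarity" : "my_dirichlet" }
            }
        }
    }
}'
--------------------------------------------------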
[float]
[[default-base]]
==== Default and Base Similarities
diff --git a/docs/reference/mapping/types/core-types.asciidoc b/docs/reference/mapping/types/core-types.asciidoc
index 98f16536adcf7..5b3d5b70b79ce 100644
--- a/docs/reference/mapping/types/core-types.asciidoc
+++ b/docs/reference/mapping/types/core-types.asciidoc
@@ -446,6 +446,7 @@ Defaults to the property/field name.
|`store` |Set to `true` to store actual field in the index, `false` to not
store it. Defaults to `false` (note, the JSON document itself is stored,
and it can be retrieved from it).
+|`doc_values` |Set to `true` to store field values in a column-stride fashion.
|=======================================================================
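For illustration (the index, type and field names are hypothetical; note that `doc_values` applies to `not_analyzed` string fields and to numeric or date fields), a mapping using this attribute might look like:

[source,sh]
--------------------------------------------------
curl -XPUT 'localhost:9200/my_index' -d '{
    "mappings" : {
        "my_type" : {
            "properties" : {
                "category" : {
                    "type" : "string",
                    "index" : "not_analyzed",
                    "doc_values" : true
                }
            }
        }
    }
}'
--------------------------------------------------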
[float]
diff --git a/docs/reference/modules/advanced-scripting.asciidoc b/docs/reference/modules/advanced-scripting.asciidoc
index d215661ae3e0e..13adef44c33bd 100644
--- a/docs/reference/modules/advanced-scripting.asciidoc
+++ b/docs/reference/modules/advanced-scripting.asciidoc
@@ -177,7 +177,7 @@ return score;
=== Term vectors:
The `_index` variable can only be used to gather statistics for single terms. If you want to use information on all terms in a field, you must store the term vectors (set `term_vector` in the mapping as described in the <>). To access them, call
-`_index.getTermVectors()` to get a
+`_index.termVectors()` to get a
https://lucene.apache.org/core/4_0_0/core/org/apache/lucene/index/Fields.html[Fields]
instance. This object can then be used as described in https://lucene.apache.org/core/4_0_0/core/org/apache/lucene/index/Fields.html[lucene doc] to iterate over fields and then for each field iterate over each term in the field.
The method will return null if the term vectors were not stored.
diff --git a/docs/reference/modules/plugins.asciidoc b/docs/reference/modules/plugins.asciidoc
index 029cc627a90f9..4f3f676424721 100644
--- a/docs/reference/modules/plugins.asciidoc
+++ b/docs/reference/modules/plugins.asciidoc
@@ -142,20 +142,6 @@ bin/plugin --install mobz/elasticsearch-head --timeout 1m
bin/plugin --install mobz/elasticsearch-head --timeout 0
-----------------------------------
-added[1.1.0]
-[float]
-==== Plugins isolation
-
-Since Elasticsearch 1.1, by default, each plugin is loaded in _isolation_ (in its dedicated `ClassLoader`) to avoid class clashes between the various plugins and their associated libraries. The default can be changed through the `plugins.isolation` property in `elasticsearch.yml`, by setting it to `false`:
-
-[source,js]
---------------------------------------------------
-plugins.isolation: false
---------------------------------------------------
-
-Do note that each plugin can specify its _mandatory_ isolation through the `isolation` property in its `es-plugin.properties` configuration. In this (rare) case, the plugin setting is used, overwriting whatever default used by Elasticsearch.
-
-
[float]
[[known-plugins]]
=== Known Plugins
diff --git a/docs/reference/search/aggregations/bucket/global-aggregation.asciidoc b/docs/reference/search/aggregations/bucket/global-aggregation.asciidoc
index 037a9b343130b..da6ddc1af4edc 100644
--- a/docs/reference/search/aggregations/bucket/global-aggregation.asciidoc
+++ b/docs/reference/search/aggregations/bucket/global-aggregation.asciidoc
@@ -28,7 +28,7 @@ Example:
<1> The `global` aggregation has an empty body
<2> The sub-aggregations that are registered for this `global` aggregation
-The above aggregation demonstrates how one would compute aggregations (`avg_price` in this example) on all the documents in the search context, regardless of the query (in our example, it will compute the the average price over all products in our catalog, not just on the "shirts").
+The above aggregation demonstrates how one would compute aggregations (`avg_price` in this example) on all the documents in the search context, regardless of the query (in our example, it will compute the average price over all products in our catalog, not just on the "shirts").
The response for the above aggregation:
@@ -48,4 +48,4 @@ The response for the above aggregation:
}
--------------------------------------------------
-<1> The number of documents that were aggregated (in our case, all documents within the search context)
\ No newline at end of file
+<1> The number of documents that were aggregated (in our case, all documents within the search context)
diff --git a/docs/reference/search/aggregations/bucket/terms-aggregation.asciidoc b/docs/reference/search/aggregations/bucket/terms-aggregation.asciidoc
index 09583e290c180..cea5409d8dde9 100644
--- a/docs/reference/search/aggregations/bucket/terms-aggregation.asciidoc
+++ b/docs/reference/search/aggregations/bucket/terms-aggregation.asciidoc
@@ -310,12 +310,15 @@ http://docs.oracle.com/javase/7/docs/api/java/util/regex/Pattern.html#UNIX_LINES
==== Execution hint
-There are two mechanisms by which terms aggregations can be executed: either by using field values directly in order to aggregate
-data per-bucket (`map`), or by using ordinals of the field values instead of the values themselves (`ordinals`). Although the
-latter execution mode can be expected to be slightly faster, it is only available for use when the underlying data source exposes
-those terms ordinals. Moreover, it may actually be slower if most field values are unique. Elasticsearch tries to have sensible
-defaults when it comes to the execution mode that should be used, but in case you know that one execution mode may perform better
-than the other one, you have the ability to "hint" it to Elasticsearch:
+coming[1.2.0] The `global_ordinals` execution mode
+
+There are three mechanisms by which terms aggregations can be executed: either by using field values directly in order to aggregate
+data per-bucket (`map`), by using ordinals of the field values instead of the values themselves (`ordinals`), or by using global
+ordinals of the field (`global_ordinals`). The latter is faster, especially for fields with many unique
+values. However, it can be slower if only a few documents match, for example when a terms aggregator is nested in another
+aggregator; this applies to both the `ordinals` and `global_ordinals` execution modes. Elasticsearch tries to have sensible
+defaults when it comes to the execution mode that should be used, but in case you know that one execution mode may
+perform better than the other one, you have the ability to "hint" it to Elasticsearch:
[source,js]
--------------------------------------------------
@@ -331,6 +334,6 @@ than the other one, you have the ability to "hint" it to Elasticsearch:
}
--------------------------------------------------
-<1> the possible values are `map` and `ordinals`
+<1> the possible values are `map`, `ordinals` and `global_ordinals`
Please note that Elasticsearch will ignore this execution hint if it is not applicable.
diff --git a/docs/reference/search/request/search-type.asciidoc b/docs/reference/search/request/search-type.asciidoc
index 6f4c7bae79e43..50afc2b593bec 100644
--- a/docs/reference/search/request/search-type.asciidoc
+++ b/docs/reference/search/request/search-type.asciidoc
@@ -109,7 +109,7 @@ curl -XGET 'localhost:9200/_search?search_type=scan&scroll=10m&size=50' -d '
'
--------------------------------------------------
-The `scroll` parameter control the keep alive time of the scrolling
+The `scroll` parameter controls the keep alive time of the scrolling
request and initiates the scrolling process. The timeout applies per
round trip (i.e. between the previous scan scroll request, to the next).
diff --git a/docs/reference/setup.asciidoc b/docs/reference/setup.asciidoc
index cf413cd918a67..12324483f89c2 100644
--- a/docs/reference/setup.asciidoc
+++ b/docs/reference/setup.asciidoc
@@ -61,3 +61,5 @@ include::setup/as-a-service-win.asciidoc[]
include::setup/dir-layout.asciidoc[]
include::setup/repositories.asciidoc[]
+
+include::setup/upgrade.asciidoc[]
diff --git a/docs/reference/setup/upgrade.asciidoc b/docs/reference/setup/upgrade.asciidoc
new file mode 100644
index 0000000000000..182d605acd563
--- /dev/null
+++ b/docs/reference/setup/upgrade.asciidoc
@@ -0,0 +1,175 @@
+[[setup-upgrade]]
+== Upgrading
+
+Elasticsearch can usually be upgraded using a rolling upgrade process, resulting in no interruption of service. This section details how to perform both rolling and restart upgrades. To determine whether a rolling upgrade is supported for your release, please consult this table:
+
+[cols="1,2,3",options="header",]
+|=======================================================================
+|Upgrade From |Upgrade To |Supported Upgrade Type
+|0.90.x |1.x |Restart Upgrade
+
+|< 0.90.7 |0.90.x |Restart Upgrade
+
+|>= 0.90.7 |0.90.x |Rolling Upgrade
+
+|1.x |1.x |Rolling Upgrade
+|=======================================================================
+
+Before upgrading from 0.90.x or any earlier version to 1.x or later, it is a good idea to consult the <> docs.
+
+[float]
+[[backup]]
+=== Back Up Your Data!
+
+Before performing an upgrade, it's a good idea to back up the data on your system. This will allow you to roll back in the event of a problem with the upgrade. Upgrades sometimes include an upgrade of the Lucene libraries used by Elasticsearch to access the index files, and after an index file has been updated to work with a new version of Lucene, it may no longer be readable by the versions of Lucene present in earlier Elasticsearch releases.
+
+[float]
+==== 0.90.x and earlier
+
+To back up a running 0.90.x system, first disable index flushing. This will prevent indices from being flushed to disk while the backup is in process:
+
+[source,sh]
+-----------------------------------
+$ curl -XPUT 'http://localhost:9200/_all/_settings' -d '{
+ "index": {
+ "translog.disable_flush": "true"
+ }
+}'
+-----------------------------------
+
+Then disable reallocation. This will prevent the cluster from moving data files from one node to another while the backup is in process:
+
+[source,sh]
+-----------------------------------
+$ curl -XPUT 'http://localhost:9200/_cluster/settings' -d '{
+ "transient" : {
+ "cluster.routing.allocation.disable_allocation": "true"
+ }
+}'
+-----------------------------------
+
+After reallocation and index flushing are disabled, initiate a backup of Elasticsearch's data path using your favorite backup method (tar, storage array snapshots, backup software). When the backup is complete and data no longer needs to be read from the Elasticsearch data path, reallocation and index flushing must be re-enabled:
+
+[source,sh]
+-----------------------------------
+$ curl -XPUT 'http://localhost:9200/_all/_settings' -d '{
+ "index": {
+ "translog.disable_flush": "false"
+ }
+}'
+
+$ curl -XPUT 'http://localhost:9200/_cluster/settings' -d '{
+ "transient" : {
+ "cluster.routing.allocation.disable_allocation": "false"
+ }
+}'
+-----------------------------------
+
+[float]
+==== 1.0 and later
+
+To back up a running 1.0 or later system, it is simplest to use the snapshot feature. Complete instructions for backup and restore with snapshots are available http://www.elasticsearch.org/guide/en/elasticsearch/reference/current/modules-snapshots.html[here].
+
+[float]
+[[rolling-upgrades]]
+=== Rolling upgrade process
+
+A rolling upgrade allows an Elasticsearch cluster to be upgraded one node at a time, with no observable downtime for end users. Running multiple versions of Elasticsearch in the same cluster for any length of time beyond that required for an upgrade is not supported, as shard replication from the more recent version to the previous versions will not work.
+
+Rolling upgrades are supported between minor or maintenance releases after release 1.0. To perform a rolling upgrade:
+
+* Disable shard reallocation (optional). This is done to allow for a faster startup after cluster shutdown. If this step is not performed, the nodes will immediately start trying to replicate shards to each other on startup and will spend a lot of time on wasted I/O. With shard reallocation disabled, the nodes will join the cluster with their indices intact, without attempting to rebalance. After startup is complete, reallocation will be turned back on.
+
+This syntax applies to Elasticsearch 1.0 and later:
+
+[source,sh]
+--------------------------------------------------
+ curl -XPUT localhost:9200/_cluster/settings -d '{
+ "transient" : {
+ "cluster.routing.allocation.enable" : "none"
+ }
+ }'
+--------------------------------------------------
+
+* Shut down a single node within the cluster.
+
+[source,sh]
+--------------------------------------------
+curl -XPOST 'http://localhost:9200/_cluster/nodes/_local/_shutdown'
+--------------------------------------------
+
+* Confirm that all shards are correctly reallocated to the remaining running nodes.
+
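One way to verify this (illustrative; any cluster monitoring approach works) is to poll cluster health and wait until there are no relocating or unassigned shards:

[source,sh]
--------------------------------------------------
curl -XGET 'http://localhost:9200/_cluster/health?pretty'
--------------------------------------------------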
+* Upgrade the stopped node. To upgrade using a zip or compressed tarball from elasticsearch.org:
+** Extract the zip or tarball to a new directory, usually in the same volume as the current Elasticsearch installation. Do not overwrite the existing installation, as the downloaded archive will contain a default elasticsearch.yml file and will overwrite your existing configuration.
+** Copy the configuration files from the old Elasticsearch installation's config directory to the new Elasticsearch installation's config directory. Move data files from the old Elasticsearch installation's data directory if necessary. If data files are not located within the tarball's extraction directory, they will not have to be moved.
+** The simplest solution for moving from one version to another is to have a symbolic link for 'elasticsearch' that points to the currently running version. This link can be easily updated and will provide a stable access point to the most recent version. Update this symbolic link if it is being used.
+
+* To upgrade using a .deb or .rpm package:
+
+** Use rpm or deb to install the new package. All files should be placed in their proper locations, and config files should not be overwritten.
+
+* Start the now upgraded node. Confirm that it joins the cluster.
+
+* Repeat this process for all remaining nodes.
+
+* When the process is complete on all nodes, you can re-enable shard reallocation:
+
+[source,sh]
+--------------------------------------------------
+ curl -XPUT localhost:9200/_cluster/settings -d '{
+ "transient" : {
+ "cluster.routing.allocation.enable" : "all"
+ }
+ }'
+--------------------------------------------------
+
+* Observe that all shards are properly allocated on all nodes. Balancing may take some time.
+
+It may be possible to perform the upgrade by installing the new software while the service is running. This would reduce downtime by ensuring the node is ready to run the new version as soon as the old one is stopped. This can be done by installing the new version in its own directory and using the symbolic link method outlined above. It is important to test this procedure first to be sure that site-specific configuration data and production indices will not be overwritten during the upgrade process.
+
+[float]
+[[restart-upgrade]]
+=== Cluster restart upgrade process
+
+Elasticsearch releases prior to 1.0 are not compatible with releases 1.0 and later, so a rolling upgrade is not possible. In order to upgrade a pre-1.0 system to 1.0 or later, a full cluster stop and start is required. To perform this upgrade:
+
+* Disable shard reallocation (optional). This is done to allow for a faster startup after cluster shutdown. If this step is not performed, the nodes will immediately start trying to replicate shards to each other on startup and will spend a lot of time on wasted I/O. With shard reallocation disabled, the nodes will join the cluster with their indices intact, without attempting to rebalance. After startup is complete, reallocation will be turned back on.
+
+This syntax is from versions prior to 1.0:
+
+[source,sh]
+--------------------------------------------------
+ curl -XPUT localhost:9200/_cluster/settings -d '{
+ "persistent" : {
+ "cluster.routing.allocation.disable_allocation" : true
+ }
+ }'
+--------------------------------------------------
+
+* Stop all Elasticsearch services on all nodes in the cluster.
+[source,sh]
+------------------------------------------------------
+ curl -XPOST 'http://localhost:9200/_shutdown'
+------------------------------------------------------
+
+* On the first node to be upgraded, extract the archive or install the new package as described above in the Rolling Upgrades section. Repeat for all nodes.
+
+* Once the upgrade is complete on all nodes, the cluster can be started by starting each node individually.
+** Start master-eligible nodes first, one at a time. Verify that a quorum has been reached and a master has been elected before proceeding.
+** Start data nodes and then client nodes one at a time, verifying that they successfully join the cluster.
+
+* When the cluster is running and reaches a yellow state, shard reallocation can be enabled.
+
+This syntax is from release 1.0 and later:
+[source,sh]
+------------------------------------------------------
+ curl -XPUT localhost:9200/_cluster/settings -d '{
+ "persistent" : {
+ "cluster.routing.allocation.disable_allocation": false,
+ "cluster.routing.allocation.enable" : "all"
+ }
+ }'
+------------------------------------------------------
+
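As an illustrative aside, reaching the yellow state can also be awaited directly through the health API's `wait_for_status` parameter:

[source,sh]
------------------------------------------------------
curl -XGET 'http://localhost:9200/_cluster/health?wait_for_status=yellow&pretty'
------------------------------------------------------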
+The cluster upgrade can be streamlined by installing the software before stopping cluster services. If this is done, testing must be performed to ensure that no production data or configuration files are overwritten prior to restart.
diff --git a/pom.xml b/pom.xml
index 883a7c69f2f3f..76359dd482322 100644
--- a/pom.xml
+++ b/pom.xml
@@ -52,7 +52,7 @@
            <groupId>com.carrotsearch.randomizedtesting</groupId>
            <artifactId>randomizedtesting-runner</artifactId>
-           <version>2.1.1</version>
+           <version>2.1.2</version>
            <scope>test</scope>
@@ -387,7 +387,7 @@
                <groupId>com.carrotsearch.randomizedtesting</groupId>
                <artifactId>junit4-maven-plugin</artifactId>
-               <version>2.1.1</version>
+               <version>2.1.2</version>
tests
@@ -445,6 +445,7 @@
${tests.verbose}
${tests.seed}
${tests.failfast}
+ false
.
@@ -1138,12 +1139,8 @@
org/elasticsearch/cache/recycler/MockPageCacheRecycler.class
org/apache/lucene/util/AbstractRandomizedTest.class
org/apache/lucene/util/AbstractRandomizedTest$*.class
-
- com/carrotsearch/randomizedtesting/StandaloneRandomizedContext.class
-
- org/elasticsearch/test/rest/ElasticsearchRestTests.class
org/elasticsearch/test/rest/test/**/*
@@ -1417,7 +1414,7 @@
- dependencies
+ index
diff --git a/rest-api-spec/test/cat.allocation/10_basic.yaml b/rest-api-spec/test/cat.allocation/10_basic.yaml
index 6250e42bd2f8b..ca9d5aa6e1ad9 100644
--- a/rest-api-spec/test/cat.allocation/10_basic.yaml
+++ b/rest-api-spec/test/cat.allocation/10_basic.yaml
@@ -55,7 +55,7 @@
- match:
$body: >
/^
- ( [1-5] \s+
+ ( \d+ \s+
\d+(\.\d+)?[kmgt]b \s+
\d+(\.\d+)?[kmgt]b \s+
\d+(\.\d+)?[kmgt]b \s+
diff --git a/rest-api-spec/test/cat.shards/10_basic.yaml b/rest-api-spec/test/cat.shards/10_basic.yaml
index 223fdc2ae98c8..ef10f1b0b091d 100644
--- a/rest-api-spec/test/cat.shards/10_basic.yaml
+++ b/rest-api-spec/test/cat.shards/10_basic.yaml
@@ -9,12 +9,12 @@
/^$/
- do:
- index:
- index: index1
- type: type1
- id: 1
- body: { foo: bar }
- refresh: true
+ indices.create:
+ index: index1
+ body:
+ settings:
+ number_of_shards: "5"
+ number_of_replicas: "1"
- do:
cluster.health:
wait_for_status: yellow
@@ -30,6 +30,7 @@
index: index2
body:
settings:
+ number_of_shards: "5"
number_of_replicas: "0"
- do:
cluster.health:
diff --git a/rest-api-spec/test/cluster.put_settings/10_basic.yaml b/rest-api-spec/test/cluster.put_settings/10_basic.yaml
index e80ab747fa496..37396dac822c4 100644
--- a/rest-api-spec/test/cluster.put_settings/10_basic.yaml
+++ b/rest-api-spec/test/cluster.put_settings/10_basic.yaml
@@ -1,4 +1,9 @@
---
+setup:
+ - skip:
+ version: 0 - 999
+ reason: leaves transient metadata behind, need to fix it
+---
"Test put settings":
- do:
cluster.put_settings:
diff --git a/rest-api-spec/test/create/40_routing.yaml b/rest-api-spec/test/create/40_routing.yaml
index bc3fb84f5e994..e39cbd1287d06 100644
--- a/rest-api-spec/test/create/40_routing.yaml
+++ b/rest-api-spec/test/create/40_routing.yaml
@@ -7,6 +7,7 @@
body:
settings:
index:
+ number_of_shards: 5
number_of_replicas: 0
- do:
diff --git a/rest-api-spec/test/create/55_parent_with_routing.yaml b/rest-api-spec/test/create/55_parent_with_routing.yaml
index ec9471842bc5f..8ce045a64b0ab 100644
--- a/rest-api-spec/test/create/55_parent_with_routing.yaml
+++ b/rest-api-spec/test/create/55_parent_with_routing.yaml
@@ -9,6 +9,7 @@
test:
_parent: { type: "foo" }
settings:
+ number_of_shards: 5
number_of_replicas: 0
- do:
diff --git a/rest-api-spec/test/delete/30_routing.yaml b/rest-api-spec/test/delete/30_routing.yaml
index b0d3ca3885f6d..857cf76a527b6 100644
--- a/rest-api-spec/test/delete/30_routing.yaml
+++ b/rest-api-spec/test/delete/30_routing.yaml
@@ -1,6 +1,12 @@
---
"Routing":
+ - do:
+ indices.create:
+ index: test_1
+ body:
+ settings:
+ number_of_shards: 5
- do:
index:
index: test_1
diff --git a/rest-api-spec/test/delete/40_parent.yaml b/rest-api-spec/test/delete/40_parent.yaml
index 6e8beca79b440..cd250de331038 100644
--- a/rest-api-spec/test/delete/40_parent.yaml
+++ b/rest-api-spec/test/delete/40_parent.yaml
@@ -4,6 +4,8 @@
indices.create:
index: test_1
body:
+ settings:
+ number_of_shards: 5
mappings:
test:
_parent: { type: "foo" }
diff --git a/rest-api-spec/test/delete/45_parent_with_routing.yaml b/rest-api-spec/test/delete/45_parent_with_routing.yaml
index 7a11db4e46749..8b11e82b75459 100644
--- a/rest-api-spec/test/delete/45_parent_with_routing.yaml
+++ b/rest-api-spec/test/delete/45_parent_with_routing.yaml
@@ -9,6 +9,7 @@
test:
_parent: { type: "foo" }
settings:
+ number_of_shards: 5
number_of_replicas: 0
- do:
diff --git a/rest-api-spec/test/delete/50_refresh.yaml b/rest-api-spec/test/delete/50_refresh.yaml
index b550789e2cfaf..ef7a3045514e8 100644
--- a/rest-api-spec/test/delete/50_refresh.yaml
+++ b/rest-api-spec/test/delete/50_refresh.yaml
@@ -7,6 +7,7 @@
body:
settings:
refresh_interval: -1
+ number_of_shards: 5
number_of_replicas: 0
- do:
cluster.health:
diff --git a/rest-api-spec/test/exists/40_routing.yaml b/rest-api-spec/test/exists/40_routing.yaml
index 56ba443e3cda3..6b55a3bee37f7 100644
--- a/rest-api-spec/test/exists/40_routing.yaml
+++ b/rest-api-spec/test/exists/40_routing.yaml
@@ -7,6 +7,7 @@
body:
settings:
index:
+ number_of_shards: 5
number_of_replicas: 0
- do:
diff --git a/rest-api-spec/test/exists/55_parent_with_routing.yaml b/rest-api-spec/test/exists/55_parent_with_routing.yaml
index 0e92aac6b2a12..bf617a23260f7 100644
--- a/rest-api-spec/test/exists/55_parent_with_routing.yaml
+++ b/rest-api-spec/test/exists/55_parent_with_routing.yaml
@@ -9,6 +9,7 @@
test:
_parent: { type: "foo" }
settings:
+ number_of_shards: 5
number_of_replicas: 0
- do:
diff --git a/rest-api-spec/test/get/40_routing.yaml b/rest-api-spec/test/get/40_routing.yaml
index f909cb0cbf78c..f464e662c3a3f 100644
--- a/rest-api-spec/test/get/40_routing.yaml
+++ b/rest-api-spec/test/get/40_routing.yaml
@@ -7,6 +7,7 @@
body:
settings:
index:
+ number_of_shards: 5
number_of_replicas: 0
- do:
diff --git a/rest-api-spec/test/get/55_parent_with_routing.yaml b/rest-api-spec/test/get/55_parent_with_routing.yaml
index 43d60f5bd508e..c65a2b121982b 100644
--- a/rest-api-spec/test/get/55_parent_with_routing.yaml
+++ b/rest-api-spec/test/get/55_parent_with_routing.yaml
@@ -9,6 +9,7 @@
test:
_parent: { type: "foo" }
settings:
+ number_of_shards: 5
number_of_replicas: 0
- do:
diff --git a/rest-api-spec/test/get_source/40_routing.yaml b/rest-api-spec/test/get_source/40_routing.yaml
index f771dbb05937f..bd9a29184472a 100644
--- a/rest-api-spec/test/get_source/40_routing.yaml
+++ b/rest-api-spec/test/get_source/40_routing.yaml
@@ -7,6 +7,7 @@
body:
settings:
index:
+ number_of_shards: 5
number_of_replicas: 0
- do:
diff --git a/rest-api-spec/test/get_source/55_parent_with_routing.yaml b/rest-api-spec/test/get_source/55_parent_with_routing.yaml
index 86fe2ba47bb5b..38ca9d5f22d15 100644
--- a/rest-api-spec/test/get_source/55_parent_with_routing.yaml
+++ b/rest-api-spec/test/get_source/55_parent_with_routing.yaml
@@ -9,6 +9,7 @@
test:
_parent: { type: "foo" }
settings:
+ number_of_shards: 5
number_of_replicas: 0
- do:
diff --git a/rest-api-spec/test/index/40_routing.yaml b/rest-api-spec/test/index/40_routing.yaml
index f909cb0cbf78c..f464e662c3a3f 100644
--- a/rest-api-spec/test/index/40_routing.yaml
+++ b/rest-api-spec/test/index/40_routing.yaml
@@ -7,6 +7,7 @@
body:
settings:
index:
+ number_of_shards: 5
number_of_replicas: 0
- do:
diff --git a/rest-api-spec/test/index/55_parent_with_routing.yaml b/rest-api-spec/test/index/55_parent_with_routing.yaml
index 43d60f5bd508e..c65a2b121982b 100644
--- a/rest-api-spec/test/index/55_parent_with_routing.yaml
+++ b/rest-api-spec/test/index/55_parent_with_routing.yaml
@@ -9,6 +9,7 @@
test:
_parent: { type: "foo" }
settings:
+ number_of_shards: 5
number_of_replicas: 0
- do:
diff --git a/rest-api-spec/test/indices.get_settings/10_basic.yaml b/rest-api-spec/test/indices.get_settings/10_basic.yaml
index eaca930e694f5..a50be32e82c6a 100644
--- a/rest-api-spec/test/indices.get_settings/10_basic.yaml
+++ b/rest-api-spec/test/indices.get_settings/10_basic.yaml
@@ -3,9 +3,17 @@ setup:
- do:
indices.create:
index: test_1
+ body:
+ settings:
+ number_of_shards: 5
+ number_of_replicas: 1
- do:
indices.create:
index: test_2
+ body:
+ settings:
+ number_of_shards: 3
+ number_of_replicas: 0
---
"Get /_settings":
@@ -15,8 +23,8 @@ setup:
- match: { test_1.settings.index.number_of_shards: "5"}
- match: { test_1.settings.index.number_of_replicas: "1"}
- - match: { test_2.settings.index.number_of_shards: "5"}
- - match: { test_2.settings.index.number_of_replicas: "1"}
+ - match: { test_2.settings.index.number_of_shards: "3"}
+ - match: { test_2.settings.index.number_of_replicas: "0"}
---
"Get /{index}/_settings":
@@ -98,7 +106,7 @@ setup:
name: index.number_of_shards
- match: { test_1.settings.index.number_of_shards: "5"}
- - match: { test_2.settings.index.number_of_shards: "5"}
+ - match: { test_2.settings.index.number_of_shards: "3"}
- is_false: test_1.settings.index.number_of_replicas
- is_false: test_2.settings.index.number_of_replicas
@@ -111,7 +119,7 @@ setup:
name: index.number_of_shards
- match: { test_1.settings.index.number_of_shards: "5"}
- - match: { test_2.settings.index.number_of_shards: "5"}
+ - match: { test_2.settings.index.number_of_shards: "3"}
- is_false: test_1.settings.index.number_of_replicas
- is_false: test_2.settings.index.number_of_replicas
@@ -125,7 +133,7 @@ setup:
name: index.number_of_shards
- match: { test_1.settings.index.number_of_shards: "5"}
- - match: { test_2.settings.index.number_of_shards: "5"}
+ - match: { test_2.settings.index.number_of_shards: "3"}
- is_false: test_1.settings.index.number_of_replicas
- is_false: test_2.settings.index.number_of_replicas
@@ -138,7 +146,7 @@ setup:
name: index.number_of_shards
- match: { test_1.settings.index.number_of_shards: "5"}
- - match: { test_2.settings.index.number_of_shards: "5"}
+ - match: { test_2.settings.index.number_of_shards: "3"}
- is_false: test_1.settings.index.number_of_replicas
- is_false: test_2.settings.index.number_of_replicas
@@ -150,7 +158,7 @@ setup:
index: '*2'
name: index.number_of_shards
- - match: { test_2.settings.index.number_of_shards: "5"}
+ - match: { test_2.settings.index.number_of_shards: "3"}
- is_false: test_1
- is_false: test_2.settings.index.number_of_replicas
diff --git a/rest-api-spec/test/mget/30_parent.yaml b/rest-api-spec/test/mget/30_parent.yaml
index 491b8b8d5338a..9f29627445933 100644
--- a/rest-api-spec/test/mget/30_parent.yaml
+++ b/rest-api-spec/test/mget/30_parent.yaml
@@ -8,6 +8,8 @@
mappings:
test:
_parent: { type: "foo" }
+ settings:
+ number_of_shards: 5
- do:
cluster.health:
wait_for_status: yellow
diff --git a/rest-api-spec/test/mget/40_routing.yaml b/rest-api-spec/test/mget/40_routing.yaml
index 96734da32932b..6c1884cf5d385 100644
--- a/rest-api-spec/test/mget/40_routing.yaml
+++ b/rest-api-spec/test/mget/40_routing.yaml
@@ -7,6 +7,7 @@
body:
settings:
index:
+ number_of_shards: 5
number_of_replicas: 0
- do:
diff --git a/rest-api-spec/test/mget/55_parent_with_routing.yaml b/rest-api-spec/test/mget/55_parent_with_routing.yaml
index 039f5c58aacce..89e99d4c8813b 100644
--- a/rest-api-spec/test/mget/55_parent_with_routing.yaml
+++ b/rest-api-spec/test/mget/55_parent_with_routing.yaml
@@ -9,6 +9,7 @@
test:
_parent: { type: "foo" }
settings:
+ number_of_shards: 5
number_of_replicas: 0
- do:
diff --git a/rest-api-spec/test/mlt/10_basic.yaml b/rest-api-spec/test/mlt/10_basic.yaml
index e914607b997b8..f7f41b72b0696 100644
--- a/rest-api-spec/test/mlt/10_basic.yaml
+++ b/rest-api-spec/test/mlt/10_basic.yaml
@@ -1,5 +1,20 @@
---
"Basic mlt":
+ - do:
+ indices.create:
+ index: test_1
+ body:
+ settings:
+ index:
+ number_of_replicas: 0
+ mappings:
+ test:
+ properties:
+ foo:
+ type : "string"
+ title:
+ type : "string"
+
- do:
index:
index: test_1
@@ -13,7 +28,7 @@
- do:
cluster.health:
wait_for_status: green
- timeout: 1s
+
- do:
mlt:
index: test_1
diff --git a/rest-api-spec/test/search/20_default_values.yaml b/rest-api-spec/test/search/20_default_values.yaml
index e8f14340e339c..6921a58d8868c 100644
--- a/rest-api-spec/test/search/20_default_values.yaml
+++ b/rest-api-spec/test/search/20_default_values.yaml
@@ -34,10 +34,31 @@
foo: bar
- match: {hits.total: 2}
+
+ - do:
+ search:
+ index: test_1
+ type: test
+ body:
+ query:
+ match:
+ foo: bar
+
+ - match: {hits.total: 1}
- match: {hits.hits.0._index: test_1 }
- match: {hits.hits.0._type: test }
- match: {hits.hits.0._id: "1" }
- - match: {hits.hits.1._index: test_2 }
- - match: {hits.hits.1._type: test }
- - match: {hits.hits.1._id: "42" }
+ - do:
+ search:
+ index: test_2
+ type: test
+ body:
+ query:
+ match:
+ foo: bar
+
+ - match: {hits.total: 1}
+ - match: {hits.hits.0._index: test_2 }
+ - match: {hits.hits.0._type: test }
+ - match: {hits.hits.0._id: "42" }
diff --git a/rest-api-spec/test/update/40_routing.yaml b/rest-api-spec/test/update/40_routing.yaml
index 52cd938fb71f2..4b03a538b9245 100644
--- a/rest-api-spec/test/update/40_routing.yaml
+++ b/rest-api-spec/test/update/40_routing.yaml
@@ -7,6 +7,7 @@
body:
settings:
index:
+ number_of_shards: 5
number_of_replicas: 0
- do:
diff --git a/rest-api-spec/test/update/55_parent_with_routing.yaml b/rest-api-spec/test/update/55_parent_with_routing.yaml
index cc828021500db..51dd91af3baa2 100644
--- a/rest-api-spec/test/update/55_parent_with_routing.yaml
+++ b/rest-api-spec/test/update/55_parent_with_routing.yaml
@@ -9,6 +9,7 @@
test:
_parent: { type: "foo" }
settings:
+ number_of_shards: 5
number_of_replicas: 0
- do:
diff --git a/src/main/java/org/elasticsearch/action/admin/cluster/node/info/PluginInfo.java b/src/main/java/org/elasticsearch/action/admin/cluster/node/info/PluginInfo.java
index 9dc6e2430824e..b1b2fc2d572e4 100644
--- a/src/main/java/org/elasticsearch/action/admin/cluster/node/info/PluginInfo.java
+++ b/src/main/java/org/elasticsearch/action/admin/cluster/node/info/PluginInfo.java
@@ -41,7 +41,6 @@ static final class Fields {
static final XContentBuilderString JVM = new XContentBuilderString("jvm");
static final XContentBuilderString SITE = new XContentBuilderString("site");
static final XContentBuilderString VERSION = new XContentBuilderString("version");
- static final XContentBuilderString ISOLATION = new XContentBuilderString("isolation");
}
private String name;
@@ -49,7 +48,6 @@ static final class Fields {
private boolean site;
private boolean jvm;
private String version;
- private boolean isolation;
public PluginInfo() {
}
@@ -62,9 +60,8 @@ public PluginInfo() {
* @param site true if it's a site plugin
* @param jvm true if it's a jvm plugin
* @param version Version number is applicable (NA otherwise)
- * @param isolation true if it's an isolated plugin
*/
- public PluginInfo(String name, String description, boolean site, boolean jvm, String version, boolean isolation) {
+ public PluginInfo(String name, String description, boolean site, boolean jvm, String version) {
this.name = name;
this.description = description;
this.site = site;
@@ -74,7 +71,6 @@ public PluginInfo(String name, String description, boolean site, boolean jvm, St
} else {
this.version = VERSION_NOT_AVAILABLE;
}
- this.isolation = isolation;
}
/**
@@ -125,13 +121,6 @@ public String getVersion() {
return version;
}
- /**
- * @return Plugin isolation
- */
- public boolean isIsolation() {
- return isolation;
- }
-
public static PluginInfo readPluginInfo(StreamInput in) throws IOException {
PluginInfo info = new PluginInfo();
info.readFrom(in);
@@ -149,11 +138,6 @@ public void readFrom(StreamInput in) throws IOException {
} else {
this.version = VERSION_NOT_AVAILABLE;
}
- if (in.getVersion().onOrAfter(Version.V_1_1_0)) {
- this.isolation = in.readBoolean();
- } else {
- this.isolation = false;
- }
}
@Override
@@ -165,9 +149,6 @@ public void writeTo(StreamOutput out) throws IOException {
if (out.getVersion().onOrAfter(Version.V_1_0_0_RC2)) {
out.writeString(version);
}
- if (out.getVersion().onOrAfter(Version.V_1_1_0)) {
- out.writeBoolean(isolation);
- }
}
@Override
@@ -181,7 +162,6 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws
}
builder.field(Fields.JVM, jvm);
builder.field(Fields.SITE, site);
- builder.field(Fields.ISOLATION, isolation);
builder.endObject();
return builder;
@@ -207,13 +187,12 @@ public int hashCode() {
@Override
public String toString() {
- final StringBuilder sb = new StringBuilder("PluginInfo{");
+ final StringBuffer sb = new StringBuffer("PluginInfo{");
sb.append("name='").append(name).append('\'');
sb.append(", description='").append(description).append('\'');
sb.append(", site=").append(site);
sb.append(", jvm=").append(jvm);
sb.append(", version='").append(version).append('\'');
- sb.append(", isolation=").append(isolation);
sb.append('}');
return sb.toString();
}
diff --git a/src/main/java/org/elasticsearch/action/get/MultiGetResponse.java b/src/main/java/org/elasticsearch/action/get/MultiGetResponse.java
index 0c367ef009211..2efb90bc3a1a9 100644
--- a/src/main/java/org/elasticsearch/action/get/MultiGetResponse.java
+++ b/src/main/java/org/elasticsearch/action/get/MultiGetResponse.java
@@ -124,7 +124,6 @@ public Iterator iterator() {
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
- builder.startObject();
builder.startArray(Fields.DOCS);
for (MultiGetItemResponse response : responses) {
if (response.isFailed()) {
@@ -137,11 +136,12 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws
builder.endObject();
} else {
GetResponse getResponse = response.getResponse();
+ builder.startObject();
getResponse.toXContent(builder, params);
+ builder.endObject();
}
}
builder.endArray();
- builder.endObject();
return builder;
}
diff --git a/src/main/java/org/elasticsearch/action/percolate/MultiPercolateResponse.java b/src/main/java/org/elasticsearch/action/percolate/MultiPercolateResponse.java
index 87e17eba0bd00..27e9ce44f6560 100644
--- a/src/main/java/org/elasticsearch/action/percolate/MultiPercolateResponse.java
+++ b/src/main/java/org/elasticsearch/action/percolate/MultiPercolateResponse.java
@@ -59,7 +59,6 @@ public Item[] getItems() {
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
- builder.startObject();
builder.startArray(Fields.RESPONSES);
for (MultiPercolateResponse.Item item : items) {
if (item.isFailure()) {
@@ -67,11 +66,12 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws
builder.field(Fields.ERROR, item.getErrorMessage());
builder.endObject();
} else {
+ builder.startObject();
item.getResponse().toXContent(builder, params);
+ builder.endObject();
}
}
builder.endArray();
- builder.endObject();
return builder;
}
diff --git a/src/main/java/org/elasticsearch/action/percolate/PercolateResponse.java b/src/main/java/org/elasticsearch/action/percolate/PercolateResponse.java
index 87cf5cc1423e3..284bca38168a8 100644
--- a/src/main/java/org/elasticsearch/action/percolate/PercolateResponse.java
+++ b/src/main/java/org/elasticsearch/action/percolate/PercolateResponse.java
@@ -122,8 +122,6 @@ public Iterator iterator() {
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
- builder.startObject();
-
builder.field(Fields.TOOK, tookInMillis);
RestActions.buildBroadcastShardsHeader(builder, this);
@@ -172,8 +170,6 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws
if (aggregations != null) {
aggregations.toXContent(builder, params);
}
-
- builder.endObject();
return builder;
}
diff --git a/src/main/java/org/elasticsearch/action/search/type/TransportSearchCountAction.java b/src/main/java/org/elasticsearch/action/search/type/TransportSearchCountAction.java
index 96c2d24adda51..a19db2c7b7264 100644
--- a/src/main/java/org/elasticsearch/action/search/type/TransportSearchCountAction.java
+++ b/src/main/java/org/elasticsearch/action/search/type/TransportSearchCountAction.java
@@ -78,7 +78,7 @@ protected void moveToSecondPhase() throws Exception {
if (request.scroll() != null) {
scrollId = buildScrollId(request.searchType(), firstResults, null);
}
- listener.onResponse(new SearchResponse(internalResponse, scrollId, expectedSuccessfulOps, successulOps.get(), buildTookInMillis(), buildShardFailures()));
+ listener.onResponse(new SearchResponse(internalResponse, scrollId, expectedSuccessfulOps, successfulOps.get(), buildTookInMillis(), buildShardFailures()));
}
}
}
diff --git a/src/main/java/org/elasticsearch/action/search/type/TransportSearchDfsQueryAndFetchAction.java b/src/main/java/org/elasticsearch/action/search/type/TransportSearchDfsQueryAndFetchAction.java
index 215672bc21da4..d29355d851fef 100644
--- a/src/main/java/org/elasticsearch/action/search/type/TransportSearchDfsQueryAndFetchAction.java
+++ b/src/main/java/org/elasticsearch/action/search/type/TransportSearchDfsQueryAndFetchAction.java
@@ -158,7 +158,7 @@ void onSecondPhaseFailure(Throwable t, QuerySearchRequest querySearchRequest, in
logger.debug("[{}] Failed to execute query phase", t, querySearchRequest.id());
}
this.addShardFailure(shardIndex, dfsResult.shardTarget(), t);
- successulOps.decrementAndGet();
+ successfulOps.decrementAndGet();
if (counter.decrementAndGet() == 0) {
finishHim();
}
@@ -185,7 +185,7 @@ void innerFinishHim() throws Exception {
if (request.scroll() != null) {
scrollId = TransportSearchHelper.buildScrollId(request.searchType(), firstResults, null);
}
- listener.onResponse(new SearchResponse(internalResponse, scrollId, expectedSuccessfulOps, successulOps.get(), buildTookInMillis(), buildShardFailures()));
+ listener.onResponse(new SearchResponse(internalResponse, scrollId, expectedSuccessfulOps, successfulOps.get(), buildTookInMillis(), buildShardFailures()));
}
}
}
diff --git a/src/main/java/org/elasticsearch/action/search/type/TransportSearchDfsQueryThenFetchAction.java b/src/main/java/org/elasticsearch/action/search/type/TransportSearchDfsQueryThenFetchAction.java
index f52a84c680b16..b168a40ab4670 100644
--- a/src/main/java/org/elasticsearch/action/search/type/TransportSearchDfsQueryThenFetchAction.java
+++ b/src/main/java/org/elasticsearch/action/search/type/TransportSearchDfsQueryThenFetchAction.java
@@ -168,7 +168,7 @@ void onQueryFailure(Throwable t, QuerySearchRequest querySearchRequest, int shar
logger.debug("[{}] Failed to execute query phase", t, querySearchRequest.id());
}
this.addShardFailure(shardIndex, dfsResult.shardTarget(), t);
- successulOps.decrementAndGet();
+ successfulOps.decrementAndGet();
if (counter.decrementAndGet() == 0) {
executeFetchPhase();
}
@@ -272,7 +272,7 @@ void onFetchFailure(Throwable t, FetchSearchRequest fetchSearchRequest, int shar
logger.debug("[{}] Failed to execute fetch phase", t, fetchSearchRequest.id());
}
this.addShardFailure(shardIndex, shardTarget, t);
- successulOps.decrementAndGet();
+ successfulOps.decrementAndGet();
if (counter.decrementAndGet() == 0) {
finishHim();
}
@@ -298,7 +298,7 @@ void innerFinishHim() throws Exception {
if (request.scroll() != null) {
scrollId = TransportSearchHelper.buildScrollId(request.searchType(), firstResults, null);
}
- listener.onResponse(new SearchResponse(internalResponse, scrollId, expectedSuccessfulOps, successulOps.get(), buildTookInMillis(), buildShardFailures()));
+ listener.onResponse(new SearchResponse(internalResponse, scrollId, expectedSuccessfulOps, successfulOps.get(), buildTookInMillis(), buildShardFailures()));
}
}
}
diff --git a/src/main/java/org/elasticsearch/action/search/type/TransportSearchQueryAndFetchAction.java b/src/main/java/org/elasticsearch/action/search/type/TransportSearchQueryAndFetchAction.java
index 334bb140d2e60..946512f22403b 100644
--- a/src/main/java/org/elasticsearch/action/search/type/TransportSearchQueryAndFetchAction.java
+++ b/src/main/java/org/elasticsearch/action/search/type/TransportSearchQueryAndFetchAction.java
@@ -91,7 +91,7 @@ private void innerFinishHim() throws IOException {
if (request.scroll() != null) {
scrollId = buildScrollId(request.searchType(), firstResults, null);
}
- listener.onResponse(new SearchResponse(internalResponse, scrollId, expectedSuccessfulOps, successulOps.get(), buildTookInMillis(), buildShardFailures()));
+ listener.onResponse(new SearchResponse(internalResponse, scrollId, expectedSuccessfulOps, successfulOps.get(), buildTookInMillis(), buildShardFailures()));
}
}
}
diff --git a/src/main/java/org/elasticsearch/action/search/type/TransportSearchQueryThenFetchAction.java b/src/main/java/org/elasticsearch/action/search/type/TransportSearchQueryThenFetchAction.java
index d713069ef39b5..ee92c694209f9 100644
--- a/src/main/java/org/elasticsearch/action/search/type/TransportSearchQueryThenFetchAction.java
+++ b/src/main/java/org/elasticsearch/action/search/type/TransportSearchQueryThenFetchAction.java
@@ -172,7 +172,7 @@ void onFetchFailure(Throwable t, FetchSearchRequest fetchSearchRequest, int shar
logger.debug("[{}] Failed to execute fetch phase", t, fetchSearchRequest.id());
}
this.addShardFailure(shardIndex, shardTarget, t);
- successulOps.decrementAndGet();
+ successfulOps.decrementAndGet();
if (counter.decrementAndGet() == 0) {
finishHim();
}
@@ -198,7 +198,7 @@ void innerFinishHim() throws Exception {
if (request.scroll() != null) {
scrollId = TransportSearchHelper.buildScrollId(request.searchType(), firstResults, null);
}
- listener.onResponse(new SearchResponse(internalResponse, scrollId, expectedSuccessfulOps, successulOps.get(), buildTookInMillis(), buildShardFailures()));
+ listener.onResponse(new SearchResponse(internalResponse, scrollId, expectedSuccessfulOps, successfulOps.get(), buildTookInMillis(), buildShardFailures()));
}
}
}
diff --git a/src/main/java/org/elasticsearch/action/search/type/TransportSearchScanAction.java b/src/main/java/org/elasticsearch/action/search/type/TransportSearchScanAction.java
index b7e5c10a3f385..f9a12585329ff 100644
--- a/src/main/java/org/elasticsearch/action/search/type/TransportSearchScanAction.java
+++ b/src/main/java/org/elasticsearch/action/search/type/TransportSearchScanAction.java
@@ -75,7 +75,7 @@ protected void moveToSecondPhase() throws Exception {
if (request.scroll() != null) {
scrollId = buildScrollId(request.searchType(), firstResults, ImmutableMap.of("total_hits", Long.toString(internalResponse.hits().totalHits())));
}
- listener.onResponse(new SearchResponse(internalResponse, scrollId, expectedSuccessfulOps, successulOps.get(), buildTookInMillis(), buildShardFailures()));
+ listener.onResponse(new SearchResponse(internalResponse, scrollId, expectedSuccessfulOps, successfulOps.get(), buildTookInMillis(), buildShardFailures()));
}
}
}
diff --git a/src/main/java/org/elasticsearch/action/search/type/TransportSearchTypeAction.java b/src/main/java/org/elasticsearch/action/search/type/TransportSearchTypeAction.java
index 6b5897602b968..325ac54717f61 100644
--- a/src/main/java/org/elasticsearch/action/search/type/TransportSearchTypeAction.java
+++ b/src/main/java/org/elasticsearch/action/search/type/TransportSearchTypeAction.java
@@ -89,7 +89,7 @@ protected abstract class BaseAsyncAction<FirstResult extends SearchPhaseResult>
protected final int expectedSuccessfulOps;
private final int expectedTotalOps;
- protected final AtomicInteger successulOps = new AtomicInteger();
+ protected final AtomicInteger successfulOps = new AtomicInteger();
private final AtomicInteger totalOps = new AtomicInteger();
protected final AtomicArray<FirstResult> firstResults;
@@ -246,11 +246,13 @@ public void onFailure(Throwable t) {
void onFirstPhaseResult(int shardIndex, ShardRouting shard, FirstResult result, ShardIterator shardIt) {
result.shardTarget(new SearchShardTarget(shard.currentNodeId(), shard.index(), shard.id()));
processFirstPhaseResult(shardIndex, shard, result);
-
+ // we need to increment successful ops first before we compare the exit condition; otherwise, if we
+ // are fast, we could concurrently update totalOps but then preempt one of the threads, which can
+ // cause the successor to read a wrong value from successfulOps if the second phase is very fast, e.g. a count
+ successfulOps.incrementAndGet();
// increment all the "future" shards to update the total ops since we some may work and some may not...
// and when that happens, we break on total ops, so we must maintain them
- int xTotalOps = totalOps.addAndGet(shardIt.remaining() + 1);
- successulOps.incrementAndGet();
+ final int xTotalOps = totalOps.addAndGet(shardIt.remaining() + 1);
if (xTotalOps == expectedTotalOps) {
try {
innerMoveToSecondPhase();
@@ -281,7 +283,7 @@ void onFirstPhaseResult(final int shardIndex, @Nullable ShardRouting shard, @Nul
logger.trace("{}: Failed to execute [{}]", t, shard, request);
}
}
- if (successulOps.get() == 0) {
+ if (successfulOps.get() == 0) {
if (logger.isDebugEnabled()) {
logger.debug("All shards failed for phase: [{}]", firstPhaseName(), t);
}
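
The reordering in the hunk above is subtle, so here is a minimal sketch (hypothetical names, not the Elasticsearch classes) of the rule being applied: publish any state the completion check's reader depends on before the counter update that can trigger completion, because whichever thread observes the final total runs the second phase immediately.

import java.util.concurrent.atomic.AtomicInteger;

class CompletionLatch {
    private final AtomicInteger successfulOps = new AtomicInteger();
    private final AtomicInteger totalOps = new AtomicInteger();
    private final int expectedTotalOps;

    CompletionLatch(int expectedTotalOps) {
        this.expectedTotalOps = expectedTotalOps;
    }

    void onShardResult(int remainingOnIterator) {
        // publish first, so whichever thread sees the final total
        // reads an up-to-date successfulOps value
        successfulOps.incrementAndGet();
        int total = totalOps.addAndGet(remainingOnIterator + 1);
        if (total == expectedTotalOps) {
            finish(successfulOps.get());
        }
    }

    private void finish(int successful) {
        System.out.println("successful ops: " + successful);
    }
}
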
diff --git a/src/main/java/org/elasticsearch/action/termvector/MultiTermVectorsResponse.java b/src/main/java/org/elasticsearch/action/termvector/MultiTermVectorsResponse.java
index e4814d90cd7c9..3313c05236ae1 100644
--- a/src/main/java/org/elasticsearch/action/termvector/MultiTermVectorsResponse.java
+++ b/src/main/java/org/elasticsearch/action/termvector/MultiTermVectorsResponse.java
@@ -124,7 +124,6 @@ public Iterator<MultiTermVectorsItemResponse> iterator() {
@Override
public XContentBuilder toXContent(XContentBuilder builder, Params params) throws IOException {
- builder.startObject();
builder.startArray(Fields.DOCS);
for (MultiTermVectorsItemResponse response : responses) {
if (response.isFailed()) {
@@ -137,11 +136,12 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws
builder.endObject();
} else {
TermVectorResponse getResponse = response.getResponse();
+ builder.startObject();
getResponse.toXContent(builder, params);
+ builder.endObject();
}
}
builder.endArray();
- builder.endObject();
return builder;
}
diff --git a/src/main/java/org/elasticsearch/action/termvector/TermVectorResponse.java b/src/main/java/org/elasticsearch/action/termvector/TermVectorResponse.java
index f662cbbc0fdfe..706d285a9f0b9 100644
--- a/src/main/java/org/elasticsearch/action/termvector/TermVectorResponse.java
+++ b/src/main/java/org/elasticsearch/action/termvector/TermVectorResponse.java
@@ -164,7 +164,6 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws
assert index != null;
assert type != null;
assert id != null;
- builder.startObject();
builder.field(FieldStrings._INDEX, index);
builder.field(FieldStrings._TYPE, type);
builder.field(FieldStrings._ID, id);
@@ -182,7 +181,6 @@ public XContentBuilder toXContent(XContentBuilder builder, Params params) throws
buildField(builder, spare, theFields, fieldIter);
}
builder.endObject();
- builder.endObject();
return builder;
}
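
The two toXContent hunks above work as a pair: the per-item object markers move from TermVectorResponse into MultiTermVectorsResponse, and the outer object is no longer emitted here at all, presumably because the serializing layer already wraps the response once. A minimal sketch of the resulting emission pattern with XContentBuilder (field names and values are placeholders, not the real response fields):

import java.io.IOException;
import org.elasticsearch.common.xcontent.XContentBuilder;
import org.elasticsearch.common.xcontent.XContentFactory;

class DocsArrayExample {
    static XContentBuilder buildDocs() throws IOException {
        XContentBuilder builder = XContentFactory.jsonBuilder();
        builder.startObject();             // outer object, added exactly once by the enclosing layer
        builder.startArray("docs");
        for (int i = 0; i < 2; i++) {
            builder.startObject();         // exactly one object per item
            builder.field("_index", "twitter");
            builder.field("_id", Integer.toString(i));
            builder.endObject();
        }
        builder.endArray();
        builder.endObject();
        return builder;
    }
}
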
diff --git a/src/main/java/org/elasticsearch/action/update/UpdateRequest.java b/src/main/java/org/elasticsearch/action/update/UpdateRequest.java
index 590af784cb932..87e11c6231635 100644
--- a/src/main/java/org/elasticsearch/action/update/UpdateRequest.java
+++ b/src/main/java/org/elasticsearch/action/update/UpdateRequest.java
@@ -537,13 +537,13 @@ public UpdateRequest source(byte[] source, int offset, int length) throws Except
public UpdateRequest source(BytesReference source) throws Exception {
XContentType xContentType = XContentFactory.xContentType(source);
try (XContentParser parser = XContentFactory.xContent(xContentType).createParser(source)) {
- XContentParser.Token t = parser.nextToken();
- if (t == null) {
+ XContentParser.Token token = parser.nextToken();
+ if (token == null) {
return this;
}
String currentFieldName = null;
- while ((t = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
- if (t == XContentParser.Token.FIELD_NAME) {
+ while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
+ if (token == XContentParser.Token.FIELD_NAME) {
currentFieldName = parser.currentName();
} else if ("script".equals(currentFieldName)) {
script = parser.textOrNull();
diff --git a/src/main/java/org/elasticsearch/cluster/ClusterState.java b/src/main/java/org/elasticsearch/cluster/ClusterState.java
index 802503098e8f8..8ceaccf9a1b51 100644
--- a/src/main/java/org/elasticsearch/cluster/ClusterState.java
+++ b/src/main/java/org/elasticsearch/cluster/ClusterState.java
@@ -497,8 +497,8 @@ public Builder blocks(ClusterBlocks.Builder blocksBuilder) {
return blocks(blocksBuilder.build());
}
- public Builder blocks(ClusterBlocks block) {
- this.blocks = block;
+ public Builder blocks(ClusterBlocks blocks) {
+ this.blocks = blocks;
return this;
}
diff --git a/src/main/java/org/elasticsearch/cluster/metadata/MappingMetaData.java b/src/main/java/org/elasticsearch/cluster/metadata/MappingMetaData.java
index 1f7cbc18fa1bd..b31bd52b9839f 100644
--- a/src/main/java/org/elasticsearch/cluster/metadata/MappingMetaData.java
+++ b/src/main/java/org/elasticsearch/cluster/metadata/MappingMetaData.java
@@ -434,28 +434,28 @@ private void innerParse(XContentParser parser, ParseContext context) throws IOEx
return;
}
- XContentParser.Token t = parser.currentToken();
- if (t == null) {
- t = parser.nextToken();
+ XContentParser.Token token = parser.currentToken();
+ if (token == null) {
+ token = parser.nextToken();
}
- if (t == XContentParser.Token.START_OBJECT) {
- t = parser.nextToken();
+ if (token == XContentParser.Token.START_OBJECT) {
+ token = parser.nextToken();
}
String idPart = context.idParsingStillNeeded() ? id().pathElements()[context.locationId] : null;
String routingPart = context.routingParsingStillNeeded() ? routing().pathElements()[context.locationRouting] : null;
String timestampPart = context.timestampParsingStillNeeded() ? timestamp().pathElements()[context.locationTimestamp] : null;
- for (; t == XContentParser.Token.FIELD_NAME; t = parser.nextToken()) {
+ for (; token == XContentParser.Token.FIELD_NAME; token = parser.nextToken()) {
// Must point to field name
String fieldName = parser.currentName();
// And then the value...
- t = parser.nextToken();
+ token = parser.nextToken();
boolean incLocationId = false;
boolean incLocationRouting = false;
boolean incLocationTimestamp = false;
if (context.idParsingStillNeeded() && fieldName.equals(idPart)) {
if (context.locationId + 1 == id.pathElements().length) {
- if (!t.isValue()) {
+ if (!token.isValue()) {
throw new MapperParsingException("id field must be a value but was either an object or an array");
}
context.id = parser.textOrNull();
@@ -482,7 +482,7 @@ private void innerParse(XContentParser parser, ParseContext context) throws IOEx
}
if (incLocationId || incLocationRouting || incLocationTimestamp) {
- if (t == XContentParser.Token.START_OBJECT) {
+ if (token == XContentParser.Token.START_OBJECT) {
context.locationId += incLocationId ? 1 : 0;
context.locationRouting += incLocationRouting ? 1 : 0;
context.locationTimestamp += incLocationTimestamp ? 1 : 0;
diff --git a/src/main/java/org/elasticsearch/cluster/routing/allocation/RoutingAllocation.java b/src/main/java/org/elasticsearch/cluster/routing/allocation/RoutingAllocation.java
index 07c46fe76415c..71f2adb430a69 100644
--- a/src/main/java/org/elasticsearch/cluster/routing/allocation/RoutingAllocation.java
+++ b/src/main/java/org/elasticsearch/cluster/routing/allocation/RoutingAllocation.java
@@ -29,7 +29,9 @@
import org.elasticsearch.index.shard.ShardId;
import java.util.HashMap;
+import java.util.HashSet;
import java.util.Map;
+import java.util.Set;
/**
* The {@link RoutingAllocation} keeps the state of the current allocation
@@ -107,7 +109,7 @@ public RoutingExplanations explanations() {
private final ClusterInfo clusterInfo;
- private Map<ShardId, String> ignoredShardToNodes = null;
+ private Map<ShardId, Set<String>> ignoredShardToNodes = null;
private boolean ignoreDisable = false;
@@ -199,11 +201,20 @@ public void addIgnoreShardForNode(ShardId shardId, String nodeId) {
if (ignoredShardToNodes == null) {
ignoredShardToNodes = new HashMap<>();
}
- ignoredShardToNodes.put(shardId, nodeId);
+ Set<String> nodes = ignoredShardToNodes.get(shardId);
+ if (nodes == null) {
+ nodes = new HashSet<>();
+ ignoredShardToNodes.put(shardId, nodes);
+ }
+ nodes.add(nodeId);
}
public boolean shouldIgnoreShardForNode(ShardId shardId, String nodeId) {
- return ignoredShardToNodes != null && nodeId.equals(ignoredShardToNodes.get(shardId));
+ if (ignoredShardToNodes == null) {
+ return false;
+ }
+ Set<String> nodes = ignoredShardToNodes.get(shardId);
+ return nodes != null && nodes.contains(nodeId);
}
/**
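
The change above turns a last-write-wins map into a per-shard set, so one shard can now be ignored on several nodes at once. A self-contained sketch of the same get-or-create idiom (hypothetical class; plain String keys stand in for ShardId):

import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

class IgnoredShards {
    private final Map<String, Set<String>> ignored = new HashMap<>();

    void addIgnore(String shardId, String nodeId) {
        Set<String> nodes = ignored.get(shardId);
        if (nodes == null) {               // get-or-create, Java 7 style as in the diff
            nodes = new HashSet<>();
            ignored.put(shardId, nodes);
        }
        nodes.add(nodeId);
    }

    boolean isIgnored(String shardId, String nodeId) {
        Set<String> nodes = ignored.get(shardId);
        return nodes != null && nodes.contains(nodeId);
    }
}
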
diff --git a/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java b/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java
index d928524aec6fc..c91836922065d 100644
--- a/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java
+++ b/src/main/java/org/elasticsearch/cluster/routing/allocation/decider/DiskThresholdDecider.java
@@ -186,7 +186,7 @@ public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, Routing
logger.debug("Less than the required {}% free disk threshold ({}% free) on node [{}], preventing allocation",
freeDiskThresholdLow, freeDiskPercentage, node.nodeId());
}
- return allocation.decision(Decision.NO, NAME, "less than required [%d%%] free disk on node, free: [%d%%]",
+ return allocation.decision(Decision.NO, NAME, "less than required [%s%%] free disk on node, free: [%s%%]",
freeDiskThresholdLow, freeDiskThresholdLow);
}
@@ -204,7 +204,7 @@ public Decision canAllocate(ShardRouting shardRouting, RoutingNode node, Routing
if (freeSpaceAfterShard < freeDiskThresholdHigh) {
logger.warn("After allocating, node [{}] would have less than the required {}% free disk threshold ({}% free), preventing allocation",
node.nodeId(), freeDiskThresholdHigh, freeSpaceAfterShard);
- return allocation.decision(Decision.NO, NAME, "after allocation less than required [%d%%] free disk on node, free: [%d%%]",
+ return allocation.decision(Decision.NO, NAME, "after allocation less than required [%s%%] free disk on node, free: [%s%%]",
freeDiskThresholdLow, freeSpaceAfterShard);
}
@@ -266,7 +266,7 @@ public Decision canRemain(ShardRouting shardRouting, RoutingNode node, RoutingAl
logger.debug("Less than the required {}% free disk threshold ({}% free) on node {}, shard cannot remain",
freeDiskThresholdHigh, freeDiskPercentage, node.nodeId());
}
- return allocation.decision(Decision.NO, NAME, "after allocation less than required [%d%%] free disk on node, free: [%d%%]",
+ return allocation.decision(Decision.NO, NAME, "after allocation less than required [%s%%] free disk on node, free: [%s%%]",
freeDiskThresholdHigh, freeDiskPercentage);
}
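
The %d to %s switch above is a crash fix rather than cosmetics: the threshold and percentage values are doubles, and %d rejects floating-point arguments at format time. A standalone demonstration:

import java.util.IllegalFormatConversionException;
import java.util.Locale;

class FormatSpecifierDemo {
    public static void main(String[] args) {
        double threshold = 85.0;
        // %s accepts any argument type
        System.out.println(String.format(Locale.ROOT, "less than required [%s%%] free disk", threshold));
        try {
            // %d only accepts integral types and throws for a double
            String.format(Locale.ROOT, "less than required [%d%%] free disk", threshold);
        } catch (IllegalFormatConversionException e) {
            System.out.println("%d with a double throws: " + e.getMessage());
        }
    }
}
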
diff --git a/src/main/java/org/elasticsearch/common/bytes/PagedBytesReference.java b/src/main/java/org/elasticsearch/common/bytes/PagedBytesReference.java
index 8ea6f8fc8ef51..664f041f61b1a 100644
--- a/src/main/java/org/elasticsearch/common/bytes/PagedBytesReference.java
+++ b/src/main/java/org/elasticsearch/common/bytes/PagedBytesReference.java
@@ -36,13 +36,17 @@
import java.nio.channels.GatheringByteChannel;
import java.util.Arrays;
-public final class PagedBytesReference implements BytesReference {
+/**
+ * A page based bytes reference, internally holding the bytes in a paged
+ * data structure.
+ */
+public class PagedBytesReference implements BytesReference {
private static final int PAGE_SIZE = BigArrays.BYTE_PAGE_SIZE;
private static final int NIO_GATHERING_LIMIT = 524288;
private final BigArrays bigarrays;
- private final ByteArray bytearray;
+ protected final ByteArray bytearray;
private final int offset;
private final int length;
private int hash = 0;
diff --git a/src/main/java/org/elasticsearch/common/bytes/ReleasableBytesReference.java b/src/main/java/org/elasticsearch/common/bytes/ReleasableBytesReference.java
new file mode 100644
index 0000000000000..749a83a67f42c
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/bytes/ReleasableBytesReference.java
@@ -0,0 +1,28 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.bytes;
+
+import org.elasticsearch.common.lease.Releasable;
+
+/**
+ * A bytes reference that needs to be released once it is no longer used.
+ */
+public interface ReleasableBytesReference extends BytesReference, Releasable {
+}
diff --git a/src/main/java/org/elasticsearch/common/bytes/ReleasablePagedBytesReference.java b/src/main/java/org/elasticsearch/common/bytes/ReleasablePagedBytesReference.java
new file mode 100644
index 0000000000000..f660cbd022d6b
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/bytes/ReleasablePagedBytesReference.java
@@ -0,0 +1,46 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.bytes;
+
+import org.elasticsearch.ElasticsearchException;
+import org.elasticsearch.common.lease.Releasables;
+import org.elasticsearch.common.util.BigArrays;
+import org.elasticsearch.common.util.ByteArray;
+
+/**
+ * An extension to {@link PagedBytesReference} that requires releasing its content. This
+ * class exists to make it explicit when a bytes reference needs to be released, and when not.
+ */
+public class ReleasablePagedBytesReference extends PagedBytesReference implements ReleasableBytesReference {
+
+ public ReleasablePagedBytesReference(BigArrays bigarrays, ByteArray bytearray, int length) {
+ super(bigarrays, bytearray, length);
+ }
+
+ public ReleasablePagedBytesReference(BigArrays bigarrays, ByteArray bytearray, int from, int length) {
+ super(bigarrays, bytearray, from, length);
+ }
+
+ @Override
+ public boolean release() throws ElasticsearchException {
+ Releasables.release(bytearray);
+ return true;
+ }
+}
diff --git a/src/main/java/org/elasticsearch/common/geo/GeoHashUtils.java b/src/main/java/org/elasticsearch/common/geo/GeoHashUtils.java
index 7c97e0614b5a9..17d163913495f 100644
--- a/src/main/java/org/elasticsearch/common/geo/GeoHashUtils.java
+++ b/src/main/java/org/elasticsearch/common/geo/GeoHashUtils.java
@@ -449,12 +449,12 @@ public static long encodeAsLong(double latitude, double longitude, int precision
*/
public static String toString(long geohashAsLong)
{
- int precision= (int) (geohashAsLong&15);
- char[] chars=new char[precision];
- geohashAsLong>>=4;
- for (int i = precision-1; i >=0 ; i--) {
- chars[i]= BASE_32[(int) (geohashAsLong&31)];
- geohashAsLong>>=5;
+ int precision = (int) (geohashAsLong&15);
+ char[] chars = new char[precision];
+ geohashAsLong >>= 4;
+ for (int i = precision - 1; i >= 0 ; i--) {
+ chars[i] = BASE_32[(int) (geohashAsLong & 31)];
+ geohashAsLong >>= 5;
}
return new String(chars);
}
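
For orientation, toString(long) above unpacks a long whose low 4 bits hold the precision (at most 15) and whose following 5-bit groups each hold one base-32 digit, with the first character of the geohash in the highest-order group. A hedged sketch of the inverse packing (hypothetical helper, assuming the same BASE_32 ordering):

class GeohashPackingSketch {
    static long pack(String geohash, String base32Alphabet) {
        long packed = 0;
        for (int i = 0; i < geohash.length(); i++) {
            // first character ends up in the highest-order 5-bit group
            packed = (packed << 5) | base32Alphabet.indexOf(geohash.charAt(i));
        }
        return (packed << 4) | geohash.length(); // low 4 bits: precision
    }
}
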
diff --git a/src/test/java/org/elasticsearch/plugins/isolation/IsolatedPlugin.java b/src/main/java/org/elasticsearch/common/io/ReleasableBytesStream.java
similarity index 64%
rename from src/test/java/org/elasticsearch/plugins/isolation/IsolatedPlugin.java
rename to src/main/java/org/elasticsearch/common/io/ReleasableBytesStream.java
index 648d31ecd1aa7..d6971b52d0c05 100644
--- a/src/test/java/org/elasticsearch/plugins/isolation/IsolatedPlugin.java
+++ b/src/main/java/org/elasticsearch/common/io/ReleasableBytesStream.java
@@ -16,25 +16,15 @@
* specific language governing permissions and limitations
* under the License.
*/
-package org.elasticsearch.plugins.isolation;
-import org.elasticsearch.plugins.AbstractPlugin;
+package org.elasticsearch.common.io;
-public class IsolatedPlugin extends AbstractPlugin {
+import org.elasticsearch.common.bytes.ReleasableBytesReference;
- private final DummyClass dummy;
-
- public IsolatedPlugin() {
- dummy = new DummyClass();
- }
-
- @Override
- public String name() {
- return dummy.name;
- }
+/**
+ * A bytes stream that requires its bytes to be released once no longer used.
+ */
+public interface ReleasableBytesStream extends BytesStream {
- @Override
- public String description() {
- return "IsolatedPlugin " + hashCode();
- }
+ ReleasableBytesReference bytes();
}
diff --git a/src/main/java/org/elasticsearch/common/io/stream/BytesStreamOutput.java b/src/main/java/org/elasticsearch/common/io/stream/BytesStreamOutput.java
index 899ecf2d87315..6f51f07a0d498 100644
--- a/src/main/java/org/elasticsearch/common/io/stream/BytesStreamOutput.java
+++ b/src/main/java/org/elasticsearch/common/io/stream/BytesStreamOutput.java
@@ -33,37 +33,31 @@
*/
public class BytesStreamOutput extends StreamOutput implements BytesStream {
- /**
- * Factory/manager for our ByteArray
- */
- private final BigArrays bigarrays;
+ protected final BigArrays bigarrays;
- /**
- * The internal list of pages.
- */
- private ByteArray bytes;
+ protected ByteArray bytes;
+ protected int count;
/**
- * The number of valid bytes in the buffer.
- */
- private int count;
-
- /**
- * Create a nonrecycling {@link BytesStreamOutput} with 1 initial page acquired.
+ * Create a non-recycling {@link BytesStreamOutput} with one initial page acquired.
*/
public BytesStreamOutput() {
this(BigArrays.PAGE_SIZE_IN_BYTES);
}
/**
- * Create a nonrecycling {@link BytesStreamOutput} with enough initial pages acquired
- * to satisfy the capacity given by {@link expectedSize}.
+ * Create a non-recycling {@link BytesStreamOutput} with enough initial pages acquired
+ * to satisfy the capacity given by {@code expectedSize}.
*
* @param expectedSize the expected maximum size of the stream in bytes.
*/
public BytesStreamOutput(int expectedSize) {
- bigarrays = BigArrays.NON_RECYCLING_INSTANCE;
- bytes = bigarrays.newByteArray(expectedSize);
+ this(expectedSize, BigArrays.NON_RECYCLING_INSTANCE);
+ }
+
+ protected BytesStreamOutput(int expectedSize, BigArrays bigarrays) {
+ this.bigarrays = bigarrays;
+ this.bytes = bigarrays.newByteArray(expectedSize);
}
@Override
diff --git a/src/main/java/org/elasticsearch/common/io/stream/ReleasableBytesStreamOutput.java b/src/main/java/org/elasticsearch/common/io/stream/ReleasableBytesStreamOutput.java
new file mode 100644
index 0000000000000..0ead43059f043
--- /dev/null
+++ b/src/main/java/org/elasticsearch/common/io/stream/ReleasableBytesStreamOutput.java
@@ -0,0 +1,48 @@
+/*
+ * Licensed to Elasticsearch under one or more contributor
+ * license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright
+ * ownership. Elasticsearch licenses this file to you under
+ * the Apache License, Version 2.0 (the "License"); you may
+ * not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing,
+ * software distributed under the License is distributed on an
+ * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+ * KIND, either express or implied. See the License for the
+ * specific language governing permissions and limitations
+ * under the License.
+ */
+
+package org.elasticsearch.common.io.stream;
+
+import org.elasticsearch.common.bytes.ReleasableBytesReference;
+import org.elasticsearch.common.bytes.ReleasablePagedBytesReference;
+import org.elasticsearch.common.io.ReleasableBytesStream;
+import org.elasticsearch.common.util.BigArrays;
+
+/**
+ * A bytes stream output that allows providing a {@link BigArrays} instance,
+ * whose content ({@link #bytes()}) must be released once writing is done.
+ *
+ * Please note, it is the responsibility of the caller to make sure the bytes
+ * reference does not "escape" and is released only once.
+ */
+public class ReleasableBytesStreamOutput extends BytesStreamOutput implements ReleasableBytesStream {
+
+ public ReleasableBytesStreamOutput(BigArrays bigarrays) {
+ super(BigArrays.PAGE_SIZE_IN_BYTES, bigarrays);
+ }
+
+ public ReleasableBytesStreamOutput(int expectedSize, BigArrays bigarrays) {
+ super(expectedSize, bigarrays);
+ }
+
+ @Override
+ public ReleasableBytesReference bytes() {
+ return new ReleasablePagedBytesReference(bigarrays, bytes, count);
+ }
+}
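
A minimal usage sketch for the new stream, assuming a BigArrays instance is at hand (here the non-recycling one, for which releasing is effectively a no-op): write, take the releasable reference, and release it exactly once when done.

import java.io.IOException;
import org.elasticsearch.common.bytes.ReleasableBytesReference;
import org.elasticsearch.common.io.stream.ReleasableBytesStreamOutput;
import org.elasticsearch.common.util.BigArrays;

class ReleasableStreamExample {
    static void roundTrip() throws IOException {
        ReleasableBytesStreamOutput out = new ReleasableBytesStreamOutput(BigArrays.NON_RECYCLING_INSTANCE);
        out.writeVInt(42);
        ReleasableBytesReference bytes = out.bytes();
        try {
            // hand the reference to the transport layer, copy it, etc.
        } finally {
            bytes.release(); // exactly once; the reference must not be used afterwards
        }
    }
}
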
diff --git a/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java b/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java
index aa01ae96fb126..9f88e0d462564 100644
--- a/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java
+++ b/src/main/java/org/elasticsearch/common/io/stream/StreamInput.java
@@ -434,19 +434,19 @@ public Object readGenericValue() throws IOException {
case 16:
return readShort();
case 17:
- return readPrimitiveIntArray();
+ return readIntArray();
case 18:
- return readPrimitiveLongArray();
+ return readLongArray();
case 19:
- return readPrimitiveFloatArray();
+ return readFloatArray();
case 20:
- return readPrimitiveDoubleArray();
+ return readDoubleArray();
default:
throw new IOException("Can't read unknown type [" + type + "]");
}
}
- private Object readPrimitiveIntArray() throws IOException {
+ public int[] readIntArray() throws IOException {
int length = readVInt();
int[] values = new int[length];
- for(int i=0; i<length; i++) {
+ for (int i = 0; i < length; i++) {
values[i] = readInt();
}
return values;
}
- private final Class<? extends ValuesSource> valuesSourceType;
-
- private ScriptValueType(Class<? extends ValuesSource> valuesSourceType) {
- this.valuesSourceType = valuesSourceType;
- }
-
- public Class<? extends ValuesSource> getValuesSourceType() {
- return valuesSourceType;
- }
-
- public boolean isNumeric() {
- return this != STRING;
- }
-
- public boolean isFloatingPoint() {
- return this == DOUBLE;
- }
+ public void setNextReader(IndexReaderContext reader);
}
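
The readIntArray() hunk above reads a VInt length prefix followed by the values. A plain-Java sketch of that wire format, mirroring the Lucene-style VInt (7 payload bits per byte, high bit as continuation flag) used by StreamInput and StreamOutput; this is an illustration, not the Elasticsearch classes:

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;

final class VIntSketch {
    static void writeVInt(ByteArrayOutputStream out, int i) {
        while ((i & ~0x7F) != 0) {
            out.write((i & 0x7F) | 0x80); // low 7 bits, continuation bit set
            i >>>= 7;
        }
        out.write(i);                     // last byte, continuation bit clear
    }

    static int readVInt(ByteArrayInputStream in) {
        int value = 0;
        for (int shift = 0; ; shift += 7) {
            int b = in.read();
            value |= (b & 0x7F) << shift;
            if ((b & 0x80) == 0) {
                return value;
            }
        }
    }

    public static void main(String[] args) {
        ByteArrayOutputStream out = new ByteArrayOutputStream();
        writeVInt(out, 300);              // encodes as two bytes
        int decoded = readVInt(new ByteArrayInputStream(out.toByteArray()));
        System.out.println(decoded);      // 300
    }
}
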
diff --git a/src/main/java/org/elasticsearch/common/util/CollectionUtils.java b/src/main/java/org/elasticsearch/common/util/CollectionUtils.java
index 7bdd54a3e587c..16297d16982b0 100644
--- a/src/main/java/org/elasticsearch/common/util/CollectionUtils.java
+++ b/src/main/java/org/elasticsearch/common/util/CollectionUtils.java
@@ -22,10 +22,12 @@
import com.carrotsearch.hppc.DoubleArrayList;
import com.carrotsearch.hppc.FloatArrayList;
import com.carrotsearch.hppc.LongArrayList;
+import com.carrotsearch.hppc.ObjectArrayList;
import org.apache.lucene.util.IntroSorter;
import org.elasticsearch.common.Preconditions;
import java.util.AbstractList;
+import java.util.Arrays;
import java.util.List;
import java.util.RandomAccess;
@@ -223,6 +225,61 @@ public static <T> List<T> rotate(final List<T> list, int distance) {
return new RotatedList<>(list, d);
}
+ public static void sortAndDedup(final ObjectArrayList<byte[]> array) {
+ int len = array.size();
+ if (len > 1) {
+ sort(array);
+ int uniqueCount = 1;
+ for (int i = 1; i < len; ++i) {
+ if (!Arrays.equals(array.get(i), array.get(i - 1))) {
+ array.set(uniqueCount++, array.get(i));
+ }
+ }
+ array.elementsCount = uniqueCount;
+ }
+ }
+
+ public static void sort(final ObjectArrayList<byte[]> array) {
+ new IntroSorter() {
+
+ byte[] pivot;
+
+ @Override
+ protected void swap(int i, int j) {
+ final byte[] tmp = array.get(i);
+ array.set(i, array.get(j));
+ array.set(j, tmp);
+ }
+
+ @Override
+ protected int compare(int i, int j) {
+ return compare(array.get(i), array.get(j));
+ }
+
+ @Override
+ protected void setPivot(int i) {
+ pivot = array.get(i);
+ }
+
+ @Override
+ protected int comparePivot(int j) {
+ return compare(pivot, array.get(j));
+ }
+
+ private int compare(byte[] left, byte[] right) {
+ for (int i = 0, j = 0; i < left.length && j < right.length; i++, j++) {
+ int a = left[i] & 0xFF;
+ int b = right[j] & 0xFF;
+ if (a != b) {
+ return a - b;
+ }
+ }
+ return left.length - right.length;
+ }
+
+ }.sort(0, array.size());
+ }
+
private static class RotatedList<T> extends AbstractList<T> implements RandomAccess {
private final List<T> in;
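
A usage sketch for the new helper above: byte[] values are ordered as unsigned lexicographic byte strings (note the & 0xFF in the comparator) and exact duplicates are collapsed in place:

import com.carrotsearch.hppc.ObjectArrayList;
import org.elasticsearch.common.util.CollectionUtils;

class SortAndDedupExample {
    static void demo() {
        ObjectArrayList<byte[]> values = new ObjectArrayList<>();
        values.add(new byte[] { 2, 1 });
        values.add(new byte[] { 1, (byte) 0xFF }); // 0xFF compares as 255, not -1
        values.add(new byte[] { 2, 1 });           // duplicate, dropped by dedup
        CollectionUtils.sortAndDedup(values);
        // values now holds {1, 0xFF} then {2, 1}
    }
}
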
diff --git a/src/main/java/org/elasticsearch/common/util/LongHash.java b/src/main/java/org/elasticsearch/common/util/LongHash.java
index dbaf2af3632f1..3e1c317949d2b 100644
--- a/src/main/java/org/elasticsearch/common/util/LongHash.java
+++ b/src/main/java/org/elasticsearch/common/util/LongHash.java
@@ -23,10 +23,10 @@
/**
* Specialized hash table implementation similar to BytesRefHash that maps
- * long values to ids. Collisions are resolved with open addressing and linear
- * probing, growth is smooth thanks to {@link BigArrays} and capacity is always
- * a multiple of 2 for faster identification of buckets.
- * This class is not thread-safe.
+ * long values to ids. Collisions are resolved with open addressing and linear
+ * probing, growth is smooth thanks to {@link BigArrays} and capacity is always
+ * a multiple of 2 for faster identification of buckets.
+ * This class is not thread-safe.
*/
// IDs are internally stored as id + 1 so that 0 encodes for an empty slot
public final class LongHash extends AbstractHash {
@@ -41,14 +41,14 @@ public LongHash(long capacity, BigArrays bigArrays) {
//Constructor with configurable capacity and load factor.
public LongHash(long capacity, float maxLoadFactor, BigArrays bigArrays) {
super(capacity, maxLoadFactor, bigArrays);
- keys = bigArrays.newLongArray(capacity(), false);
+ keys = bigArrays.newLongArray(capacity, false);
}
/**
* Return the key at 0 <= index <= capacity(). The result is undefined if the slot is unused.
*/
- public long key(long index) {
- return keys.get(index);
+ public long get(long id) {
+ return keys.get(id);
}
/**
@@ -58,7 +58,7 @@ public long find(long key) {
final long slot = slot(hash(key), mask);
for (long index = slot; ; index = nextSlot(index, mask)) {
final long id = id(index);
- if (id == -1 || keys.get(index) == key) {
+ if (id == -1 || keys.get(id) == key) {
return id;
}
}
@@ -71,25 +71,28 @@ private long set(long key, long id) {
final long curId = id(index);
if (curId == -1) { // means unset
id(index, id);
- keys.set(index, key);
+ append(id, key);
++size;
return id;
- } else if (keys.get(index) == key) {
+ } else if (keys.get(curId) == key) {
return -1 - curId;
}
}
}
+ private void append(long id, long key) {
+ keys = bigArrays.grow(keys, id + 1);
+ keys.set(id, key);
+ }
+
private void reset(long key, long id) {
final long slot = slot(hash(key), mask);
for (long index = slot; ; index = nextSlot(index, mask)) {
final long curId = id(index);
if (curId == -1) { // means unset
id(index, id);
- keys.set(index, key);
+ append(id, key);
break;
- } else {
- assert keys.get(index) != key;
}
}
}
@@ -107,17 +110,11 @@ public long add(long key) {
return set(key, size);
}
- @Override
- protected void resize(long capacity) {
- super.resize(capacity);
- keys = bigArrays.resize(keys, capacity);
- }
-
@Override
protected void removeAndAdd(long index) {
final long id = id(index, -1);
assert id >= 0;
- final long key = keys.set(index, 0);
+ final long key = keys.set(id, 0);
reset(key, id);
}
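
With keys now indexed by id rather than by hash slot, ids stay dense (0, 1, 2, ... in insertion order) and the keys array grows lazily through append(). A usage sketch of the resulting contract, using the two-argument constructor from the hunk header above:

import org.elasticsearch.common.util.BigArrays;
import org.elasticsearch.common.util.LongHash;

class LongHashExample {
    static void demo() {
        LongHash hash = new LongHash(16, BigArrays.NON_RECYCLING_INSTANCE);
        long id = hash.add(42L);      // first key is assigned id 0
        long dup = hash.add(42L);     // existing key: returns -1 - existingId
        long found = hash.find(42L);  // existing id, or -1 when absent
        long key = hash.get(id);      // 42L
        System.out.println(id + " " + dup + " " + found + " " + key);
    }
}
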
diff --git a/src/main/java/org/elasticsearch/common/xcontent/XContentHelper.java b/src/main/java/org/elasticsearch/common/xcontent/XContentHelper.java
index f8dfea5d251db..34a15327986b3 100644
--- a/src/main/java/org/elasticsearch/common/xcontent/XContentHelper.java
+++ b/src/main/java/org/elasticsearch/common/xcontent/XContentHelper.java
@@ -271,16 +271,16 @@ private static boolean allListValuesAreMapsOfOne(List list) {
}
public static void copyCurrentStructure(XContentGenerator generator, XContentParser parser) throws IOException {
- XContentParser.Token t = parser.currentToken();
+ XContentParser.Token token = parser.currentToken();
// Let's handle field-name separately first
- if (t == XContentParser.Token.FIELD_NAME) {
+ if (token == XContentParser.Token.FIELD_NAME) {
generator.writeFieldName(parser.currentName());
- t = parser.nextToken();
+ token = parser.nextToken();
// fall-through to copy the associated value
}
- switch (t) {
+ switch (token) {
case START_ARRAY:
generator.writeStartArray();
while (parser.nextToken() != XContentParser.Token.END_ARRAY) {
diff --git a/src/main/java/org/elasticsearch/common/xcontent/support/AbstractXContentParser.java b/src/main/java/org/elasticsearch/common/xcontent/support/AbstractXContentParser.java
index fc9a23354d906..b92006d082bdc 100644
--- a/src/main/java/org/elasticsearch/common/xcontent/support/AbstractXContentParser.java
+++ b/src/main/java/org/elasticsearch/common/xcontent/support/AbstractXContentParser.java
@@ -260,38 +260,38 @@ static Map<String, Object> readOrderedMap(XContentParser parser) throws IOExcept
static Map<String, Object> readMap(XContentParser parser, MapFactory mapFactory) throws IOException {
Map<String, Object> map = mapFactory.newMap();
- XContentParser.Token t = parser.currentToken();
- if (t == null) {
- t = parser.nextToken();
+ XContentParser.Token token = parser.currentToken();
+ if (token == null) {
+ token = parser.nextToken();
}
- if (t == XContentParser.Token.START_OBJECT) {
- t = parser.nextToken();
+ if (token == XContentParser.Token.START_OBJECT) {
+ token = parser.nextToken();
}
- for (; t == XContentParser.Token.FIELD_NAME; t = parser.nextToken()) {
+ for (; token == XContentParser.Token.FIELD_NAME; token = parser.nextToken()) {
// Must point to field name
String fieldName = parser.currentName();
// And then the value...
- t = parser.nextToken();
- Object value = readValue(parser, mapFactory, t);
+ token = parser.nextToken();
+ Object value = readValue(parser, mapFactory, token);
map.put(fieldName, value);
}
return map;
}
- private static List<Object> readList(XContentParser parser, MapFactory mapFactory, XContentParser.Token t) throws IOException {