Skip to content

Commit

Permalink
Merge remote-tracking branch 'elastic/master' into pr/38140
Browse files Browse the repository at this point in the history
* elastic/master: (54 commits)
  Introduce retention leases versioning (elastic#37951)
  Correctly disable tests for FIPS JVMs (elastic#38214)
  AwaitsFix testAbortedSnapshotDuringInitDoesNotStart (elastic#38227)
  Preserve ILM operation mode when creating new lifecycles (elastic#38134)
  Enable trace log in FollowerFailOverIT (elastic#38148)
  SnapshotShardsService Simplifications (elastic#38025)
  Default include_type_name to false in the yml test harness. (elastic#38058)
  Disable bwc preparing to backport of#37977, elastic#37857 and elastic#37872 (elastic#38126)
  Adding ml_settings entry to HLRC and Docs for deprecation_info (elastic#38118)
  Replace awaitBusy with assertBusy in atLeastDocsIndexed (elastic#38190)
  Adjust SearchRequest version checks (elastic#38181)
  AwaitsFix testClientSucceedsWithVerificationDisabled (elastic#38213)
  Zen2ify RareClusterStateIT (elastic#38184)
  ML: Fix error race condition on stop _all datafeeds and close _all jobs (elastic#38113)
  AwaitsFix PUT mapping with _doc on an index that has types (elastic#38204)
  Allow built-in monitoring_user role to call GET _xpack API (elastic#38060)
  Update geo_shape docs to include unsupported features (elastic#38138)
  [ML] Remove "8" prefixes from file structure finder timestamp formats (elastic#38016)
  Disable bwc tests while backporting elastic#38104 (elastic#38182)
  Enable TLSv1.3 by default for JDKs with support (elastic#38103)
  ...
  • Loading branch information
jasontedor committed Feb 1, 2019
2 parents 906da63 + f181e17 commit 99a2acf
Show file tree
Hide file tree
Showing 384 changed files with 5,998 additions and 3,163 deletions.
4 changes: 2 additions & 2 deletions build.gradle
Expand Up @@ -159,8 +159,8 @@ task verifyVersions {
* the enabled state of every bwc task. It should be set back to true
* after the backport of the backcompat code is complete.
*/
final boolean bwc_tests_enabled = true
final String bwc_tests_disabled_issue = "" /* place a PR link here when committing bwc changes */
final boolean bwc_tests_enabled = false
final String bwc_tests_disabled_issue = "https://github.com/elastic/elasticsearch/pull/37951" /* place a PR link here when committing bwc changes */
if (bwc_tests_enabled == false) {
if (bwc_tests_disabled_issue.isEmpty()) {
throw new GradleException("bwc_tests_disabled_issue must be set when bwc_tests_enabled == false")
Expand Down
Expand Up @@ -95,6 +95,8 @@
import org.elasticsearch.search.aggregations.bucket.filter.ParsedFilters;
import org.elasticsearch.search.aggregations.bucket.geogrid.GeoHashGridAggregationBuilder;
import org.elasticsearch.search.aggregations.bucket.geogrid.ParsedGeoHashGrid;
import org.elasticsearch.search.aggregations.bucket.geogrid.ParsedGeoTileGrid;
import org.elasticsearch.search.aggregations.bucket.geogrid.GeoTileGridAggregationBuilder;
import org.elasticsearch.search.aggregations.bucket.global.GlobalAggregationBuilder;
import org.elasticsearch.search.aggregations.bucket.global.ParsedGlobal;
import org.elasticsearch.search.aggregations.bucket.histogram.AutoDateHistogramAggregationBuilder;
Expand Down Expand Up @@ -1760,6 +1762,7 @@ static List<NamedXContentRegistry.Entry> getDefaultNamedXContents() {
map.put(FilterAggregationBuilder.NAME, (p, c) -> ParsedFilter.fromXContent(p, (String) c));
map.put(InternalSampler.PARSER_NAME, (p, c) -> ParsedSampler.fromXContent(p, (String) c));
map.put(GeoHashGridAggregationBuilder.NAME, (p, c) -> ParsedGeoHashGrid.fromXContent(p, (String) c));
map.put(GeoTileGridAggregationBuilder.NAME, (p, c) -> ParsedGeoTileGrid.fromXContent(p, (String) c));
map.put(RangeAggregationBuilder.NAME, (p, c) -> ParsedRange.fromXContent(p, (String) c));
map.put(DateRangeAggregationBuilder.NAME, (p, c) -> ParsedDateRange.fromXContent(p, (String) c));
map.put(GeoDistanceAggregationBuilder.NAME, (p, c) -> ParsedGeoDistance.fromXContent(p, (String) c));
Expand Down
Expand Up @@ -37,16 +37,19 @@ public class DeprecationInfoResponse {
private static final ParseField CLUSTER_SETTINGS = new ParseField("cluster_settings");
private static final ParseField NODE_SETTINGS = new ParseField("node_settings");
private static final ParseField INDEX_SETTINGS = new ParseField("index_settings");
private static final ParseField ML_SETTINGS = new ParseField("ml_settings");

private final List<DeprecationIssue> clusterSettingsIssues;
private final List<DeprecationIssue> nodeSettingsIssues;
private final Map<String, List<DeprecationIssue>> indexSettingsIssues;
private final List<DeprecationIssue> mlSettingsIssues;

public DeprecationInfoResponse(List<DeprecationIssue> clusterSettingsIssues, List<DeprecationIssue> nodeSettingsIssues,
Map<String, List<DeprecationIssue>> indexSettingsIssues) {
Map<String, List<DeprecationIssue>> indexSettingsIssues, List<DeprecationIssue> mlSettingsIssues) {
this.clusterSettingsIssues = Objects.requireNonNull(clusterSettingsIssues, "cluster settings issues cannot be null");
this.nodeSettingsIssues = Objects.requireNonNull(nodeSettingsIssues, "node settings issues cannot be null");
this.indexSettingsIssues = Objects.requireNonNull(indexSettingsIssues, "index settings issues cannot be null");
this.mlSettingsIssues = Objects.requireNonNull(mlSettingsIssues, "ml settings issues cannot be null");
}

public List<DeprecationIssue> getClusterSettingsIssues() {
Expand All @@ -61,6 +64,10 @@ public Map<String, List<DeprecationIssue>> getIndexSettingsIssues() {
return indexSettingsIssues;
}

/**
 * Returns the machine-learning-related deprecation issues parsed from the
 * {@code ml_settings} section of the deprecation info response.
 *
 * @return the list of ML settings deprecation issues; never {@code null}
 *         (the constructor rejects a null list)
 */
public List<DeprecationIssue> getMlSettingsIssues() {
return mlSettingsIssues;
}

private static List<DeprecationIssue> parseDeprecationIssues(XContentParser parser) throws IOException {
List<DeprecationIssue> issues = new ArrayList<>();
XContentParser.Token token = null;
Expand All @@ -76,6 +83,7 @@ public static DeprecationInfoResponse fromXContent(XContentParser parser) throws
Map<String, List<DeprecationIssue>> indexSettings = new HashMap<>();
List<DeprecationIssue> clusterSettings = new ArrayList<>();
List<DeprecationIssue> nodeSettings = new ArrayList<>();
List<DeprecationIssue> mlSettings = new ArrayList<>();
String fieldName = null;
XContentParser.Token token;
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
Expand All @@ -85,6 +93,8 @@ public static DeprecationInfoResponse fromXContent(XContentParser parser) throws
clusterSettings.addAll(parseDeprecationIssues(parser));
} else if (NODE_SETTINGS.getPreferredName().equals(fieldName)) {
nodeSettings.addAll(parseDeprecationIssues(parser));
} else if (ML_SETTINGS.getPreferredName().equals(fieldName)) {
mlSettings.addAll(parseDeprecationIssues(parser));
} else if (INDEX_SETTINGS.getPreferredName().equals(fieldName)) {
// parse out the key/value pairs
while ((token = parser.nextToken()) != XContentParser.Token.END_OBJECT) {
Expand All @@ -96,7 +106,7 @@ public static DeprecationInfoResponse fromXContent(XContentParser parser) throws
}
}
}
return new DeprecationInfoResponse(clusterSettings, nodeSettings, indexSettings);
return new DeprecationInfoResponse(clusterSettings, nodeSettings, indexSettings, mlSettings);
}

@Override
Expand All @@ -106,17 +116,19 @@ public boolean equals(Object o) {
DeprecationInfoResponse that = (DeprecationInfoResponse) o;
return Objects.equals(clusterSettingsIssues, that.clusterSettingsIssues) &&
Objects.equals(nodeSettingsIssues, that.nodeSettingsIssues) &&
Objects.equals(mlSettingsIssues, that.mlSettingsIssues) &&
Objects.equals(indexSettingsIssues, that.indexSettingsIssues);
}

@Override
public int hashCode() {
return Objects.hash(clusterSettingsIssues, nodeSettingsIssues, indexSettingsIssues);
return Objects.hash(clusterSettingsIssues, nodeSettingsIssues, indexSettingsIssues, mlSettingsIssues);
}

@Override
public String toString() {
return clusterSettingsIssues.toString() + ":" + nodeSettingsIssues.toString() + ":" + indexSettingsIssues.toString();
return clusterSettingsIssues.toString() + ":" + nodeSettingsIssues.toString() + ":" + indexSettingsIssues.toString() +
":" + mlSettingsIssues.toString();
}

/**
Expand Down
Expand Up @@ -182,6 +182,7 @@ public void testClusterHealthYellowClusterLevel() throws IOException {
assertThat(response.getIndices().size(), equalTo(0));
}

@AwaitsFix(bugUrl="https://github.com/elastic/elasticsearch/issues/35450")
public void testClusterHealthYellowIndicesLevel() throws IOException {
createIndex("index", Settings.EMPTY);
createIndex("index2", Settings.EMPTY);
Expand Down
Expand Up @@ -85,6 +85,7 @@ public void testGetDeprecationInfo() throws IOException {
assertThat(response.getClusterSettingsIssues().size(), equalTo(0));
assertThat(response.getIndexSettingsIssues().size(), equalTo(0));
assertThat(response.getNodeSettingsIssues().size(), equalTo(0));
assertThat(response.getMlSettingsIssues().size(), equalTo(0));
}

/**
Expand Down
Expand Up @@ -182,6 +182,8 @@ public void testGetDeprecationInfo() throws IOException, InterruptedException {
deprecationInfoResponse.getNodeSettingsIssues(); // <2>
Map<String, List<DeprecationInfoResponse.DeprecationIssue>> indexIssues =
deprecationInfoResponse.getIndexSettingsIssues(); // <3>
List<DeprecationInfoResponse.DeprecationIssue> mlIssues =
deprecationInfoResponse.getMlSettingsIssues(); // <4>
// end::get-deprecation-info-response

// tag::get-deprecation-info-execute-listener
Expand All @@ -195,6 +197,8 @@ public void onResponse(DeprecationInfoResponse deprecationInfoResponse1) { // <1
deprecationInfoResponse.getNodeSettingsIssues();
Map<String, List<DeprecationInfoResponse.DeprecationIssue>> indexIssues =
deprecationInfoResponse.getIndexSettingsIssues();
List<DeprecationInfoResponse.DeprecationIssue> mlIssues =
deprecationInfoResponse.getMlSettingsIssues();
}

@Override
Expand Down
Expand Up @@ -65,6 +65,12 @@ private void toXContent(DeprecationInfoResponse response, XContentBuilder builde
}
}
builder.endObject();

builder.startArray("ml_settings");
for (DeprecationInfoResponse.DeprecationIssue issue : response.getMlSettingsIssues()) {
toXContent(issue, builder);
}
builder.endArray();
}
builder.endObject();
}
Expand Down Expand Up @@ -105,12 +111,14 @@ private List<DeprecationInfoResponse.DeprecationIssue> createRandomIssues(boolea
}

private DeprecationInfoResponse createInstance() {
return new DeprecationInfoResponse(createRandomIssues(true), createRandomIssues(true), createIndexSettingsIssues());
return new DeprecationInfoResponse(createRandomIssues(true), createRandomIssues(true), createIndexSettingsIssues(),
createRandomIssues(true));
}

private DeprecationInfoResponse copyInstance(DeprecationInfoResponse req) {
return new DeprecationInfoResponse(new ArrayList<>(req.getClusterSettingsIssues()),
new ArrayList<>(req.getNodeSettingsIssues()), new HashMap<>(req.getIndexSettingsIssues()));
new ArrayList<>(req.getNodeSettingsIssues()), new HashMap<>(req.getIndexSettingsIssues()),
new ArrayList<>(req.getMlSettingsIssues()));
}

private DeprecationInfoResponse mutateInstance(DeprecationInfoResponse req) {
Expand All @@ -128,16 +136,21 @@ public void testFromXContent() throws IOException {
}

public void testNullFailedIndices() {
NullPointerException exception =
expectThrows(NullPointerException.class, () -> new DeprecationInfoResponse(null, null, null));
NullPointerException exception = expectThrows(NullPointerException.class,
() -> new DeprecationInfoResponse(null, null, null, null));
assertEquals("cluster settings issues cannot be null", exception.getMessage());

exception = expectThrows(NullPointerException.class, () -> new DeprecationInfoResponse(Collections.emptyList(), null, null));
exception = expectThrows(NullPointerException.class,
() -> new DeprecationInfoResponse(Collections.emptyList(), null, null, null));
assertEquals("node settings issues cannot be null", exception.getMessage());

exception = expectThrows(NullPointerException.class,
() -> new DeprecationInfoResponse(Collections.emptyList(), Collections.emptyList(), null));
() -> new DeprecationInfoResponse(Collections.emptyList(), Collections.emptyList(), null, null));
assertEquals("index settings issues cannot be null", exception.getMessage());

exception = expectThrows(NullPointerException.class,
() -> new DeprecationInfoResponse(Collections.emptyList(), Collections.emptyList(), Collections.emptyMap(), null));
assertEquals("ml settings issues cannot be null", exception.getMessage());
}

public void testEqualsAndHashCode() {
Expand Down
Expand Up @@ -33,3 +33,4 @@ include-tagged::{doc-tests-file}[{api}-response]
<1> a List of Cluster deprecations
<2> a List of Node deprecations
<3> a Map of key IndexName, value List of deprecations for the index
<4> a List of Machine Learning related deprecations
185 changes: 185 additions & 0 deletions docs/reference/aggregations/bucket/geotilegrid-aggregation.asciidoc
@@ -0,0 +1,185 @@
[[search-aggregations-bucket-geotilegrid-aggregation]]
=== GeoTile Grid Aggregation

A multi-bucket aggregation that works on `geo_point` fields and groups points into
buckets that represent cells in a grid. The resulting grid can be sparse and only
contains cells that have matching data. Each cell corresponds to a
https://en.wikipedia.org/wiki/Tiled_web_map[map tile] as used by many online map
sites. Each cell is labeled using a "{zoom}/{x}/{y}" format, where zoom is equal
to the user-specified precision.

* High precision keys have a larger range for x and y, and represent tiles that
cover only a small area.
* Low precision keys have a smaller range for x and y, and represent tiles that
each cover a large area.

See https://wiki.openstreetmap.org/wiki/Zoom_levels[Zoom level documentation]
on how precision (zoom) correlates to size on the ground. Precision for this
aggregation can be between 0 and 29, inclusive.

WARNING: The highest-precision geotile of length 29 produces cells that cover
less than 10cm by 10cm of land, so high-precision requests can be very
costly in terms of RAM and result sizes. Please see the example below on how
to first filter the aggregation to a smaller geographic area before requesting
high levels of detail.

The specified field must be of type `geo_point` (which can only be set
explicitly in the mappings) and it can also hold an array of `geo_point`
fields, in which case all points will be taken into account during aggregation.


==== Simple low-precision request

[source,js]
--------------------------------------------------
PUT /museums
{
"mappings": {
"properties": {
"location": {
"type": "geo_point"
}
}
}
}
POST /museums/_bulk?refresh
{"index":{"_id":1}}
{"location": "52.374081,4.912350", "name": "NEMO Science Museum"}
{"index":{"_id":2}}
{"location": "52.369219,4.901618", "name": "Museum Het Rembrandthuis"}
{"index":{"_id":3}}
{"location": "52.371667,4.914722", "name": "Nederlands Scheepvaartmuseum"}
{"index":{"_id":4}}
{"location": "51.222900,4.405200", "name": "Letterenhuis"}
{"index":{"_id":5}}
{"location": "48.861111,2.336389", "name": "Musée du Louvre"}
{"index":{"_id":6}}
{"location": "48.860000,2.327000", "name": "Musée d'Orsay"}
POST /museums/_search?size=0
{
"aggregations" : {
"large-grid" : {
"geotile_grid" : {
"field" : "location",
"precision" : 8
}
}
}
}
--------------------------------------------------
// CONSOLE

Response:

[source,js]
--------------------------------------------------
{
...
"aggregations": {
"large-grid": {
"buckets": [
{
"key" : "8/131/84",
"doc_count" : 3
},
{
"key" : "8/129/88",
"doc_count" : 2
},
{
"key" : "8/131/85",
"doc_count" : 1
}
]
}
}
}
--------------------------------------------------
// TESTRESPONSE[s/\.\.\./"took": $body.took,"_shards": $body._shards,"hits":$body.hits,"timed_out":false,/]

==== High-precision requests

When requesting detailed buckets (typically for displaying a "zoomed in" map)
a filter like <<query-dsl-geo-bounding-box-query,geo_bounding_box>> should be
applied to narrow the subject area otherwise potentially millions of buckets
will be created and returned.

[source,js]
--------------------------------------------------
POST /museums/_search?size=0
{
"aggregations" : {
"zoomed-in" : {
"filter" : {
"geo_bounding_box" : {
"location" : {
"top_left" : "52.4, 4.9",
"bottom_right" : "52.3, 5.0"
}
}
},
"aggregations":{
"zoom1":{
"geotile_grid" : {
"field": "location",
"precision": 22
}
}
}
}
}
}
--------------------------------------------------
// CONSOLE
// TEST[continued]

[source,js]
--------------------------------------------------
{
...
"aggregations" : {
"zoomed-in" : {
"doc_count" : 3,
"zoom1" : {
"buckets" : [
{
"key" : "22/2154412/1378379",
"doc_count" : 1
},
{
"key" : "22/2154385/1378332",
"doc_count" : 1
},
{
"key" : "22/2154259/1378425",
"doc_count" : 1
}
]
}
}
}
}
--------------------------------------------------
// TESTRESPONSE[s/\.\.\./"took": $body.took,"_shards": $body._shards,"hits":$body.hits,"timed_out":false,/]


==== Options

[horizontal]
field:: Mandatory. The name of the field indexed with GeoPoints.

precision:: Optional. The integer zoom of the key used to define
cells/buckets in the results. Defaults to 7.
Values outside of [0,29] will be rejected.

size::      Optional. The maximum number of geotile buckets to return
            (defaults to 10,000). When results are trimmed, buckets are
            prioritised based on the volumes of documents they contain.

shard_size:: Optional. To allow for more accurate counting of the top cells
returned in the final result the aggregation defaults to
returning `max(10,(size x number-of-shards))` buckets from each
shard. If this heuristic is undesirable, the number considered
from each shard can be over-ridden using this parameter.

0 comments on commit 99a2acf

Please sign in to comment.