From 3efbe95ca4f53cc9fb018d5b65a1b69c77d09616 Mon Sep 17 00:00:00 2001 From: Simon Willnauer Date: Mon, 22 Feb 2016 11:37:57 -0800 Subject: [PATCH 001/103] RestClient prototype --- client/build.gradle | 82 +++++++ .../org/elasticsearch/client/IndexClient.java | 65 ++++++ .../org/elasticsearch/client/RestClient.java | 217 ++++++++++++++++++ .../elasticsearch/client/RestClientTests.java | 152 ++++++++++++ settings.gradle | 1 + 5 files changed, 517 insertions(+) create mode 100644 client/build.gradle create mode 100644 client/src/main/java/org/elasticsearch/client/IndexClient.java create mode 100644 client/src/main/java/org/elasticsearch/client/RestClient.java create mode 100644 client/src/test/java/org/elasticsearch/client/RestClientTests.java diff --git a/client/build.gradle b/client/build.gradle new file mode 100644 index 0000000000000..1ae32bf6e98cc --- /dev/null +++ b/client/build.gradle @@ -0,0 +1,82 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +import org.elasticsearch.gradle.precommit.PrecommitTasks; + +group = 'org.elasticsearch.client' +apply plugin: 'elasticsearch.build' + +dependencies { + // we use the lucene test-framework here but we are not pulling in ES core or the test framework + testCompile "com.carrotsearch.randomizedtesting:randomizedtesting-runner:${versions.randomizedrunner}" + testCompile "junit:junit:${versions.junit}" + testCompile 'org.hamcrest:hamcrest-all:1.3' + testCompile "org.apache.lucene:lucene-test-framework:${versions.lucene}" + testCompile "org.apache.lucene:lucene-core:${versions.lucene}" + testCompile "org.apache.lucene:lucene-codecs:${versions.lucene}" + + + // TODO once we got rid of the client in the test framework we should use a version variable here + // we use httpclient here since the JDK support has several issue + // - httpclient supports basic and digest auth and other schemes + // - URLConnection has issues with SSL and not all system patches are available + // - URLConnection can't stream data but httpclient can + // - URLConnection doesn't expose responsecodes unless it's a 200 + // - httpclient supports pipelining which we might wanna expose down the road? 
+ compile "org.apache.httpcomponents:httpclient:4.5.1" + compile "org.apache.httpcomponents:httpcore:4.4.4" + compile "org.apache.httpcomponents:httpcore-nio:4.4.4" // currently unused + compile "commons-logging:commons-logging:1.2" + compile 'org.apache.httpcomponents:httpasyncclient:4.1.1' // currently unused +} + + +compileJava.options.compilerArgs << '-Xlint:-cast,-rawtypes,-try,-unchecked' +compileTestJava.options.compilerArgs << '-Xlint:-rawtypes' + +// the main files are actually test files, so use the appopriate forbidden api sigs +forbiddenApisMain { + bundledSignatures = ['jdk-unsafe', 'jdk-deprecated'] + signaturesURLs = [PrecommitTasks.getResource('/forbidden/all-signatures.txt'), + PrecommitTasks.getResource('/forbidden/test-signatures.txt')] +} + +forbiddenApisTest.enabled=false +forbiddenApisMain.enabled=false + +// dependency license are currently checked in distribution +dependencyLicenses.enabled = false +jarHell.enabled = false +thirdPartyAudit.enabled = false +thirdPartyAudit.excludes = [ + // classes are missing + 'javax.servlet.ServletContextEvent', + 'javax.servlet.ServletContextListener', + 'org.apache.avalon.framework.logger.Logger', + 'org.apache.log.Hierarchy', + 'org.apache.log.Logger', + // we intentionally exclude the ant tasks because people were depending on them from their tests!!!!!!! 
+ 'org.apache.tools.ant.BuildException', + 'org.apache.tools.ant.DirectoryScanner', + 'org.apache.tools.ant.Task', + 'org.apache.tools.ant.types.FileSet', + 'org.easymock.EasyMock', + 'org.easymock.IArgumentMatcher', + 'org.jmock.core.Constraint', +] diff --git a/client/src/main/java/org/elasticsearch/client/IndexClient.java b/client/src/main/java/org/elasticsearch/client/IndexClient.java new file mode 100644 index 0000000000000..ce3905b2a124e --- /dev/null +++ b/client/src/main/java/org/elasticsearch/client/IndexClient.java @@ -0,0 +1,65 @@ +package org.elasticsearch.client; + +import java.io.IOException; +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; +import java.util.Objects; + +/** + * Created by simon on 2/16/16. + */ +public class IndexClient { + + private final RestClient client; + + public IndexClient(RestClient client) { + this.client = client; + } + + public void delete(String index, String type, String id) throws IOException { + delete(index, type, id, null); + } + public void delete(String index, String type, String id, DeleteOptions params) throws IOException { + Objects.requireNonNull(index, "index must not be null"); + Objects.requireNonNull(type, "type must not be null"); + Objects.requireNonNull(id, "id must not be null"); + String deleteEndpoint = String.format("/%s/%s/%s", index, type, id); + client.httpDelete(deleteEndpoint, params == null ? 
Collections.emptyMap() : params.options); + } + + public class DeleteOptions { + private final Map options = new HashMap<>(); + /** Specific write consistency setting for the operation one of "one", "quorum", "all"*/ + public void consistency(String consistency) { + options.put("consistency", consistency); + }; + /** ID of parent document */ + public void parent(String parent){ + options.put("parent", parent); + }; + /** Refresh the index after performing the operation */ + public void refresh(Boolean refresh) { + options.put("refresh", refresh); + }; + /** Specific routing value */ + public void routing(String routing) { + options.put("routing", routing); + }; + /** Explicit version number for concurrency control */ + public void version(Number version) { + options.put("version", version); + }; + /** Specific version type one of "internal", "external", "external_gte", "force" */ + public void versionType(String versionType) { + options.put("version_type", versionType); + }; + /** Explicit operation timeout */ + public void timeout(String timeout) { + options.put("timeout", timeout); + }; + } + + + +} diff --git a/client/src/main/java/org/elasticsearch/client/RestClient.java b/client/src/main/java/org/elasticsearch/client/RestClient.java new file mode 100644 index 0000000000000..8c2c9f0e8ee4f --- /dev/null +++ b/client/src/main/java/org/elasticsearch/client/RestClient.java @@ -0,0 +1,217 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.client; + +import org.apache.http.HttpEntity; +import org.apache.http.HttpHost; +import org.apache.http.HttpResponse; +import org.apache.http.NameValuePair; +import org.apache.http.StatusLine; +import org.apache.http.client.config.RequestConfig; +import org.apache.http.client.methods.HttpDelete; +import org.apache.http.client.methods.HttpGet; +import org.apache.http.client.methods.HttpPost; +import org.apache.http.client.methods.HttpPut; +import org.apache.http.client.methods.HttpUriRequest; +import org.apache.http.client.utils.URLEncodedUtils; +import org.apache.http.impl.client.CloseableHttpClient; +import org.apache.http.impl.client.HttpClientBuilder; +import org.apache.http.message.BasicNameValuePair; + +import java.io.BufferedReader; +import java.io.Closeable; +import java.io.IOException; +import java.io.InputStreamReader; +import java.net.URI; +import java.net.URISyntaxException; +import java.nio.charset.StandardCharsets; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.concurrent.CopyOnWriteArraySet; +import java.util.stream.Collectors; + +/** + */ +public class RestClient implements Closeable{ + + private final CloseableHttpClient client; + private volatile Set hosts; + private final String scheme; + private final Set blackList = new CopyOnWriteArraySet<>(); + + public RestClient(HttpHost... 
hosts) { + this("http", HttpClientBuilder.create().setDefaultRequestConfig(RequestConfig.custom().setConnectTimeout(100).build()).build(), hosts); + } + + public RestClient(String scheme, CloseableHttpClient client, HttpHost[] hosts) { + if (hosts.length == 0) { + throw new IllegalArgumentException("hosts must note be empty"); + } + this.scheme = scheme; + this.client = client; + this.hosts = new HashSet<>(Arrays.asList(hosts)); + } + + + public HttpResponse httpGet(String endpoint, Map params) throws IOException { + return httpGet(getHostIterator(true), endpoint, params); + } + + HttpResponse httpGet(Iterable hosts, String endpoint, Map params) throws IOException { + HttpUriRequest request = new HttpGet(buildUri(endpoint, pairs(params))); + return execute(request, hosts); + } + + HttpResponse httpDelete(String endpoint, Map params) throws IOException { + HttpUriRequest request = new HttpDelete(buildUri(endpoint, pairs(params))); + return execute(request, getHostIterator(true)); + } + + HttpResponse httpPut(String endpoint, HttpEntity body, Map params) throws IOException { + HttpPut request = new HttpPut(buildUri(endpoint, pairs(params))); + request.setEntity(body); + return execute(request, getHostIterator(true)); + } + + HttpResponse httpPost(String endpoint, HttpEntity body, Map params) throws IOException { + HttpPost request = new HttpPost(buildUri(endpoint, pairs(params))); + request.setEntity(body); + return execute(request, getHostIterator(true)); + } + + private List pairs(Map options) { + return options.entrySet().stream().map(e -> new BasicNameValuePair(e.getKey(), e.getValue().toString())) + .collect(Collectors.toList()); + } + + public HttpResponse execute(HttpUriRequest request, Iterable retryHosts) throws IOException { + IOException exception = null; + for (HttpHost singleHost : retryHosts) { + try { + return client.execute(singleHost, request); + } catch (IOException ex) { + if (this.hosts.contains(singleHost)) { + blackList.add(singleHost); + } + if 
(exception != null) { + exception.addSuppressed(ex); + } else { + exception = ex; + } + } + } + throw exception; + } + + public URI buildUri(String path, List query) { + try { + return new URI(null, null, null, -1, path, URLEncodedUtils.format(query, StandardCharsets.UTF_8), null); + } catch (URISyntaxException e) { + throw new RuntimeException(e.getMessage(), e); + } + } + + public Set fetchNodes(HttpHost host, boolean useClientNodes, boolean local, boolean checkAvailable) throws IOException { + HttpResponse httpResponse = httpGet(Collections.singleton(host), "/_cat/nodes", Collections.singletonMap("h", "http,role")); + StatusLine statusLine = httpResponse.getStatusLine(); + if (statusLine.getStatusCode() != 200) { + throw new RuntimeException("failed to fetch nodes: " + statusLine.getReasonPhrase()); + } + HttpEntity entity = httpResponse.getEntity(); + Set hosts = new HashSet<>(); + try (BufferedReader content = new BufferedReader(new InputStreamReader(entity.getContent(), StandardCharsets.UTF_8))) { + String line; + while((line = content.readLine()) != null) { + final String[] split = line.split("\\s+"); + assert split.length == 2; + String boundAddress = split[0]; + String role = split[1]; + if ("-".equals(split[0].trim()) == false) { + if ("d".equals(role.trim()) == false && useClientNodes == false) { + continue; + } + URI boundAddressAsURI = URI.create("http://" + boundAddress); + HttpHost newHost = new HttpHost(boundAddressAsURI.getHost(), boundAddressAsURI.getPort(), scheme); + if (checkAvailable == false || isAvailable(newHost)) { + hosts.add(newHost); + } + } + } + } + return hosts; + } + + public String getClusterName(HttpHost host) throws IOException { + HttpResponse httpResponse = httpGet(Collections.singleton(host), "/_cat/health", Collections.singletonMap("h", "cluster")); + StatusLine statusLine = httpResponse.getStatusLine(); + if (statusLine.getStatusCode() != 200) { + throw new RuntimeException("failed to fetch nodes: " + 
statusLine.getReasonPhrase()); + } + HttpEntity entity = httpResponse.getEntity(); + try (BufferedReader content = new BufferedReader(new InputStreamReader(entity.getContent(), StandardCharsets.UTF_8))) { + String clusterName = content.readLine().trim(); + if (clusterName.length() == 0) { + throw new IllegalStateException("clustername must not be empty"); + } + return clusterName; + } + } + + public boolean isAvailable(HttpHost host) { + try { + HttpResponse httpResponse = httpGet(Collections.singleton(host), "/", Collections.emptyMap()); + StatusLine statusLine = httpResponse.getStatusLine(); + return statusLine.getStatusCode() == 200; + } catch (IOException ex) { + return false; + } + } + + public synchronized void setNodes(Set hosts) { + this.hosts = Collections.unmodifiableSet(new HashSet<>(hosts)); + blackList.retainAll(hosts); + } + + public Set getHosts() { + return hosts; + } + + protected Iterable getHostIterator(boolean clearBlacklist) { + if (hosts.size() == blackList.size() && clearBlacklist) { + blackList.clear(); // lets try again + } + return () -> hosts.stream().filter((h) -> blackList.contains(h) == false).iterator(); + } + + int getNumHosts() { + return hosts.size(); + } + + int getNumBlacklistedHosts() { + return blackList.size(); + } + @Override + public void close() throws IOException { + client.close(); + } +} diff --git a/client/src/test/java/org/elasticsearch/client/RestClientTests.java b/client/src/test/java/org/elasticsearch/client/RestClientTests.java new file mode 100644 index 0000000000000..366ae69233998 --- /dev/null +++ b/client/src/test/java/org/elasticsearch/client/RestClientTests.java @@ -0,0 +1,152 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. 
Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.client; +import com.sun.net.httpserver.HttpExchange; +import com.sun.net.httpserver.HttpServer; +import org.apache.http.HttpHost; +import org.apache.http.HttpResponse; +import org.apache.lucene.util.LuceneTestCase; + +import java.io.IOException; +import java.io.OutputStream; +import java.net.InetSocketAddress; +import java.net.URISyntaxException; +import java.util.Arrays; +import java.util.Collections; +import java.util.List; +import java.util.concurrent.ExecutionException; +public class RestClientTests extends LuceneTestCase { + //TODO this should be refactored into a base test!! 
+ HttpServer server; + protected String clusterName = "elasticsearch"; + protected List additionalNodes = Collections.emptyList(); + + + public void setUp() throws Exception { + super.setUp(); + server = HttpServer.create(new InetSocketAddress(0), 0); + server.setExecutor(null); // creates a default executor + server.start(); + server.createContext("/", (t) -> { + handle("/", t); + }); + server.createContext("/_cat/nodes", (t) -> { + handle("/_cat/nodes", t); + }); + server.createContext("/_cat/health", (t) -> { + handle("/_cat/health", t); + }); + } + + protected void handle(String path, HttpExchange t) throws IOException { + final String response; + switch (path) { + case "/": + response = "{}"; + break; + case "/_cat/nodes": + StringBuilder builder = new StringBuilder( "127.0.0.1:" + server.getAddress().getPort() + " " + "d\n"); + for (String host : additionalNodes) { + builder.append(host).append("\n"); + } + response = builder.toString(); + break; + case "/_cat/health": + response = clusterName; + break; + default: + throw new IllegalArgumentException("no such handler " + path); + } + t.sendResponseHeaders(200, response.length()); + OutputStream os = t.getResponseBody(); + os.write(response.getBytes()); + os.close(); + } + + public void tearDown() throws Exception { + super.tearDown(); + server.stop(0); + } + + public void testGetClustername() throws IOException { + HttpHost httpHost = new HttpHost("127.0.0.1", server.getAddress().getPort(), "http"); + try(RestClient client = new RestClient(httpHost)) { + assertEquals(clusterName, client.getClusterName(httpHost)); + } + } + + public void testFetchNodes() throws IOException { + additionalNodes = Arrays.asList("127.0.0.2:9200 c", "127.0.0.3:9200 d"); + HttpHost httpHost = new HttpHost("127.0.0.1", server.getAddress().getPort(), "http"); + try(RestClient client = new RestClient(httpHost)) { + assertEquals(3, client.fetchNodes(httpHost, true, true, false).size()); + assertTrue(client.fetchNodes(httpHost, true, 
true, false).toString(), client.fetchNodes(httpHost, true, true, false).contains(new HttpHost("127.0.0.2", 9200, "http"))); + assertTrue(client.fetchNodes(httpHost, true, true, false).contains(new HttpHost("127.0.0.3", 9200, "http"))); + assertTrue(client.fetchNodes(httpHost, true, true, false).contains(httpHost)); + assertEquals(1, client.fetchNodes(httpHost, true, true, true).size()); + } + } + + public void testSimpleRetry() throws IOException{ + additionalNodes = Arrays.asList("127.0.0.2:9200 c", "127.0.0.3:9200 d"); + HttpHost httpHost = new HttpHost("127.0.0.1", server.getAddress().getPort(), "http"); + try(RestClient client = new RestClient(httpHost)) { + client.setNodes(client.fetchNodes(httpHost, true, true, false)); + HttpResponse httpResponse = client.httpGet("/_cat/health", Collections.emptyMap()); + assertEquals(httpResponse.getStatusLine().getStatusCode(), 200); + server.stop(0); + try { + client.httpGet("/_cat/health", Collections.emptyMap()); + fail(); + } catch (IOException ex) { + assertTrue(ex.getMessage(), ex.getMessage().endsWith("failed: connect timed out") || ex.getMessage().endsWith("failed: Connection refused")); + } + } + } + + public void testBlacklist() throws IOException{ + additionalNodes = Arrays.asList("127.0.0.2:9200 c", "127.0.0.3:9200 d"); + HttpHost httpHost = new HttpHost("127.0.0.1", server.getAddress().getPort(), "http"); + try(RestClient client = new RestClient(httpHost)) { + client.setNodes(client.fetchNodes(httpHost, true, true, false)); + assertEquals(3, client.getNumHosts()); + assertEquals(0, client.getNumBlacklistedHosts()); + server.stop(0); + try { + client.httpGet("/_cat/health", Collections.emptyMap()); + fail(); + } catch (IOException ex) { + assertTrue(ex.getMessage(), ex.getMessage().endsWith("failed: connect timed out") || ex.getMessage().endsWith("failed: Connection refused")); + } + assertEquals(3, client.getNumHosts()); + assertEquals(3, client.getNumBlacklistedHosts()); + int num = 0; + for (HttpHost host : 
client.getHostIterator(false)) { + num++; // nothing here + } + assertEquals(0, num); + for (HttpHost host : client.getHostIterator(true)) { + num++; // all there - we have to retry now + } + assertEquals(3, num); + } + } + + +} diff --git a/settings.gradle b/settings.gradle index 215b436e20a27..831b2c5d2fb35 100644 --- a/settings.gradle +++ b/settings.gradle @@ -5,6 +5,7 @@ List projects = [ 'rest-api-spec', 'core', 'docs', + 'client', 'distribution:integ-test-zip', 'distribution:zip', 'distribution:tar', From c9c33a7719d7f2bb00c507b50e08d49d1d08474c Mon Sep 17 00:00:00 2001 From: javanna Date: Fri, 8 Apr 2016 15:24:21 +0200 Subject: [PATCH 002/103] Fix checkstyle issues, setup thirdPartyAudit checks and enable forbiddenApisMain --- client/build.gradle | 69 ++++++++----------- .../org/elasticsearch/client/IndexClient.java | 27 ++++++-- .../org/elasticsearch/client/RestClient.java | 3 +- .../elasticsearch/client/RestClientTests.java | 14 ++-- 4 files changed, 59 insertions(+), 54 deletions(-) diff --git a/client/build.gradle b/client/build.gradle index 1ae32bf6e98cc..bd916cd2cee9c 100644 --- a/client/build.gradle +++ b/client/build.gradle @@ -23,60 +23,51 @@ group = 'org.elasticsearch.client' apply plugin: 'elasticsearch.build' dependencies { - // we use the lucene test-framework here but we are not pulling in ES core or the test framework + // TODO once we got rid of the client in the test framework we should use a version variable here + compile "org.apache.httpcomponents:httpclient:4.5.2" + compile "org.apache.httpcomponents:httpcore:4.4.4" + //compile "org.apache.httpcomponents:httpcore-nio:4.4.4" + //compile "org.apache.httpcomponents:httpasyncclient:4.1.1" + compile "commons-codec:commons-codec:1.9" + compile "commons-logging:commons-logging:1.2" + + // we use lucene-test-framework here without pulling in ES core or its own test-framework testCompile "com.carrotsearch.randomizedtesting:randomizedtesting-runner:${versions.randomizedrunner}" testCompile 
"junit:junit:${versions.junit}" testCompile 'org.hamcrest:hamcrest-all:1.3' testCompile "org.apache.lucene:lucene-test-framework:${versions.lucene}" testCompile "org.apache.lucene:lucene-core:${versions.lucene}" testCompile "org.apache.lucene:lucene-codecs:${versions.lucene}" - - - // TODO once we got rid of the client in the test framework we should use a version variable here - // we use httpclient here since the JDK support has several issue - // - httpclient supports basic and digest auth and other schemes - // - URLConnection has issues with SSL and not all system patches are available - // - URLConnection can't stream data but httpclient can - // - URLConnection doesn't expose responsecodes unless it's a 200 - // - httpclient supports pipelining which we might wanna expose down the road? - compile "org.apache.httpcomponents:httpclient:4.5.1" - compile "org.apache.httpcomponents:httpcore:4.4.4" - compile "org.apache.httpcomponents:httpcore-nio:4.4.4" // currently unused - compile "commons-logging:commons-logging:1.2" - compile 'org.apache.httpcomponents:httpasyncclient:4.1.1' // currently unused } - compileJava.options.compilerArgs << '-Xlint:-cast,-rawtypes,-try,-unchecked' compileTestJava.options.compilerArgs << '-Xlint:-rawtypes' -// the main files are actually test files, so use the appopriate forbidden api sigs forbiddenApisMain { - bundledSignatures = ['jdk-unsafe', 'jdk-deprecated'] - signaturesURLs = [PrecommitTasks.getResource('/forbidden/all-signatures.txt'), - PrecommitTasks.getResource('/forbidden/test-signatures.txt')] + //client does not depend on core, so only jdk signatures should be checked + signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt')] } +forbiddenApisMain.enabled=true +//TODO remove use of sun http server and enable forbidden-apis for tests forbiddenApisTest.enabled=false -forbiddenApisMain.enabled=false +//TODO add licenses for deps and check out distribution task +//dependency license are currently checked 
in distribution +dependencyLicenses.enabled=false +//TODO enable jarhell checks +jarHell.enabled=false +//NamingConventionCheck is part of test-framework, which we don't want to pull in as it depends on es core +namingConventions.enabled=false -// dependency license are currently checked in distribution -dependencyLicenses.enabled = false -jarHell.enabled = false -thirdPartyAudit.enabled = false thirdPartyAudit.excludes = [ - // classes are missing - 'javax.servlet.ServletContextEvent', - 'javax.servlet.ServletContextListener', - 'org.apache.avalon.framework.logger.Logger', - 'org.apache.log.Hierarchy', - 'org.apache.log.Logger', - // we intentionally exclude the ant tasks because people were depending on them from their tests!!!!!!! - 'org.apache.tools.ant.BuildException', - 'org.apache.tools.ant.DirectoryScanner', - 'org.apache.tools.ant.Task', - 'org.apache.tools.ant.types.FileSet', - 'org.easymock.EasyMock', - 'org.easymock.IArgumentMatcher', - 'org.jmock.core.Constraint', + //commons-logging optional dependencies + 'org.apache.avalon.framework.logger.Logger', + 'org.apache.log.Hierarchy', + 'org.apache.log.Logger', + 'org.apache.log4j.Level', + 'org.apache.log4j.Logger', + 'org.apache.log4j.Priority', + //commons-logging provided dependencies + 'javax.servlet.ServletContextEvent', + 'javax.servlet.ServletContextListener' ] diff --git a/client/src/main/java/org/elasticsearch/client/IndexClient.java b/client/src/main/java/org/elasticsearch/client/IndexClient.java index ce3905b2a124e..6b116b359938b 100644 --- a/client/src/main/java/org/elasticsearch/client/IndexClient.java +++ b/client/src/main/java/org/elasticsearch/client/IndexClient.java @@ -1,14 +1,30 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. 
Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ package org.elasticsearch.client; import java.io.IOException; import java.util.Collections; import java.util.HashMap; +import java.util.Locale; import java.util.Map; import java.util.Objects; -/** - * Created by simon on 2/16/16. - */ public class IndexClient { private final RestClient client; @@ -24,7 +40,7 @@ public void delete(String index, String type, String id, DeleteOptions params) t Objects.requireNonNull(index, "index must not be null"); Objects.requireNonNull(type, "type must not be null"); Objects.requireNonNull(id, "id must not be null"); - String deleteEndpoint = String.format("/%s/%s/%s", index, type, id); + String deleteEndpoint = String.format(Locale.ROOT, "/%s/%s/%s", index, type, id); client.httpDelete(deleteEndpoint, params == null ? Collections.emptyMap() : params.options); } @@ -59,7 +75,4 @@ public void timeout(String timeout) { options.put("timeout", timeout); }; } - - - } diff --git a/client/src/main/java/org/elasticsearch/client/RestClient.java b/client/src/main/java/org/elasticsearch/client/RestClient.java index 8c2c9f0e8ee4f..25d96aa903df2 100644 --- a/client/src/main/java/org/elasticsearch/client/RestClient.java +++ b/client/src/main/java/org/elasticsearch/client/RestClient.java @@ -60,7 +60,8 @@ public class RestClient implements Closeable{ private final Set blackList = new CopyOnWriteArraySet<>(); public RestClient(HttpHost... 
hosts) { - this("http", HttpClientBuilder.create().setDefaultRequestConfig(RequestConfig.custom().setConnectTimeout(100).build()).build(), hosts); + this("http", HttpClientBuilder.create().setDefaultRequestConfig( + RequestConfig.custom().setConnectTimeout(100).build()).build(), hosts); } public RestClient(String scheme, CloseableHttpClient client, HttpHost[] hosts) { diff --git a/client/src/test/java/org/elasticsearch/client/RestClientTests.java b/client/src/test/java/org/elasticsearch/client/RestClientTests.java index 366ae69233998..038cb8bab3c7d 100644 --- a/client/src/test/java/org/elasticsearch/client/RestClientTests.java +++ b/client/src/test/java/org/elasticsearch/client/RestClientTests.java @@ -26,11 +26,10 @@ import java.io.IOException; import java.io.OutputStream; import java.net.InetSocketAddress; -import java.net.URISyntaxException; import java.util.Arrays; import java.util.Collections; import java.util.List; -import java.util.concurrent.ExecutionException; + public class RestClientTests extends LuceneTestCase { //TODO this should be refactored into a base test!! 
HttpServer server; @@ -96,7 +95,8 @@ public void testFetchNodes() throws IOException { HttpHost httpHost = new HttpHost("127.0.0.1", server.getAddress().getPort(), "http"); try(RestClient client = new RestClient(httpHost)) { assertEquals(3, client.fetchNodes(httpHost, true, true, false).size()); - assertTrue(client.fetchNodes(httpHost, true, true, false).toString(), client.fetchNodes(httpHost, true, true, false).contains(new HttpHost("127.0.0.2", 9200, "http"))); + assertTrue(client.fetchNodes(httpHost, true, true, false).toString(), client.fetchNodes(httpHost, true, true, false) + .contains(new HttpHost("127.0.0.2", 9200, "http"))); assertTrue(client.fetchNodes(httpHost, true, true, false).contains(new HttpHost("127.0.0.3", 9200, "http"))); assertTrue(client.fetchNodes(httpHost, true, true, false).contains(httpHost)); assertEquals(1, client.fetchNodes(httpHost, true, true, true).size()); @@ -115,7 +115,8 @@ public void testSimpleRetry() throws IOException{ client.httpGet("/_cat/health", Collections.emptyMap()); fail(); } catch (IOException ex) { - assertTrue(ex.getMessage(), ex.getMessage().endsWith("failed: connect timed out") || ex.getMessage().endsWith("failed: Connection refused")); + assertTrue(ex.getMessage(), ex.getMessage().endsWith("failed: connect timed out") || + ex.getMessage().endsWith("failed: Connection refused")); } } } @@ -132,7 +133,8 @@ public void testBlacklist() throws IOException{ client.httpGet("/_cat/health", Collections.emptyMap()); fail(); } catch (IOException ex) { - assertTrue(ex.getMessage(), ex.getMessage().endsWith("failed: connect timed out") || ex.getMessage().endsWith("failed: Connection refused")); + assertTrue(ex.getMessage(), ex.getMessage().endsWith("failed: connect timed out") || + ex.getMessage().endsWith("failed: Connection refused")); } assertEquals(3, client.getNumHosts()); assertEquals(3, client.getNumBlacklistedHosts()); @@ -147,6 +149,4 @@ public void testBlacklist() throws IOException{ assertEquals(3, num); } } - - } 
From 59707239452bdf24fe6a32deed5f21c5205f79e9 Mon Sep 17 00:00:00 2001 From: javanna Date: Mon, 11 Apr 2016 14:32:19 +0200 Subject: [PATCH 003/103] clarify why jarhell and dependency license check are disabled, add okhttp mockwebserver dependency Also enable forbiddenApisTest (will fail till we get rid of the sun http web server in RestClientTests that will be replaced with mockwebserver) --- client/build.gradle | 20 ++++++++++++++------ 1 file changed, 14 insertions(+), 6 deletions(-) diff --git a/client/build.gradle b/client/build.gradle index bd916cd2cee9c..d94676267d0e5 100644 --- a/client/build.gradle +++ b/client/build.gradle @@ -31,13 +31,18 @@ dependencies { compile "commons-codec:commons-codec:1.9" compile "commons-logging:commons-logging:1.2" - // we use lucene-test-framework here without pulling in ES core or its own test-framework testCompile "com.carrotsearch.randomizedtesting:randomizedtesting-runner:${versions.randomizedrunner}" testCompile "junit:junit:${versions.junit}" testCompile 'org.hamcrest:hamcrest-all:1.3' testCompile "org.apache.lucene:lucene-test-framework:${versions.lucene}" testCompile "org.apache.lucene:lucene-core:${versions.lucene}" testCompile "org.apache.lucene:lucene-codecs:${versions.lucene}" + //mock web server + testCompile "com.squareup.okhttp3:mockwebserver:3.2.0" + testCompile "com.squareup.okhttp3:okhttp:3.2.0" + testCompile "com.squareup.okhttp3:okhttp-ws:3.2.0" + testCompile "com.squareup.okio:okio:1.6.0" + testCompile "org.bouncycastle:bcprov-jdk15on:1.54" } compileJava.options.compilerArgs << '-Xlint:-cast,-rawtypes,-try,-unchecked' @@ -48,13 +53,15 @@ forbiddenApisMain { signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt')] } -forbiddenApisMain.enabled=true -//TODO remove use of sun http server and enable forbidden-apis for tests -forbiddenApisTest.enabled=false -//TODO add licenses for deps and check out distribution task +forbiddenApisTest { + //client does not depend on core, so only jdk 
signatures should be checked + signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt')] +} + +//TODO add licenses for dependencies and take care of distribution //dependency license are currently checked in distribution dependencyLicenses.enabled=false -//TODO enable jarhell checks +//JarHell is part of es core, which we don't want to pull in jarHell.enabled=false //NamingConventionCheck is part of test-framework, which we don't want to pull in as it depends on es core namingConventions.enabled=false @@ -64,6 +71,7 @@ thirdPartyAudit.excludes = [ 'org.apache.avalon.framework.logger.Logger', 'org.apache.log.Hierarchy', 'org.apache.log.Logger', + 'org.apache.log4j.Category', 'org.apache.log4j.Level', 'org.apache.log4j.Logger', 'org.apache.log4j.Priority', From 6ae8be55bd3e120c4ed3186295aac10feb78ac64 Mon Sep 17 00:00:00 2001 From: javanna Date: Thu, 28 Apr 2016 23:16:06 +0200 Subject: [PATCH 004/103] Introduce notion of Transport, Connection and ConnectionPool There are two implementations of connection pool, a static one that allows to enable/disable pings, and a sniffing one that sniffs nodes from the nodes info api. Transport retrieves a stream of connections from the connection for each request and calls onSuccess or onFailure depending on the result of the request. Transport also supports a max retry timeout to control the timeout for the request retries overall. 
--- client/build.gradle | 3 +- .../client/AbstractStaticConnectionPool.java | 105 +++++++ .../org/elasticsearch/client/Connection.java | 45 +++ .../elasticsearch/client/ConnectionPool.java | 69 +++++ .../client/ElasticsearchResponse.java | 99 +++++++ .../ElasticsearchResponseException.java | 60 ++++ .../client/HttpDeleteWithEntity.java | 41 +++ .../client/HttpGetWithEntity.java | 41 +++ .../org/elasticsearch/client/IndexClient.java | 78 ------ .../java/org/elasticsearch/client/Node.java | 125 +++++++++ .../elasticsearch/client/RequestLogger.java | 51 ++++ .../org/elasticsearch/client/RestClient.java | 189 +------------ .../client/RetryTimeoutException.java | 29 ++ .../org/elasticsearch/client/Sniffer.java | 162 +++++++++++ .../client/SniffingConnectionPool.java | 196 +++++++++++++ .../client/StatefulConnection.java | 125 +++++++++ .../client/StaticConnectionPool.java | 93 +++++++ .../org/elasticsearch/client/Transport.java | 207 ++++++++++++++ .../java/org/elasticsearch/client/Verb.java | 27 ++ .../org/elasticsearch/client/NodeTests.java | 136 +++++++++ .../elasticsearch/client/RestClientTests.java | 152 ---------- .../elasticsearch/client/SnifferTests.java | 259 ++++++++++++++++++ .../client/SniffingConnectionPoolTests.java | 147 ++++++++++ .../client/StaticConnectionPoolTests.java | 87 ++++++ .../elasticsearch/client/TransportTests.java | 95 +++++++ 25 files changed, 2209 insertions(+), 412 deletions(-) create mode 100644 client/src/main/java/org/elasticsearch/client/AbstractStaticConnectionPool.java create mode 100644 client/src/main/java/org/elasticsearch/client/Connection.java create mode 100644 client/src/main/java/org/elasticsearch/client/ConnectionPool.java create mode 100644 client/src/main/java/org/elasticsearch/client/ElasticsearchResponse.java create mode 100644 client/src/main/java/org/elasticsearch/client/ElasticsearchResponseException.java create mode 100644 client/src/main/java/org/elasticsearch/client/HttpDeleteWithEntity.java create mode 100644 
client/src/main/java/org/elasticsearch/client/HttpGetWithEntity.java delete mode 100644 client/src/main/java/org/elasticsearch/client/IndexClient.java create mode 100644 client/src/main/java/org/elasticsearch/client/Node.java create mode 100644 client/src/main/java/org/elasticsearch/client/RequestLogger.java create mode 100644 client/src/main/java/org/elasticsearch/client/RetryTimeoutException.java create mode 100644 client/src/main/java/org/elasticsearch/client/Sniffer.java create mode 100644 client/src/main/java/org/elasticsearch/client/SniffingConnectionPool.java create mode 100644 client/src/main/java/org/elasticsearch/client/StatefulConnection.java create mode 100644 client/src/main/java/org/elasticsearch/client/StaticConnectionPool.java create mode 100644 client/src/main/java/org/elasticsearch/client/Transport.java create mode 100644 client/src/main/java/org/elasticsearch/client/Verb.java create mode 100644 client/src/test/java/org/elasticsearch/client/NodeTests.java delete mode 100644 client/src/test/java/org/elasticsearch/client/RestClientTests.java create mode 100644 client/src/test/java/org/elasticsearch/client/SnifferTests.java create mode 100644 client/src/test/java/org/elasticsearch/client/SniffingConnectionPoolTests.java create mode 100644 client/src/test/java/org/elasticsearch/client/StaticConnectionPoolTests.java create mode 100644 client/src/test/java/org/elasticsearch/client/TransportTests.java diff --git a/client/build.gradle b/client/build.gradle index d94676267d0e5..7a9377d0acb00 100644 --- a/client/build.gradle +++ b/client/build.gradle @@ -30,10 +30,11 @@ dependencies { //compile "org.apache.httpcomponents:httpasyncclient:4.1.1" compile "commons-codec:commons-codec:1.9" compile "commons-logging:commons-logging:1.2" + compile "com.fasterxml.jackson.core:jackson-core:2.7.3" testCompile "com.carrotsearch.randomizedtesting:randomizedtesting-runner:${versions.randomizedrunner}" testCompile "junit:junit:${versions.junit}" - testCompile 
'org.hamcrest:hamcrest-all:1.3' + testCompile "org.hamcrest:hamcrest-all:1.3" testCompile "org.apache.lucene:lucene-test-framework:${versions.lucene}" testCompile "org.apache.lucene:lucene-core:${versions.lucene}" testCompile "org.apache.lucene:lucene-codecs:${versions.lucene}" diff --git a/client/src/main/java/org/elasticsearch/client/AbstractStaticConnectionPool.java b/client/src/main/java/org/elasticsearch/client/AbstractStaticConnectionPool.java new file mode 100644 index 0000000000000..f30a2b9c5c1a9 --- /dev/null +++ b/client/src/main/java/org/elasticsearch/client/AbstractStaticConnectionPool.java @@ -0,0 +1,105 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.Objects; +import java.util.concurrent.atomic.AtomicInteger; +import java.util.function.Predicate; +import java.util.stream.Stream; + +/** + * Base static connection pool implementation that deals with mutable connections. Marks connections as dead/alive when needed. 
+ * Provides a stream of alive connections or dead ones that should be retried for each {@link #nextConnection()} call, which + * allows to filter connections through a customizable {@link Predicate}, called connection selector. + * In case the returned stream is empty a last resort dead connection should be retrieved by calling {@link #lastResortConnection()} + * and resurrected so that a single request attempt can be performed. + * The {@link #onSuccess(StatefulConnection)} method marks the connection provided as an argument alive. + * The {@link #onFailure(StatefulConnection)} method marks the connection provided as an argument dead. + * This base implementation doesn't define the list implementation that stores connections, so that concurrency can be + * handled in the subclasses depending on the usecase (e.g. defining the list volatile when needed). + */ +public abstract class AbstractStaticConnectionPool implements ConnectionPool { + + private static final Log logger = LogFactory.getLog(AbstractStaticConnectionPool.class); + + private final Predicate connectionSelector; + + private final AtomicInteger lastConnectionIndex = new AtomicInteger(0); + + protected AbstractStaticConnectionPool(Predicate connectionSelector) { + Objects.requireNonNull(connectionSelector, "connection selector predicate cannot be null"); + this.connectionSelector = connectionSelector; + } + + protected abstract List getConnections(); + + @Override + public final Stream nextConnection() { + return nextUnfilteredConnection().filter(connectionSelector); + } + + protected final Stream nextUnfilteredConnection() { + List connections = getConnections(); + if (connections.isEmpty()) { + throw new IllegalStateException("no connections available in the connection pool"); + } + + List sortedConnections = new ArrayList<>(connections); + //TODO is it possible to make this O(1)? 
(rotate is O(n)) + Collections.rotate(sortedConnections, sortedConnections.size() - lastConnectionIndex.getAndIncrement()); + return sortedConnections.stream().filter(connection -> connection.isAlive() || connection.shouldBeRetried()); + } + + protected List createConnections(Node... nodes) { + List connections = new ArrayList<>(); + for (Node node : nodes) { + Objects.requireNonNull(node, "node cannot be null"); + connections.add(new StatefulConnection(node)); + } + return Collections.unmodifiableList(connections); + } + + @Override + public StatefulConnection lastResortConnection() { + StatefulConnection statefulConnection = getConnections().stream() + .sorted((o1, o2) -> Long.compare(o1.getDeadUntil(), o2.getDeadUntil())).findFirst().get(); + statefulConnection.markResurrected(); + return statefulConnection; + } + + @Override + public void onSuccess(StatefulConnection connection) { + connection.markAlive(); + logger.trace("marked connection alive for " + connection.getNode()); + } + + @Override + public void onFailure(StatefulConnection connection) throws IOException { + connection.markDead(); + logger.debug("marked connection dead for " + connection.getNode()); + } +} diff --git a/client/src/main/java/org/elasticsearch/client/Connection.java b/client/src/main/java/org/elasticsearch/client/Connection.java new file mode 100644 index 0000000000000..225c812ebfaac --- /dev/null +++ b/client/src/main/java/org/elasticsearch/client/Connection.java @@ -0,0 +1,45 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client; + +/** + * Simplest representation of a connection to an elasticsearch node. + * It doesn't have any mutable state. It holds the node that the connection points to. + * Allows the transport to deal with very simple connection objects that are immutable. + * Any change to the state of connections should be made through the connection pool + * which is aware of the connection object that it supports. + */ +public class Connection { + private final Node node; + + /** + * Creates a new connection pointing to the provided {@link Node} argument + */ + public Connection(Node node) { + this.node = node; + } + + /** + * Returns the {@link Node} that the connection points to + */ + public Node getNode() { + return node; + } +} diff --git a/client/src/main/java/org/elasticsearch/client/ConnectionPool.java b/client/src/main/java/org/elasticsearch/client/ConnectionPool.java new file mode 100644 index 0000000000000..3f2eb89da9718 --- /dev/null +++ b/client/src/main/java/org/elasticsearch/client/ConnectionPool.java @@ -0,0 +1,69 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client; + +import java.io.Closeable; +import java.io.IOException; +import java.util.stream.Stream; + +/** + * Pool of connections to the different nodes that belong to an elasticsearch cluster. + * It keeps track of the different nodes to communicate with and allows to retrieve a stream of connections to be used + * for each request. Exposes the needed hooks to be able to eventually mark connections dead or alive and execute + * arbitrary operations before each single request attempt. + * + * @param the type of {@link Connection} that the pool supports + */ +public interface ConnectionPool extends Closeable { + + /** + * Returns a stream of connections that should be used for a request call. + * Ideally, the first connection is retrieved from the stream and used successfully for the request. + * Otherwise, after each failure the next connection should be retrieved from the stream so that the request can be retried. + * The maximum total of attempts is equal to the number of connections that are available in the stream. + * It may happen that the stream is empty, in which case it means that there aren't healthy connections to use. + * Then {@link #lastResortConnection()} should be called to retrieve a non healthy connection and try it. + */ + Stream nextConnection(); + + /** + * Returns a connection that is not necessarily healthy, but can be used for a request attempt. 
To be called as last resort + * only in case {@link #nextConnection()} returns an empty stream + */ + C lastResortConnection(); + + /** + * Called before each single request attempt. Allows to execute operations (e.g. ping) before each request. + * Receives as an argument the connection that is going to be used for the request. + */ + void beforeAttempt(C connection) throws IOException; + + /** + * Called after each successful request call. + * Receives as an argument the connection that was used for the successful request. + */ + void onSuccess(C connection); + + /** + * Called after each failed attempt. + * Receives as an argument the connection that was used for the failed attempt. + */ + void onFailure(C connection) throws IOException; +} diff --git a/client/src/main/java/org/elasticsearch/client/ElasticsearchResponse.java b/client/src/main/java/org/elasticsearch/client/ElasticsearchResponse.java new file mode 100644 index 0000000000000..13c92c164535c --- /dev/null +++ b/client/src/main/java/org/elasticsearch/client/ElasticsearchResponse.java @@ -0,0 +1,99 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.client; + +import org.apache.http.Header; +import org.apache.http.HttpEntity; +import org.apache.http.RequestLine; +import org.apache.http.StatusLine; +import org.apache.http.client.methods.CloseableHttpResponse; + +import java.io.Closeable; +import java.io.IOException; +import java.util.Objects; + +/** + * Holds an elasticsearch response. It wraps the {@link CloseableHttpResponse} response and associates it with + * its corresponding {@link RequestLine} and {@link Node} + */ +public class ElasticsearchResponse implements Closeable { + + private final RequestLine requestLine; + private final Node node; + private final CloseableHttpResponse response; + + ElasticsearchResponse(RequestLine requestLine, Node node, CloseableHttpResponse response) { + Objects.requireNonNull(requestLine, "requestLine cannot be null"); + Objects.requireNonNull(node, "node cannot be null"); + Objects.requireNonNull(response, "response cannot be null"); + this.requestLine = requestLine; + this.node = node; + this.response = response; + } + + /** + * Returns the request line that generated this response + */ + public RequestLine getRequestLine() { + return requestLine; + } + + /** + * Returns the node that returned this response + */ + public Node getNode() { + return node; + } + + /** + * Returns the status line of the current response + */ + public StatusLine getStatusLine() { + return response.getStatusLine(); + } + + /** + * Returns all the response headers + */ + public Header[] getHeaders() { + return response.getAllHeaders(); + } + + /** + * Returns the response body if available, null otherwise + */ + public HttpEntity getEntity() { + return response.getEntity(); + } + + @Override + public String toString() { + return "ElasticsearchResponse{" + + "requestLine=" + requestLine + + ", node=" + node + + ", response=" + response.getStatusLine() + + '}'; + } + + @Override + public void close() throws IOException { + this.response.close(); + } +} diff --git 
a/client/src/main/java/org/elasticsearch/client/ElasticsearchResponseException.java b/client/src/main/java/org/elasticsearch/client/ElasticsearchResponseException.java new file mode 100644 index 0000000000000..6c2ef2d2e0d48 --- /dev/null +++ b/client/src/main/java/org/elasticsearch/client/ElasticsearchResponseException.java @@ -0,0 +1,60 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.client; + +import org.apache.http.RequestLine; +import org.apache.http.StatusLine; + +import java.io.IOException; + +public class ElasticsearchResponseException extends IOException { + + private final Node node; + private final RequestLine requestLine; + private final StatusLine statusLine; + + ElasticsearchResponseException(RequestLine requestLine, Node node, StatusLine statusLine) { + super(buildMessage(requestLine, node, statusLine)); + this.node = node; + this.requestLine = requestLine; + this.statusLine = statusLine; + } + + private static String buildMessage(RequestLine requestLine, Node node, StatusLine statusLine) { + return requestLine.getMethod() + " " + node.getHttpHost() + requestLine.getUri() + ": " + statusLine.toString(); + } + + public boolean isRecoverable() { + //clients don't retry on 500 because elasticsearch still misuses it instead of 400 in some places + return statusLine.getStatusCode() >= 502 && statusLine.getStatusCode() <= 504; + } + + public Node getNode() { + return node; + } + + public RequestLine getRequestLine() { + return requestLine; + } + + public StatusLine getStatusLine() { + return statusLine; + } +} diff --git a/client/src/main/java/org/elasticsearch/client/HttpDeleteWithEntity.java b/client/src/main/java/org/elasticsearch/client/HttpDeleteWithEntity.java new file mode 100644 index 0000000000000..4f378f19c8f2c --- /dev/null +++ b/client/src/main/java/org/elasticsearch/client/HttpDeleteWithEntity.java @@ -0,0 +1,41 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.client; + +import org.apache.http.client.methods.HttpDelete; +import org.apache.http.client.methods.HttpEntityEnclosingRequestBase; + +import java.net.URI; + +/** + * Allows to send DELETE requests providing a body (not supported out of the box) + */ +final class HttpDeleteWithEntity extends HttpEntityEnclosingRequestBase { + + final static String METHOD_NAME = HttpDelete.METHOD_NAME; + + public HttpDeleteWithEntity(final URI uri) { + setURI(uri); + } + + @Override + public String getMethod() { + return METHOD_NAME; + } +} diff --git a/client/src/main/java/org/elasticsearch/client/HttpGetWithEntity.java b/client/src/main/java/org/elasticsearch/client/HttpGetWithEntity.java new file mode 100644 index 0000000000000..18039ba9b3425 --- /dev/null +++ b/client/src/main/java/org/elasticsearch/client/HttpGetWithEntity.java @@ -0,0 +1,41 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ +package org.elasticsearch.client; + +import org.apache.http.client.methods.HttpEntityEnclosingRequestBase; +import org.apache.http.client.methods.HttpGet; + +import java.net.URI; + +/** + * Allows to send GET requests providing a body (not supported out of the box) + */ +final class HttpGetWithEntity extends HttpEntityEnclosingRequestBase { + + final static String METHOD_NAME = HttpGet.METHOD_NAME; + + public HttpGetWithEntity(final URI uri) { + setURI(uri); + } + + @Override + public String getMethod() { + return METHOD_NAME; + } +} \ No newline at end of file diff --git a/client/src/main/java/org/elasticsearch/client/IndexClient.java b/client/src/main/java/org/elasticsearch/client/IndexClient.java deleted file mode 100644 index 6b116b359938b..0000000000000 --- a/client/src/main/java/org/elasticsearch/client/IndexClient.java +++ /dev/null @@ -1,78 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -package org.elasticsearch.client; - -import java.io.IOException; -import java.util.Collections; -import java.util.HashMap; -import java.util.Locale; -import java.util.Map; -import java.util.Objects; - -public class IndexClient { - - private final RestClient client; - - public IndexClient(RestClient client) { - this.client = client; - } - - public void delete(String index, String type, String id) throws IOException { - delete(index, type, id, null); - } - public void delete(String index, String type, String id, DeleteOptions params) throws IOException { - Objects.requireNonNull(index, "index must not be null"); - Objects.requireNonNull(type, "type must not be null"); - Objects.requireNonNull(id, "id must not be null"); - String deleteEndpoint = String.format(Locale.ROOT, "/%s/%s/%s", index, type, id); - client.httpDelete(deleteEndpoint, params == null ? Collections.emptyMap() : params.options); - } - - public class DeleteOptions { - private final Map options = new HashMap<>(); - /** Specific write consistency setting for the operation one of "one", "quorum", "all"*/ - public void consistency(String consistency) { - options.put("consistency", consistency); - }; - /** ID of parent document */ - public void parent(String parent){ - options.put("parent", parent); - }; - /** Refresh the index after performing the operation */ - public void refresh(Boolean refresh) { - options.put("refresh", refresh); - }; - /** Specific routing value */ - public void routing(String routing) { - options.put("routing", routing); - }; - /** Explicit version number for concurrency control */ - public void version(Number version) { - options.put("version", version); - }; - /** Specific version type one of "internal", "external", "external_gte", "force" */ - public void versionType(String versionType) { - options.put("version_type", versionType); - }; - /** Explicit operation timeout */ - public void timeout(String timeout) { - options.put("timeout", timeout); - }; - } -} diff --git 
a/client/src/main/java/org/elasticsearch/client/Node.java b/client/src/main/java/org/elasticsearch/client/Node.java new file mode 100644 index 0000000000000..ab6d3631527f2 --- /dev/null +++ b/client/src/main/java/org/elasticsearch/client/Node.java @@ -0,0 +1,125 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client; + +import org.apache.http.HttpHost; + +import java.util.Arrays; +import java.util.Collections; +import java.util.HashSet; +import java.util.Locale; +import java.util.Map; +import java.util.Objects; +import java.util.Set; + +/** + * Represents an Elasticsearch node. + * Holds its http address as an {@link HttpHost} instance, as well as its optional set of roles and attributes. + * Roles and attributes can be populated in one of the two following ways: + * 1) using a connection pool that supports sniffing, so that all the info is retrieved from elasticsearch itself + * 2) manually passing the info through the {@link #Node(HttpHost, Set, Map)} constructor + * Roles and attributes may be taken into account as part of connection selection by the connection pool, which + * can be customized by passing in a predicate at connection pool creation. 
+ */ +public class Node { + private final HttpHost httpHost; + private final Set roles; + private final Map attributes; + + /** + * Creates a node given its http address as an {@link HttpHost} instance. + * Roles are not provided hence all possible roles will be assumed, as that is the default in Elasticsearch. + * No attributes will be associated with the node. + * + * @param httpHost the http address of the node + */ + public Node(HttpHost httpHost) { + this(httpHost, new HashSet<>(Arrays.asList(Role.values())), Collections.emptyMap()); + } + + /** + * Creates a node given its http address as an {@link HttpHost} instance, its set or roles and attributes. + * + * @param httpHost the http address of the node + * @param roles the set of roles that the node fulfills within the cluster + * @param attributes the attributes associated with the node + */ + public Node(HttpHost httpHost, Set roles, Map attributes) { + Objects.requireNonNull(httpHost, "host cannot be null"); + Objects.requireNonNull(roles, "roles cannot be null"); + Objects.requireNonNull(attributes, "attributes cannot be null"); + this.httpHost = httpHost; + this.roles = Collections.unmodifiableSet(roles); + this.attributes = Collections.unmodifiableMap(attributes); + } + + /** + * Returns the http address of the node + */ + public HttpHost getHttpHost() { + return httpHost; + } + + /** + * Returns the set of roles associated with the node + */ + public Set getRoles() { + return roles; + } + + /** + * Returns the set of attributes associated with the node + */ + public Map getAttributes() { + return attributes; + } + + @Override + public String toString() { + return "Node{" + + "httpHost=" + httpHost + + ", roles=" + roles + + ", attributes=" + attributes + + '}'; + } + + /** + * Holds all the potential roles that a node can fulfill within a cluster + */ + public enum Role { + /** + * Data node + */ + DATA, + /** + * Master eligible node + */ + MASTER, + /** + * Ingest node + */ + INGEST; + + @Override + 
public String toString() { + return name().toLowerCase(Locale.ROOT); + } + } +} diff --git a/client/src/main/java/org/elasticsearch/client/RequestLogger.java b/client/src/main/java/org/elasticsearch/client/RequestLogger.java new file mode 100644 index 0000000000000..8423dcb8adeb0 --- /dev/null +++ b/client/src/main/java/org/elasticsearch/client/RequestLogger.java @@ -0,0 +1,51 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.client; + +import org.apache.commons.logging.Log; +import org.apache.http.RequestLine; +import org.apache.http.StatusLine; + +import java.io.IOException; + +/** + * Helper class that exposes static method to unify the way requests are logged + */ +public final class RequestLogger { + + private RequestLogger() { + } + + /** + * Logs a request that yielded a response + */ + public static void log(Log logger, String message, RequestLine requestLine, Node node, StatusLine statusLine) { + logger.debug(message + " [" + requestLine.getMethod() + " " + node.getHttpHost() + + requestLine.getUri() + "] [" + statusLine + "]"); + } + + /** + * Logs a request that failed + */ + public static void log(Log logger, String message, RequestLine requestLine, Node node, IOException e) { + logger.debug(message + " [" + requestLine.getMethod() + " " + node.getHttpHost() + + requestLine.getUri() + "]", e); + } +} diff --git a/client/src/main/java/org/elasticsearch/client/RestClient.java b/client/src/main/java/org/elasticsearch/client/RestClient.java index 25d96aa903df2..a91501196675a 100644 --- a/client/src/main/java/org/elasticsearch/client/RestClient.java +++ b/client/src/main/java/org/elasticsearch/client/RestClient.java @@ -19,200 +19,27 @@ package org.elasticsearch.client; import org.apache.http.HttpEntity; -import org.apache.http.HttpHost; -import org.apache.http.HttpResponse; -import org.apache.http.NameValuePair; -import org.apache.http.StatusLine; -import org.apache.http.client.config.RequestConfig; -import org.apache.http.client.methods.HttpDelete; -import org.apache.http.client.methods.HttpGet; -import org.apache.http.client.methods.HttpPost; -import org.apache.http.client.methods.HttpPut; -import org.apache.http.client.methods.HttpUriRequest; -import org.apache.http.client.utils.URLEncodedUtils; import org.apache.http.impl.client.CloseableHttpClient; -import org.apache.http.impl.client.HttpClientBuilder; -import 
org.apache.http.message.BasicNameValuePair; -import java.io.BufferedReader; import java.io.Closeable; import java.io.IOException; -import java.io.InputStreamReader; -import java.net.URI; -import java.net.URISyntaxException; -import java.nio.charset.StandardCharsets; -import java.util.Arrays; -import java.util.Collections; -import java.util.HashSet; -import java.util.List; import java.util.Map; -import java.util.Set; -import java.util.concurrent.CopyOnWriteArraySet; -import java.util.stream.Collectors; -/** - */ -public class RestClient implements Closeable{ +public final class RestClient implements Closeable { - private final CloseableHttpClient client; - private volatile Set hosts; - private final String scheme; - private final Set blackList = new CopyOnWriteArraySet<>(); + private final Transport transport; - public RestClient(HttpHost... hosts) { - this("http", HttpClientBuilder.create().setDefaultRequestConfig( - RequestConfig.custom().setConnectTimeout(100).build()).build(), hosts); + public RestClient(CloseableHttpClient client, ConnectionPool connectionPool, long maxRetryTimeout) { + this.transport = new Transport<>(client, connectionPool, maxRetryTimeout); } - public RestClient(String scheme, CloseableHttpClient client, HttpHost[] hosts) { - if (hosts.length == 0) { - throw new IllegalArgumentException("hosts must note be empty"); - } - this.scheme = scheme; - this.client = client; - this.hosts = new HashSet<>(Arrays.asList(hosts)); + public ElasticsearchResponse performRequest(Verb verb, String endpoint, Map params, HttpEntity entity) + throws IOException { + return transport.performRequest(verb, endpoint, params, entity); } - - public HttpResponse httpGet(String endpoint, Map params) throws IOException { - return httpGet(getHostIterator(true), endpoint, params); - } - - HttpResponse httpGet(Iterable hosts, String endpoint, Map params) throws IOException { - HttpUriRequest request = new HttpGet(buildUri(endpoint, pairs(params))); - return execute(request, 
hosts); - } - - HttpResponse httpDelete(String endpoint, Map params) throws IOException { - HttpUriRequest request = new HttpDelete(buildUri(endpoint, pairs(params))); - return execute(request, getHostIterator(true)); - } - - HttpResponse httpPut(String endpoint, HttpEntity body, Map params) throws IOException { - HttpPut request = new HttpPut(buildUri(endpoint, pairs(params))); - request.setEntity(body); - return execute(request, getHostIterator(true)); - } - - HttpResponse httpPost(String endpoint, HttpEntity body, Map params) throws IOException { - HttpPost request = new HttpPost(buildUri(endpoint, pairs(params))); - request.setEntity(body); - return execute(request, getHostIterator(true)); - } - - private List pairs(Map options) { - return options.entrySet().stream().map(e -> new BasicNameValuePair(e.getKey(), e.getValue().toString())) - .collect(Collectors.toList()); - } - - public HttpResponse execute(HttpUriRequest request, Iterable retryHosts) throws IOException { - IOException exception = null; - for (HttpHost singleHost : retryHosts) { - try { - return client.execute(singleHost, request); - } catch (IOException ex) { - if (this.hosts.contains(singleHost)) { - blackList.add(singleHost); - } - if (exception != null) { - exception.addSuppressed(ex); - } else { - exception = ex; - } - } - } - throw exception; - } - - public URI buildUri(String path, List query) { - try { - return new URI(null, null, null, -1, path, URLEncodedUtils.format(query, StandardCharsets.UTF_8), null); - } catch (URISyntaxException e) { - throw new RuntimeException(e.getMessage(), e); - } - } - - public Set fetchNodes(HttpHost host, boolean useClientNodes, boolean local, boolean checkAvailable) throws IOException { - HttpResponse httpResponse = httpGet(Collections.singleton(host), "/_cat/nodes", Collections.singletonMap("h", "http,role")); - StatusLine statusLine = httpResponse.getStatusLine(); - if (statusLine.getStatusCode() != 200) { - throw new RuntimeException("failed to fetch 
nodes: " + statusLine.getReasonPhrase()); - } - HttpEntity entity = httpResponse.getEntity(); - Set hosts = new HashSet<>(); - try (BufferedReader content = new BufferedReader(new InputStreamReader(entity.getContent(), StandardCharsets.UTF_8))) { - String line; - while((line = content.readLine()) != null) { - final String[] split = line.split("\\s+"); - assert split.length == 2; - String boundAddress = split[0]; - String role = split[1]; - if ("-".equals(split[0].trim()) == false) { - if ("d".equals(role.trim()) == false && useClientNodes == false) { - continue; - } - URI boundAddressAsURI = URI.create("http://" + boundAddress); - HttpHost newHost = new HttpHost(boundAddressAsURI.getHost(), boundAddressAsURI.getPort(), scheme); - if (checkAvailable == false || isAvailable(newHost)) { - hosts.add(newHost); - } - } - } - } - return hosts; - } - - public String getClusterName(HttpHost host) throws IOException { - HttpResponse httpResponse = httpGet(Collections.singleton(host), "/_cat/health", Collections.singletonMap("h", "cluster")); - StatusLine statusLine = httpResponse.getStatusLine(); - if (statusLine.getStatusCode() != 200) { - throw new RuntimeException("failed to fetch nodes: " + statusLine.getReasonPhrase()); - } - HttpEntity entity = httpResponse.getEntity(); - try (BufferedReader content = new BufferedReader(new InputStreamReader(entity.getContent(), StandardCharsets.UTF_8))) { - String clusterName = content.readLine().trim(); - if (clusterName.length() == 0) { - throw new IllegalStateException("clustername must not be empty"); - } - return clusterName; - } - } - - public boolean isAvailable(HttpHost host) { - try { - HttpResponse httpResponse = httpGet(Collections.singleton(host), "/", Collections.emptyMap()); - StatusLine statusLine = httpResponse.getStatusLine(); - return statusLine.getStatusCode() == 200; - } catch (IOException ex) { - return false; - } - } - - public synchronized void setNodes(Set hosts) { - this.hosts = Collections.unmodifiableSet(new 
HashSet<>(hosts)); - blackList.retainAll(hosts); - } - - public Set getHosts() { - return hosts; - } - - protected Iterable getHostIterator(boolean clearBlacklist) { - if (hosts.size() == blackList.size() && clearBlacklist) { - blackList.clear(); // lets try again - } - return () -> hosts.stream().filter((h) -> blackList.contains(h) == false).iterator(); - } - - int getNumHosts() { - return hosts.size(); - } - - int getNumBlacklistedHosts() { - return blackList.size(); - } @Override public void close() throws IOException { - client.close(); + transport.close(); } } diff --git a/client/src/main/java/org/elasticsearch/client/RetryTimeoutException.java b/client/src/main/java/org/elasticsearch/client/RetryTimeoutException.java new file mode 100644 index 0000000000000..632597bfa37bc --- /dev/null +++ b/client/src/main/java/org/elasticsearch/client/RetryTimeoutException.java @@ -0,0 +1,29 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.client; + +import java.io.IOException; + +public class RetryTimeoutException extends IOException { + + RetryTimeoutException(String message) { + super(message); + } +} diff --git a/client/src/main/java/org/elasticsearch/client/Sniffer.java b/client/src/main/java/org/elasticsearch/client/Sniffer.java new file mode 100644 index 0000000000000..07d485de87916 --- /dev/null +++ b/client/src/main/java/org/elasticsearch/client/Sniffer.java @@ -0,0 +1,162 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.client; + +import com.fasterxml.jackson.core.JsonFactory; +import com.fasterxml.jackson.core.JsonParser; +import com.fasterxml.jackson.core.JsonToken; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.http.HttpEntity; +import org.apache.http.HttpHost; +import org.apache.http.StatusLine; +import org.apache.http.client.config.RequestConfig; +import org.apache.http.client.methods.CloseableHttpResponse; +import org.apache.http.client.methods.HttpGet; +import org.apache.http.impl.client.CloseableHttpClient; +import org.apache.http.util.EntityUtils; + +import java.io.IOException; +import java.io.InputStream; +import java.net.URI; +import java.util.ArrayList; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Locale; +import java.util.Map; +import java.util.Objects; +import java.util.Set; + +final class Sniffer { + + private static final Log logger = LogFactory.getLog(Sniffer.class); + + private final CloseableHttpClient client; + private final RequestConfig sniffRequestConfig; + private final int sniffRequestTimeout; + private final String scheme; + private final JsonFactory jsonFactory; + + Sniffer(CloseableHttpClient client, RequestConfig sniffRequestConfig, int sniffRequestTimeout, String scheme) { + Objects.requireNonNull(client, "client cannot be null"); + Objects.requireNonNull(sniffRequestConfig, "sniffRequestConfig cannot be null"); + if (sniffRequestTimeout <=0) { + throw new IllegalArgumentException("sniffRequestTimeout must be greater than 0"); + } + Objects.requireNonNull(scheme, "scheme cannot be null"); + this.client = client; + this.sniffRequestConfig = sniffRequestConfig; + this.sniffRequestTimeout = sniffRequestTimeout; + this.scheme = scheme; + this.jsonFactory = new JsonFactory(); + } + + List sniffNodes(Node node) throws IOException { + HttpGet httpGet = new HttpGet("/_nodes/http?timeout=" + sniffRequestTimeout + 
"ms"); + httpGet.setConfig(sniffRequestConfig); + + try (CloseableHttpResponse response = client.execute(node.getHttpHost(), httpGet)) { + StatusLine statusLine = response.getStatusLine(); + if (statusLine.getStatusCode() >= 300) { + RequestLogger.log(logger, "sniff failed", httpGet.getRequestLine(), node, statusLine); + EntityUtils.consume(response.getEntity()); + throw new ElasticsearchResponseException(httpGet.getRequestLine(), node, statusLine); + } else { + List nodes = readNodes(response.getEntity()); + RequestLogger.log(logger, "sniff succeeded", httpGet.getRequestLine(), node, statusLine); + return nodes; + } + } catch(IOException e) { + RequestLogger.log(logger, "sniff failed", httpGet.getRequestLine(), node, e); + throw e; + } + } + + private List readNodes(HttpEntity entity) throws IOException { + try (InputStream inputStream = entity.getContent()) { + JsonParser parser = jsonFactory.createParser(inputStream); + if (parser.nextToken() != JsonToken.START_OBJECT) { + throw new IOException("expected data to start with an object"); + } + List nodes = new ArrayList<>(); + while (parser.nextToken() != JsonToken.END_OBJECT) { + if (parser.getCurrentToken() == JsonToken.START_OBJECT) { + if ("nodes".equals(parser.getCurrentName())) { + while (parser.nextToken() != JsonToken.END_OBJECT) { + JsonToken token = parser.nextToken(); + assert token == JsonToken.START_OBJECT; + String nodeId = parser.getCurrentName(); + Node sniffedNode = readNode(nodeId, parser, this.scheme); + if (sniffedNode != null) { + logger.trace("adding node [" + nodeId + "]"); + nodes.add(sniffedNode); + } + } + } else { + parser.skipChildren(); + } + } + } + return nodes; + } + } + + private static Node readNode(String nodeId, JsonParser parser, String scheme) throws IOException { + HttpHost httpHost = null; + Set roles = new HashSet<>(); + Map attributes = new HashMap<>(); + String fieldName = null; + while (parser.nextToken() != JsonToken.END_OBJECT) { + if (parser.getCurrentToken() == 
JsonToken.FIELD_NAME) { + fieldName = parser.getCurrentName(); + } else if (parser.getCurrentToken() == JsonToken.START_ARRAY && "roles".equals(fieldName)) { + while (parser.nextToken() != JsonToken.END_ARRAY) { + roles.add(Node.Role.valueOf(parser.getValueAsString().toUpperCase(Locale.ROOT))); + } + } else if (parser.getCurrentToken() == JsonToken.START_OBJECT) { + if ("attributes".equals(fieldName)) { + while (parser.nextToken() != JsonToken.END_OBJECT) { + attributes.put(parser.getCurrentName(), parser.getValueAsString()); + } + } else if ("http".equals(fieldName)) { + while (parser.nextToken() != JsonToken.END_OBJECT) { + if (parser.getCurrentToken() == JsonToken.VALUE_STRING && + "publish_address".equals(parser.getCurrentName())) { + URI boundAddressAsURI = URI.create(scheme + "://" + parser.getValueAsString()); + httpHost = new HttpHost(boundAddressAsURI.getHost(), boundAddressAsURI.getPort(), + boundAddressAsURI.getScheme()); + } else if (parser.getCurrentToken() == JsonToken.START_OBJECT) { + parser.skipChildren(); + } + } + } else { + parser.skipChildren(); + } + } + } + //http section is not present if http is not enabled on the node, ignore such nodes + if (httpHost == null) { + logger.debug("skipping node [" + nodeId + "] with http disabled"); + return null; + } + return new Node(httpHost, roles, attributes); + } +} diff --git a/client/src/main/java/org/elasticsearch/client/SniffingConnectionPool.java b/client/src/main/java/org/elasticsearch/client/SniffingConnectionPool.java new file mode 100644 index 0000000000000..0488033c1ca20 --- /dev/null +++ b/client/src/main/java/org/elasticsearch/client/SniffingConnectionPool.java @@ -0,0 +1,196 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. 
Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.http.client.config.RequestConfig; +import org.apache.http.impl.client.CloseableHttpClient; + +import java.io.IOException; +import java.util.Iterator; +import java.util.List; +import java.util.Locale; +import java.util.Objects; +import java.util.concurrent.Executors; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.ScheduledFuture; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; +import java.util.function.Predicate; +import java.util.stream.Stream; + +public class SniffingConnectionPool extends AbstractStaticConnectionPool { + + private static final Log logger = LogFactory.getLog(SniffingConnectionPool.class); + + private final boolean sniffOnFailure; + private final Sniffer sniffer; + private volatile List connections; + private final SnifferTask snifferTask; + + public SniffingConnectionPool(int sniffInterval, boolean sniffOnFailure, int sniffAfterFailureDelay, + CloseableHttpClient client, RequestConfig sniffRequestConfig, int sniffRequestTimeout, Scheme scheme, + Predicate connectionSelector, Node... 
nodes) { + super(connectionSelector); + if (sniffInterval <= 0) { + throw new IllegalArgumentException("sniffInterval must be greater than 0"); + } + if (sniffAfterFailureDelay <= 0) { + throw new IllegalArgumentException("sniffAfterFailureDelay must be greater than 0"); + } + Objects.requireNonNull(scheme, "scheme cannot be null"); + if (nodes == null || nodes.length == 0) { + throw new IllegalArgumentException("no nodes provided"); + } + this.sniffOnFailure = sniffOnFailure; + this.sniffer = new Sniffer(client, sniffRequestConfig, sniffRequestTimeout, scheme.toString()); + this.connections = createConnections(nodes); + this.snifferTask = new SnifferTask(sniffInterval, sniffAfterFailureDelay); + } + + @Override + protected List getConnections() { + return this.connections; + } + + @Override + public void beforeAttempt(StatefulConnection connection) throws IOException { + + } + + @Override + public void onFailure(StatefulConnection connection) throws IOException { + super.onFailure(connection); + if (sniffOnFailure) { + //re-sniff immediately but take out the node that failed + snifferTask.sniffOnFailure(connection.getNode()); + } + } + + @Override + public void close() throws IOException { + snifferTask.shutdown(); + } + + public enum Scheme { + HTTP, HTTPS; + + @Override + public String toString() { + return name().toLowerCase(Locale.ROOT); + } + } + + private class SnifferTask implements Runnable { + private final int sniffInterval; + private final int sniffAfterFailureDelay; + private final ScheduledExecutorService scheduledExecutorService; + private final AtomicBoolean running = new AtomicBoolean(false); + private volatile boolean failure = false; + private volatile ScheduledFuture scheduledFuture; + + private SnifferTask(int sniffInterval, int sniffAfterFailureDelay) { + this.sniffInterval = sniffInterval; + this.sniffAfterFailureDelay = sniffAfterFailureDelay; + this.scheduledExecutorService = Executors.newScheduledThreadPool(1); + this.scheduledFuture = 
this.scheduledExecutorService.schedule(this, 0, TimeUnit.MILLISECONDS); + } + + @Override + public void run() { + sniff(node -> true); + } + + void sniffOnFailure(Node failedNode) { + //sync sniff straightaway on failure + failure = true; + sniff(node -> node.getHttpHost().equals(failedNode.getHttpHost()) == false); + } + + void sniff(Predicate nodeFilter) { + if (running.compareAndSet(false, true)) { + try { + Iterator connectionIterator = nextUnfilteredConnection().iterator(); + if (connectionIterator.hasNext()) { + sniff(connectionIterator, nodeFilter); + } else { + StatefulConnection connection = lastResortConnection(); + logger.info("no healthy nodes available, trying " + connection.getNode()); + sniff(Stream.of(connection).iterator(), nodeFilter); + } + } catch (Throwable t) { + logger.error("error while sniffing nodes", t); + } finally { + try { + //regardless of whether and when the next sniff is scheduled, cancel it and schedule a new one with updated delay + this.scheduledFuture.cancel(false); + if (this.failure) { + this.scheduledFuture = this.scheduledExecutorService.schedule(this, + sniffAfterFailureDelay, TimeUnit.MILLISECONDS); + this.failure = false; + } else { + this.scheduledFuture = this.scheduledExecutorService.schedule(this, sniffInterval, TimeUnit.MILLISECONDS); + } + } catch (Throwable t) { + logger.error("error while scheduling next sniffer task", t); + } finally { + running.set(false); + } + } + } + } + + void sniff(Iterator connectionIterator, Predicate nodeFilter) throws IOException { + IOException lastSeenException = null; + while (connectionIterator.hasNext()) { + StatefulConnection connection = connectionIterator.next(); + try { + List sniffedNodes = sniffer.sniffNodes(connection.getNode()); + Node[] filteredNodes = sniffedNodes.stream().filter(nodeFilter).toArray(Node[]::new); + logger.debug("adding " + filteredNodes.length + " nodes out of " + sniffedNodes.size() + " sniffed nodes"); + connections = createConnections(filteredNodes); 
+ onSuccess(connection); + return; + } catch (IOException e) { + //here we have control over the request, if it fails something is really wrong, always call onFailure + onFailure(connection); + if (lastSeenException != null) { + e.addSuppressed(lastSeenException); + } + lastSeenException = e; + } + } + logger.warn("failed to sniff nodes", lastSeenException); + } + + void shutdown() { + scheduledExecutorService.shutdown(); + try { + if (scheduledExecutorService.awaitTermination(1000, TimeUnit.MILLISECONDS)) { + return; + } + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + } + scheduledExecutorService.shutdownNow(); + } + } +} diff --git a/client/src/main/java/org/elasticsearch/client/StatefulConnection.java b/client/src/main/java/org/elasticsearch/client/StatefulConnection.java new file mode 100644 index 0000000000000..1ac72382e1994 --- /dev/null +++ b/client/src/main/java/org/elasticsearch/client/StatefulConnection.java @@ -0,0 +1,125 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client; + +import java.util.concurrent.TimeUnit; + +/** + * {@link Connection} subclass that has a mutable state, based on previous usage. 
+ * When first created, a connection is in unknown state, till it is used for the first time + * and marked either dead or alive based on the outcome of the first usage. + * Should be marked alive when properly working. + * Should be marked dead when it caused a failure, in which case the connection may be retried some time later, + * as soon as {@link #shouldBeRetried()} returns true, which depends on how many consecutive failed attempts + * were counted and when the last one was registered. + * Should be marked resurrected if in dead state, as last resort in case there are no live connections available + * and none of the dead ones are ready to be retried yet. When marked resurrected, the number of failed attempts + * and its timeout is not reset so that if it gets marked dead again it returns to the exact state before resurrection. + */ +public final class StatefulConnection extends Connection { + //TODO make these values configurable through the connection pool? + private static final long DEFAULT_CONNECTION_TIMEOUT_MILLIS = TimeUnit.MINUTES.toMillis(1); + private static final long MAX_CONNECTION_TIMEOUT_MILLIS = TimeUnit.MINUTES.toMillis(30); + + private volatile State state = State.UNKNOWN; + private volatile int failedAttempts = -1; + private volatile long deadUntil = -1; + + /** + * Creates a new mutable connection pointing to the provided {@link Node} argument + */ + public StatefulConnection(Node node) { + super(node); + } + + /** + * Marks connection as dead. Should be called in case the corresponding node is not responding or caused failures. + * Once marked dead, the number of failed attempts will be incremented on each call to this method. A dead connection + * should be retried once {@link #shouldBeRetried()} returns true, which depends on the number of previous failed attempts + * and when the last failure was registered. 
+ */
+    void markDead() {
+        synchronized (this) {
+            int failedAttempts = Math.max(this.failedAttempts, 0);
+            long timeoutMillis = (long)Math.min(DEFAULT_CONNECTION_TIMEOUT_MILLIS * 2 * Math.pow(2, failedAttempts * 0.5 - 1),
+                    MAX_CONNECTION_TIMEOUT_MILLIS);
+            this.deadUntil = System.nanoTime() + TimeUnit.MILLISECONDS.toNanos(timeoutMillis);
+            this.failedAttempts = ++failedAttempts;
+            this.state = State.DEAD;
+        }
+    }
+
+    /**
+     * Marks this connection alive. Should be called when the corresponding node is working properly.
+     * Will reset the number of failed attempts that were counted in case the connection was previously dead,
+     * as well as its dead timeout.
+     */
+    void markAlive() {
+        if (this.state != State.ALIVE) {
+            synchronized (this) {
+                this.deadUntil = -1;
+                this.failedAttempts = 0;
+                this.state = State.ALIVE;
+            }
+        }
+    }
+
+    /**
+     * Resets the connection to its initial state, so it will be retried. To be called when all the connections in the pool
+     * are dead, so that one connection can be retried. Note that calling this method only changes the state of the connection,
+     * it doesn't reset its failed attempts and dead until timestamp. That way if the connection goes back to dead straightaway
+     * all of its previous failed attempts are taken into account.
+     */
+    void markResurrected() {
+        if (this.state == State.DEAD) {
+            synchronized (this) {
+                this.state = State.UNKNOWN;
+            }
+        }
+    }
+
+    /**
+     * Returns the timestamp until which the connection is supposed to stay dead, before it can be retried
+     */
+    public long getDeadUntil() {
+        return deadUntil;
+    }
+
+    /**
+     * Returns true if the connection is alive, false otherwise.
+     */
+    public boolean isAlive() {
+        return state == State.ALIVE;
+    }
+
+    /**
+     * Returns true in case the connection is not alive but should be used/retried, false otherwise.
+     * Returns true in case the connection is in unknown state (never used before) or resurrected.
When the connection is dead, + * returns true when it is time to retry it, depending on how many failed attempts were registered and when the last failure + * happened (minimum 1 minute, maximum 30 minutes). + */ + public boolean shouldBeRetried() { + return state == State.UNKNOWN || (state == State.DEAD && System.nanoTime() - deadUntil >= 0); + } + + private enum State { + UNKNOWN, DEAD, ALIVE + } +} diff --git a/client/src/main/java/org/elasticsearch/client/StaticConnectionPool.java b/client/src/main/java/org/elasticsearch/client/StaticConnectionPool.java new file mode 100644 index 0000000000000..5fb32341c99a5 --- /dev/null +++ b/client/src/main/java/org/elasticsearch/client/StaticConnectionPool.java @@ -0,0 +1,93 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.client; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.http.StatusLine; +import org.apache.http.client.config.RequestConfig; +import org.apache.http.client.methods.CloseableHttpResponse; +import org.apache.http.client.methods.HttpHead; +import org.apache.http.impl.client.CloseableHttpClient; +import org.apache.http.util.EntityUtils; + +import java.io.IOException; +import java.util.List; +import java.util.Objects; +import java.util.function.Predicate; + +public class StaticConnectionPool extends AbstractStaticConnectionPool { + + private static final Log logger = LogFactory.getLog(StaticConnectionPool.class); + + private final CloseableHttpClient client; + private final boolean pingEnabled; + private final RequestConfig pingRequestConfig; + private final List connections; + + public StaticConnectionPool(CloseableHttpClient client, boolean pingEnabled, RequestConfig pingRequestConfig, + Predicate connectionSelector, Node... 
nodes) { + super(connectionSelector); + Objects.requireNonNull(client, "client cannot be null"); + Objects.requireNonNull(pingRequestConfig, "pingRequestConfig cannot be null"); + if (nodes == null || nodes.length == 0) { + throw new IllegalArgumentException("no nodes provided"); + } + this.client = client; + this.pingEnabled = pingEnabled; + this.pingRequestConfig = pingRequestConfig; + this.connections = createConnections(nodes); + } + + @Override + protected List getConnections() { + return connections; + } + + @Override + public void beforeAttempt(StatefulConnection connection) throws IOException { + if (pingEnabled && connection.shouldBeRetried()) { + HttpHead httpHead = new HttpHead("/"); + httpHead.setConfig(pingRequestConfig); + StatusLine statusLine; + try(CloseableHttpResponse httpResponse = client.execute(connection.getNode().getHttpHost(), httpHead)) { + statusLine = httpResponse.getStatusLine(); + EntityUtils.consume(httpResponse.getEntity()); + } catch(IOException e) { + RequestLogger.log(logger, "ping failed", httpHead.getRequestLine(), connection.getNode(), e); + onFailure(connection); + throw e; + } + if (statusLine.getStatusCode() >= 300) { + RequestLogger.log(logger, "ping failed", httpHead.getRequestLine(), connection.getNode(), statusLine); + onFailure(connection); + throw new ElasticsearchResponseException(httpHead.getRequestLine(), connection.getNode(), statusLine); + } else { + RequestLogger.log(logger, "ping succeeded", httpHead.getRequestLine(), connection.getNode(), statusLine); + onSuccess(connection); + } + } + } + + @Override + public void close() throws IOException { + //no-op nothing to close + } +} diff --git a/client/src/main/java/org/elasticsearch/client/Transport.java b/client/src/main/java/org/elasticsearch/client/Transport.java new file mode 100644 index 0000000000000..40f68ab61cc24 --- /dev/null +++ b/client/src/main/java/org/elasticsearch/client/Transport.java @@ -0,0 +1,207 @@ +/* + * Licensed to Elasticsearch under one or 
more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client; + +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.http.HttpEntity; +import org.apache.http.StatusLine; +import org.apache.http.client.methods.CloseableHttpResponse; +import org.apache.http.client.methods.HttpEntityEnclosingRequestBase; +import org.apache.http.client.methods.HttpHead; +import org.apache.http.client.methods.HttpPost; +import org.apache.http.client.methods.HttpPut; +import org.apache.http.client.methods.HttpRequestBase; +import org.apache.http.client.utils.URIBuilder; +import org.apache.http.impl.client.CloseableHttpClient; +import org.apache.http.util.EntityUtils; + +import java.io.Closeable; +import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; +import java.util.Iterator; +import java.util.Map; +import java.util.Objects; +import java.util.concurrent.TimeUnit; +import java.util.stream.Stream; + +final class Transport implements Closeable { + + private static final Log logger = LogFactory.getLog(Transport.class); + + private final CloseableHttpClient client; + private final ConnectionPool connectionPool; + private final long maxRetryTimeout; + + Transport(CloseableHttpClient client, 
ConnectionPool connectionPool, long maxRetryTimeout) { + Objects.requireNonNull(client, "client cannot be null"); + Objects.requireNonNull(connectionPool, "connectionPool cannot be null"); + if (maxRetryTimeout <= 0) { + throw new IllegalArgumentException("maxRetryTimeout must be greater than 0"); + } + this.client = client; + this.connectionPool = connectionPool; + this.maxRetryTimeout = maxRetryTimeout; + } + + ElasticsearchResponse performRequest(Verb verb, String endpoint, Map params, HttpEntity entity) throws IOException { + URI uri = buildUri(endpoint, params); + HttpRequestBase request = createHttpRequest(verb, uri, entity); + Iterator connectionIterator = connectionPool.nextConnection().iterator(); + if (connectionIterator.hasNext() == false) { + C connection = connectionPool.lastResortConnection(); + logger.info("no healthy nodes available, trying " + connection.getNode()); + return performRequest(request, Stream.of(connection).iterator()); + } + return performRequest(request, connectionIterator); + } + + private ElasticsearchResponse performRequest(HttpRequestBase request, Iterator connectionIterator) throws IOException { + //we apply a soft margin so that e.g. 
if a request took 59 seconds and timeout is set to 60 we don't do another attempt + long retryTimeout = Math.round(this.maxRetryTimeout / (float)100 * 98); + IOException lastSeenException = null; + long startTime = System.nanoTime(); + + while (connectionIterator.hasNext()) { + C connection = connectionIterator.next(); + + if (lastSeenException != null) { + long timeElapsed = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startTime); + long timeout = retryTimeout - timeElapsed; + if (timeout <= 0) { + RetryTimeoutException retryTimeoutException = new RetryTimeoutException( + "request retries exceeded max retry timeout [" + retryTimeout + "]"); + retryTimeoutException.addSuppressed(lastSeenException); + throw retryTimeoutException; + } + } + + try { + connectionPool.beforeAttempt(connection); + } catch(IOException e) { + lastSeenException = addSuppressedException(lastSeenException, e); + continue; + } + + try { + ElasticsearchResponse response = performRequest(request, connection); + connectionPool.onSuccess(connection); + return response; + } catch(ElasticsearchResponseException e) { + if (e.isRecoverable()) { + connectionPool.onFailure(connection); + lastSeenException = addSuppressedException(lastSeenException, e); + } else { + //don't retry and call onSuccess as the error should be a request problem + connectionPool.onSuccess(connection); + throw e; + } + } catch(IOException e) { + connectionPool.onFailure(connection); + lastSeenException = addSuppressedException(lastSeenException, e); + } + } + assert lastSeenException != null; + throw lastSeenException; + } + + private ElasticsearchResponse performRequest(HttpRequestBase request, C connection) throws IOException { + CloseableHttpResponse response; + try { + response = client.execute(connection.getNode().getHttpHost(), request); + } catch(IOException e) { + RequestLogger.log(logger, "request failed", request.getRequestLine(), connection.getNode(), e); + throw e; + } finally { + request.reset(); + } + 
StatusLine statusLine = response.getStatusLine(); + //TODO make ignore status code configurable. rest-spec and tests support that parameter. + if (statusLine.getStatusCode() < 300 || + request.getMethod().equals(HttpHead.METHOD_NAME) && statusLine.getStatusCode() == 404) { + RequestLogger.log(logger, "request succeeded", request.getRequestLine(), connection.getNode(), response.getStatusLine()); + return new ElasticsearchResponse(request.getRequestLine(), connection.getNode(), response); + } else { + EntityUtils.consume(response.getEntity()); + RequestLogger.log(logger, "request failed", request.getRequestLine(), connection.getNode(), response.getStatusLine()); + throw new ElasticsearchResponseException(request.getRequestLine(), connection.getNode(), statusLine); + } + } + + private static IOException addSuppressedException(IOException suppressedException, IOException currentException) { + if (suppressedException != null) { + currentException.addSuppressed(suppressedException); + } + return currentException; + } + + private static HttpRequestBase createHttpRequest(Verb verb, URI uri, HttpEntity entity) { + switch(verb) { + case DELETE: + HttpDeleteWithEntity httpDeleteWithEntity = new HttpDeleteWithEntity(uri); + addRequestBody(httpDeleteWithEntity, entity); + return httpDeleteWithEntity; + case GET: + HttpGetWithEntity httpGetWithEntity = new HttpGetWithEntity(uri); + addRequestBody(httpGetWithEntity, entity); + return httpGetWithEntity; + case HEAD: + if (entity != null) { + throw new UnsupportedOperationException("HEAD with body is not supported"); + } + return new HttpHead(uri); + case POST: + HttpPost httpPost = new HttpPost(uri); + addRequestBody(httpPost, entity); + return httpPost; + case PUT: + HttpPut httpPut = new HttpPut(uri); + addRequestBody(httpPut, entity); + return httpPut; + default: + throw new UnsupportedOperationException("http method not supported: " + verb); + } + } + + private static void addRequestBody(HttpEntityEnclosingRequestBase 
httpRequest, HttpEntity entity) { + if (entity != null) { + httpRequest.setEntity(entity); + } + } + + private static URI buildUri(String path, Map params) { + try { + URIBuilder uriBuilder = new URIBuilder(path); + for (Map.Entry param : params.entrySet()) { + uriBuilder.addParameter(param.getKey(), param.getValue().toString()); + } + return uriBuilder.build(); + } catch(URISyntaxException e) { + throw new IllegalArgumentException(e.getMessage(), e); + } + } + + @Override + public void close() throws IOException { + connectionPool.close(); + client.close(); + } +} diff --git a/client/src/main/java/org/elasticsearch/client/Verb.java b/client/src/main/java/org/elasticsearch/client/Verb.java new file mode 100644 index 0000000000000..74a883539776e --- /dev/null +++ b/client/src/main/java/org/elasticsearch/client/Verb.java @@ -0,0 +1,27 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.client; + +/** + * Holds the http verbs/methods supported by elasticsearch, which can be used when sending a request + */ +public enum Verb { + DELETE, GET, HEAD, POST, PUT +} diff --git a/client/src/test/java/org/elasticsearch/client/NodeTests.java b/client/src/test/java/org/elasticsearch/client/NodeTests.java new file mode 100644 index 0000000000000..73bad0f6d7b18 --- /dev/null +++ b/client/src/test/java/org/elasticsearch/client/NodeTests.java @@ -0,0 +1,136 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.client; + +import com.carrotsearch.randomizedtesting.generators.RandomInts; +import com.carrotsearch.randomizedtesting.generators.RandomPicks; +import com.carrotsearch.randomizedtesting.generators.RandomStrings; +import org.apache.http.HttpHost; +import org.apache.lucene.util.LuceneTestCase; + +import java.util.Arrays; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Map; +import java.util.Set; + +import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.CoreMatchers.notNullValue; +import static org.hamcrest.CoreMatchers.sameInstance; + +public class NodeTests extends LuceneTestCase { + + public void testSingleArgumentConstructor() { + HttpHost httpHost = new HttpHost(randomHost(), randomPort(), randomScheme()); + Node node = new Node(httpHost); + assertThat(node.getHttpHost(), sameInstance(httpHost)); + assertThat(node.getAttributes(), notNullValue()); + assertThat(node.getAttributes().size(), equalTo(0)); + assertThat(node.getRoles(), notNullValue()); + assertThat(node.getRoles(), equalTo(new HashSet<>(Arrays.asList(Node.Role.values())))); + + try { + new Node(null); + fail("node construction should have failed"); + } catch(NullPointerException e) { + assertThat(e.getMessage(), equalTo("host cannot be null")); + } + + } + + public void testThreeArgumentsConstructor() { + HttpHost httpHost = new HttpHost(randomHost(), randomPort(), randomScheme()); + Set roles = randomRoles(); + Map attributes = randomAttributes(); + Node node = new Node(httpHost, roles, attributes); + assertThat(node.getHttpHost(), sameInstance(httpHost)); + assertThat(node.getAttributes(), equalTo(attributes)); + assertThat(node.getRoles(), equalTo(roles)); + + try { + new Node(null, roles, attributes); + fail("node construction should have failed"); + } catch(NullPointerException e) { + assertThat(e.getMessage(), equalTo("host cannot be null")); + } + + try { + new Node(httpHost, null, attributes); + fail("node 
construction should have failed"); + } catch(NullPointerException e) { + assertThat(e.getMessage(), equalTo("roles cannot be null")); + } + + try { + new Node(httpHost, roles, null); + fail("node construction should have failed"); + } catch(NullPointerException e) { + assertThat(e.getMessage(), equalTo("attributes cannot be null")); + } + } + + public void testToString() { + HttpHost httpHost = new HttpHost(randomHost(), randomPort(), randomScheme()); + Set roles = randomRoles(); + Map attributes = randomAttributes(); + Node node = new Node(httpHost, roles, attributes); + String expectedString = "Node{" + + "httpHost=" + httpHost.toString() + + ", roles=" + roles.toString() + + ", attributes=" + attributes.toString() + + '}'; + assertThat(node.toString(), equalTo(expectedString)); + } + + private static String randomHost() { + return RandomStrings.randomAsciiOfLengthBetween(random(), 5, 10); + } + + private static int randomPort() { + return random().nextInt(); + } + + private static String randomScheme() { + if (rarely()) { + return null; + } + return random().nextBoolean() ? 
"http" : "https"; + } + + private static Map randomAttributes() { + int numAttributes = RandomInts.randomIntBetween(random(), 0, 5); + Map attributes = new HashMap<>(numAttributes); + for (int i = 0; i < numAttributes; i++) { + String key = RandomStrings.randomAsciiOfLengthBetween(random(), 3, 10); + String value = RandomStrings.randomAsciiOfLengthBetween(random(), 3, 10); + attributes.put(key, value); + } + return attributes; + } + + private static Set randomRoles() { + int numRoles = RandomInts.randomIntBetween(random(), 0, 3); + Set roles = new HashSet<>(numRoles); + for (int j = 0; j < numRoles; j++) { + roles.add(RandomPicks.randomFrom(random(), Node.Role.values())); + } + return roles; + } +} diff --git a/client/src/test/java/org/elasticsearch/client/RestClientTests.java b/client/src/test/java/org/elasticsearch/client/RestClientTests.java deleted file mode 100644 index 038cb8bab3c7d..0000000000000 --- a/client/src/test/java/org/elasticsearch/client/RestClientTests.java +++ /dev/null @@ -1,152 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -package org.elasticsearch.client; -import com.sun.net.httpserver.HttpExchange; -import com.sun.net.httpserver.HttpServer; -import org.apache.http.HttpHost; -import org.apache.http.HttpResponse; -import org.apache.lucene.util.LuceneTestCase; - -import java.io.IOException; -import java.io.OutputStream; -import java.net.InetSocketAddress; -import java.util.Arrays; -import java.util.Collections; -import java.util.List; - -public class RestClientTests extends LuceneTestCase { - //TODO this should be refactored into a base test!! - HttpServer server; - protected String clusterName = "elasticsearch"; - protected List additionalNodes = Collections.emptyList(); - - - public void setUp() throws Exception { - super.setUp(); - server = HttpServer.create(new InetSocketAddress(0), 0); - server.setExecutor(null); // creates a default executor - server.start(); - server.createContext("/", (t) -> { - handle("/", t); - }); - server.createContext("/_cat/nodes", (t) -> { - handle("/_cat/nodes", t); - }); - server.createContext("/_cat/health", (t) -> { - handle("/_cat/health", t); - }); - } - - protected void handle(String path, HttpExchange t) throws IOException { - final String response; - switch (path) { - case "/": - response = "{}"; - break; - case "/_cat/nodes": - StringBuilder builder = new StringBuilder( "127.0.0.1:" + server.getAddress().getPort() + " " + "d\n"); - for (String host : additionalNodes) { - builder.append(host).append("\n"); - } - response = builder.toString(); - break; - case "/_cat/health": - response = clusterName; - break; - default: - throw new IllegalArgumentException("no such handler " + path); - } - t.sendResponseHeaders(200, response.length()); - OutputStream os = t.getResponseBody(); - os.write(response.getBytes()); - os.close(); - } - - public void tearDown() throws Exception { - super.tearDown(); - server.stop(0); - } - - public void testGetClustername() throws IOException { - HttpHost httpHost = new HttpHost("127.0.0.1", 
server.getAddress().getPort(), "http"); - try(RestClient client = new RestClient(httpHost)) { - assertEquals(clusterName, client.getClusterName(httpHost)); - } - } - - public void testFetchNodes() throws IOException { - additionalNodes = Arrays.asList("127.0.0.2:9200 c", "127.0.0.3:9200 d"); - HttpHost httpHost = new HttpHost("127.0.0.1", server.getAddress().getPort(), "http"); - try(RestClient client = new RestClient(httpHost)) { - assertEquals(3, client.fetchNodes(httpHost, true, true, false).size()); - assertTrue(client.fetchNodes(httpHost, true, true, false).toString(), client.fetchNodes(httpHost, true, true, false) - .contains(new HttpHost("127.0.0.2", 9200, "http"))); - assertTrue(client.fetchNodes(httpHost, true, true, false).contains(new HttpHost("127.0.0.3", 9200, "http"))); - assertTrue(client.fetchNodes(httpHost, true, true, false).contains(httpHost)); - assertEquals(1, client.fetchNodes(httpHost, true, true, true).size()); - } - } - - public void testSimpleRetry() throws IOException{ - additionalNodes = Arrays.asList("127.0.0.2:9200 c", "127.0.0.3:9200 d"); - HttpHost httpHost = new HttpHost("127.0.0.1", server.getAddress().getPort(), "http"); - try(RestClient client = new RestClient(httpHost)) { - client.setNodes(client.fetchNodes(httpHost, true, true, false)); - HttpResponse httpResponse = client.httpGet("/_cat/health", Collections.emptyMap()); - assertEquals(httpResponse.getStatusLine().getStatusCode(), 200); - server.stop(0); - try { - client.httpGet("/_cat/health", Collections.emptyMap()); - fail(); - } catch (IOException ex) { - assertTrue(ex.getMessage(), ex.getMessage().endsWith("failed: connect timed out") || - ex.getMessage().endsWith("failed: Connection refused")); - } - } - } - - public void testBlacklist() throws IOException{ - additionalNodes = Arrays.asList("127.0.0.2:9200 c", "127.0.0.3:9200 d"); - HttpHost httpHost = new HttpHost("127.0.0.1", server.getAddress().getPort(), "http"); - try(RestClient client = new RestClient(httpHost)) { - 
client.setNodes(client.fetchNodes(httpHost, true, true, false)); - assertEquals(3, client.getNumHosts()); - assertEquals(0, client.getNumBlacklistedHosts()); - server.stop(0); - try { - client.httpGet("/_cat/health", Collections.emptyMap()); - fail(); - } catch (IOException ex) { - assertTrue(ex.getMessage(), ex.getMessage().endsWith("failed: connect timed out") || - ex.getMessage().endsWith("failed: Connection refused")); - } - assertEquals(3, client.getNumHosts()); - assertEquals(3, client.getNumBlacklistedHosts()); - int num = 0; - for (HttpHost host : client.getHostIterator(false)) { - num++; // nothing here - } - assertEquals(0, num); - for (HttpHost host : client.getHostIterator(true)) { - num++; // all there - we have to retry now - } - assertEquals(3, num); - } - } -} diff --git a/client/src/test/java/org/elasticsearch/client/SnifferTests.java b/client/src/test/java/org/elasticsearch/client/SnifferTests.java new file mode 100644 index 0000000000000..9169afd7695b9 --- /dev/null +++ b/client/src/test/java/org/elasticsearch/client/SnifferTests.java @@ -0,0 +1,259 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.client; + +import com.carrotsearch.randomizedtesting.generators.RandomInts; +import com.carrotsearch.randomizedtesting.generators.RandomPicks; +import com.carrotsearch.randomizedtesting.generators.RandomStrings; +import com.fasterxml.jackson.core.JsonFactory; +import com.fasterxml.jackson.core.JsonGenerator; +import okhttp3.mockwebserver.Dispatcher; +import okhttp3.mockwebserver.MockResponse; +import okhttp3.mockwebserver.MockWebServer; +import okhttp3.mockwebserver.RecordedRequest; +import org.apache.http.HttpHost; +import org.apache.http.client.config.RequestConfig; +import org.apache.http.impl.client.CloseableHttpClient; +import org.apache.http.impl.client.HttpClientBuilder; +import org.apache.lucene.util.LuceneTestCase; +import org.junit.After; +import org.junit.Before; + +import java.io.IOException; +import java.io.StringWriter; +import java.io.UnsupportedEncodingException; +import java.net.URISyntaxException; +import java.net.URLDecoder; +import java.nio.charset.StandardCharsets; +import java.util.ArrayList; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.Iterator; +import java.util.List; +import java.util.Map; +import java.util.Set; +import java.util.logging.LogManager; + +import static org.hamcrest.CoreMatchers.containsString; +import static org.hamcrest.CoreMatchers.equalTo; + +public class SnifferTests extends LuceneTestCase { + + static { + //prevent MockWebServer from logging to stdout and stderr + LogManager.getLogManager().reset(); + } + + private int sniffRequestTimeout; + private SniffingConnectionPool.Scheme scheme; + private SniffResponse sniffResponse; + private MockWebServer server; + + @Before + public void startMockWebServer() throws IOException { + this.sniffRequestTimeout = RandomInts.randomIntBetween(random(), 1000, 10000); + this.scheme = RandomPicks.randomFrom(random(), SniffingConnectionPool.Scheme.values()); + if (rarely()) { + this.sniffResponse = 
SniffResponse.buildFailure(); + } else { + this.sniffResponse = buildSniffResponse(scheme); + } + this.server = buildMockWebServer(sniffResponse, sniffRequestTimeout); + this.server.start(); + } + + @After + public void stopMockWebServer() throws IOException { + server.shutdown(); + } + + public void testSniffNodes() throws IOException, URISyntaxException { + CloseableHttpClient client = HttpClientBuilder.create().build(); + Sniffer sniffer = new Sniffer(client, RequestConfig.DEFAULT, sniffRequestTimeout, scheme.toString()); + HttpHost httpHost = new HttpHost(server.getHostName(), server.getPort()); + try { + List sniffedNodes = sniffer.sniffNodes(new Node(httpHost)); + if (sniffResponse.isFailure) { + fail("sniffNodes should have failed"); + } + assertThat(sniffedNodes.size(), equalTo(sniffResponse.nodes.size())); + Iterator responseNodesIterator = sniffResponse.nodes.iterator(); + for (Node sniffedNode : sniffedNodes) { + Node responseNode = responseNodesIterator.next(); + assertThat(sniffedNode.getHttpHost(), equalTo(responseNode.getHttpHost())); + assertThat(sniffedNode.getRoles(), equalTo(responseNode.getRoles())); + assertThat(sniffedNode.getAttributes(), equalTo(responseNode.getAttributes())); + } + } catch(ElasticsearchResponseException e) { + if (sniffResponse.isFailure) { + assertThat(e.getMessage(), containsString("GET http://localhost:" + server.getPort() + + "/_nodes/http?timeout=" + sniffRequestTimeout)); + assertThat(e.getMessage(), containsString(Integer.toString(sniffResponse.nodesInfoResponseCode))); + assertThat(e.getNode().getHttpHost(), equalTo(httpHost)); + assertThat(e.getStatusLine().getStatusCode(), equalTo(sniffResponse.nodesInfoResponseCode)); + assertThat(e.getRequestLine().toString(), equalTo("GET /_nodes/http?timeout=" + sniffRequestTimeout + "ms HTTP/1.1")); + } else { + fail("sniffNodes should have succeeded: " + e.getStatusLine()); + } + } + } + + private static MockWebServer buildMockWebServer(SniffResponse sniffResponse, int 
sniffTimeout) throws UnsupportedEncodingException { + MockWebServer server = new MockWebServer(); + final Dispatcher dispatcher = new Dispatcher() { + @Override + public MockResponse dispatch(RecordedRequest request) throws InterruptedException { + String decodedUrl; + try { + decodedUrl = URLDecoder.decode(request.getPath(), StandardCharsets.UTF_8.name()); + } catch (UnsupportedEncodingException e) { + throw new RuntimeException(e); + } + String sniffUrl = "/_nodes/http?timeout=" + sniffTimeout + "ms"; + if (sniffUrl.equals(decodedUrl)) { + return new MockResponse().setBody(sniffResponse.nodesInfoBody).setResponseCode(sniffResponse.nodesInfoResponseCode); + } else { + return new MockResponse().setResponseCode(404); + } + } + }; + server.setDispatcher(dispatcher); + return server; + } + + private static SniffResponse buildSniffResponse(SniffingConnectionPool.Scheme scheme) throws IOException { + int numNodes = RandomInts.randomIntBetween(random(), 1, 5); + List nodes = new ArrayList<>(numNodes); + JsonFactory jsonFactory = new JsonFactory(); + StringWriter writer = new StringWriter(); + JsonGenerator generator = jsonFactory.createGenerator(writer); + generator.writeStartObject(); + if (random().nextBoolean()) { + generator.writeStringField("cluster_name", "elasticsearch"); + } + if (random().nextBoolean()) { + generator.writeObjectFieldStart("bogus_object"); + generator.writeEndObject(); + } + generator.writeObjectFieldStart("nodes"); + for (int i = 0; i < numNodes; i++) { + String nodeId = RandomStrings.randomAsciiOfLengthBetween(random(), 5, 10); + generator.writeObjectFieldStart(nodeId); + if (random().nextBoolean()) { + generator.writeObjectFieldStart("bogus_object"); + generator.writeEndObject(); + } + if (random().nextBoolean()) { + generator.writeArrayFieldStart("bogus_array"); + generator.writeStartObject(); + generator.writeEndObject(); + generator.writeEndArray(); + } + boolean isHttpEnabled = rarely() == false; + int numRoles = 
RandomInts.randomIntBetween(random(), 0, 3); + Set nodeRoles = new HashSet<>(numRoles); + for (int j = 0; j < numRoles; j++) { + Node.Role role; + do { + role = RandomPicks.randomFrom(random(), Node.Role.values()); + } while(nodeRoles.add(role) == false); + } + + int numAttributes = RandomInts.randomIntBetween(random(), 0, 3); + Map attributes = new HashMap<>(numAttributes); + for (int j = 0; j < numAttributes; j++) { + attributes.put("attr" + j, "value" + j); + } + if (isHttpEnabled) { + String host = "host" + i; + int port = RandomInts.randomIntBetween(random(), 9200, 9299); + HttpHost httpHost = new HttpHost(host, port, scheme.toString()); + nodes.add(new Node(httpHost, nodeRoles, attributes)); + generator.writeObjectFieldStart("http"); + if (random().nextBoolean()) { + generator.writeArrayFieldStart("bound_address"); + generator.writeString("[fe80::1]:" + port); + generator.writeString("[::1]:" + port); + generator.writeString("127.0.0.1:" + port); + generator.writeEndArray(); + } + if (random().nextBoolean()) { + generator.writeObjectFieldStart("bogus_object"); + generator.writeEndObject(); + } + generator.writeStringField("publish_address", httpHost.toHostString()); + if (random().nextBoolean()) { + generator.writeNumberField("max_content_length_in_bytes", 104857600); + } + generator.writeEndObject(); + } + generator.writeArrayFieldStart("roles"); + for (Node.Role nodeRole : nodeRoles) { + generator.writeString(nodeRole.toString()); + } + generator.writeEndArray(); + if (numAttributes > 0) { + generator.writeObjectFieldStart("attributes"); + } + for (Map.Entry entry : attributes.entrySet()) { + generator.writeStringField(entry.getKey(), entry.getValue()); + } + if (numAttributes > 0) { + generator.writeEndObject(); + } + generator.writeEndObject(); + } + generator.writeEndObject(); + generator.writeEndObject(); + generator.close(); + return SniffResponse.buildResponse(writer.toString(), nodes); + } + + private static class SniffResponse { + private final 
String nodesInfoBody; + private final int nodesInfoResponseCode; + private final List nodes; + private final boolean isFailure; + + SniffResponse(String nodesInfoBody, List nodes, boolean isFailure) { + this.nodesInfoBody = nodesInfoBody; + this.nodes = nodes; + this.isFailure = isFailure; + if (isFailure) { + this.nodesInfoResponseCode = randomErrorResponseCode(); + } else { + this.nodesInfoResponseCode = 200; + } + } + + static SniffResponse buildFailure() { + return new SniffResponse("", Collections.emptyList(), true); + } + + static SniffResponse buildResponse(String nodesInfoBody, List nodes) { + return new SniffResponse(nodesInfoBody, nodes, false); + } + } + + private static int randomErrorResponseCode() { + return RandomInts.randomIntBetween(random(), 400, 599); + } +} diff --git a/client/src/test/java/org/elasticsearch/client/SniffingConnectionPoolTests.java b/client/src/test/java/org/elasticsearch/client/SniffingConnectionPoolTests.java new file mode 100644 index 0000000000000..13eaadd619a61 --- /dev/null +++ b/client/src/test/java/org/elasticsearch/client/SniffingConnectionPoolTests.java @@ -0,0 +1,147 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.client; + +import com.carrotsearch.randomizedtesting.generators.RandomInts; +import com.carrotsearch.randomizedtesting.generators.RandomPicks; +import org.apache.http.HttpHost; +import org.apache.http.client.config.RequestConfig; +import org.apache.http.impl.client.CloseableHttpClient; +import org.apache.http.impl.client.HttpClientBuilder; +import org.apache.lucene.util.LuceneTestCase; + +import java.util.logging.LogManager; + +public class SniffingConnectionPoolTests extends LuceneTestCase { + + static { + LogManager.getLogManager().reset(); + } + + public void testConstructor() throws Exception { + CloseableHttpClient httpClient = HttpClientBuilder.create().build(); + int numNodes = RandomInts.randomIntBetween(random(), 1, 5); + Node[] nodes = new Node[numNodes]; + for (int i = 0; i < numNodes; i++) { + nodes[i] = new Node(new HttpHost("localhost", 9200)); + } + + try (SniffingConnectionPool connectionPool = new SniffingConnectionPool( + RandomInts.randomIntBetween(random(), Integer.MIN_VALUE, 0), random().nextBoolean(), + RandomInts.randomIntBetween(random(), 1, Integer.MAX_VALUE), httpClient, RequestConfig.DEFAULT, + RandomInts.randomIntBetween(random(), 1, Integer.MAX_VALUE), + RandomPicks.randomFrom(random(), SniffingConnectionPool.Scheme.values()), connection -> random().nextBoolean(), nodes)) { + + fail("pool creation should have failed " + connectionPool); + } catch(IllegalArgumentException e) { + assertEquals(e.getMessage(), "sniffInterval must be greater than 0"); + } + + try (SniffingConnectionPool connectionPool = new SniffingConnectionPool( + RandomInts.randomIntBetween(random(), 1, Integer.MAX_VALUE), random().nextBoolean(), + RandomInts.randomIntBetween(random(), Integer.MIN_VALUE, 0), httpClient, RequestConfig.DEFAULT, + RandomInts.randomIntBetween(random(), 1, Integer.MAX_VALUE), + RandomPicks.randomFrom(random(), SniffingConnectionPool.Scheme.values()), connection -> random().nextBoolean(), nodes)) { + fail("pool 
creation should have failed " + connectionPool); + } catch(IllegalArgumentException e) { + assertEquals(e.getMessage(), "sniffAfterFailureDelay must be greater than 0"); + } + + try (SniffingConnectionPool connectionPool = new SniffingConnectionPool( + RandomInts.randomIntBetween(random(), 1, Integer.MAX_VALUE), random().nextBoolean(), + RandomInts.randomIntBetween(random(), 1, Integer.MAX_VALUE), null, RequestConfig.DEFAULT, + RandomInts.randomIntBetween(random(), 1, Integer.MAX_VALUE), + RandomPicks.randomFrom(random(), SniffingConnectionPool.Scheme.values()), connection -> random().nextBoolean(), nodes)) { + fail("pool creation should have failed " + connectionPool); + } catch(NullPointerException e) { + assertEquals(e.getMessage(), "client cannot be null"); + } + + try (SniffingConnectionPool connectionPool = new SniffingConnectionPool( + RandomInts.randomIntBetween(random(), 1, Integer.MAX_VALUE), random().nextBoolean(), + RandomInts.randomIntBetween(random(), 1, Integer.MAX_VALUE), httpClient, null, + RandomInts.randomIntBetween(random(), 1, Integer.MAX_VALUE), + RandomPicks.randomFrom(random(), SniffingConnectionPool.Scheme.values()), connection -> random().nextBoolean(), nodes)) { + fail("pool creation should have failed " + connectionPool); + } catch(NullPointerException e) { + assertEquals(e.getMessage(), "sniffRequestConfig cannot be null"); + } + + try (SniffingConnectionPool connectionPool = new SniffingConnectionPool( + RandomInts.randomIntBetween(random(), 1, Integer.MAX_VALUE), random().nextBoolean(), + RandomInts.randomIntBetween(random(), 1, Integer.MAX_VALUE), httpClient, RequestConfig.DEFAULT, + RandomInts.randomIntBetween(random(), Integer.MIN_VALUE, 0), + RandomPicks.randomFrom(random(), SniffingConnectionPool.Scheme.values()), connection -> random().nextBoolean(), nodes)) { + fail("pool creation should have failed " + connectionPool); + } catch(IllegalArgumentException e) { + assertEquals(e.getMessage(), "sniffRequestTimeout must be greater 
than 0"); + } + + try (SniffingConnectionPool connectionPool = new SniffingConnectionPool( + RandomInts.randomIntBetween(random(), 1, Integer.MAX_VALUE), random().nextBoolean(), + RandomInts.randomIntBetween(random(), 1, Integer.MAX_VALUE), httpClient, RequestConfig.DEFAULT, + RandomInts.randomIntBetween(random(), 1, Integer.MAX_VALUE), + RandomPicks.randomFrom(random(), SniffingConnectionPool.Scheme.values()), null, nodes)) { + fail("pool creation should have failed " + connectionPool); + } catch(NullPointerException e) { + assertEquals(e.getMessage(), "connection selector predicate cannot be null"); + } + + try (SniffingConnectionPool connectionPool = new SniffingConnectionPool( + RandomInts.randomIntBetween(random(), 1, Integer.MAX_VALUE), random().nextBoolean(), + RandomInts.randomIntBetween(random(), 1, Integer.MAX_VALUE), httpClient, RequestConfig.DEFAULT, + RandomInts.randomIntBetween(random(), 1, Integer.MAX_VALUE), + RandomPicks.randomFrom(random(), SniffingConnectionPool.Scheme.values()), + connection -> random().nextBoolean(), (Node[])null)) { + fail("pool creation should have failed " + connectionPool); + } catch(IllegalArgumentException e) { + assertEquals(e.getMessage(), "no nodes provided"); + } + + try (SniffingConnectionPool connectionPool = new SniffingConnectionPool( + RandomInts.randomIntBetween(random(), 1, Integer.MAX_VALUE), random().nextBoolean(), + RandomInts.randomIntBetween(random(), 1, Integer.MAX_VALUE), httpClient, RequestConfig.DEFAULT, + RandomInts.randomIntBetween(random(), 1, Integer.MAX_VALUE), + RandomPicks.randomFrom(random(), SniffingConnectionPool.Scheme.values()), connection -> random().nextBoolean(), + (Node)null)) { + fail("pool creation should have failed " + connectionPool); + } catch(NullPointerException e) { + assertEquals(e.getMessage(), "node cannot be null"); + } + + try (SniffingConnectionPool connectionPool = new SniffingConnectionPool( + RandomInts.randomIntBetween(random(), 1, Integer.MAX_VALUE), 
random().nextBoolean(), + RandomInts.randomIntBetween(random(), 1, Integer.MAX_VALUE), httpClient, RequestConfig.DEFAULT, + RandomInts.randomIntBetween(random(), 1, Integer.MAX_VALUE), + RandomPicks.randomFrom(random(), SniffingConnectionPool.Scheme.values()), connection -> random().nextBoolean())) { + fail("pool creation should have failed " + connectionPool); + } catch(IllegalArgumentException e) { + assertEquals(e.getMessage(), "no nodes provided"); + } + + try (SniffingConnectionPool sniffingConnectionPool = new SniffingConnectionPool( + RandomInts.randomIntBetween(random(), 1, Integer.MAX_VALUE), random().nextBoolean(), + RandomInts.randomIntBetween(random(), 1, Integer.MAX_VALUE), httpClient, RequestConfig.DEFAULT, + RandomInts.randomIntBetween(random(), 1, Integer.MAX_VALUE), + RandomPicks.randomFrom(random(), SniffingConnectionPool.Scheme.values()), connection -> random().nextBoolean(), nodes)) { + assertNotNull(sniffingConnectionPool); + } + } +} diff --git a/client/src/test/java/org/elasticsearch/client/StaticConnectionPoolTests.java b/client/src/test/java/org/elasticsearch/client/StaticConnectionPoolTests.java new file mode 100644 index 0000000000000..9b5b29df1b165 --- /dev/null +++ b/client/src/test/java/org/elasticsearch/client/StaticConnectionPoolTests.java @@ -0,0 +1,87 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. 
See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client; + +import com.carrotsearch.randomizedtesting.generators.RandomInts; +import org.apache.http.HttpHost; +import org.apache.http.client.config.RequestConfig; +import org.apache.http.impl.client.CloseableHttpClient; +import org.apache.http.impl.client.HttpClientBuilder; +import org.apache.lucene.util.LuceneTestCase; + +import java.util.logging.LogManager; + +public class StaticConnectionPoolTests extends LuceneTestCase { + + static { + LogManager.getLogManager().reset(); + } + + public void testConstructor() { + CloseableHttpClient httpClient = HttpClientBuilder.create().build(); + int numNodes = RandomInts.randomIntBetween(random(), 1, 5); + Node[] nodes = new Node[numNodes]; + for (int i = 0; i < numNodes; i++) { + nodes[i] = new Node(new HttpHost("localhost", 9200)); + } + + try { + new StaticConnectionPool(null, random().nextBoolean(), RequestConfig.DEFAULT, connection -> random().nextBoolean(), nodes); + } catch(NullPointerException e) { + assertEquals(e.getMessage(), "client cannot be null"); + } + + try { + new StaticConnectionPool(httpClient, random().nextBoolean(), null, connection -> random().nextBoolean(), nodes); + } catch(NullPointerException e) { + assertEquals(e.getMessage(), "pingRequestConfig cannot be null"); + } + + try { + new StaticConnectionPool(httpClient, random().nextBoolean(), RequestConfig.DEFAULT, null, nodes); + } catch(NullPointerException e) { + assertEquals(e.getMessage(), "connection selector predicate cannot be null"); + } + + try { + new StaticConnectionPool(httpClient, random().nextBoolean(), RequestConfig.DEFAULT, + connection -> random().nextBoolean(), (Node)null); + } catch(NullPointerException e) { + assertEquals(e.getMessage(), "node cannot be null"); + } + + try { + new StaticConnectionPool(httpClient, random().nextBoolean(), RequestConfig.DEFAULT, + connection -> 
random().nextBoolean(), (Node[])null); + } catch(IllegalArgumentException e) { + assertEquals(e.getMessage(), "no nodes provided"); + } + + try { + new StaticConnectionPool(httpClient, random().nextBoolean(), RequestConfig.DEFAULT, connection -> random().nextBoolean()); + } catch(IllegalArgumentException e) { + assertEquals(e.getMessage(), "no nodes provided"); + } + + StaticConnectionPool staticConnectionPool = new StaticConnectionPool(httpClient, random().nextBoolean(), RequestConfig.DEFAULT, + connection -> random().nextBoolean(), nodes); + assertNotNull(staticConnectionPool); + } +} diff --git a/client/src/test/java/org/elasticsearch/client/TransportTests.java b/client/src/test/java/org/elasticsearch/client/TransportTests.java new file mode 100644 index 0000000000000..591eb516c59d0 --- /dev/null +++ b/client/src/test/java/org/elasticsearch/client/TransportTests.java @@ -0,0 +1,95 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.client; + +import com.carrotsearch.randomizedtesting.generators.RandomInts; +import org.apache.http.impl.client.CloseableHttpClient; +import org.apache.http.impl.client.HttpClientBuilder; +import org.apache.lucene.util.LuceneTestCase; + +import java.io.IOException; +import java.util.logging.LogManager; +import java.util.stream.Stream; + +public class TransportTests extends LuceneTestCase { + + static { + LogManager.getLogManager().reset(); + } + + public void testConstructor() { + CloseableHttpClient httpClient = HttpClientBuilder.create().build(); + ConnectionPool connectionPool = new ConnectionPool() { + @Override + public Stream nextConnection() { + return null; + } + + @Override + public Connection lastResortConnection() { + return null; + } + + @Override + public void beforeAttempt(Connection connection) throws IOException { + + } + + @Override + public void onSuccess(Connection connection) { + + } + + @Override + public void onFailure(Connection connection) throws IOException { + + } + + @Override + public void close() throws IOException { + + } + }; + + try { + new Transport<>(null, connectionPool, RandomInts.randomIntBetween(random(), 1, Integer.MAX_VALUE)); + fail("transport creation should have failed"); + } catch(NullPointerException e) { + assertEquals(e.getMessage(), "client cannot be null"); + } + + try { + new Transport<>(httpClient, null, RandomInts.randomIntBetween(random(), 1, Integer.MAX_VALUE)); + fail("transport creation should have failed"); + } catch(NullPointerException e) { + assertEquals(e.getMessage(), "connectionPool cannot be null"); + } + + try { + new Transport<>(httpClient, connectionPool, RandomInts.randomIntBetween(random(), Integer.MIN_VALUE, 0)); + fail("transport creation should have failed"); + } catch(IllegalArgumentException e) { + assertEquals(e.getMessage(), "maxRetryTimeout must be greater than 0"); + } + + Transport transport = new Transport<>(httpClient, connectionPool, 
RandomInts.randomIntBetween(random(), 1, Integer.MAX_VALUE)); + assertNotNull(transport); + } +} From 25892351e7d7f6739dacc04db0c59308b7879c88 Mon Sep 17 00:00:00 2001 From: javanna Date: Wed, 4 May 2016 15:43:00 +0200 Subject: [PATCH 005/103] remove Verb enum --- .../org/elasticsearch/client/Transport.java | 21 ++++++++++--------- 1 file changed, 11 insertions(+), 10 deletions(-) diff --git a/client/src/main/java/org/elasticsearch/client/Transport.java b/client/src/main/java/org/elasticsearch/client/Transport.java index 40f68ab61cc24..8898cf6714f75 100644 --- a/client/src/main/java/org/elasticsearch/client/Transport.java +++ b/client/src/main/java/org/elasticsearch/client/Transport.java @@ -38,6 +38,7 @@ import java.net.URI; import java.net.URISyntaxException; import java.util.Iterator; +import java.util.Locale; import java.util.Map; import java.util.Objects; import java.util.concurrent.TimeUnit; @@ -62,9 +63,9 @@ final class Transport implements Closeable { this.maxRetryTimeout = maxRetryTimeout; } - ElasticsearchResponse performRequest(Verb verb, String endpoint, Map params, HttpEntity entity) throws IOException { + ElasticsearchResponse performRequest(String method, String endpoint, Map params, HttpEntity entity) throws IOException { URI uri = buildUri(endpoint, params); - HttpRequestBase request = createHttpRequest(verb, uri, entity); + HttpRequestBase request = createHttpRequest(method, uri, entity); Iterator connectionIterator = connectionPool.nextConnection().iterator(); if (connectionIterator.hasNext() == false) { C connection = connectionPool.lastResortConnection(); @@ -153,31 +154,31 @@ private static IOException addSuppressedException(IOException suppressedExceptio return currentException; } - private static HttpRequestBase createHttpRequest(Verb verb, URI uri, HttpEntity entity) { - switch(verb) { - case DELETE: + private static HttpRequestBase createHttpRequest(String method, URI uri, HttpEntity entity) { + switch(method.toUpperCase(Locale.ROOT)) { + 
case HttpDeleteWithEntity.METHOD_NAME: HttpDeleteWithEntity httpDeleteWithEntity = new HttpDeleteWithEntity(uri); addRequestBody(httpDeleteWithEntity, entity); return httpDeleteWithEntity; - case GET: + case HttpGetWithEntity.METHOD_NAME: HttpGetWithEntity httpGetWithEntity = new HttpGetWithEntity(uri); addRequestBody(httpGetWithEntity, entity); return httpGetWithEntity; - case HEAD: + case HttpHead.METHOD_NAME: if (entity != null) { throw new UnsupportedOperationException("HEAD with body is not supported"); } return new HttpHead(uri); - case POST: + case HttpPost.METHOD_NAME: HttpPost httpPost = new HttpPost(uri); addRequestBody(httpPost, entity); return httpPost; - case PUT: + case HttpPut.METHOD_NAME: HttpPut httpPut = new HttpPut(uri); addRequestBody(httpPut, entity); return httpPut; default: - throw new UnsupportedOperationException("http method not supported: " + verb); + throw new UnsupportedOperationException("http method not supported: " + method); } } From 94cf8437d0b2b729fba10590e15c052ba571b391 Mon Sep 17 00:00:00 2001 From: javanna Date: Wed, 4 May 2016 15:47:38 +0200 Subject: [PATCH 006/103] get rid of retry timeout exception --- .../org/elasticsearch/client/RestClient.java | 4 +-- .../client/RetryTimeoutException.java | 29 ------------------- .../org/elasticsearch/client/Transport.java | 2 +- .../java/org/elasticsearch/client/Verb.java | 27 ----------------- 4 files changed, 3 insertions(+), 59 deletions(-) delete mode 100644 client/src/main/java/org/elasticsearch/client/RetryTimeoutException.java delete mode 100644 client/src/main/java/org/elasticsearch/client/Verb.java diff --git a/client/src/main/java/org/elasticsearch/client/RestClient.java b/client/src/main/java/org/elasticsearch/client/RestClient.java index a91501196675a..35af27232677a 100644 --- a/client/src/main/java/org/elasticsearch/client/RestClient.java +++ b/client/src/main/java/org/elasticsearch/client/RestClient.java @@ -33,9 +33,9 @@ public RestClient(CloseableHttpClient client, 
ConnectionPool(client, connectionPool, maxRetryTimeout); } - public ElasticsearchResponse performRequest(Verb verb, String endpoint, Map params, HttpEntity entity) + public ElasticsearchResponse performRequest(String method, String endpoint, Map params, HttpEntity entity) throws IOException { - return transport.performRequest(verb, endpoint, params, entity); + return transport.performRequest(method, endpoint, params, entity); } @Override diff --git a/client/src/main/java/org/elasticsearch/client/RetryTimeoutException.java b/client/src/main/java/org/elasticsearch/client/RetryTimeoutException.java deleted file mode 100644 index 632597bfa37bc..0000000000000 --- a/client/src/main/java/org/elasticsearch/client/RetryTimeoutException.java +++ /dev/null @@ -1,29 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.client; - -import java.io.IOException; - -public class RetryTimeoutException extends IOException { - - RetryTimeoutException(String message) { - super(message); - } -} diff --git a/client/src/main/java/org/elasticsearch/client/Transport.java b/client/src/main/java/org/elasticsearch/client/Transport.java index 8898cf6714f75..88bf1bf6c7aa5 100644 --- a/client/src/main/java/org/elasticsearch/client/Transport.java +++ b/client/src/main/java/org/elasticsearch/client/Transport.java @@ -88,7 +88,7 @@ private ElasticsearchResponse performRequest(HttpRequestBase request, Iterator Date: Wed, 4 May 2016 16:27:52 +0200 Subject: [PATCH 007/103] get rid of Node abstraction --- .../client/AbstractStaticConnectionPool.java | 13 +- .../org/elasticsearch/client/Connection.java | 18 +-- .../client/ElasticsearchResponse.java | 20 +-- .../ElasticsearchResponseException.java | 32 +++-- .../java/org/elasticsearch/client/Node.java | 125 ---------------- .../elasticsearch/client/RequestLogger.java | 13 +- .../org/elasticsearch/client/Sniffer.java | 54 +++---- .../client/SniffingConnectionPool.java | 29 ++-- .../client/StatefulConnection.java | 8 +- .../client/StaticConnectionPool.java | 19 +-- .../org/elasticsearch/client/Transport.java | 14 +- .../org/elasticsearch/client/NodeTests.java | 136 ------------------ .../elasticsearch/client/SnifferTests.java | 63 ++++---- .../client/SniffingConnectionPoolTests.java | 28 ++-- .../client/StaticConnectionPoolTests.java | 22 +-- 15 files changed, 171 insertions(+), 423 deletions(-) delete mode 100644 client/src/main/java/org/elasticsearch/client/Node.java delete mode 100644 client/src/test/java/org/elasticsearch/client/NodeTests.java diff --git a/client/src/main/java/org/elasticsearch/client/AbstractStaticConnectionPool.java b/client/src/main/java/org/elasticsearch/client/AbstractStaticConnectionPool.java index f30a2b9c5c1a9..c043dd1705954 100644 --- 
a/client/src/main/java/org/elasticsearch/client/AbstractStaticConnectionPool.java +++ b/client/src/main/java/org/elasticsearch/client/AbstractStaticConnectionPool.java @@ -21,6 +21,7 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; +import org.apache.http.HttpHost; import java.io.IOException; import java.util.ArrayList; @@ -74,11 +75,11 @@ protected final Stream nextUnfilteredConnection() { return sortedConnections.stream().filter(connection -> connection.isAlive() || connection.shouldBeRetried()); } - protected List createConnections(Node... nodes) { + protected List createConnections(HttpHost... hosts) { List connections = new ArrayList<>(); - for (Node node : nodes) { - Objects.requireNonNull(node, "node cannot be null"); - connections.add(new StatefulConnection(node)); + for (HttpHost host : hosts) { + Objects.requireNonNull(host, "host cannot be null"); + connections.add(new StatefulConnection(host)); } return Collections.unmodifiableList(connections); } @@ -94,12 +95,12 @@ public StatefulConnection lastResortConnection() { @Override public void onSuccess(StatefulConnection connection) { connection.markAlive(); - logger.trace("marked connection alive for " + connection.getNode()); + logger.trace("marked connection alive for " + connection.getHost()); } @Override public void onFailure(StatefulConnection connection) throws IOException { connection.markDead(); - logger.debug("marked connection dead for " + connection.getNode()); + logger.debug("marked connection dead for " + connection.getHost()); } } diff --git a/client/src/main/java/org/elasticsearch/client/Connection.java b/client/src/main/java/org/elasticsearch/client/Connection.java index 225c812ebfaac..51a430b254053 100644 --- a/client/src/main/java/org/elasticsearch/client/Connection.java +++ b/client/src/main/java/org/elasticsearch/client/Connection.java @@ -19,27 +19,29 @@ package org.elasticsearch.client; +import org.apache.http.HttpHost; + /** * Simplest 
representation of a connection to an elasticsearch node. - * It doesn't have any mutable state. It holds the node that the connection points to. + * It doesn't have any mutable state. It holds the host that the connection points to. * Allows the transport to deal with very simple connection objects that are immutable. * Any change to the state of connections should be made through the connection pool * which is aware of the connection object that it supports. */ public class Connection { - private final Node node; + private final HttpHost host; /** - * Creates a new connection pointing to the provided {@link Node} argument + * Creates a new connection pointing to the provided {@link HttpHost} argument */ - public Connection(Node node) { - this.node = node; + public Connection(HttpHost host) { + this.host = host; } /** - * Returns the {@link Node} that the connection points to + * Returns the {@link HttpHost} that the connection points to */ - public Node getNode() { - return node; + public HttpHost getHost() { + return host; } } diff --git a/client/src/main/java/org/elasticsearch/client/ElasticsearchResponse.java b/client/src/main/java/org/elasticsearch/client/ElasticsearchResponse.java index 13c92c164535c..124a233dea7ff 100644 --- a/client/src/main/java/org/elasticsearch/client/ElasticsearchResponse.java +++ b/client/src/main/java/org/elasticsearch/client/ElasticsearchResponse.java @@ -21,6 +21,7 @@ import org.apache.http.Header; import org.apache.http.HttpEntity; +import org.apache.http.HttpHost; import org.apache.http.RequestLine; import org.apache.http.StatusLine; import org.apache.http.client.methods.CloseableHttpResponse; @@ -31,20 +32,20 @@ /** * Holds an elasticsearch response. 
It wraps the {@link CloseableHttpResponse} response and associates it with - * its corresponding {@link RequestLine} and {@link Node} + * its corresponding {@link RequestLine} and {@link HttpHost} */ public class ElasticsearchResponse implements Closeable { private final RequestLine requestLine; - private final Node node; + private final HttpHost host; private final CloseableHttpResponse response; - ElasticsearchResponse(RequestLine requestLine, Node node, CloseableHttpResponse response) { + ElasticsearchResponse(RequestLine requestLine, HttpHost host, CloseableHttpResponse response) { Objects.requireNonNull(requestLine, "requestLine cannot be null"); - Objects.requireNonNull(node, "node cannot be null"); + Objects.requireNonNull(host, "node cannot be null"); Objects.requireNonNull(response, "response cannot be null"); this.requestLine = requestLine; - this.node = node; + this.host = host; this.response = response; } @@ -58,8 +59,8 @@ public RequestLine getRequestLine() { /** * Returns the node that returned this response */ - public Node getNode() { - return node; + public HttpHost getHost() { + return host; } /** @@ -77,7 +78,8 @@ public Header[] getHeaders() { } /** - * Returns the response bodyi available, null otherwise + * Returns the response body available, null otherwise + * @see HttpEntity */ public HttpEntity getEntity() { return response.getEntity(); @@ -87,7 +89,7 @@ public HttpEntity getEntity() { public String toString() { return "ElasticsearchResponse{" + "requestLine=" + requestLine + - ", node=" + node + + ", host=" + host + ", response=" + response.getStatusLine() + '}'; } diff --git a/client/src/main/java/org/elasticsearch/client/ElasticsearchResponseException.java b/client/src/main/java/org/elasticsearch/client/ElasticsearchResponseException.java index 6c2ef2d2e0d48..9f4b123bc8c95 100644 --- a/client/src/main/java/org/elasticsearch/client/ElasticsearchResponseException.java +++ 
b/client/src/main/java/org/elasticsearch/client/ElasticsearchResponseException.java @@ -19,41 +19,57 @@ package org.elasticsearch.client; +import org.apache.http.HttpHost; import org.apache.http.RequestLine; import org.apache.http.StatusLine; import java.io.IOException; +/** + * Exception thrown when an elasticsearch node responds to a request with a status code that indicates an error + */ public class ElasticsearchResponseException extends IOException { - private final Node node; + private final HttpHost host; private final RequestLine requestLine; private final StatusLine statusLine; - ElasticsearchResponseException(RequestLine requestLine, Node node, StatusLine statusLine) { - super(buildMessage(requestLine, node, statusLine)); - this.node = node; + ElasticsearchResponseException(RequestLine requestLine, HttpHost host, StatusLine statusLine) { + super(buildMessage(requestLine, host, statusLine)); + this.host = host; this.requestLine = requestLine; this.statusLine = statusLine; } - private static String buildMessage(RequestLine requestLine, Node node, StatusLine statusLine) { - return requestLine.getMethod() + " " + node.getHttpHost() + requestLine.getUri() + ": " + statusLine.toString(); + private static String buildMessage(RequestLine requestLine, HttpHost host, StatusLine statusLine) { + return requestLine.getMethod() + " " + host + requestLine.getUri() + ": " + statusLine.toString(); } + /** + * Returns whether the error is recoverable or not, hence whether the same request should be retried on other nodes or not + */ public boolean isRecoverable() { //clients don't retry on 500 because elasticsearch still misuses it instead of 400 in some places return statusLine.getStatusCode() >= 502 && statusLine.getStatusCode() <= 504; } - public Node getNode() { - return node; + /** + * Returns the {@link HttpHost} that returned the error + */ + public HttpHost getHost() { + return host; } + /** + * Returns the {@link RequestLine} that triggered the error + */ public 
RequestLine getRequestLine() { return requestLine; } + /** + * Returns the {@link StatusLine} that was returned by elasticsearch + */ public StatusLine getStatusLine() { return statusLine; } diff --git a/client/src/main/java/org/elasticsearch/client/Node.java b/client/src/main/java/org/elasticsearch/client/Node.java deleted file mode 100644 index ab6d3631527f2..0000000000000 --- a/client/src/main/java/org/elasticsearch/client/Node.java +++ /dev/null @@ -1,125 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.client; - -import org.apache.http.HttpHost; - -import java.util.Arrays; -import java.util.Collections; -import java.util.HashSet; -import java.util.Locale; -import java.util.Map; -import java.util.Objects; -import java.util.Set; - -/** - * Represents an Elasticsearch node. - * Holds its http address as an {@link HttpHost} instance, as well as its optional set of roles and attributes. 
- * Roles and attributes can be populated in one of the two following ways: - * 1) using a connection pool that supports sniffing, so that all the info is retrieved from elasticsearch itself - * 2) manually passing the info through the {@link #Node(HttpHost, Set, Map)} constructor - * Roles and attributes may be taken into account as part of connection selection by the connection pool, which - * can be customized by passing in a predicate at connection pool creation. - */ -public class Node { - private final HttpHost httpHost; - private final Set roles; - private final Map attributes; - - /** - * Creates a node given its http address as an {@link HttpHost} instance. - * Roles are not provided hence all possible roles will be assumed, as that is the default in Elasticsearch. - * No attributes will be associated with the node. - * - * @param httpHost the http address of the node - */ - public Node(HttpHost httpHost) { - this(httpHost, new HashSet<>(Arrays.asList(Role.values())), Collections.emptyMap()); - } - - /** - * Creates a node given its http address as an {@link HttpHost} instance, its set or roles and attributes. 
- * - * @param httpHost the http address of the node - * @param roles the set of roles that the node fulfills within the cluster - * @param attributes the attributes associated with the node - */ - public Node(HttpHost httpHost, Set roles, Map attributes) { - Objects.requireNonNull(httpHost, "host cannot be null"); - Objects.requireNonNull(roles, "roles cannot be null"); - Objects.requireNonNull(attributes, "attributes cannot be null"); - this.httpHost = httpHost; - this.roles = Collections.unmodifiableSet(roles); - this.attributes = Collections.unmodifiableMap(attributes); - } - - /** - * Returns the http address of the node - */ - public HttpHost getHttpHost() { - return httpHost; - } - - /** - * Returns the set of roles associated with the node - */ - public Set getRoles() { - return roles; - } - - /** - * Returns the set of attributes associated with the node - */ - public Map getAttributes() { - return attributes; - } - - @Override - public String toString() { - return "Node{" + - "httpHost=" + httpHost + - ", roles=" + roles + - ", attributes=" + attributes + - '}'; - } - - /** - * Holds all the potential roles that a node can fulfill within a cluster - */ - public enum Role { - /** - * Data node - */ - DATA, - /** - * Master eligible node - */ - MASTER, - /** - * Ingest node - */ - INGEST; - - @Override - public String toString() { - return name().toLowerCase(Locale.ROOT); - } - } -} diff --git a/client/src/main/java/org/elasticsearch/client/RequestLogger.java b/client/src/main/java/org/elasticsearch/client/RequestLogger.java index 8423dcb8adeb0..99ce924c16456 100644 --- a/client/src/main/java/org/elasticsearch/client/RequestLogger.java +++ b/client/src/main/java/org/elasticsearch/client/RequestLogger.java @@ -20,13 +20,14 @@ package org.elasticsearch.client; import org.apache.commons.logging.Log; +import org.apache.http.HttpHost; import org.apache.http.RequestLine; import org.apache.http.StatusLine; import java.io.IOException; /** - * Helper class that 
exposes static method to unify the way requests are logged + * Helper class that exposes static methods to unify the way requests are logged */ public final class RequestLogger { @@ -36,16 +37,14 @@ private RequestLogger() { /** * Logs a request that yielded a response */ - public static void log(Log logger, String message, RequestLine requestLine, Node node, StatusLine statusLine) { - logger.debug(message + " [" + requestLine.getMethod() + " " + node.getHttpHost() + - requestLine.getUri() + "] [" + statusLine + "]"); + public static void log(Log logger, String message, RequestLine requestLine, HttpHost host, StatusLine statusLine) { + logger.debug(message + " [" + requestLine.getMethod() + " " + host + requestLine.getUri() + "] [" + statusLine + "]"); } /** * Logs a request that failed */ - public static void log(Log logger, String message, RequestLine requestLine, Node node, IOException e) { - logger.debug(message + " [" + requestLine.getMethod() + " " + node.getHttpHost() + - requestLine.getUri() + "]", e); + public static void log(Log logger, String message, RequestLine requestLine, HttpHost host, IOException e) { + logger.debug(message + " [" + requestLine.getMethod() + " " + host + requestLine.getUri() + "]", e); } } diff --git a/client/src/main/java/org/elasticsearch/client/Sniffer.java b/client/src/main/java/org/elasticsearch/client/Sniffer.java index 07d485de87916..1bec4eb87e1e2 100644 --- a/client/src/main/java/org/elasticsearch/client/Sniffer.java +++ b/client/src/main/java/org/elasticsearch/client/Sniffer.java @@ -37,14 +37,13 @@ import java.io.InputStream; import java.net.URI; import java.util.ArrayList; -import java.util.HashMap; -import java.util.HashSet; import java.util.List; -import java.util.Locale; -import java.util.Map; import java.util.Objects; -import java.util.Set; +/** + * Calls nodes info api and returns a list of http hosts extracted from it + */ +//TODO this could potentially a call to _cat/nodes (although it doesn't support timeout 
param), but how would we handle bw comp with 2.x? final class Sniffer { private static final Log logger = LogFactory.getLog(Sniffer.class); @@ -69,34 +68,34 @@ final class Sniffer { this.jsonFactory = new JsonFactory(); } - List sniffNodes(Node node) throws IOException { + List sniffNodes(HttpHost host) throws IOException { HttpGet httpGet = new HttpGet("/_nodes/http?timeout=" + sniffRequestTimeout + "ms"); httpGet.setConfig(sniffRequestConfig); - try (CloseableHttpResponse response = client.execute(node.getHttpHost(), httpGet)) { + try (CloseableHttpResponse response = client.execute(host, httpGet)) { StatusLine statusLine = response.getStatusLine(); if (statusLine.getStatusCode() >= 300) { - RequestLogger.log(logger, "sniff failed", httpGet.getRequestLine(), node, statusLine); + RequestLogger.log(logger, "sniff failed", httpGet.getRequestLine(), host, statusLine); EntityUtils.consume(response.getEntity()); - throw new ElasticsearchResponseException(httpGet.getRequestLine(), node, statusLine); + throw new ElasticsearchResponseException(httpGet.getRequestLine(), host, statusLine); } else { - List nodes = readNodes(response.getEntity()); - RequestLogger.log(logger, "sniff succeeded", httpGet.getRequestLine(), node, statusLine); + List nodes = readHosts(response.getEntity()); + RequestLogger.log(logger, "sniff succeeded", httpGet.getRequestLine(), host, statusLine); return nodes; } } catch(IOException e) { - RequestLogger.log(logger, "sniff failed", httpGet.getRequestLine(), node, e); + RequestLogger.log(logger, "sniff failed", httpGet.getRequestLine(), host, e); throw e; } } - private List readNodes(HttpEntity entity) throws IOException { + private List readHosts(HttpEntity entity) throws IOException { try (InputStream inputStream = entity.getContent()) { JsonParser parser = jsonFactory.createParser(inputStream); if (parser.nextToken() != JsonToken.START_OBJECT) { throw new IOException("expected data to start with an object"); } - List nodes = new ArrayList<>(); + 
List hosts = new ArrayList<>(); while (parser.nextToken() != JsonToken.END_OBJECT) { if (parser.getCurrentToken() == JsonToken.START_OBJECT) { if ("nodes".equals(parser.getCurrentName())) { @@ -104,10 +103,10 @@ private List readNodes(HttpEntity entity) throws IOException { JsonToken token = parser.nextToken(); assert token == JsonToken.START_OBJECT; String nodeId = parser.getCurrentName(); - Node sniffedNode = readNode(nodeId, parser, this.scheme); - if (sniffedNode != null) { + HttpHost sniffedHost = readNode(nodeId, parser, this.scheme); + if (sniffedHost != null) { logger.trace("adding node [" + nodeId + "]"); - nodes.add(sniffedNode); + hosts.add(sniffedHost); } } } else { @@ -115,31 +114,20 @@ private List readNodes(HttpEntity entity) throws IOException { } } } - return nodes; + return hosts; } } - private static Node readNode(String nodeId, JsonParser parser, String scheme) throws IOException { + private static HttpHost readNode(String nodeId, JsonParser parser, String scheme) throws IOException { HttpHost httpHost = null; - Set roles = new HashSet<>(); - Map attributes = new HashMap<>(); String fieldName = null; while (parser.nextToken() != JsonToken.END_OBJECT) { if (parser.getCurrentToken() == JsonToken.FIELD_NAME) { fieldName = parser.getCurrentName(); - } else if (parser.getCurrentToken() == JsonToken.START_ARRAY && "roles".equals(fieldName)) { - while (parser.nextToken() != JsonToken.END_ARRAY) { - roles.add(Node.Role.valueOf(parser.getValueAsString().toUpperCase(Locale.ROOT))); - } } else if (parser.getCurrentToken() == JsonToken.START_OBJECT) { - if ("attributes".equals(fieldName)) { - while (parser.nextToken() != JsonToken.END_OBJECT) { - attributes.put(parser.getCurrentName(), parser.getValueAsString()); - } - } else if ("http".equals(fieldName)) { + if ("http".equals(fieldName)) { while (parser.nextToken() != JsonToken.END_OBJECT) { - if (parser.getCurrentToken() == JsonToken.VALUE_STRING && - "publish_address".equals(parser.getCurrentName())) { + 
if (parser.getCurrentToken() == JsonToken.VALUE_STRING && "publish_address".equals(parser.getCurrentName())) { URI boundAddressAsURI = URI.create(scheme + "://" + parser.getValueAsString()); httpHost = new HttpHost(boundAddressAsURI.getHost(), boundAddressAsURI.getPort(), boundAddressAsURI.getScheme()); @@ -157,6 +145,6 @@ private static Node readNode(String nodeId, JsonParser parser, String scheme) th logger.debug("skipping node [" + nodeId + "] with http disabled"); return null; } - return new Node(httpHost, roles, attributes); + return httpHost; } } diff --git a/client/src/main/java/org/elasticsearch/client/SniffingConnectionPool.java b/client/src/main/java/org/elasticsearch/client/SniffingConnectionPool.java index 0488033c1ca20..25d17b45ee586 100644 --- a/client/src/main/java/org/elasticsearch/client/SniffingConnectionPool.java +++ b/client/src/main/java/org/elasticsearch/client/SniffingConnectionPool.java @@ -21,6 +21,7 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; +import org.apache.http.HttpHost; import org.apache.http.client.config.RequestConfig; import org.apache.http.impl.client.CloseableHttpClient; @@ -48,7 +49,7 @@ public class SniffingConnectionPool extends AbstractStaticConnectionPool { public SniffingConnectionPool(int sniffInterval, boolean sniffOnFailure, int sniffAfterFailureDelay, CloseableHttpClient client, RequestConfig sniffRequestConfig, int sniffRequestTimeout, Scheme scheme, - Predicate connectionSelector, Node... nodes) { + Predicate connectionSelector, HttpHost... 
hosts) { super(connectionSelector); if (sniffInterval <= 0) { throw new IllegalArgumentException("sniffInterval must be greater than 0"); @@ -57,12 +58,12 @@ public SniffingConnectionPool(int sniffInterval, boolean sniffOnFailure, int sni throw new IllegalArgumentException("sniffAfterFailureDelay must be greater than 0"); } Objects.requireNonNull(scheme, "scheme cannot be null"); - if (nodes == null || nodes.length == 0) { - throw new IllegalArgumentException("no nodes provided"); + if (hosts == null || hosts.length == 0) { + throw new IllegalArgumentException("no hosts provided"); } this.sniffOnFailure = sniffOnFailure; this.sniffer = new Sniffer(client, sniffRequestConfig, sniffRequestTimeout, scheme.toString()); - this.connections = createConnections(nodes); + this.connections = createConnections(hosts); this.snifferTask = new SnifferTask(sniffInterval, sniffAfterFailureDelay); } @@ -81,7 +82,7 @@ public void onFailure(StatefulConnection connection) throws IOException { super.onFailure(connection); if (sniffOnFailure) { //re-sniff immediately but take out the node that failed - snifferTask.sniffOnFailure(connection.getNode()); + snifferTask.sniffOnFailure(connection.getHost()); } } @@ -119,22 +120,22 @@ public void run() { sniff(node -> true); } - void sniffOnFailure(Node failedNode) { + void sniffOnFailure(HttpHost failedHost) { //sync sniff straightaway on failure failure = true; - sniff(node -> node.getHttpHost().equals(failedNode.getHttpHost()) == false); + sniff(host -> host.equals(failedHost) == false); } - void sniff(Predicate nodeFilter) { + void sniff(Predicate hostFilter) { if (running.compareAndSet(false, true)) { try { Iterator connectionIterator = nextUnfilteredConnection().iterator(); if (connectionIterator.hasNext()) { - sniff(connectionIterator, nodeFilter); + sniff(connectionIterator, hostFilter); } else { StatefulConnection connection = lastResortConnection(); - logger.info("no healthy nodes available, trying " + connection.getNode()); - 
sniff(Stream.of(connection).iterator(), nodeFilter); + logger.info("no healthy nodes available, trying " + connection.getHost()); + sniff(Stream.of(connection).iterator(), hostFilter); } } catch (Throwable t) { logger.error("error while sniffing nodes", t); @@ -158,13 +159,13 @@ void sniff(Predicate nodeFilter) { } } - void sniff(Iterator connectionIterator, Predicate nodeFilter) throws IOException { + void sniff(Iterator connectionIterator, Predicate hostFilter) throws IOException { IOException lastSeenException = null; while (connectionIterator.hasNext()) { StatefulConnection connection = connectionIterator.next(); try { - List sniffedNodes = sniffer.sniffNodes(connection.getNode()); - Node[] filteredNodes = sniffedNodes.stream().filter(nodeFilter).toArray(Node[]::new); + List sniffedNodes = sniffer.sniffNodes(connection.getHost()); + HttpHost[] filteredNodes = sniffedNodes.stream().filter(hostFilter).toArray(HttpHost[]::new); logger.debug("adding " + filteredNodes.length + " nodes out of " + sniffedNodes.size() + " sniffed nodes"); connections = createConnections(filteredNodes); onSuccess(connection); diff --git a/client/src/main/java/org/elasticsearch/client/StatefulConnection.java b/client/src/main/java/org/elasticsearch/client/StatefulConnection.java index 1ac72382e1994..50ac78cdde9c6 100644 --- a/client/src/main/java/org/elasticsearch/client/StatefulConnection.java +++ b/client/src/main/java/org/elasticsearch/client/StatefulConnection.java @@ -19,6 +19,8 @@ package org.elasticsearch.client; +import org.apache.http.HttpHost; + import java.util.concurrent.TimeUnit; /** @@ -43,10 +45,10 @@ public final class StatefulConnection extends Connection { private volatile long deadUntil = -1; /** - * Creates a new mutable connection pointing to the provided {@link Node} argument + * Creates a new mutable connection pointing to the provided {@link HttpHost} argument */ - public StatefulConnection(Node node) { - super(node); + public StatefulConnection(HttpHost host) { + 
super(host); } /** diff --git a/client/src/main/java/org/elasticsearch/client/StaticConnectionPool.java b/client/src/main/java/org/elasticsearch/client/StaticConnectionPool.java index 5fb32341c99a5..539b7ad0f1f63 100644 --- a/client/src/main/java/org/elasticsearch/client/StaticConnectionPool.java +++ b/client/src/main/java/org/elasticsearch/client/StaticConnectionPool.java @@ -21,6 +21,7 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; +import org.apache.http.HttpHost; import org.apache.http.StatusLine; import org.apache.http.client.config.RequestConfig; import org.apache.http.client.methods.CloseableHttpResponse; @@ -43,17 +44,17 @@ public class StaticConnectionPool extends AbstractStaticConnectionPool { private final List connections; public StaticConnectionPool(CloseableHttpClient client, boolean pingEnabled, RequestConfig pingRequestConfig, - Predicate connectionSelector, Node... nodes) { + Predicate connectionSelector, HttpHost... hosts) { super(connectionSelector); Objects.requireNonNull(client, "client cannot be null"); Objects.requireNonNull(pingRequestConfig, "pingRequestConfig cannot be null"); - if (nodes == null || nodes.length == 0) { - throw new IllegalArgumentException("no nodes provided"); + if (hosts == null || hosts.length == 0) { + throw new IllegalArgumentException("no hosts provided"); } this.client = client; this.pingEnabled = pingEnabled; this.pingRequestConfig = pingRequestConfig; - this.connections = createConnections(nodes); + this.connections = createConnections(hosts); } @Override @@ -67,20 +68,20 @@ public void beforeAttempt(StatefulConnection connection) throws IOException { HttpHead httpHead = new HttpHead("/"); httpHead.setConfig(pingRequestConfig); StatusLine statusLine; - try(CloseableHttpResponse httpResponse = client.execute(connection.getNode().getHttpHost(), httpHead)) { + try(CloseableHttpResponse httpResponse = client.execute(connection.getHost(), httpHead)) { statusLine = 
httpResponse.getStatusLine(); EntityUtils.consume(httpResponse.getEntity()); } catch(IOException e) { - RequestLogger.log(logger, "ping failed", httpHead.getRequestLine(), connection.getNode(), e); + RequestLogger.log(logger, "ping failed", httpHead.getRequestLine(), connection.getHost(), e); onFailure(connection); throw e; } if (statusLine.getStatusCode() >= 300) { - RequestLogger.log(logger, "ping failed", httpHead.getRequestLine(), connection.getNode(), statusLine); + RequestLogger.log(logger, "ping failed", httpHead.getRequestLine(), connection.getHost(), statusLine); onFailure(connection); - throw new ElasticsearchResponseException(httpHead.getRequestLine(), connection.getNode(), statusLine); + throw new ElasticsearchResponseException(httpHead.getRequestLine(), connection.getHost(), statusLine); } else { - RequestLogger.log(logger, "ping succeeded", httpHead.getRequestLine(), connection.getNode(), statusLine); + RequestLogger.log(logger, "ping succeeded", httpHead.getRequestLine(), connection.getHost(), statusLine); onSuccess(connection); } } diff --git a/client/src/main/java/org/elasticsearch/client/Transport.java b/client/src/main/java/org/elasticsearch/client/Transport.java index 88bf1bf6c7aa5..ff0835a991c94 100644 --- a/client/src/main/java/org/elasticsearch/client/Transport.java +++ b/client/src/main/java/org/elasticsearch/client/Transport.java @@ -69,7 +69,7 @@ ElasticsearchResponse performRequest(String method, String endpoint, Map connectionIterator = connectionPool.nextConnection().iterator(); if (connectionIterator.hasNext() == false) { C connection = connectionPool.lastResortConnection(); - logger.info("no healthy nodes available, trying " + connection.getNode()); + logger.info("no healthy nodes available, trying " + connection.getHost()); return performRequest(request, Stream.of(connection).iterator()); } return performRequest(request, connectionIterator); @@ -127,9 +127,9 @@ private ElasticsearchResponse performRequest(HttpRequestBase request, 
Iterator(Arrays.asList(Node.Role.values())))); - - try { - new Node(null); - fail("node construction should have failed"); - } catch(NullPointerException e) { - assertThat(e.getMessage(), equalTo("host cannot be null")); - } - - } - - public void testThreeArgumentsConstructor() { - HttpHost httpHost = new HttpHost(randomHost(), randomPort(), randomScheme()); - Set roles = randomRoles(); - Map attributes = randomAttributes(); - Node node = new Node(httpHost, roles, attributes); - assertThat(node.getHttpHost(), sameInstance(httpHost)); - assertThat(node.getAttributes(), equalTo(attributes)); - assertThat(node.getRoles(), equalTo(roles)); - - try { - new Node(null, roles, attributes); - fail("node construction should have failed"); - } catch(NullPointerException e) { - assertThat(e.getMessage(), equalTo("host cannot be null")); - } - - try { - new Node(httpHost, null, attributes); - fail("node construction should have failed"); - } catch(NullPointerException e) { - assertThat(e.getMessage(), equalTo("roles cannot be null")); - } - - try { - new Node(httpHost, roles, null); - fail("node construction should have failed"); - } catch(NullPointerException e) { - assertThat(e.getMessage(), equalTo("attributes cannot be null")); - } - } - - public void testToString() { - HttpHost httpHost = new HttpHost(randomHost(), randomPort(), randomScheme()); - Set roles = randomRoles(); - Map attributes = randomAttributes(); - Node node = new Node(httpHost, roles, attributes); - String expectedString = "Node{" + - "httpHost=" + httpHost.toString() + - ", roles=" + roles.toString() + - ", attributes=" + attributes.toString() + - '}'; - assertThat(node.toString(), equalTo(expectedString)); - } - - private static String randomHost() { - return RandomStrings.randomAsciiOfLengthBetween(random(), 5, 10); - } - - private static int randomPort() { - return random().nextInt(); - } - - private static String randomScheme() { - if (rarely()) { - return null; - } - return random().nextBoolean() ? 
"http" : "https"; - } - - private static Map randomAttributes() { - int numAttributes = RandomInts.randomIntBetween(random(), 0, 5); - Map attributes = new HashMap<>(numAttributes); - for (int i = 0; i < numAttributes; i++) { - String key = RandomStrings.randomAsciiOfLengthBetween(random(), 3, 10); - String value = RandomStrings.randomAsciiOfLengthBetween(random(), 3, 10); - attributes.put(key, value); - } - return attributes; - } - - private static Set randomRoles() { - int numRoles = RandomInts.randomIntBetween(random(), 0, 3); - Set roles = new HashSet<>(numRoles); - for (int j = 0; j < numRoles; j++) { - roles.add(RandomPicks.randomFrom(random(), Node.Role.values())); - } - return roles; - } -} diff --git a/client/src/test/java/org/elasticsearch/client/SnifferTests.java b/client/src/test/java/org/elasticsearch/client/SnifferTests.java index 9169afd7695b9..c2caa5ab4fa5e 100644 --- a/client/src/test/java/org/elasticsearch/client/SnifferTests.java +++ b/client/src/test/java/org/elasticsearch/client/SnifferTests.java @@ -90,24 +90,21 @@ public void testSniffNodes() throws IOException, URISyntaxException { Sniffer sniffer = new Sniffer(client, RequestConfig.DEFAULT, sniffRequestTimeout, scheme.toString()); HttpHost httpHost = new HttpHost(server.getHostName(), server.getPort()); try { - List sniffedNodes = sniffer.sniffNodes(new Node(httpHost)); + List sniffedHosts = sniffer.sniffNodes(httpHost); if (sniffResponse.isFailure) { fail("sniffNodes should have failed"); } - assertThat(sniffedNodes.size(), equalTo(sniffResponse.nodes.size())); - Iterator responseNodesIterator = sniffResponse.nodes.iterator(); - for (Node sniffedNode : sniffedNodes) { - Node responseNode = responseNodesIterator.next(); - assertThat(sniffedNode.getHttpHost(), equalTo(responseNode.getHttpHost())); - assertThat(sniffedNode.getRoles(), equalTo(responseNode.getRoles())); - assertThat(sniffedNode.getAttributes(), equalTo(responseNode.getAttributes())); + assertThat(sniffedHosts.size(), 
equalTo(sniffResponse.hosts.size())); + Iterator responseHostsIterator = sniffResponse.hosts.iterator(); + for (HttpHost sniffedHost : sniffedHosts) { + assertEquals(sniffedHost, responseHostsIterator.next()); } } catch(ElasticsearchResponseException e) { if (sniffResponse.isFailure) { assertThat(e.getMessage(), containsString("GET http://localhost:" + server.getPort() + "/_nodes/http?timeout=" + sniffRequestTimeout)); assertThat(e.getMessage(), containsString(Integer.toString(sniffResponse.nodesInfoResponseCode))); - assertThat(e.getNode().getHttpHost(), equalTo(httpHost)); + assertThat(e.getHost(), equalTo(httpHost)); assertThat(e.getStatusLine().getStatusCode(), equalTo(sniffResponse.nodesInfoResponseCode)); assertThat(e.getRequestLine().toString(), equalTo("GET /_nodes/http?timeout=" + sniffRequestTimeout + "ms HTTP/1.1")); } else { @@ -141,7 +138,7 @@ public MockResponse dispatch(RecordedRequest request) throws InterruptedExceptio private static SniffResponse buildSniffResponse(SniffingConnectionPool.Scheme scheme) throws IOException { int numNodes = RandomInts.randomIntBetween(random(), 1, 5); - List nodes = new ArrayList<>(numNodes); + List hosts = new ArrayList<>(numNodes); JsonFactory jsonFactory = new JsonFactory(); StringWriter writer = new StringWriter(); JsonGenerator generator = jsonFactory.createGenerator(writer); @@ -168,25 +165,11 @@ private static SniffResponse buildSniffResponse(SniffingConnectionPool.Scheme sc generator.writeEndArray(); } boolean isHttpEnabled = rarely() == false; - int numRoles = RandomInts.randomIntBetween(random(), 0, 3); - Set nodeRoles = new HashSet<>(numRoles); - for (int j = 0; j < numRoles; j++) { - Node.Role role; - do { - role = RandomPicks.randomFrom(random(), Node.Role.values()); - } while(nodeRoles.add(role) == false); - } - - int numAttributes = RandomInts.randomIntBetween(random(), 0, 3); - Map attributes = new HashMap<>(numAttributes); - for (int j = 0; j < numAttributes; j++) { - attributes.put("attr" + j, 
"value" + j); - } if (isHttpEnabled) { String host = "host" + i; int port = RandomInts.randomIntBetween(random(), 9200, 9299); HttpHost httpHost = new HttpHost(host, port, scheme.toString()); - nodes.add(new Node(httpHost, nodeRoles, attributes)); + hosts.add(httpHost); generator.writeObjectFieldStart("http"); if (random().nextBoolean()) { generator.writeArrayFieldStart("bound_address"); @@ -205,11 +188,25 @@ private static SniffResponse buildSniffResponse(SniffingConnectionPool.Scheme sc } generator.writeEndObject(); } + String[] roles = {"master", "data", "ingest"}; + int numRoles = RandomInts.randomIntBetween(random(), 0, 3); + Set nodeRoles = new HashSet<>(numRoles); + for (int j = 0; j < numRoles; j++) { + String role; + do { + role = RandomPicks.randomFrom(random(), roles); + } while(nodeRoles.add(role) == false); + } generator.writeArrayFieldStart("roles"); - for (Node.Role nodeRole : nodeRoles) { - generator.writeString(nodeRole.toString()); + for (String nodeRole : nodeRoles) { + generator.writeString(nodeRole); } generator.writeEndArray(); + int numAttributes = RandomInts.randomIntBetween(random(), 0, 3); + Map attributes = new HashMap<>(numAttributes); + for (int j = 0; j < numAttributes; j++) { + attributes.put("attr" + j, "value" + j); + } if (numAttributes > 0) { generator.writeObjectFieldStart("attributes"); } @@ -224,18 +221,18 @@ private static SniffResponse buildSniffResponse(SniffingConnectionPool.Scheme sc generator.writeEndObject(); generator.writeEndObject(); generator.close(); - return SniffResponse.buildResponse(writer.toString(), nodes); + return SniffResponse.buildResponse(writer.toString(), hosts); } private static class SniffResponse { private final String nodesInfoBody; private final int nodesInfoResponseCode; - private final List nodes; + private final List hosts; private final boolean isFailure; - SniffResponse(String nodesInfoBody, List nodes, boolean isFailure) { + SniffResponse(String nodesInfoBody, List hosts, boolean isFailure) { 
this.nodesInfoBody = nodesInfoBody; - this.nodes = nodes; + this.hosts = hosts; this.isFailure = isFailure; if (isFailure) { this.nodesInfoResponseCode = randomErrorResponseCode(); @@ -248,8 +245,8 @@ static SniffResponse buildFailure() { return new SniffResponse("", Collections.emptyList(), true); } - static SniffResponse buildResponse(String nodesInfoBody, List nodes) { - return new SniffResponse(nodesInfoBody, nodes, false); + static SniffResponse buildResponse(String nodesInfoBody, List hosts) { + return new SniffResponse(nodesInfoBody, hosts, false); } } diff --git a/client/src/test/java/org/elasticsearch/client/SniffingConnectionPoolTests.java b/client/src/test/java/org/elasticsearch/client/SniffingConnectionPoolTests.java index 13eaadd619a61..97b7d0867c69b 100644 --- a/client/src/test/java/org/elasticsearch/client/SniffingConnectionPoolTests.java +++ b/client/src/test/java/org/elasticsearch/client/SniffingConnectionPoolTests.java @@ -38,16 +38,16 @@ public class SniffingConnectionPoolTests extends LuceneTestCase { public void testConstructor() throws Exception { CloseableHttpClient httpClient = HttpClientBuilder.create().build(); int numNodes = RandomInts.randomIntBetween(random(), 1, 5); - Node[] nodes = new Node[numNodes]; + HttpHost[] hosts = new HttpHost[numNodes]; for (int i = 0; i < numNodes; i++) { - nodes[i] = new Node(new HttpHost("localhost", 9200)); + hosts[i] = new HttpHost("localhost", 9200); } try (SniffingConnectionPool connectionPool = new SniffingConnectionPool( RandomInts.randomIntBetween(random(), Integer.MIN_VALUE, 0), random().nextBoolean(), RandomInts.randomIntBetween(random(), 1, Integer.MAX_VALUE), httpClient, RequestConfig.DEFAULT, RandomInts.randomIntBetween(random(), 1, Integer.MAX_VALUE), - RandomPicks.randomFrom(random(), SniffingConnectionPool.Scheme.values()), connection -> random().nextBoolean(), nodes)) { + RandomPicks.randomFrom(random(), SniffingConnectionPool.Scheme.values()), connection -> random().nextBoolean(), hosts)) 
{ fail("pool creation should have failed " + connectionPool); } catch(IllegalArgumentException e) { @@ -58,7 +58,7 @@ public void testConstructor() throws Exception { RandomInts.randomIntBetween(random(), 1, Integer.MAX_VALUE), random().nextBoolean(), RandomInts.randomIntBetween(random(), Integer.MIN_VALUE, 0), httpClient, RequestConfig.DEFAULT, RandomInts.randomIntBetween(random(), 1, Integer.MAX_VALUE), - RandomPicks.randomFrom(random(), SniffingConnectionPool.Scheme.values()), connection -> random().nextBoolean(), nodes)) { + RandomPicks.randomFrom(random(), SniffingConnectionPool.Scheme.values()), connection -> random().nextBoolean(), hosts)) { fail("pool creation should have failed " + connectionPool); } catch(IllegalArgumentException e) { assertEquals(e.getMessage(), "sniffAfterFailureDelay must be greater than 0"); @@ -68,7 +68,7 @@ public void testConstructor() throws Exception { RandomInts.randomIntBetween(random(), 1, Integer.MAX_VALUE), random().nextBoolean(), RandomInts.randomIntBetween(random(), 1, Integer.MAX_VALUE), null, RequestConfig.DEFAULT, RandomInts.randomIntBetween(random(), 1, Integer.MAX_VALUE), - RandomPicks.randomFrom(random(), SniffingConnectionPool.Scheme.values()), connection -> random().nextBoolean(), nodes)) { + RandomPicks.randomFrom(random(), SniffingConnectionPool.Scheme.values()), connection -> random().nextBoolean(), hosts)) { fail("pool creation should have failed " + connectionPool); } catch(NullPointerException e) { assertEquals(e.getMessage(), "client cannot be null"); @@ -78,7 +78,7 @@ public void testConstructor() throws Exception { RandomInts.randomIntBetween(random(), 1, Integer.MAX_VALUE), random().nextBoolean(), RandomInts.randomIntBetween(random(), 1, Integer.MAX_VALUE), httpClient, null, RandomInts.randomIntBetween(random(), 1, Integer.MAX_VALUE), - RandomPicks.randomFrom(random(), SniffingConnectionPool.Scheme.values()), connection -> random().nextBoolean(), nodes)) { + RandomPicks.randomFrom(random(), 
SniffingConnectionPool.Scheme.values()), connection -> random().nextBoolean(), hosts)) { fail("pool creation should have failed " + connectionPool); } catch(NullPointerException e) { assertEquals(e.getMessage(), "sniffRequestConfig cannot be null"); @@ -88,7 +88,7 @@ public void testConstructor() throws Exception { RandomInts.randomIntBetween(random(), 1, Integer.MAX_VALUE), random().nextBoolean(), RandomInts.randomIntBetween(random(), 1, Integer.MAX_VALUE), httpClient, RequestConfig.DEFAULT, RandomInts.randomIntBetween(random(), Integer.MIN_VALUE, 0), - RandomPicks.randomFrom(random(), SniffingConnectionPool.Scheme.values()), connection -> random().nextBoolean(), nodes)) { + RandomPicks.randomFrom(random(), SniffingConnectionPool.Scheme.values()), connection -> random().nextBoolean(), hosts)) { fail("pool creation should have failed " + connectionPool); } catch(IllegalArgumentException e) { assertEquals(e.getMessage(), "sniffRequestTimeout must be greater than 0"); @@ -98,7 +98,7 @@ public void testConstructor() throws Exception { RandomInts.randomIntBetween(random(), 1, Integer.MAX_VALUE), random().nextBoolean(), RandomInts.randomIntBetween(random(), 1, Integer.MAX_VALUE), httpClient, RequestConfig.DEFAULT, RandomInts.randomIntBetween(random(), 1, Integer.MAX_VALUE), - RandomPicks.randomFrom(random(), SniffingConnectionPool.Scheme.values()), null, nodes)) { + RandomPicks.randomFrom(random(), SniffingConnectionPool.Scheme.values()), null, hosts)) { fail("pool creation should have failed " + connectionPool); } catch(NullPointerException e) { assertEquals(e.getMessage(), "connection selector predicate cannot be null"); @@ -109,10 +109,10 @@ public void testConstructor() throws Exception { RandomInts.randomIntBetween(random(), 1, Integer.MAX_VALUE), httpClient, RequestConfig.DEFAULT, RandomInts.randomIntBetween(random(), 1, Integer.MAX_VALUE), RandomPicks.randomFrom(random(), SniffingConnectionPool.Scheme.values()), - connection -> random().nextBoolean(), 
(Node[])null)) { + connection -> random().nextBoolean(), (HttpHost[])null)) { fail("pool creation should have failed " + connectionPool); } catch(IllegalArgumentException e) { - assertEquals(e.getMessage(), "no nodes provided"); + assertEquals(e.getMessage(), "no hosts provided"); } try (SniffingConnectionPool connectionPool = new SniffingConnectionPool( @@ -120,10 +120,10 @@ public void testConstructor() throws Exception { RandomInts.randomIntBetween(random(), 1, Integer.MAX_VALUE), httpClient, RequestConfig.DEFAULT, RandomInts.randomIntBetween(random(), 1, Integer.MAX_VALUE), RandomPicks.randomFrom(random(), SniffingConnectionPool.Scheme.values()), connection -> random().nextBoolean(), - (Node)null)) { + (HttpHost) null)) { fail("pool creation should have failed " + connectionPool); } catch(NullPointerException e) { - assertEquals(e.getMessage(), "node cannot be null"); + assertEquals(e.getMessage(), "host cannot be null"); } try (SniffingConnectionPool connectionPool = new SniffingConnectionPool( @@ -133,14 +133,14 @@ public void testConstructor() throws Exception { RandomPicks.randomFrom(random(), SniffingConnectionPool.Scheme.values()), connection -> random().nextBoolean())) { fail("pool creation should have failed " + connectionPool); } catch(IllegalArgumentException e) { - assertEquals(e.getMessage(), "no nodes provided"); + assertEquals(e.getMessage(), "no hosts provided"); } try (SniffingConnectionPool sniffingConnectionPool = new SniffingConnectionPool( RandomInts.randomIntBetween(random(), 1, Integer.MAX_VALUE), random().nextBoolean(), RandomInts.randomIntBetween(random(), 1, Integer.MAX_VALUE), httpClient, RequestConfig.DEFAULT, RandomInts.randomIntBetween(random(), 1, Integer.MAX_VALUE), - RandomPicks.randomFrom(random(), SniffingConnectionPool.Scheme.values()), connection -> random().nextBoolean(), nodes)) { + RandomPicks.randomFrom(random(), SniffingConnectionPool.Scheme.values()), connection -> random().nextBoolean(), hosts)) { 
assertNotNull(sniffingConnectionPool); } } diff --git a/client/src/test/java/org/elasticsearch/client/StaticConnectionPoolTests.java b/client/src/test/java/org/elasticsearch/client/StaticConnectionPoolTests.java index 9b5b29df1b165..82e8c60b02fa7 100644 --- a/client/src/test/java/org/elasticsearch/client/StaticConnectionPoolTests.java +++ b/client/src/test/java/org/elasticsearch/client/StaticConnectionPoolTests.java @@ -37,51 +37,51 @@ public class StaticConnectionPoolTests extends LuceneTestCase { public void testConstructor() { CloseableHttpClient httpClient = HttpClientBuilder.create().build(); int numNodes = RandomInts.randomIntBetween(random(), 1, 5); - Node[] nodes = new Node[numNodes]; + HttpHost[] hosts = new HttpHost[numNodes]; for (int i = 0; i < numNodes; i++) { - nodes[i] = new Node(new HttpHost("localhost", 9200)); + hosts[i] = new HttpHost("localhost", 9200); } try { - new StaticConnectionPool(null, random().nextBoolean(), RequestConfig.DEFAULT, connection -> random().nextBoolean(), nodes); + new StaticConnectionPool(null, random().nextBoolean(), RequestConfig.DEFAULT, connection -> random().nextBoolean(), hosts); } catch(NullPointerException e) { assertEquals(e.getMessage(), "client cannot be null"); } try { - new StaticConnectionPool(httpClient, random().nextBoolean(), null, connection -> random().nextBoolean(), nodes); + new StaticConnectionPool(httpClient, random().nextBoolean(), null, connection -> random().nextBoolean(), hosts); } catch(NullPointerException e) { assertEquals(e.getMessage(), "pingRequestConfig cannot be null"); } try { - new StaticConnectionPool(httpClient, random().nextBoolean(), RequestConfig.DEFAULT, null, nodes); + new StaticConnectionPool(httpClient, random().nextBoolean(), RequestConfig.DEFAULT, null, hosts); } catch(NullPointerException e) { assertEquals(e.getMessage(), "connection selector predicate cannot be null"); } try { new StaticConnectionPool(httpClient, random().nextBoolean(), RequestConfig.DEFAULT, - connection 
-> random().nextBoolean(), (Node)null); + connection -> random().nextBoolean(), (HttpHost) null); } catch(NullPointerException e) { - assertEquals(e.getMessage(), "node cannot be null"); + assertEquals(e.getMessage(), "host cannot be null"); } try { new StaticConnectionPool(httpClient, random().nextBoolean(), RequestConfig.DEFAULT, - connection -> random().nextBoolean(), (Node[])null); + connection -> random().nextBoolean(), (HttpHost[])null); } catch(IllegalArgumentException e) { - assertEquals(e.getMessage(), "no nodes provided"); + assertEquals(e.getMessage(), "no hosts provided"); } try { new StaticConnectionPool(httpClient, random().nextBoolean(), RequestConfig.DEFAULT, connection -> random().nextBoolean()); } catch(IllegalArgumentException e) { - assertEquals(e.getMessage(), "no nodes provided"); + assertEquals(e.getMessage(), "no hosts provided"); } StaticConnectionPool staticConnectionPool = new StaticConnectionPool(httpClient, random().nextBoolean(), RequestConfig.DEFAULT, - connection -> random().nextBoolean(), nodes); + connection -> random().nextBoolean(), hosts); assertNotNull(staticConnectionPool); } } From ce663e9703e1086bd1565a2592f3af2f22149e31 Mon Sep 17 00:00:00 2001 From: javanna Date: Wed, 4 May 2016 16:36:13 +0200 Subject: [PATCH 008/103] get rid of connection selector predicate --- .../client/AbstractStaticConnectionPool.java | 11 ------- .../client/SniffingConnectionPool.java | 5 ++-- .../client/StaticConnectionPool.java | 5 +--- .../client/SniffingConnectionPoolTests.java | 30 ++++++------------- .../client/StaticConnectionPoolTests.java | 20 ++++--------- 5 files changed, 18 insertions(+), 53 deletions(-) diff --git a/client/src/main/java/org/elasticsearch/client/AbstractStaticConnectionPool.java b/client/src/main/java/org/elasticsearch/client/AbstractStaticConnectionPool.java index c043dd1705954..50923fa5406ad 100644 --- a/client/src/main/java/org/elasticsearch/client/AbstractStaticConnectionPool.java +++ 
b/client/src/main/java/org/elasticsearch/client/AbstractStaticConnectionPool.java @@ -47,23 +47,12 @@ public abstract class AbstractStaticConnectionPool implements ConnectionPool connectionSelector; - private final AtomicInteger lastConnectionIndex = new AtomicInteger(0); - protected AbstractStaticConnectionPool(Predicate connectionSelector) { - Objects.requireNonNull(connectionSelector, "connection selector predicate cannot be null"); - this.connectionSelector = connectionSelector; - } - protected abstract List getConnections(); @Override public final Stream nextConnection() { - return nextUnfilteredConnection().filter(connectionSelector); - } - - protected final Stream nextUnfilteredConnection() { List connections = getConnections(); if (connections.isEmpty()) { throw new IllegalStateException("no connections available in the connection pool"); diff --git a/client/src/main/java/org/elasticsearch/client/SniffingConnectionPool.java b/client/src/main/java/org/elasticsearch/client/SniffingConnectionPool.java index 25d17b45ee586..eaaf98d05060d 100644 --- a/client/src/main/java/org/elasticsearch/client/SniffingConnectionPool.java +++ b/client/src/main/java/org/elasticsearch/client/SniffingConnectionPool.java @@ -49,8 +49,7 @@ public class SniffingConnectionPool extends AbstractStaticConnectionPool { public SniffingConnectionPool(int sniffInterval, boolean sniffOnFailure, int sniffAfterFailureDelay, CloseableHttpClient client, RequestConfig sniffRequestConfig, int sniffRequestTimeout, Scheme scheme, - Predicate connectionSelector, HttpHost... hosts) { - super(connectionSelector); + HttpHost... 
hosts) { if (sniffInterval <= 0) { throw new IllegalArgumentException("sniffInterval must be greater than 0"); } @@ -129,7 +128,7 @@ void sniffOnFailure(HttpHost failedHost) { void sniff(Predicate hostFilter) { if (running.compareAndSet(false, true)) { try { - Iterator connectionIterator = nextUnfilteredConnection().iterator(); + Iterator connectionIterator = nextConnection().iterator(); if (connectionIterator.hasNext()) { sniff(connectionIterator, hostFilter); } else { diff --git a/client/src/main/java/org/elasticsearch/client/StaticConnectionPool.java b/client/src/main/java/org/elasticsearch/client/StaticConnectionPool.java index 539b7ad0f1f63..602298b1b3b6a 100644 --- a/client/src/main/java/org/elasticsearch/client/StaticConnectionPool.java +++ b/client/src/main/java/org/elasticsearch/client/StaticConnectionPool.java @@ -32,7 +32,6 @@ import java.io.IOException; import java.util.List; import java.util.Objects; -import java.util.function.Predicate; public class StaticConnectionPool extends AbstractStaticConnectionPool { @@ -43,9 +42,7 @@ public class StaticConnectionPool extends AbstractStaticConnectionPool { private final RequestConfig pingRequestConfig; private final List connections; - public StaticConnectionPool(CloseableHttpClient client, boolean pingEnabled, RequestConfig pingRequestConfig, - Predicate connectionSelector, HttpHost... hosts) { - super(connectionSelector); + public StaticConnectionPool(CloseableHttpClient client, boolean pingEnabled, RequestConfig pingRequestConfig, HttpHost... 
hosts) { Objects.requireNonNull(client, "client cannot be null"); Objects.requireNonNull(pingRequestConfig, "pingRequestConfig cannot be null"); if (hosts == null || hosts.length == 0) { diff --git a/client/src/test/java/org/elasticsearch/client/SniffingConnectionPoolTests.java b/client/src/test/java/org/elasticsearch/client/SniffingConnectionPoolTests.java index 97b7d0867c69b..1ae9279f78eb1 100644 --- a/client/src/test/java/org/elasticsearch/client/SniffingConnectionPoolTests.java +++ b/client/src/test/java/org/elasticsearch/client/SniffingConnectionPoolTests.java @@ -47,7 +47,7 @@ public void testConstructor() throws Exception { RandomInts.randomIntBetween(random(), Integer.MIN_VALUE, 0), random().nextBoolean(), RandomInts.randomIntBetween(random(), 1, Integer.MAX_VALUE), httpClient, RequestConfig.DEFAULT, RandomInts.randomIntBetween(random(), 1, Integer.MAX_VALUE), - RandomPicks.randomFrom(random(), SniffingConnectionPool.Scheme.values()), connection -> random().nextBoolean(), hosts)) { + RandomPicks.randomFrom(random(), SniffingConnectionPool.Scheme.values()), hosts)) { fail("pool creation should have failed " + connectionPool); } catch(IllegalArgumentException e) { @@ -58,7 +58,7 @@ public void testConstructor() throws Exception { RandomInts.randomIntBetween(random(), 1, Integer.MAX_VALUE), random().nextBoolean(), RandomInts.randomIntBetween(random(), Integer.MIN_VALUE, 0), httpClient, RequestConfig.DEFAULT, RandomInts.randomIntBetween(random(), 1, Integer.MAX_VALUE), - RandomPicks.randomFrom(random(), SniffingConnectionPool.Scheme.values()), connection -> random().nextBoolean(), hosts)) { + RandomPicks.randomFrom(random(), SniffingConnectionPool.Scheme.values()), hosts)) { fail("pool creation should have failed " + connectionPool); } catch(IllegalArgumentException e) { assertEquals(e.getMessage(), "sniffAfterFailureDelay must be greater than 0"); @@ -68,7 +68,7 @@ public void testConstructor() throws Exception { RandomInts.randomIntBetween(random(), 1, 
Integer.MAX_VALUE), random().nextBoolean(), RandomInts.randomIntBetween(random(), 1, Integer.MAX_VALUE), null, RequestConfig.DEFAULT, RandomInts.randomIntBetween(random(), 1, Integer.MAX_VALUE), - RandomPicks.randomFrom(random(), SniffingConnectionPool.Scheme.values()), connection -> random().nextBoolean(), hosts)) { + RandomPicks.randomFrom(random(), SniffingConnectionPool.Scheme.values()), hosts)) { fail("pool creation should have failed " + connectionPool); } catch(NullPointerException e) { assertEquals(e.getMessage(), "client cannot be null"); @@ -78,7 +78,7 @@ public void testConstructor() throws Exception { RandomInts.randomIntBetween(random(), 1, Integer.MAX_VALUE), random().nextBoolean(), RandomInts.randomIntBetween(random(), 1, Integer.MAX_VALUE), httpClient, null, RandomInts.randomIntBetween(random(), 1, Integer.MAX_VALUE), - RandomPicks.randomFrom(random(), SniffingConnectionPool.Scheme.values()), connection -> random().nextBoolean(), hosts)) { + RandomPicks.randomFrom(random(), SniffingConnectionPool.Scheme.values()), hosts)) { fail("pool creation should have failed " + connectionPool); } catch(NullPointerException e) { assertEquals(e.getMessage(), "sniffRequestConfig cannot be null"); @@ -88,7 +88,7 @@ public void testConstructor() throws Exception { RandomInts.randomIntBetween(random(), 1, Integer.MAX_VALUE), random().nextBoolean(), RandomInts.randomIntBetween(random(), 1, Integer.MAX_VALUE), httpClient, RequestConfig.DEFAULT, RandomInts.randomIntBetween(random(), Integer.MIN_VALUE, 0), - RandomPicks.randomFrom(random(), SniffingConnectionPool.Scheme.values()), connection -> random().nextBoolean(), hosts)) { + RandomPicks.randomFrom(random(), SniffingConnectionPool.Scheme.values()), hosts)) { fail("pool creation should have failed " + connectionPool); } catch(IllegalArgumentException e) { assertEquals(e.getMessage(), "sniffRequestTimeout must be greater than 0"); @@ -98,18 +98,7 @@ public void testConstructor() throws Exception { 
RandomInts.randomIntBetween(random(), 1, Integer.MAX_VALUE), random().nextBoolean(), RandomInts.randomIntBetween(random(), 1, Integer.MAX_VALUE), httpClient, RequestConfig.DEFAULT, RandomInts.randomIntBetween(random(), 1, Integer.MAX_VALUE), - RandomPicks.randomFrom(random(), SniffingConnectionPool.Scheme.values()), null, hosts)) { - fail("pool creation should have failed " + connectionPool); - } catch(NullPointerException e) { - assertEquals(e.getMessage(), "connection selector predicate cannot be null"); - } - - try (SniffingConnectionPool connectionPool = new SniffingConnectionPool( - RandomInts.randomIntBetween(random(), 1, Integer.MAX_VALUE), random().nextBoolean(), - RandomInts.randomIntBetween(random(), 1, Integer.MAX_VALUE), httpClient, RequestConfig.DEFAULT, - RandomInts.randomIntBetween(random(), 1, Integer.MAX_VALUE), - RandomPicks.randomFrom(random(), SniffingConnectionPool.Scheme.values()), - connection -> random().nextBoolean(), (HttpHost[])null)) { + RandomPicks.randomFrom(random(), SniffingConnectionPool.Scheme.values()), (HttpHost[])null)) { fail("pool creation should have failed " + connectionPool); } catch(IllegalArgumentException e) { assertEquals(e.getMessage(), "no hosts provided"); @@ -119,8 +108,7 @@ public void testConstructor() throws Exception { RandomInts.randomIntBetween(random(), 1, Integer.MAX_VALUE), random().nextBoolean(), RandomInts.randomIntBetween(random(), 1, Integer.MAX_VALUE), httpClient, RequestConfig.DEFAULT, RandomInts.randomIntBetween(random(), 1, Integer.MAX_VALUE), - RandomPicks.randomFrom(random(), SniffingConnectionPool.Scheme.values()), connection -> random().nextBoolean(), - (HttpHost) null)) { + RandomPicks.randomFrom(random(), SniffingConnectionPool.Scheme.values()), (HttpHost) null)) { fail("pool creation should have failed " + connectionPool); } catch(NullPointerException e) { assertEquals(e.getMessage(), "host cannot be null"); @@ -130,7 +118,7 @@ public void testConstructor() throws Exception { 
RandomInts.randomIntBetween(random(), 1, Integer.MAX_VALUE), random().nextBoolean(), RandomInts.randomIntBetween(random(), 1, Integer.MAX_VALUE), httpClient, RequestConfig.DEFAULT, RandomInts.randomIntBetween(random(), 1, Integer.MAX_VALUE), - RandomPicks.randomFrom(random(), SniffingConnectionPool.Scheme.values()), connection -> random().nextBoolean())) { + RandomPicks.randomFrom(random(), SniffingConnectionPool.Scheme.values()))) { fail("pool creation should have failed " + connectionPool); } catch(IllegalArgumentException e) { assertEquals(e.getMessage(), "no hosts provided"); @@ -140,7 +128,7 @@ public void testConstructor() throws Exception { RandomInts.randomIntBetween(random(), 1, Integer.MAX_VALUE), random().nextBoolean(), RandomInts.randomIntBetween(random(), 1, Integer.MAX_VALUE), httpClient, RequestConfig.DEFAULT, RandomInts.randomIntBetween(random(), 1, Integer.MAX_VALUE), - RandomPicks.randomFrom(random(), SniffingConnectionPool.Scheme.values()), connection -> random().nextBoolean(), hosts)) { + RandomPicks.randomFrom(random(), SniffingConnectionPool.Scheme.values()), hosts)) { assertNotNull(sniffingConnectionPool); } } diff --git a/client/src/test/java/org/elasticsearch/client/StaticConnectionPoolTests.java b/client/src/test/java/org/elasticsearch/client/StaticConnectionPoolTests.java index 82e8c60b02fa7..60398b71c0d68 100644 --- a/client/src/test/java/org/elasticsearch/client/StaticConnectionPoolTests.java +++ b/client/src/test/java/org/elasticsearch/client/StaticConnectionPoolTests.java @@ -43,45 +43,37 @@ public void testConstructor() { } try { - new StaticConnectionPool(null, random().nextBoolean(), RequestConfig.DEFAULT, connection -> random().nextBoolean(), hosts); + new StaticConnectionPool(null, random().nextBoolean(), RequestConfig.DEFAULT, hosts); } catch(NullPointerException e) { assertEquals(e.getMessage(), "client cannot be null"); } try { - new StaticConnectionPool(httpClient, random().nextBoolean(), null, connection -> 
random().nextBoolean(), hosts); + new StaticConnectionPool(httpClient, random().nextBoolean(), null, hosts); } catch(NullPointerException e) { assertEquals(e.getMessage(), "pingRequestConfig cannot be null"); } try { - new StaticConnectionPool(httpClient, random().nextBoolean(), RequestConfig.DEFAULT, null, hosts); - } catch(NullPointerException e) { - assertEquals(e.getMessage(), "connection selector predicate cannot be null"); - } - - try { - new StaticConnectionPool(httpClient, random().nextBoolean(), RequestConfig.DEFAULT, - connection -> random().nextBoolean(), (HttpHost) null); + new StaticConnectionPool(httpClient, random().nextBoolean(), RequestConfig.DEFAULT, (HttpHost) null); } catch(NullPointerException e) { assertEquals(e.getMessage(), "host cannot be null"); } try { - new StaticConnectionPool(httpClient, random().nextBoolean(), RequestConfig.DEFAULT, - connection -> random().nextBoolean(), (HttpHost[])null); + new StaticConnectionPool(httpClient, random().nextBoolean(), RequestConfig.DEFAULT, (HttpHost[])null); } catch(IllegalArgumentException e) { assertEquals(e.getMessage(), "no hosts provided"); } try { - new StaticConnectionPool(httpClient, random().nextBoolean(), RequestConfig.DEFAULT, connection -> random().nextBoolean()); + new StaticConnectionPool(httpClient, random().nextBoolean(), RequestConfig.DEFAULT); } catch(IllegalArgumentException e) { assertEquals(e.getMessage(), "no hosts provided"); } StaticConnectionPool staticConnectionPool = new StaticConnectionPool(httpClient, random().nextBoolean(), RequestConfig.DEFAULT, - connection -> random().nextBoolean(), hosts); + hosts); assertNotNull(staticConnectionPool); } } From a472544ab4d3c608e1299617234be3da1d8d4e69 Mon Sep 17 00:00:00 2001 From: javanna Date: Wed, 4 May 2016 17:06:30 +0200 Subject: [PATCH 009/103] move sniff related stuff to sniff package --- .../java/org/elasticsearch/client/{ => sniff}/Sniffer.java | 4 +++- .../client/{ => sniff}/SniffingConnectionPool.java | 4 +++- 
.../org/elasticsearch/client/{ => sniff}/SnifferTests.java | 5 ++++- .../client/{ => sniff}/SniffingConnectionPoolTests.java | 2 +- 4 files changed, 11 insertions(+), 4 deletions(-) rename client/src/main/java/org/elasticsearch/client/{ => sniff}/Sniffer.java (97%) rename client/src/main/java/org/elasticsearch/client/{ => sniff}/SniffingConnectionPool.java (98%) rename client/src/test/java/org/elasticsearch/client/{ => sniff}/SnifferTests.java (98%) rename client/src/test/java/org/elasticsearch/client/{ => sniff}/SniffingConnectionPoolTests.java (99%) diff --git a/client/src/main/java/org/elasticsearch/client/Sniffer.java b/client/src/main/java/org/elasticsearch/client/sniff/Sniffer.java similarity index 97% rename from client/src/main/java/org/elasticsearch/client/Sniffer.java rename to client/src/main/java/org/elasticsearch/client/sniff/Sniffer.java index 1bec4eb87e1e2..a812804cbce7e 100644 --- a/client/src/main/java/org/elasticsearch/client/Sniffer.java +++ b/client/src/main/java/org/elasticsearch/client/sniff/Sniffer.java @@ -17,7 +17,7 @@ * under the License. 
*/ -package org.elasticsearch.client; +package org.elasticsearch.client.sniff; import com.fasterxml.jackson.core.JsonFactory; import com.fasterxml.jackson.core.JsonParser; @@ -32,6 +32,8 @@ import org.apache.http.client.methods.HttpGet; import org.apache.http.impl.client.CloseableHttpClient; import org.apache.http.util.EntityUtils; +import org.elasticsearch.client.ElasticsearchResponseException; +import org.elasticsearch.client.RequestLogger; import java.io.IOException; import java.io.InputStream; diff --git a/client/src/main/java/org/elasticsearch/client/SniffingConnectionPool.java b/client/src/main/java/org/elasticsearch/client/sniff/SniffingConnectionPool.java similarity index 98% rename from client/src/main/java/org/elasticsearch/client/SniffingConnectionPool.java rename to client/src/main/java/org/elasticsearch/client/sniff/SniffingConnectionPool.java index eaaf98d05060d..d0171e6b95269 100644 --- a/client/src/main/java/org/elasticsearch/client/SniffingConnectionPool.java +++ b/client/src/main/java/org/elasticsearch/client/sniff/SniffingConnectionPool.java @@ -17,13 +17,15 @@ * under the License. 
*/ -package org.elasticsearch.client; +package org.elasticsearch.client.sniff; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.http.HttpHost; import org.apache.http.client.config.RequestConfig; import org.apache.http.impl.client.CloseableHttpClient; +import org.elasticsearch.client.AbstractStaticConnectionPool; +import org.elasticsearch.client.StatefulConnection; import java.io.IOException; import java.util.Iterator; diff --git a/client/src/test/java/org/elasticsearch/client/SnifferTests.java b/client/src/test/java/org/elasticsearch/client/sniff/SnifferTests.java similarity index 98% rename from client/src/test/java/org/elasticsearch/client/SnifferTests.java rename to client/src/test/java/org/elasticsearch/client/sniff/SnifferTests.java index c2caa5ab4fa5e..28a9697ac37d6 100644 --- a/client/src/test/java/org/elasticsearch/client/SnifferTests.java +++ b/client/src/test/java/org/elasticsearch/client/sniff/SnifferTests.java @@ -17,7 +17,7 @@ * under the License. 
*/ -package org.elasticsearch.client; +package org.elasticsearch.client.sniff; import com.carrotsearch.randomizedtesting.generators.RandomInts; import com.carrotsearch.randomizedtesting.generators.RandomPicks; @@ -33,6 +33,9 @@ import org.apache.http.impl.client.CloseableHttpClient; import org.apache.http.impl.client.HttpClientBuilder; import org.apache.lucene.util.LuceneTestCase; +import org.elasticsearch.client.ElasticsearchResponseException; +import org.elasticsearch.client.sniff.Sniffer; +import org.elasticsearch.client.sniff.SniffingConnectionPool; import org.junit.After; import org.junit.Before; diff --git a/client/src/test/java/org/elasticsearch/client/SniffingConnectionPoolTests.java b/client/src/test/java/org/elasticsearch/client/sniff/SniffingConnectionPoolTests.java similarity index 99% rename from client/src/test/java/org/elasticsearch/client/SniffingConnectionPoolTests.java rename to client/src/test/java/org/elasticsearch/client/sniff/SniffingConnectionPoolTests.java index 1ae9279f78eb1..06e6b43236bea 100644 --- a/client/src/test/java/org/elasticsearch/client/SniffingConnectionPoolTests.java +++ b/client/src/test/java/org/elasticsearch/client/sniff/SniffingConnectionPoolTests.java @@ -17,7 +17,7 @@ * under the License. 
*/ -package org.elasticsearch.client; +package org.elasticsearch.client.sniff; import com.carrotsearch.randomizedtesting.generators.RandomInts; import com.carrotsearch.randomizedtesting.generators.RandomPicks; From 062a21678c144262d98e1f1cf5b0b74071c31463 Mon Sep 17 00:00:00 2001 From: javanna Date: Fri, 6 May 2016 09:52:33 +0200 Subject: [PATCH 010/103] merge Connection and StatefulConnection into one class, remove generic type from Transport --- .../client/AbstractStaticConnectionPool.java | 32 ++--- .../org/elasticsearch/client/Connection.java | 88 +++++++++++- .../elasticsearch/client/ConnectionPool.java | 18 ++- .../ElasticsearchResponseException.java | 2 +- .../org/elasticsearch/client/RestClient.java | 4 +- .../client/StatefulConnection.java | 127 ------------------ .../client/StaticConnectionPool.java | 8 +- .../org/elasticsearch/client/Transport.java | 16 +-- .../client/sniff/SniffingConnectionPool.java | 19 +-- .../elasticsearch/client/TransportTests.java | 10 +- 10 files changed, 139 insertions(+), 185 deletions(-) delete mode 100644 client/src/main/java/org/elasticsearch/client/StatefulConnection.java diff --git a/client/src/main/java/org/elasticsearch/client/AbstractStaticConnectionPool.java b/client/src/main/java/org/elasticsearch/client/AbstractStaticConnectionPool.java index 50923fa5406ad..192c1b0f35a14 100644 --- a/client/src/main/java/org/elasticsearch/client/AbstractStaticConnectionPool.java +++ b/client/src/main/java/org/elasticsearch/client/AbstractStaticConnectionPool.java @@ -38,57 +38,57 @@ * allows to filter connections through a customizable {@link Predicate}, called connection selector. * In case the returned stream is empty a last resort dead connection should be retrieved by calling {@link #lastResortConnection()} * and resurrected so that a single request attempt can be performed. - * The {@link #onSuccess(StatefulConnection)} method marks the connection provided as an argument alive. 
- * The {@link #onFailure(StatefulConnection)} method marks the connection provided as an argument dead. + * The {@link #onSuccess(Connection)} method marks the connection provided as an argument alive. + * The {@link #onFailure(Connection)} method marks the connection provided as an argument dead. * This base implementation doesn't define the list implementation that stores connections, so that concurrency can be * handled in the subclasses depending on the usecase (e.g. defining the list volatile when needed). */ -public abstract class AbstractStaticConnectionPool implements ConnectionPool { +public abstract class AbstractStaticConnectionPool implements ConnectionPool { private static final Log logger = LogFactory.getLog(AbstractStaticConnectionPool.class); private final AtomicInteger lastConnectionIndex = new AtomicInteger(0); - protected abstract List getConnections(); + protected abstract List getConnections(); @Override - public final Stream nextConnection() { - List connections = getConnections(); + public final Stream nextConnection() { + List connections = getConnections(); if (connections.isEmpty()) { throw new IllegalStateException("no connections available in the connection pool"); } - List sortedConnections = new ArrayList<>(connections); + List sortedConnections = new ArrayList<>(connections); //TODO is it possible to make this O(1)? (rotate is O(n)) Collections.rotate(sortedConnections, sortedConnections.size() - lastConnectionIndex.getAndIncrement()); return sortedConnections.stream().filter(connection -> connection.isAlive() || connection.shouldBeRetried()); } - protected List createConnections(HttpHost... hosts) { - List connections = new ArrayList<>(); + protected List createConnections(HttpHost... 
hosts) { + List connections = new ArrayList<>(); for (HttpHost host : hosts) { Objects.requireNonNull(host, "host cannot be null"); - connections.add(new StatefulConnection(host)); + connections.add(new Connection(host)); } return Collections.unmodifiableList(connections); } @Override - public StatefulConnection lastResortConnection() { - StatefulConnection statefulConnection = getConnections().stream() + public Connection lastResortConnection() { + Connection Connection = getConnections().stream() .sorted((o1, o2) -> Long.compare(o1.getDeadUntil(), o2.getDeadUntil())).findFirst().get(); - statefulConnection.markResurrected(); - return statefulConnection; + Connection.markResurrected(); + return Connection; } @Override - public void onSuccess(StatefulConnection connection) { + public void onSuccess(Connection connection) { connection.markAlive(); logger.trace("marked connection alive for " + connection.getHost()); } @Override - public void onFailure(StatefulConnection connection) throws IOException { + public void onFailure(Connection connection) throws IOException { connection.markDead(); logger.debug("marked connection dead for " + connection.getHost()); } diff --git a/client/src/main/java/org/elasticsearch/client/Connection.java b/client/src/main/java/org/elasticsearch/client/Connection.java index 51a430b254053..3d48a4eeae1f4 100644 --- a/client/src/main/java/org/elasticsearch/client/Connection.java +++ b/client/src/main/java/org/elasticsearch/client/Connection.java @@ -21,15 +21,21 @@ import org.apache.http.HttpHost; +import java.util.concurrent.TimeUnit; + /** - * Simplest representation of a connection to an elasticsearch node. - * It doesn't have any mutable state. It holds the host that the connection points to. + * Represents a connection to a host. It holds the host that the connection points to. * Allows the transport to deal with very simple connection objects that are immutable. 
- * Any change to the state of connections should be made through the connection pool - * which is aware of the connection object that it supports. + * Any change to the state of a connection should be made through the connection pool. */ public class Connection { + //TODO make these values configurable through the connection pool? + private static final long DEFAULT_CONNECTION_TIMEOUT_MILLIS = TimeUnit.MINUTES.toMillis(1); + private static final long MAX_CONNECTION_TIMEOUT_MILLIS = TimeUnit.MINUTES.toMillis(30); private final HttpHost host; + private volatile State state = State.UNKNOWN; + private volatile int failedAttempts = -1; + private volatile long deadUntil = -1; /** * Creates a new connection pointing to the provided {@link HttpHost} argument @@ -44,4 +50,78 @@ public Connection(HttpHost host) { public HttpHost getHost() { return host; } + + /** + * Marks connection as dead. Should be called in case the corresponding node is not responding or caused failures. + * Once marked dead, the number of failed attempts will be incremented on each call to this method. A dead connection + * should be retried once {@link #shouldBeRetried()} returns true, which depends on the number of previous failed attempts + * and when the last failure was registered. + */ + void markDead() { + synchronized (this) { + int failedAttempts = Math.max(this.failedAttempts, 0); + long timeoutMillis = (long)Math.min(DEFAULT_CONNECTION_TIMEOUT_MILLIS * 2 * Math.pow(2, failedAttempts * 0.5 - 1), + MAX_CONNECTION_TIMEOUT_MILLIS); + this.deadUntil = System.nanoTime() + TimeUnit.MILLISECONDS.toNanos(timeoutMillis); + this.failedAttempts = ++failedAttempts; + this.state = State.DEAD; + } + } + + /** + * Marks this connection alive. Should be called when the corresponding node is working properly. + * Will reset the number of failed attempts that were counted in case the connection was previously dead, + * as well as its dead timeout. 
+ */ + void markAlive() { + if (this.state != State.ALIVE) { + synchronized (this) { + this.deadUntil = -1; + this.failedAttempts = 0; + this.state = State.ALIVE; + } + } + } + + /** + * Resets the connection to its initial state, so it will be retried. To be called when all the connections in the pool + * are dead, so that one connection can be retried. Note that calling this method only changes the state of the connection, + * it doesn't reset its failed attempts and dead until timestamp. That way if the connection goes back to dead straightaway + * all of its previous failed attempts are taken into account. + */ + void markResurrected() { + if (this.state == State.DEAD) { + synchronized (this) { + this.state = State.UNKNOWN; + } + } + } + + /** + * Returns the timestamp till the connection is supposed to stay dead till it can be retried + */ + public long getDeadUntil() { + return deadUntil; + } + + /** + * Returns true if the connection is alive, false otherwise. + */ + public boolean isAlive() { + return state == State.ALIVE; + } + + /** + * Returns true in case the connection is not alive but should be used/retried, false otherwise. + * Returns true in case the connection is in unknown state (never used before) or resurrected. When the connection is dead, + * returns true when it is time to retry it, depending on how many failed attempts were registered and when the last failure + * happened (minimum 1 minute, maximum 30 minutes). 
+ */ + public boolean shouldBeRetried() { + return state == State.UNKNOWN || (state == State.DEAD && System.nanoTime() - deadUntil >= 0); + } + + private enum State { + UNKNOWN, DEAD, ALIVE + } } diff --git a/client/src/main/java/org/elasticsearch/client/ConnectionPool.java b/client/src/main/java/org/elasticsearch/client/ConnectionPool.java index 3f2eb89da9718..7e47a8192564a 100644 --- a/client/src/main/java/org/elasticsearch/client/ConnectionPool.java +++ b/client/src/main/java/org/elasticsearch/client/ConnectionPool.java @@ -24,14 +24,12 @@ import java.util.stream.Stream; /** - * Pool of connections to the different nodes that belong to an elasticsearch cluster. - * It keeps track of the different nodes to communicate with and allows to retrieve a stream of connections to be used + * Pool of connections to the different hosts that belong to an elasticsearch cluster. + * It keeps track of the different hosts to communicate with and allows to retrieve a stream of connections to be used * for each request. Exposes the needed hooks to be able to eventually mark connections dead or alive and execute * arbitrary operations before each single request attempt. - * - * @param the type of {@link Connection} that the pool supports */ -public interface ConnectionPool extends Closeable { +public interface ConnectionPool extends Closeable { /** * Returns a stream of connections that should be used for a request call. @@ -41,29 +39,29 @@ public interface ConnectionPool extends Closeable { * It may happen that the stream is empty, in which case it means that there aren't healthy connections to use. * Then {@link #lastResortConnection()} should be called to retrieve a non healthy connection and try it. */ - Stream nextConnection(); + Stream nextConnection(); /** * Returns a connection that is not necessarily healthy, but can be used for a request attempt. 
To be called as last resort * only in case {@link #nextConnection()} returns an empty stream */ - C lastResortConnection(); + Connection lastResortConnection(); /** * Called before each single request attempt. Allows to execute operations (e.g. ping) before each request. * Receives as an argument the connection that is going to be used for the request. */ - void beforeAttempt(C connection) throws IOException; + void beforeAttempt(Connection connection) throws IOException; /** * Called after each successful request call. * Receives as an argument the connection that was used for the successful request. */ - void onSuccess(C connection); + void onSuccess(Connection connection); /** * Called after each failed attempt. * Receives as an argument the connection that was used for the failed attempt. */ - void onFailure(C connection) throws IOException; + void onFailure(Connection connection) throws IOException; } diff --git a/client/src/main/java/org/elasticsearch/client/ElasticsearchResponseException.java b/client/src/main/java/org/elasticsearch/client/ElasticsearchResponseException.java index 9f4b123bc8c95..2d9ceead5ed56 100644 --- a/client/src/main/java/org/elasticsearch/client/ElasticsearchResponseException.java +++ b/client/src/main/java/org/elasticsearch/client/ElasticsearchResponseException.java @@ -34,7 +34,7 @@ public class ElasticsearchResponseException extends IOException { private final RequestLine requestLine; private final StatusLine statusLine; - ElasticsearchResponseException(RequestLine requestLine, HttpHost host, StatusLine statusLine) { + public ElasticsearchResponseException(RequestLine requestLine, HttpHost host, StatusLine statusLine) { super(buildMessage(requestLine, host, statusLine)); this.host = host; this.requestLine = requestLine; diff --git a/client/src/main/java/org/elasticsearch/client/RestClient.java b/client/src/main/java/org/elasticsearch/client/RestClient.java index 35af27232677a..e4186edc126cf 100644 --- 
a/client/src/main/java/org/elasticsearch/client/RestClient.java +++ b/client/src/main/java/org/elasticsearch/client/RestClient.java @@ -29,8 +29,8 @@ public final class RestClient implements Closeable { private final Transport transport; - public RestClient(CloseableHttpClient client, ConnectionPool connectionPool, long maxRetryTimeout) { - this.transport = new Transport<>(client, connectionPool, maxRetryTimeout); + public RestClient(CloseableHttpClient client, ConnectionPool connectionPool, long maxRetryTimeout) { + this.transport = new Transport(client, connectionPool, maxRetryTimeout); } public ElasticsearchResponse performRequest(String method, String endpoint, Map params, HttpEntity entity) diff --git a/client/src/main/java/org/elasticsearch/client/StatefulConnection.java b/client/src/main/java/org/elasticsearch/client/StatefulConnection.java deleted file mode 100644 index 50ac78cdde9c6..0000000000000 --- a/client/src/main/java/org/elasticsearch/client/StatefulConnection.java +++ /dev/null @@ -1,127 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.client; - -import org.apache.http.HttpHost; - -import java.util.concurrent.TimeUnit; - -/** - * {@link Connection} subclass that has a mutable state, based on previous usage. - * When first created, a connection is in unknown state, till it is used for the first time - * and marked either dead or alive based on the outcome of the first usage. - * Should be marked alive when properly working. - * Should be marked dead when it caused a failure, in which case the connection may be retried some time later, - * as soon as {@link #shouldBeRetried()} returns true, which depends on how many consecutive failed attempts - * were counted and when the last one was registered. - * Should be marked resurrected if in dead state, as last resort in case there are no live connections available - * and none of the dead ones are ready to be retried yet. When marked resurrected, the number of failed attempts - * and its timeout is not reset so that if it gets marked dead again it returns to the exact state before resurrection. - */ -public final class StatefulConnection extends Connection { - //TODO make these values configurable through the connection pool? - private static final long DEFAULT_CONNECTION_TIMEOUT_MILLIS = TimeUnit.MINUTES.toMillis(1); - private static final long MAX_CONNECTION_TIMEOUT_MILLIS = TimeUnit.MINUTES.toMillis(30); - - private volatile State state = State.UNKNOWN; - private volatile int failedAttempts = -1; - private volatile long deadUntil = -1; - - /** - * Creates a new mutable connection pointing to the provided {@link HttpHost} argument - */ - public StatefulConnection(HttpHost host) { - super(host); - } - - /** - * Marks connection as dead. Should be called in case the corresponding node is not responding or caused failures. - * Once marked dead, the number of failed attempts will be incremented on each call to this method. 
A dead connection - * should be retried once {@link #shouldBeRetried()} returns true, which depends on the number of previous failed attempts - * and when the last failure was registered. - */ - void markDead() { - synchronized (this) { - int failedAttempts = Math.max(this.failedAttempts, 0); - long timeoutMillis = (long)Math.min(DEFAULT_CONNECTION_TIMEOUT_MILLIS * 2 * Math.pow(2, failedAttempts * 0.5 - 1), - MAX_CONNECTION_TIMEOUT_MILLIS); - this.deadUntil = System.nanoTime() + TimeUnit.MILLISECONDS.toNanos(timeoutMillis); - this.failedAttempts = ++failedAttempts; - this.state = State.DEAD; - } - } - - /** - * Marks this connection alive. Should be called when the corresponding node is working properly. - * Will reset the number of failed attempts that were counted in case the connection was previously dead, - * as well as its dead timeout. - */ - void markAlive() { - if (this.state != State.ALIVE) { - synchronized (this) { - this.deadUntil = -1; - this.failedAttempts = 0; - this.state = State.ALIVE; - } - } - } - - /** - * Resets the connection to its initial state, so it will be retried. To be called when all the connections in the pool - * are dead, so that one connection can be retried. Note that calling this method only changes the state of the connection, - * it doesn't reset its failed attempts and dead until timestamp. That way if the connection goes back to dead straightaway - * all of its previous failed attempts are taken into account. - */ - void markResurrected() { - if (this.state == State.DEAD) { - synchronized (this) { - this.state = State.UNKNOWN; - } - } - } - - /** - * Returns the timestamp till the connection is supposed to stay dead till it can be retried - */ - public long getDeadUntil() { - return deadUntil; - } - - /** - * Returns true if the connection is alive, false otherwise. 
- */ - public boolean isAlive() { - return state == State.ALIVE; - } - - /** - * Returns true in case the connection is not alive but should be used/retried, false otherwise. - * Returns true in case the connection is in unknown state (never used before) or resurrected. When the connection is dead, - * returns true when it is time to retry it, depending on how many failed attempts were registered and when the last failure - * happened (minimum 1 minute, maximum 30 minutes). - */ - public boolean shouldBeRetried() { - return state == State.UNKNOWN || (state == State.DEAD && System.nanoTime() - deadUntil >= 0); - } - - private enum State { - UNKNOWN, DEAD, ALIVE - } -} diff --git a/client/src/main/java/org/elasticsearch/client/StaticConnectionPool.java b/client/src/main/java/org/elasticsearch/client/StaticConnectionPool.java index 602298b1b3b6a..3e25818fb3967 100644 --- a/client/src/main/java/org/elasticsearch/client/StaticConnectionPool.java +++ b/client/src/main/java/org/elasticsearch/client/StaticConnectionPool.java @@ -40,7 +40,7 @@ public class StaticConnectionPool extends AbstractStaticConnectionPool { private final CloseableHttpClient client; private final boolean pingEnabled; private final RequestConfig pingRequestConfig; - private final List connections; + private final List connections; public StaticConnectionPool(CloseableHttpClient client, boolean pingEnabled, RequestConfig pingRequestConfig, HttpHost... hosts) { Objects.requireNonNull(client, "client cannot be null"); @@ -55,12 +55,14 @@ public StaticConnectionPool(CloseableHttpClient client, boolean pingEnabled, Req } @Override - protected List getConnections() { + protected List getConnections() { return connections; } + //TODO do we still need pinging? 
seems like a workaround for some clients that don't support connect timeout but we have that + @Override - public void beforeAttempt(StatefulConnection connection) throws IOException { + public void beforeAttempt(Connection connection) throws IOException { if (pingEnabled && connection.shouldBeRetried()) { HttpHead httpHead = new HttpHead("/"); httpHead.setConfig(pingRequestConfig); diff --git a/client/src/main/java/org/elasticsearch/client/Transport.java b/client/src/main/java/org/elasticsearch/client/Transport.java index ff0835a991c94..b0f843ec19bd5 100644 --- a/client/src/main/java/org/elasticsearch/client/Transport.java +++ b/client/src/main/java/org/elasticsearch/client/Transport.java @@ -44,15 +44,15 @@ import java.util.concurrent.TimeUnit; import java.util.stream.Stream; -final class Transport implements Closeable { +final class Transport implements Closeable { private static final Log logger = LogFactory.getLog(Transport.class); private final CloseableHttpClient client; - private final ConnectionPool connectionPool; + private final ConnectionPool connectionPool; private final long maxRetryTimeout; - Transport(CloseableHttpClient client, ConnectionPool connectionPool, long maxRetryTimeout) { + Transport(CloseableHttpClient client, ConnectionPool connectionPool, long maxRetryTimeout) { Objects.requireNonNull(client, "client cannot be null"); Objects.requireNonNull(connectionPool, "connectionPool cannot be null"); if (maxRetryTimeout <= 0) { @@ -66,23 +66,23 @@ final class Transport implements Closeable { ElasticsearchResponse performRequest(String method, String endpoint, Map params, HttpEntity entity) throws IOException { URI uri = buildUri(endpoint, params); HttpRequestBase request = createHttpRequest(method, uri, entity); - Iterator connectionIterator = connectionPool.nextConnection().iterator(); + Iterator connectionIterator = connectionPool.nextConnection().iterator(); if (connectionIterator.hasNext() == false) { - C connection = 
connectionPool.lastResortConnection(); + Connection connection = connectionPool.lastResortConnection(); logger.info("no healthy nodes available, trying " + connection.getHost()); return performRequest(request, Stream.of(connection).iterator()); } return performRequest(request, connectionIterator); } - private ElasticsearchResponse performRequest(HttpRequestBase request, Iterator connectionIterator) throws IOException { + private ElasticsearchResponse performRequest(HttpRequestBase request, Iterator connectionIterator) throws IOException { //we apply a soft margin so that e.g. if a request took 59 seconds and timeout is set to 60 we don't do another attempt long retryTimeout = Math.round(this.maxRetryTimeout / (float)100 * 98); IOException lastSeenException = null; long startTime = System.nanoTime(); while (connectionIterator.hasNext()) { - C connection = connectionIterator.next(); + Connection connection = connectionIterator.next(); if (lastSeenException != null) { long timeElapsed = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startTime); @@ -124,7 +124,7 @@ private ElasticsearchResponse performRequest(HttpRequestBase request, Iterator connections; + private volatile List connections; private final SnifferTask snifferTask; + //TODO do we still need the sniff request timeout? or should we just use a low connect timeout? public SniffingConnectionPool(int sniffInterval, boolean sniffOnFailure, int sniffAfterFailureDelay, CloseableHttpClient client, RequestConfig sniffRequestConfig, int sniffRequestTimeout, Scheme scheme, HttpHost... 
hosts) { @@ -69,17 +70,17 @@ public SniffingConnectionPool(int sniffInterval, boolean sniffOnFailure, int sni } @Override - protected List getConnections() { + protected List getConnections() { return this.connections; } @Override - public void beforeAttempt(StatefulConnection connection) throws IOException { + public void beforeAttempt(Connection connection) throws IOException { } @Override - public void onFailure(StatefulConnection connection) throws IOException { + public void onFailure(Connection connection) throws IOException { super.onFailure(connection); if (sniffOnFailure) { //re-sniff immediately but take out the node that failed @@ -130,11 +131,11 @@ void sniffOnFailure(HttpHost failedHost) { void sniff(Predicate hostFilter) { if (running.compareAndSet(false, true)) { try { - Iterator connectionIterator = nextConnection().iterator(); + Iterator connectionIterator = nextConnection().iterator(); if (connectionIterator.hasNext()) { sniff(connectionIterator, hostFilter); } else { - StatefulConnection connection = lastResortConnection(); + Connection connection = lastResortConnection(); logger.info("no healthy nodes available, trying " + connection.getHost()); sniff(Stream.of(connection).iterator(), hostFilter); } @@ -160,10 +161,10 @@ void sniff(Predicate hostFilter) { } } - void sniff(Iterator connectionIterator, Predicate hostFilter) throws IOException { + void sniff(Iterator connectionIterator, Predicate hostFilter) throws IOException { IOException lastSeenException = null; while (connectionIterator.hasNext()) { - StatefulConnection connection = connectionIterator.next(); + Connection connection = connectionIterator.next(); try { List sniffedNodes = sniffer.sniffNodes(connection.getHost()); HttpHost[] filteredNodes = sniffedNodes.stream().filter(hostFilter).toArray(HttpHost[]::new); diff --git a/client/src/test/java/org/elasticsearch/client/TransportTests.java b/client/src/test/java/org/elasticsearch/client/TransportTests.java index 
591eb516c59d0..1efafd438b0d5 100644 --- a/client/src/test/java/org/elasticsearch/client/TransportTests.java +++ b/client/src/test/java/org/elasticsearch/client/TransportTests.java @@ -36,7 +36,7 @@ public class TransportTests extends LuceneTestCase { public void testConstructor() { CloseableHttpClient httpClient = HttpClientBuilder.create().build(); - ConnectionPool connectionPool = new ConnectionPool() { + ConnectionPool connectionPool = new ConnectionPool() { @Override public Stream nextConnection() { return null; @@ -69,27 +69,27 @@ public void close() throws IOException { }; try { - new Transport<>(null, connectionPool, RandomInts.randomIntBetween(random(), 1, Integer.MAX_VALUE)); + new Transport(null, connectionPool, RandomInts.randomIntBetween(random(), 1, Integer.MAX_VALUE)); fail("transport creation should have failed"); } catch(NullPointerException e) { assertEquals(e.getMessage(), "client cannot be null"); } try { - new Transport<>(httpClient, null, RandomInts.randomIntBetween(random(), 1, Integer.MAX_VALUE)); + new Transport(httpClient, null, RandomInts.randomIntBetween(random(), 1, Integer.MAX_VALUE)); fail("transport creation should have failed"); } catch(NullPointerException e) { assertEquals(e.getMessage(), "connectionPool cannot be null"); } try { - new Transport<>(httpClient, connectionPool, RandomInts.randomIntBetween(random(), Integer.MIN_VALUE, 0)); + new Transport(httpClient, connectionPool, RandomInts.randomIntBetween(random(), Integer.MIN_VALUE, 0)); fail("transport creation should have failed"); } catch(IllegalArgumentException e) { assertEquals(e.getMessage(), "maxRetryTimeout must be greater than 0"); } - Transport transport = new Transport<>(httpClient, connectionPool, RandomInts.randomIntBetween(random(), 1, Integer.MAX_VALUE)); + Transport transport = new Transport(httpClient, connectionPool, RandomInts.randomIntBetween(random(), 1, Integer.MAX_VALUE)); assertNotNull(transport); } } From bd29dc157259246f181cf911a2fcf5d684675385 Mon Sep 
17 00:00:00 2001 From: javanna Date: Fri, 6 May 2016 10:12:43 +0200 Subject: [PATCH 011/103] Remove Transport class, move all of it within RestClient class --- .../org/elasticsearch/client/RestClient.java | 171 +++++++++++++- .../org/elasticsearch/client/Transport.java | 208 ------------------ ...ansportTests.java => RestClientTests.java} | 15 +- 3 files changed, 175 insertions(+), 219 deletions(-) delete mode 100644 client/src/main/java/org/elasticsearch/client/Transport.java rename client/src/test/java/org/elasticsearch/client/{TransportTests.java => RestClientTests.java} (80%) diff --git a/client/src/main/java/org/elasticsearch/client/RestClient.java b/client/src/main/java/org/elasticsearch/client/RestClient.java index e4186edc126cf..3e59cc297e434 100644 --- a/client/src/main/java/org/elasticsearch/client/RestClient.java +++ b/client/src/main/java/org/elasticsearch/client/RestClient.java @@ -18,28 +18,191 @@ */ package org.elasticsearch.client; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; import org.apache.http.HttpEntity; +import org.apache.http.StatusLine; +import org.apache.http.client.methods.CloseableHttpResponse; +import org.apache.http.client.methods.HttpEntityEnclosingRequestBase; +import org.apache.http.client.methods.HttpHead; +import org.apache.http.client.methods.HttpPost; +import org.apache.http.client.methods.HttpPut; +import org.apache.http.client.methods.HttpRequestBase; +import org.apache.http.client.utils.URIBuilder; import org.apache.http.impl.client.CloseableHttpClient; +import org.apache.http.util.EntityUtils; import java.io.Closeable; import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; +import java.util.Iterator; +import java.util.Locale; import java.util.Map; +import java.util.Objects; +import java.util.concurrent.TimeUnit; +import java.util.stream.Stream; public final class RestClient implements Closeable { - private final Transport transport; + private static 
final Log logger = LogFactory.getLog(RestClient.class); + + private final CloseableHttpClient client; + private final ConnectionPool connectionPool; + private final long maxRetryTimeout; public RestClient(CloseableHttpClient client, ConnectionPool connectionPool, long maxRetryTimeout) { - this.transport = new Transport(client, connectionPool, maxRetryTimeout); + Objects.requireNonNull(client, "client cannot be null"); + Objects.requireNonNull(connectionPool, "connectionPool cannot be null"); + if (maxRetryTimeout <= 0) { + throw new IllegalArgumentException("maxRetryTimeout must be greater than 0"); + } + this.client = client; + this.connectionPool = connectionPool; + this.maxRetryTimeout = maxRetryTimeout; } public ElasticsearchResponse performRequest(String method, String endpoint, Map params, HttpEntity entity) throws IOException { - return transport.performRequest(method, endpoint, params, entity); + URI uri = buildUri(endpoint, params); + HttpRequestBase request = createHttpRequest(method, uri, entity); + Iterator connectionIterator = connectionPool.nextConnection().iterator(); + if (connectionIterator.hasNext() == false) { + Connection connection = connectionPool.lastResortConnection(); + logger.info("no healthy nodes available, trying " + connection.getHost()); + return performRequest(request, Stream.of(connection).iterator()); + } + return performRequest(request, connectionIterator); + } + + private ElasticsearchResponse performRequest(HttpRequestBase request, Iterator connectionIterator) throws IOException { + //we apply a soft margin so that e.g. 
if a request took 59 seconds and timeout is set to 60 we don't do another attempt + long retryTimeout = Math.round(this.maxRetryTimeout / (float)100 * 98); + IOException lastSeenException = null; + long startTime = System.nanoTime(); + + while (connectionIterator.hasNext()) { + Connection connection = connectionIterator.next(); + + if (lastSeenException != null) { + long timeElapsed = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startTime); + long timeout = retryTimeout - timeElapsed; + if (timeout <= 0) { + IOException retryTimeoutException = new IOException( + "request retries exceeded max retry timeout [" + retryTimeout + "]"); + retryTimeoutException.addSuppressed(lastSeenException); + throw retryTimeoutException; + } + } + + try { + connectionPool.beforeAttempt(connection); + } catch(IOException e) { + lastSeenException = addSuppressedException(lastSeenException, e); + continue; + } + + try { + ElasticsearchResponse response = performRequest(request, connection); + connectionPool.onSuccess(connection); + return response; + } catch(ElasticsearchResponseException e) { + if (e.isRecoverable()) { + connectionPool.onFailure(connection); + lastSeenException = addSuppressedException(lastSeenException, e); + } else { + //don't retry and call onSuccess as the error should be a request problem + connectionPool.onSuccess(connection); + throw e; + } + } catch(IOException e) { + connectionPool.onFailure(connection); + lastSeenException = addSuppressedException(lastSeenException, e); + } + } + assert lastSeenException != null; + throw lastSeenException; + } + + private ElasticsearchResponse performRequest(HttpRequestBase request, Connection connection) throws IOException { + CloseableHttpResponse response; + try { + response = client.execute(connection.getHost(), request); + } catch(IOException e) { + RequestLogger.log(logger, "request failed", request.getRequestLine(), connection.getHost(), e); + throw e; + } finally { + request.reset(); + } + StatusLine statusLine = 
response.getStatusLine(); + //TODO make ignore status code configurable. rest-spec and tests support that parameter. + if (statusLine.getStatusCode() < 300 || + request.getMethod().equals(HttpHead.METHOD_NAME) && statusLine.getStatusCode() == 404) { + RequestLogger.log(logger, "request succeeded", request.getRequestLine(), connection.getHost(), response.getStatusLine()); + return new ElasticsearchResponse(request.getRequestLine(), connection.getHost(), response); + } else { + EntityUtils.consume(response.getEntity()); + RequestLogger.log(logger, "request failed", request.getRequestLine(), connection.getHost(), response.getStatusLine()); + throw new ElasticsearchResponseException(request.getRequestLine(), connection.getHost(), statusLine); + } + } + + private static IOException addSuppressedException(IOException suppressedException, IOException currentException) { + if (suppressedException != null) { + currentException.addSuppressed(suppressedException); + } + return currentException; + } + + private static HttpRequestBase createHttpRequest(String method, URI uri, HttpEntity entity) { + switch(method.toUpperCase(Locale.ROOT)) { + case HttpDeleteWithEntity.METHOD_NAME: + HttpDeleteWithEntity httpDeleteWithEntity = new HttpDeleteWithEntity(uri); + addRequestBody(httpDeleteWithEntity, entity); + return httpDeleteWithEntity; + case HttpGetWithEntity.METHOD_NAME: + HttpGetWithEntity httpGetWithEntity = new HttpGetWithEntity(uri); + addRequestBody(httpGetWithEntity, entity); + return httpGetWithEntity; + case HttpHead.METHOD_NAME: + if (entity != null) { + throw new UnsupportedOperationException("HEAD with body is not supported"); + } + return new HttpHead(uri); + case HttpPost.METHOD_NAME: + HttpPost httpPost = new HttpPost(uri); + addRequestBody(httpPost, entity); + return httpPost; + case HttpPut.METHOD_NAME: + HttpPut httpPut = new HttpPut(uri); + addRequestBody(httpPut, entity); + return httpPut; + default: + throw new UnsupportedOperationException("http method not 
supported: " + method); + } + } + + private static void addRequestBody(HttpEntityEnclosingRequestBase httpRequest, HttpEntity entity) { + if (entity != null) { + httpRequest.setEntity(entity); + } + } + + private static URI buildUri(String path, Map params) { + try { + URIBuilder uriBuilder = new URIBuilder(path); + for (Map.Entry param : params.entrySet()) { + uriBuilder.addParameter(param.getKey(), param.getValue().toString()); + } + return uriBuilder.build(); + } catch(URISyntaxException e) { + throw new IllegalArgumentException(e.getMessage(), e); + } } @Override public void close() throws IOException { - transport.close(); + connectionPool.close(); + client.close(); } } diff --git a/client/src/main/java/org/elasticsearch/client/Transport.java b/client/src/main/java/org/elasticsearch/client/Transport.java deleted file mode 100644 index b0f843ec19bd5..0000000000000 --- a/client/src/main/java/org/elasticsearch/client/Transport.java +++ /dev/null @@ -1,208 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.client; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.http.HttpEntity; -import org.apache.http.StatusLine; -import org.apache.http.client.methods.CloseableHttpResponse; -import org.apache.http.client.methods.HttpEntityEnclosingRequestBase; -import org.apache.http.client.methods.HttpHead; -import org.apache.http.client.methods.HttpPost; -import org.apache.http.client.methods.HttpPut; -import org.apache.http.client.methods.HttpRequestBase; -import org.apache.http.client.utils.URIBuilder; -import org.apache.http.impl.client.CloseableHttpClient; -import org.apache.http.util.EntityUtils; - -import java.io.Closeable; -import java.io.IOException; -import java.net.URI; -import java.net.URISyntaxException; -import java.util.Iterator; -import java.util.Locale; -import java.util.Map; -import java.util.Objects; -import java.util.concurrent.TimeUnit; -import java.util.stream.Stream; - -final class Transport implements Closeable { - - private static final Log logger = LogFactory.getLog(Transport.class); - - private final CloseableHttpClient client; - private final ConnectionPool connectionPool; - private final long maxRetryTimeout; - - Transport(CloseableHttpClient client, ConnectionPool connectionPool, long maxRetryTimeout) { - Objects.requireNonNull(client, "client cannot be null"); - Objects.requireNonNull(connectionPool, "connectionPool cannot be null"); - if (maxRetryTimeout <= 0) { - throw new IllegalArgumentException("maxRetryTimeout must be greater than 0"); - } - this.client = client; - this.connectionPool = connectionPool; - this.maxRetryTimeout = maxRetryTimeout; - } - - ElasticsearchResponse performRequest(String method, String endpoint, Map params, HttpEntity entity) throws IOException { - URI uri = buildUri(endpoint, params); - HttpRequestBase request = createHttpRequest(method, uri, entity); - Iterator connectionIterator = connectionPool.nextConnection().iterator(); - 
if (connectionIterator.hasNext() == false) { - Connection connection = connectionPool.lastResortConnection(); - logger.info("no healthy nodes available, trying " + connection.getHost()); - return performRequest(request, Stream.of(connection).iterator()); - } - return performRequest(request, connectionIterator); - } - - private ElasticsearchResponse performRequest(HttpRequestBase request, Iterator connectionIterator) throws IOException { - //we apply a soft margin so that e.g. if a request took 59 seconds and timeout is set to 60 we don't do another attempt - long retryTimeout = Math.round(this.maxRetryTimeout / (float)100 * 98); - IOException lastSeenException = null; - long startTime = System.nanoTime(); - - while (connectionIterator.hasNext()) { - Connection connection = connectionIterator.next(); - - if (lastSeenException != null) { - long timeElapsed = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startTime); - long timeout = retryTimeout - timeElapsed; - if (timeout <= 0) { - IOException retryTimeoutException = new IOException( - "request retries exceeded max retry timeout [" + retryTimeout + "]"); - retryTimeoutException.addSuppressed(lastSeenException); - throw retryTimeoutException; - } - } - - try { - connectionPool.beforeAttempt(connection); - } catch(IOException e) { - lastSeenException = addSuppressedException(lastSeenException, e); - continue; - } - - try { - ElasticsearchResponse response = performRequest(request, connection); - connectionPool.onSuccess(connection); - return response; - } catch(ElasticsearchResponseException e) { - if (e.isRecoverable()) { - connectionPool.onFailure(connection); - lastSeenException = addSuppressedException(lastSeenException, e); - } else { - //don't retry and call onSuccess as the error should be a request problem - connectionPool.onSuccess(connection); - throw e; - } - } catch(IOException e) { - connectionPool.onFailure(connection); - lastSeenException = addSuppressedException(lastSeenException, e); - } - } - 
assert lastSeenException != null; - throw lastSeenException; - } - - private ElasticsearchResponse performRequest(HttpRequestBase request, Connection connection) throws IOException { - CloseableHttpResponse response; - try { - response = client.execute(connection.getHost(), request); - } catch(IOException e) { - RequestLogger.log(logger, "request failed", request.getRequestLine(), connection.getHost(), e); - throw e; - } finally { - request.reset(); - } - StatusLine statusLine = response.getStatusLine(); - //TODO make ignore status code configurable. rest-spec and tests support that parameter. - if (statusLine.getStatusCode() < 300 || - request.getMethod().equals(HttpHead.METHOD_NAME) && statusLine.getStatusCode() == 404) { - RequestLogger.log(logger, "request succeeded", request.getRequestLine(), connection.getHost(), response.getStatusLine()); - return new ElasticsearchResponse(request.getRequestLine(), connection.getHost(), response); - } else { - EntityUtils.consume(response.getEntity()); - RequestLogger.log(logger, "request failed", request.getRequestLine(), connection.getHost(), response.getStatusLine()); - throw new ElasticsearchResponseException(request.getRequestLine(), connection.getHost(), statusLine); - } - } - - private static IOException addSuppressedException(IOException suppressedException, IOException currentException) { - if (suppressedException != null) { - currentException.addSuppressed(suppressedException); - } - return currentException; - } - - private static HttpRequestBase createHttpRequest(String method, URI uri, HttpEntity entity) { - switch(method.toUpperCase(Locale.ROOT)) { - case HttpDeleteWithEntity.METHOD_NAME: - HttpDeleteWithEntity httpDeleteWithEntity = new HttpDeleteWithEntity(uri); - addRequestBody(httpDeleteWithEntity, entity); - return httpDeleteWithEntity; - case HttpGetWithEntity.METHOD_NAME: - HttpGetWithEntity httpGetWithEntity = new HttpGetWithEntity(uri); - addRequestBody(httpGetWithEntity, entity); - return 
httpGetWithEntity; - case HttpHead.METHOD_NAME: - if (entity != null) { - throw new UnsupportedOperationException("HEAD with body is not supported"); - } - return new HttpHead(uri); - case HttpPost.METHOD_NAME: - HttpPost httpPost = new HttpPost(uri); - addRequestBody(httpPost, entity); - return httpPost; - case HttpPut.METHOD_NAME: - HttpPut httpPut = new HttpPut(uri); - addRequestBody(httpPut, entity); - return httpPut; - default: - throw new UnsupportedOperationException("http method not supported: " + method); - } - } - - private static void addRequestBody(HttpEntityEnclosingRequestBase httpRequest, HttpEntity entity) { - if (entity != null) { - httpRequest.setEntity(entity); - } - } - - private static URI buildUri(String path, Map params) { - try { - URIBuilder uriBuilder = new URIBuilder(path); - for (Map.Entry param : params.entrySet()) { - uriBuilder.addParameter(param.getKey(), param.getValue().toString()); - } - return uriBuilder.build(); - } catch(URISyntaxException e) { - throw new IllegalArgumentException(e.getMessage(), e); - } - } - - @Override - public void close() throws IOException { - connectionPool.close(); - client.close(); - } -} diff --git a/client/src/test/java/org/elasticsearch/client/TransportTests.java b/client/src/test/java/org/elasticsearch/client/RestClientTests.java similarity index 80% rename from client/src/test/java/org/elasticsearch/client/TransportTests.java rename to client/src/test/java/org/elasticsearch/client/RestClientTests.java index 1efafd438b0d5..52e1883159124 100644 --- a/client/src/test/java/org/elasticsearch/client/TransportTests.java +++ b/client/src/test/java/org/elasticsearch/client/RestClientTests.java @@ -28,13 +28,13 @@ import java.util.logging.LogManager; import java.util.stream.Stream; -public class TransportTests extends LuceneTestCase { +public class RestClientTests extends LuceneTestCase { static { LogManager.getLogManager().reset(); } - public void testConstructor() { + public void testConstructor() throws 
IOException { CloseableHttpClient httpClient = HttpClientBuilder.create().build(); ConnectionPool connectionPool = new ConnectionPool() { @Override @@ -69,27 +69,28 @@ public void close() throws IOException { }; try { - new Transport(null, connectionPool, RandomInts.randomIntBetween(random(), 1, Integer.MAX_VALUE)); + new RestClient(null, connectionPool, RandomInts.randomIntBetween(random(), 1, Integer.MAX_VALUE)); fail("transport creation should have failed"); } catch(NullPointerException e) { assertEquals(e.getMessage(), "client cannot be null"); } try { - new Transport(httpClient, null, RandomInts.randomIntBetween(random(), 1, Integer.MAX_VALUE)); + new RestClient(httpClient, null, RandomInts.randomIntBetween(random(), 1, Integer.MAX_VALUE)); fail("transport creation should have failed"); } catch(NullPointerException e) { assertEquals(e.getMessage(), "connectionPool cannot be null"); } try { - new Transport(httpClient, connectionPool, RandomInts.randomIntBetween(random(), Integer.MIN_VALUE, 0)); + new RestClient(httpClient, connectionPool, RandomInts.randomIntBetween(random(), Integer.MIN_VALUE, 0)); fail("transport creation should have failed"); } catch(IllegalArgumentException e) { assertEquals(e.getMessage(), "maxRetryTimeout must be greater than 0"); } - Transport transport = new Transport(httpClient, connectionPool, RandomInts.randomIntBetween(random(), 1, Integer.MAX_VALUE)); - assertNotNull(transport); + try(RestClient client = new RestClient(httpClient, connectionPool, RandomInts.randomIntBetween(random(), 1, Integer.MAX_VALUE))) { + assertNotNull(client); + } } } From 9ffdea9515ea8387e5fafc50ab57d753c7e7ebc2 Mon Sep 17 00:00:00 2001 From: javanna Date: Fri, 6 May 2016 10:30:44 +0200 Subject: [PATCH 012/103] remove Scheme enum --- client/build.gradle | 1 + .../client/sniff/SniffingConnectionPool.java | 17 +++----- .../client/sniff/SnifferTests.java | 9 ++--- .../sniff/SniffingConnectionPoolTests.java | 39 ++++++++++++++----- 4 files changed, 40 
insertions(+), 26 deletions(-) diff --git a/client/build.gradle b/client/build.gradle index 7a9377d0acb00..e19e860c35690 100644 --- a/client/build.gradle +++ b/client/build.gradle @@ -30,6 +30,7 @@ dependencies { //compile "org.apache.httpcomponents:httpasyncclient:4.1.1" compile "commons-codec:commons-codec:1.9" compile "commons-logging:commons-logging:1.2" + //jackson is only needed in the sniff package compile "com.fasterxml.jackson.core:jackson-core:2.7.3" testCompile "com.carrotsearch.randomizedtesting:randomizedtesting-runner:${versions.randomizedrunner}" diff --git a/client/src/main/java/org/elasticsearch/client/sniff/SniffingConnectionPool.java b/client/src/main/java/org/elasticsearch/client/sniff/SniffingConnectionPool.java index 9bce0b43f0b4b..399bfd8e39b82 100644 --- a/client/src/main/java/org/elasticsearch/client/sniff/SniffingConnectionPool.java +++ b/client/src/main/java/org/elasticsearch/client/sniff/SniffingConnectionPool.java @@ -30,7 +30,6 @@ import java.io.IOException; import java.util.Iterator; import java.util.List; -import java.util.Locale; import java.util.Objects; import java.util.concurrent.Executors; import java.util.concurrent.ScheduledExecutorService; @@ -51,7 +50,7 @@ public class SniffingConnectionPool extends AbstractStaticConnectionPool { //TODO do we still need the sniff request timeout? or should we just use a low connect timeout? public SniffingConnectionPool(int sniffInterval, boolean sniffOnFailure, int sniffAfterFailureDelay, - CloseableHttpClient client, RequestConfig sniffRequestConfig, int sniffRequestTimeout, Scheme scheme, + CloseableHttpClient client, RequestConfig sniffRequestConfig, int sniffRequestTimeout, String scheme, HttpHost... 
hosts) { if (sniffInterval <= 0) { throw new IllegalArgumentException("sniffInterval must be greater than 0"); @@ -60,11 +59,14 @@ public SniffingConnectionPool(int sniffInterval, boolean sniffOnFailure, int sni throw new IllegalArgumentException("sniffAfterFailureDelay must be greater than 0"); } Objects.requireNonNull(scheme, "scheme cannot be null"); + if (scheme.equals("http") == false && scheme.equals("https") == false) { + throw new IllegalArgumentException("scheme must be either http or https"); + } if (hosts == null || hosts.length == 0) { throw new IllegalArgumentException("no hosts provided"); } this.sniffOnFailure = sniffOnFailure; - this.sniffer = new Sniffer(client, sniffRequestConfig, sniffRequestTimeout, scheme.toString()); + this.sniffer = new Sniffer(client, sniffRequestConfig, sniffRequestTimeout, scheme); this.connections = createConnections(hosts); this.snifferTask = new SnifferTask(sniffInterval, sniffAfterFailureDelay); } @@ -93,15 +95,6 @@ public void close() throws IOException { snifferTask.shutdown(); } - public enum Scheme { - HTTP, HTTPS; - - @Override - public String toString() { - return name().toLowerCase(Locale.ROOT); - } - } - private class SnifferTask implements Runnable { private final int sniffInterval; private final int sniffAfterFailureDelay; diff --git a/client/src/test/java/org/elasticsearch/client/sniff/SnifferTests.java b/client/src/test/java/org/elasticsearch/client/sniff/SnifferTests.java index 28a9697ac37d6..4ef17579856e2 100644 --- a/client/src/test/java/org/elasticsearch/client/sniff/SnifferTests.java +++ b/client/src/test/java/org/elasticsearch/client/sniff/SnifferTests.java @@ -34,8 +34,6 @@ import org.apache.http.impl.client.HttpClientBuilder; import org.apache.lucene.util.LuceneTestCase; import org.elasticsearch.client.ElasticsearchResponseException; -import org.elasticsearch.client.sniff.Sniffer; -import org.elasticsearch.client.sniff.SniffingConnectionPool; import org.junit.After; import org.junit.Before; @@ -46,6 
+44,7 @@ import java.net.URLDecoder; import java.nio.charset.StandardCharsets; import java.util.ArrayList; +import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; @@ -66,14 +65,14 @@ public class SnifferTests extends LuceneTestCase { } private int sniffRequestTimeout; - private SniffingConnectionPool.Scheme scheme; + private String scheme; private SniffResponse sniffResponse; private MockWebServer server; @Before public void startMockWebServer() throws IOException { this.sniffRequestTimeout = RandomInts.randomIntBetween(random(), 1000, 10000); - this.scheme = RandomPicks.randomFrom(random(), SniffingConnectionPool.Scheme.values()); + this.scheme = RandomPicks.randomFrom(random(), Arrays.asList("http", "https")); if (rarely()) { this.sniffResponse = SniffResponse.buildFailure(); } else { @@ -139,7 +138,7 @@ public MockResponse dispatch(RecordedRequest request) throws InterruptedExceptio return server; } - private static SniffResponse buildSniffResponse(SniffingConnectionPool.Scheme scheme) throws IOException { + private static SniffResponse buildSniffResponse(String scheme) throws IOException { int numNodes = RandomInts.randomIntBetween(random(), 1, 5); List hosts = new ArrayList<>(numNodes); JsonFactory jsonFactory = new JsonFactory(); diff --git a/client/src/test/java/org/elasticsearch/client/sniff/SniffingConnectionPoolTests.java b/client/src/test/java/org/elasticsearch/client/sniff/SniffingConnectionPoolTests.java index 06e6b43236bea..4e54f8074dba8 100644 --- a/client/src/test/java/org/elasticsearch/client/sniff/SniffingConnectionPoolTests.java +++ b/client/src/test/java/org/elasticsearch/client/sniff/SniffingConnectionPoolTests.java @@ -37,6 +37,7 @@ public class SniffingConnectionPoolTests extends LuceneTestCase { public void testConstructor() throws Exception { CloseableHttpClient httpClient = HttpClientBuilder.create().build(); + String[] schemes = new String[]{"http", "https"}; int numNodes = 
RandomInts.randomIntBetween(random(), 1, 5); HttpHost[] hosts = new HttpHost[numNodes]; for (int i = 0; i < numNodes; i++) { @@ -47,7 +48,7 @@ public void testConstructor() throws Exception { RandomInts.randomIntBetween(random(), Integer.MIN_VALUE, 0), random().nextBoolean(), RandomInts.randomIntBetween(random(), 1, Integer.MAX_VALUE), httpClient, RequestConfig.DEFAULT, RandomInts.randomIntBetween(random(), 1, Integer.MAX_VALUE), - RandomPicks.randomFrom(random(), SniffingConnectionPool.Scheme.values()), hosts)) { + RandomPicks.randomFrom(random(), schemes), hosts)) { fail("pool creation should have failed " + connectionPool); } catch(IllegalArgumentException e) { @@ -58,7 +59,7 @@ public void testConstructor() throws Exception { RandomInts.randomIntBetween(random(), 1, Integer.MAX_VALUE), random().nextBoolean(), RandomInts.randomIntBetween(random(), Integer.MIN_VALUE, 0), httpClient, RequestConfig.DEFAULT, RandomInts.randomIntBetween(random(), 1, Integer.MAX_VALUE), - RandomPicks.randomFrom(random(), SniffingConnectionPool.Scheme.values()), hosts)) { + RandomPicks.randomFrom(random(), schemes), hosts)) { fail("pool creation should have failed " + connectionPool); } catch(IllegalArgumentException e) { assertEquals(e.getMessage(), "sniffAfterFailureDelay must be greater than 0"); @@ -68,7 +69,7 @@ public void testConstructor() throws Exception { RandomInts.randomIntBetween(random(), 1, Integer.MAX_VALUE), random().nextBoolean(), RandomInts.randomIntBetween(random(), 1, Integer.MAX_VALUE), null, RequestConfig.DEFAULT, RandomInts.randomIntBetween(random(), 1, Integer.MAX_VALUE), - RandomPicks.randomFrom(random(), SniffingConnectionPool.Scheme.values()), hosts)) { + RandomPicks.randomFrom(random(), schemes), hosts)) { fail("pool creation should have failed " + connectionPool); } catch(NullPointerException e) { assertEquals(e.getMessage(), "client cannot be null"); @@ -78,7 +79,7 @@ public void testConstructor() throws Exception { RandomInts.randomIntBetween(random(), 
1, Integer.MAX_VALUE), random().nextBoolean(), RandomInts.randomIntBetween(random(), 1, Integer.MAX_VALUE), httpClient, null, RandomInts.randomIntBetween(random(), 1, Integer.MAX_VALUE), - RandomPicks.randomFrom(random(), SniffingConnectionPool.Scheme.values()), hosts)) { + RandomPicks.randomFrom(random(), schemes), hosts)) { fail("pool creation should have failed " + connectionPool); } catch(NullPointerException e) { assertEquals(e.getMessage(), "sniffRequestConfig cannot be null"); @@ -88,17 +89,37 @@ public void testConstructor() throws Exception { RandomInts.randomIntBetween(random(), 1, Integer.MAX_VALUE), random().nextBoolean(), RandomInts.randomIntBetween(random(), 1, Integer.MAX_VALUE), httpClient, RequestConfig.DEFAULT, RandomInts.randomIntBetween(random(), Integer.MIN_VALUE, 0), - RandomPicks.randomFrom(random(), SniffingConnectionPool.Scheme.values()), hosts)) { + RandomPicks.randomFrom(random(), schemes), hosts)) { fail("pool creation should have failed " + connectionPool); } catch(IllegalArgumentException e) { assertEquals(e.getMessage(), "sniffRequestTimeout must be greater than 0"); } + try (SniffingConnectionPool connectionPool = new SniffingConnectionPool( + RandomInts.randomIntBetween(random(), 1, Integer.MAX_VALUE), random().nextBoolean(), + RandomInts.randomIntBetween(random(), 1, Integer.MAX_VALUE), httpClient, RequestConfig.DEFAULT, + RandomInts.randomIntBetween(random(), Integer.MIN_VALUE, 0), + null, hosts)) { + fail("pool creation should have failed " + connectionPool); + } catch(NullPointerException e) { + assertEquals(e.getMessage(), "scheme cannot be null"); + } + + try (SniffingConnectionPool connectionPool = new SniffingConnectionPool( + RandomInts.randomIntBetween(random(), 1, Integer.MAX_VALUE), random().nextBoolean(), + RandomInts.randomIntBetween(random(), 1, Integer.MAX_VALUE), httpClient, RequestConfig.DEFAULT, + RandomInts.randomIntBetween(random(), Integer.MIN_VALUE, 0), + "whatever", hosts)) { + fail("pool creation should have 
failed " + connectionPool); + } catch(IllegalArgumentException e) { + assertEquals(e.getMessage(), "scheme must be either http or https"); + } + try (SniffingConnectionPool connectionPool = new SniffingConnectionPool( RandomInts.randomIntBetween(random(), 1, Integer.MAX_VALUE), random().nextBoolean(), RandomInts.randomIntBetween(random(), 1, Integer.MAX_VALUE), httpClient, RequestConfig.DEFAULT, RandomInts.randomIntBetween(random(), 1, Integer.MAX_VALUE), - RandomPicks.randomFrom(random(), SniffingConnectionPool.Scheme.values()), (HttpHost[])null)) { + RandomPicks.randomFrom(random(), schemes), (HttpHost[])null)) { fail("pool creation should have failed " + connectionPool); } catch(IllegalArgumentException e) { assertEquals(e.getMessage(), "no hosts provided"); @@ -108,7 +129,7 @@ public void testConstructor() throws Exception { RandomInts.randomIntBetween(random(), 1, Integer.MAX_VALUE), random().nextBoolean(), RandomInts.randomIntBetween(random(), 1, Integer.MAX_VALUE), httpClient, RequestConfig.DEFAULT, RandomInts.randomIntBetween(random(), 1, Integer.MAX_VALUE), - RandomPicks.randomFrom(random(), SniffingConnectionPool.Scheme.values()), (HttpHost) null)) { + RandomPicks.randomFrom(random(), schemes), (HttpHost) null)) { fail("pool creation should have failed " + connectionPool); } catch(NullPointerException e) { assertEquals(e.getMessage(), "host cannot be null"); @@ -118,7 +139,7 @@ public void testConstructor() throws Exception { RandomInts.randomIntBetween(random(), 1, Integer.MAX_VALUE), random().nextBoolean(), RandomInts.randomIntBetween(random(), 1, Integer.MAX_VALUE), httpClient, RequestConfig.DEFAULT, RandomInts.randomIntBetween(random(), 1, Integer.MAX_VALUE), - RandomPicks.randomFrom(random(), SniffingConnectionPool.Scheme.values()))) { + RandomPicks.randomFrom(random(), schemes))) { fail("pool creation should have failed " + connectionPool); } catch(IllegalArgumentException e) { assertEquals(e.getMessage(), "no hosts provided"); @@ -128,7 +149,7 @@ 
public void testConstructor() throws Exception { RandomInts.randomIntBetween(random(), 1, Integer.MAX_VALUE), random().nextBoolean(), RandomInts.randomIntBetween(random(), 1, Integer.MAX_VALUE), httpClient, RequestConfig.DEFAULT, RandomInts.randomIntBetween(random(), 1, Integer.MAX_VALUE), - RandomPicks.randomFrom(random(), SniffingConnectionPool.Scheme.values()), hosts)) { + RandomPicks.randomFrom(random(), schemes), hosts)) { assertNotNull(sniffingConnectionPool); } } From d7c41764f2072a4d9ec3cbbdc3b0d5a2b57ed30b Mon Sep 17 00:00:00 2001 From: javanna Date: Fri, 6 May 2016 11:04:26 +0200 Subject: [PATCH 013/103] add some javadocs to connection pool classes --- .../client/AbstractStaticConnectionPool.java | 20 +++++++++++++------ .../client/StaticConnectionPool.java | 3 +++ .../elasticsearch/client/sniff/Sniffer.java | 4 ++-- .../client/sniff/SniffingConnectionPool.java | 4 ++++ 4 files changed, 23 insertions(+), 8 deletions(-) diff --git a/client/src/main/java/org/elasticsearch/client/AbstractStaticConnectionPool.java b/client/src/main/java/org/elasticsearch/client/AbstractStaticConnectionPool.java index 192c1b0f35a14..3fe4ead4bbe89 100644 --- a/client/src/main/java/org/elasticsearch/client/AbstractStaticConnectionPool.java +++ b/client/src/main/java/org/elasticsearch/client/AbstractStaticConnectionPool.java @@ -29,19 +29,17 @@ import java.util.List; import java.util.Objects; import java.util.concurrent.atomic.AtomicInteger; -import java.util.function.Predicate; import java.util.stream.Stream; /** - * Base static connection pool implementation that deals with mutable connections. Marks connections as dead/alive when needed. - * Provides a stream of alive connections or dead ones that should be retried for each {@link #nextConnection()} call, which - * allows to filter connections through a customizable {@link Predicate}, called connection selector. + * Base static connection pool implementation that marks connections as dead/alive when needed. 
+ * Provides a stream of alive connections or dead ones that should be retried for each {@link #nextConnection()} call. * In case the returned stream is empty a last resort dead connection should be retrieved by calling {@link #lastResortConnection()} - * and resurrected so that a single request attempt can be performed. + * and resurrected so that a last resort request attempt can be performed. * The {@link #onSuccess(Connection)} method marks the connection provided as an argument alive. * The {@link #onFailure(Connection)} method marks the connection provided as an argument dead. * This base implementation doesn't define the list implementation that stores connections, so that concurrency can be - * handled in the subclasses depending on the usecase (e.g. defining the list volatile when needed). + * handled in subclasses depending on the usecase (e.g. defining the list volatile or final when needed). */ public abstract class AbstractStaticConnectionPool implements ConnectionPool { @@ -49,6 +47,12 @@ public abstract class AbstractStaticConnectionPool implements ConnectionPool { private final AtomicInteger lastConnectionIndex = new AtomicInteger(0); + /** + * Allows to retrieve the concrete list of connections. Not defined directly as a member + * of this class as subclasses may need to handle concurrency if the list can change, for + * instance defining the field as volatile. On the other hand static implementations + * can just make the list final instead. + */ protected abstract List getConnections(); @Override @@ -64,6 +68,10 @@ public final Stream nextConnection() { return sortedConnections.stream().filter(connection -> connection.isAlive() || connection.shouldBeRetried()); } + /** + * Helper method to be used by subclasses when needing to create a new list + * of connections given their corresponding hosts + */ protected List createConnections(HttpHost... 
hosts) { List connections = new ArrayList<>(); for (HttpHost host : hosts) { diff --git a/client/src/main/java/org/elasticsearch/client/StaticConnectionPool.java b/client/src/main/java/org/elasticsearch/client/StaticConnectionPool.java index 3e25818fb3967..5d54e583abe98 100644 --- a/client/src/main/java/org/elasticsearch/client/StaticConnectionPool.java +++ b/client/src/main/java/org/elasticsearch/client/StaticConnectionPool.java @@ -33,6 +33,9 @@ import java.util.List; import java.util.Objects; +/** + * Static implementation of {@link ConnectionPool}. Its underlying list of connections is immutable. + */ public class StaticConnectionPool extends AbstractStaticConnectionPool { private static final Log logger = LogFactory.getLog(StaticConnectionPool.class); diff --git a/client/src/main/java/org/elasticsearch/client/sniff/Sniffer.java b/client/src/main/java/org/elasticsearch/client/sniff/Sniffer.java index a812804cbce7e..ebed97e4a4e19 100644 --- a/client/src/main/java/org/elasticsearch/client/sniff/Sniffer.java +++ b/client/src/main/java/org/elasticsearch/client/sniff/Sniffer.java @@ -43,9 +43,9 @@ import java.util.Objects; /** - * Calls nodes info api and returns a list of http hosts extracted from it + * Calls nodes info api and returns a list of http hosts extracted from it. */ -//TODO this could potentially a call to _cat/nodes (although it doesn't support timeout param), but how would we handle bw comp with 2.x? 
+//TODO This could potentially be using _cat/nodes which wouldn't require jackson as a dependency, but we'd have bw comp problems with 2.x final class Sniffer { private static final Log logger = LogFactory.getLog(Sniffer.class); diff --git a/client/src/main/java/org/elasticsearch/client/sniff/SniffingConnectionPool.java b/client/src/main/java/org/elasticsearch/client/sniff/SniffingConnectionPool.java index 399bfd8e39b82..3fb7885fd14b8 100644 --- a/client/src/main/java/org/elasticsearch/client/sniff/SniffingConnectionPool.java +++ b/client/src/main/java/org/elasticsearch/client/sniff/SniffingConnectionPool.java @@ -39,6 +39,10 @@ import java.util.function.Predicate; import java.util.stream.Stream; +/** + * Connection pool implementation that sniffs nodes from elasticsearch at regular intervals. + * Can optionally sniff nodes on each failure as well. + */ public class SniffingConnectionPool extends AbstractStaticConnectionPool { private static final Log logger = LogFactory.getLog(SniffingConnectionPool.class); From e85ed3eb5251a2da080d2d349f8dc4ed826e30ef Mon Sep 17 00:00:00 2001 From: javanna Date: Fri, 6 May 2016 11:16:28 +0200 Subject: [PATCH 014/103] remove pinging from static connection pool, can be replaced by a low connect timeout on each request --- .../elasticsearch/client/ConnectionPool.java | 9 +--- .../org/elasticsearch/client/RestClient.java | 7 --- .../client/StaticConnectionPool.java | 48 +------------------ .../client/sniff/SniffingConnectionPool.java | 5 -- .../elasticsearch/client/RestClientTests.java | 5 -- .../client/StaticConnectionPoolTests.java | 25 ++-------- 6 files changed, 6 insertions(+), 93 deletions(-) diff --git a/client/src/main/java/org/elasticsearch/client/ConnectionPool.java b/client/src/main/java/org/elasticsearch/client/ConnectionPool.java index 7e47a8192564a..b76376abf107d 100644 --- a/client/src/main/java/org/elasticsearch/client/ConnectionPool.java +++ b/client/src/main/java/org/elasticsearch/client/ConnectionPool.java @@ -26,8 
+26,7 @@ /** * Pool of connections to the different hosts that belong to an elasticsearch cluster. * It keeps track of the different hosts to communicate with and allows to retrieve a stream of connections to be used - * for each request. Exposes the needed hooks to be able to eventually mark connections dead or alive and execute - * arbitrary operations before each single request attempt. + * for each request. Exposes the needed hooks to be able to eventually mark connections dead or alive. */ public interface ConnectionPool extends Closeable { @@ -47,12 +46,6 @@ public interface ConnectionPool extends Closeable { */ Connection lastResortConnection(); - /** - * Called before each single request attempt. Allows to execute operations (e.g. ping) before each request. - * Receives as an argument the connection that is going to be used for the request. - */ - void beforeAttempt(Connection connection) throws IOException; - /** * Called after each successful request call. * Receives as an argument the connection that was used for the successful request. diff --git a/client/src/main/java/org/elasticsearch/client/RestClient.java b/client/src/main/java/org/elasticsearch/client/RestClient.java index 3e59cc297e434..f2ff7c615229d 100644 --- a/client/src/main/java/org/elasticsearch/client/RestClient.java +++ b/client/src/main/java/org/elasticsearch/client/RestClient.java @@ -95,13 +95,6 @@ private ElasticsearchResponse performRequest(HttpRequestBase request, Iterator connections; - public StaticConnectionPool(CloseableHttpClient client, boolean pingEnabled, RequestConfig pingRequestConfig, HttpHost... hosts) { - Objects.requireNonNull(client, "client cannot be null"); - Objects.requireNonNull(pingRequestConfig, "pingRequestConfig cannot be null"); + public StaticConnectionPool(HttpHost... 
hosts) { if (hosts == null || hosts.length == 0) { throw new IllegalArgumentException("no hosts provided"); } - this.client = client; - this.pingEnabled = pingEnabled; - this.pingRequestConfig = pingRequestConfig; this.connections = createConnections(hosts); } @@ -62,33 +43,6 @@ protected List getConnections() { return connections; } - //TODO do we still need pinging? seems like a workaround for some clients that don't support connect timeout but we have that - - @Override - public void beforeAttempt(Connection connection) throws IOException { - if (pingEnabled && connection.shouldBeRetried()) { - HttpHead httpHead = new HttpHead("/"); - httpHead.setConfig(pingRequestConfig); - StatusLine statusLine; - try(CloseableHttpResponse httpResponse = client.execute(connection.getHost(), httpHead)) { - statusLine = httpResponse.getStatusLine(); - EntityUtils.consume(httpResponse.getEntity()); - } catch(IOException e) { - RequestLogger.log(logger, "ping failed", httpHead.getRequestLine(), connection.getHost(), e); - onFailure(connection); - throw e; - } - if (statusLine.getStatusCode() >= 300) { - RequestLogger.log(logger, "ping failed", httpHead.getRequestLine(), connection.getHost(), statusLine); - onFailure(connection); - throw new ElasticsearchResponseException(httpHead.getRequestLine(), connection.getHost(), statusLine); - } else { - RequestLogger.log(logger, "ping succeeded", httpHead.getRequestLine(), connection.getHost(), statusLine); - onSuccess(connection); - } - } - } - @Override public void close() throws IOException { //no-op nothing to close diff --git a/client/src/main/java/org/elasticsearch/client/sniff/SniffingConnectionPool.java b/client/src/main/java/org/elasticsearch/client/sniff/SniffingConnectionPool.java index 3fb7885fd14b8..934352deee0e5 100644 --- a/client/src/main/java/org/elasticsearch/client/sniff/SniffingConnectionPool.java +++ b/client/src/main/java/org/elasticsearch/client/sniff/SniffingConnectionPool.java @@ -80,11 +80,6 @@ protected List 
getConnections() { return this.connections; } - @Override - public void beforeAttempt(Connection connection) throws IOException { - - } - @Override public void onFailure(Connection connection) throws IOException { super.onFailure(connection); diff --git a/client/src/test/java/org/elasticsearch/client/RestClientTests.java b/client/src/test/java/org/elasticsearch/client/RestClientTests.java index 52e1883159124..6c3850fafcf24 100644 --- a/client/src/test/java/org/elasticsearch/client/RestClientTests.java +++ b/client/src/test/java/org/elasticsearch/client/RestClientTests.java @@ -47,11 +47,6 @@ public Connection lastResortConnection() { return null; } - @Override - public void beforeAttempt(Connection connection) throws IOException { - - } - @Override public void onSuccess(Connection connection) { diff --git a/client/src/test/java/org/elasticsearch/client/StaticConnectionPoolTests.java b/client/src/test/java/org/elasticsearch/client/StaticConnectionPoolTests.java index 60398b71c0d68..c777666e98bdf 100644 --- a/client/src/test/java/org/elasticsearch/client/StaticConnectionPoolTests.java +++ b/client/src/test/java/org/elasticsearch/client/StaticConnectionPoolTests.java @@ -21,9 +21,6 @@ import com.carrotsearch.randomizedtesting.generators.RandomInts; import org.apache.http.HttpHost; -import org.apache.http.client.config.RequestConfig; -import org.apache.http.impl.client.CloseableHttpClient; -import org.apache.http.impl.client.HttpClientBuilder; import org.apache.lucene.util.LuceneTestCase; import java.util.logging.LogManager; @@ -35,7 +32,6 @@ public class StaticConnectionPoolTests extends LuceneTestCase { } public void testConstructor() { - CloseableHttpClient httpClient = HttpClientBuilder.create().build(); int numNodes = RandomInts.randomIntBetween(random(), 1, 5); HttpHost[] hosts = new HttpHost[numNodes]; for (int i = 0; i < numNodes; i++) { @@ -43,37 +39,24 @@ public void testConstructor() { } try { - new StaticConnectionPool(null, random().nextBoolean(), 
RequestConfig.DEFAULT, hosts); - } catch(NullPointerException e) { - assertEquals(e.getMessage(), "client cannot be null"); - } - - try { - new StaticConnectionPool(httpClient, random().nextBoolean(), null, hosts); - } catch(NullPointerException e) { - assertEquals(e.getMessage(), "pingRequestConfig cannot be null"); - } - - try { - new StaticConnectionPool(httpClient, random().nextBoolean(), RequestConfig.DEFAULT, (HttpHost) null); + new StaticConnectionPool((HttpHost) null); } catch(NullPointerException e) { assertEquals(e.getMessage(), "host cannot be null"); } try { - new StaticConnectionPool(httpClient, random().nextBoolean(), RequestConfig.DEFAULT, (HttpHost[])null); + new StaticConnectionPool((HttpHost[])null); } catch(IllegalArgumentException e) { assertEquals(e.getMessage(), "no hosts provided"); } try { - new StaticConnectionPool(httpClient, random().nextBoolean(), RequestConfig.DEFAULT); + new StaticConnectionPool(); } catch(IllegalArgumentException e) { assertEquals(e.getMessage(), "no hosts provided"); } - StaticConnectionPool staticConnectionPool = new StaticConnectionPool(httpClient, random().nextBoolean(), RequestConfig.DEFAULT, - hosts); + StaticConnectionPool staticConnectionPool = new StaticConnectionPool(hosts); assertNotNull(staticConnectionPool); } } From e77ab87926d5c0025882557a5d0948029d4d193e Mon Sep 17 00:00:00 2001 From: javanna Date: Fri, 6 May 2016 15:04:53 +0200 Subject: [PATCH 015/103] return the response as part of ElasticsearchResponseException --- .../ElasticsearchResponseException.java | 23 ++++++++++--------- .../org/elasticsearch/client/RestClient.java | 4 +--- .../elasticsearch/client/sniff/Sniffer.java | 4 +--- .../client/sniff/SnifferTests.java | 4 ++-- 4 files changed, 16 insertions(+), 19 deletions(-) diff --git a/client/src/main/java/org/elasticsearch/client/ElasticsearchResponseException.java b/client/src/main/java/org/elasticsearch/client/ElasticsearchResponseException.java index 2d9ceead5ed56..291b2290e46e2 100644 --- 
a/client/src/main/java/org/elasticsearch/client/ElasticsearchResponseException.java +++ b/client/src/main/java/org/elasticsearch/client/ElasticsearchResponseException.java @@ -21,7 +21,7 @@ import org.apache.http.HttpHost; import org.apache.http.RequestLine; -import org.apache.http.StatusLine; +import org.apache.http.client.methods.CloseableHttpResponse; import java.io.IOException; @@ -32,25 +32,26 @@ public class ElasticsearchResponseException extends IOException { private final HttpHost host; private final RequestLine requestLine; - private final StatusLine statusLine; + private final CloseableHttpResponse response; - public ElasticsearchResponseException(RequestLine requestLine, HttpHost host, StatusLine statusLine) { - super(buildMessage(requestLine, host, statusLine)); + public ElasticsearchResponseException(RequestLine requestLine, HttpHost host, CloseableHttpResponse response) { + super(buildMessage(requestLine, host, response)); this.host = host; this.requestLine = requestLine; - this.statusLine = statusLine; + this.response = response; } - private static String buildMessage(RequestLine requestLine, HttpHost host, StatusLine statusLine) { - return requestLine.getMethod() + " " + host + requestLine.getUri() + ": " + statusLine.toString(); + private static String buildMessage(RequestLine requestLine, HttpHost host, CloseableHttpResponse response) { + return requestLine.getMethod() + " " + host + requestLine.getUri() + ": " + response.getStatusLine().toString(); } /** * Returns whether the error is recoverable or not, hence whether the same request should be retried on other nodes or not */ public boolean isRecoverable() { + int statusCode = response.getStatusLine().getStatusCode(); //clients don't retry on 500 because elasticsearch still misuses it instead of 400 in some places - return statusLine.getStatusCode() >= 502 && statusLine.getStatusCode() <= 504; + return statusCode >= 502 && statusCode <= 504; } /** @@ -68,9 +69,9 @@ public RequestLine 
getRequestLine() { } /** - * Returns the {@link StatusLine} that was returned by elasticsearch + * Returns the {@link CloseableHttpResponse} that was returned by elasticsearch */ - public StatusLine getStatusLine() { - return statusLine; + public CloseableHttpResponse getResponse() { + return response; } } diff --git a/client/src/main/java/org/elasticsearch/client/RestClient.java b/client/src/main/java/org/elasticsearch/client/RestClient.java index f2ff7c615229d..560c285b417c1 100644 --- a/client/src/main/java/org/elasticsearch/client/RestClient.java +++ b/client/src/main/java/org/elasticsearch/client/RestClient.java @@ -30,7 +30,6 @@ import org.apache.http.client.methods.HttpRequestBase; import org.apache.http.client.utils.URIBuilder; import org.apache.http.impl.client.CloseableHttpClient; -import org.apache.http.util.EntityUtils; import java.io.Closeable; import java.io.IOException; @@ -134,9 +133,8 @@ private ElasticsearchResponse performRequest(HttpRequestBase request, Connection RequestLogger.log(logger, "request succeeded", request.getRequestLine(), connection.getHost(), response.getStatusLine()); return new ElasticsearchResponse(request.getRequestLine(), connection.getHost(), response); } else { - EntityUtils.consume(response.getEntity()); RequestLogger.log(logger, "request failed", request.getRequestLine(), connection.getHost(), response.getStatusLine()); - throw new ElasticsearchResponseException(request.getRequestLine(), connection.getHost(), statusLine); + throw new ElasticsearchResponseException(request.getRequestLine(), connection.getHost(), response); } } diff --git a/client/src/main/java/org/elasticsearch/client/sniff/Sniffer.java b/client/src/main/java/org/elasticsearch/client/sniff/Sniffer.java index ebed97e4a4e19..77eb07e00e0ef 100644 --- a/client/src/main/java/org/elasticsearch/client/sniff/Sniffer.java +++ b/client/src/main/java/org/elasticsearch/client/sniff/Sniffer.java @@ -31,7 +31,6 @@ import 
org.apache.http.client.methods.CloseableHttpResponse; import org.apache.http.client.methods.HttpGet; import org.apache.http.impl.client.CloseableHttpClient; -import org.apache.http.util.EntityUtils; import org.elasticsearch.client.ElasticsearchResponseException; import org.elasticsearch.client.RequestLogger; @@ -78,8 +77,7 @@ List sniffNodes(HttpHost host) throws IOException { StatusLine statusLine = response.getStatusLine(); if (statusLine.getStatusCode() >= 300) { RequestLogger.log(logger, "sniff failed", httpGet.getRequestLine(), host, statusLine); - EntityUtils.consume(response.getEntity()); - throw new ElasticsearchResponseException(httpGet.getRequestLine(), host, statusLine); + throw new ElasticsearchResponseException(httpGet.getRequestLine(), host, response); } else { List nodes = readHosts(response.getEntity()); RequestLogger.log(logger, "sniff succeeded", httpGet.getRequestLine(), host, statusLine); diff --git a/client/src/test/java/org/elasticsearch/client/sniff/SnifferTests.java b/client/src/test/java/org/elasticsearch/client/sniff/SnifferTests.java index 4ef17579856e2..ea5e4ba22434e 100644 --- a/client/src/test/java/org/elasticsearch/client/sniff/SnifferTests.java +++ b/client/src/test/java/org/elasticsearch/client/sniff/SnifferTests.java @@ -107,10 +107,10 @@ public void testSniffNodes() throws IOException, URISyntaxException { "/_nodes/http?timeout=" + sniffRequestTimeout)); assertThat(e.getMessage(), containsString(Integer.toString(sniffResponse.nodesInfoResponseCode))); assertThat(e.getHost(), equalTo(httpHost)); - assertThat(e.getStatusLine().getStatusCode(), equalTo(sniffResponse.nodesInfoResponseCode)); + assertThat(e.getResponse().getStatusLine().getStatusCode(), equalTo(sniffResponse.nodesInfoResponseCode)); assertThat(e.getRequestLine().toString(), equalTo("GET /_nodes/http?timeout=" + sniffRequestTimeout + "ms HTTP/1.1")); } else { - fail("sniffNodes should have succeeded: " + e.getStatusLine()); + fail("sniffNodes should have succeeded: " + 
e.getResponse().getStatusLine()); } } } From 530ad227a2e7bd63e80f72ee52b2502def1c724f Mon Sep 17 00:00:00 2001 From: javanna Date: Fri, 6 May 2016 15:51:23 +0200 Subject: [PATCH 016/103] prevent unclosed response entities in ElasticsearchResponseException, eagerly read response string in case of error status code --- .../ElasticsearchResponseException.java | 36 +++++------ .../org/elasticsearch/client/RestClient.java | 62 +++++++++---------- .../elasticsearch/client/sniff/Sniffer.java | 7 ++- .../client/sniff/SnifferTests.java | 8 +-- 4 files changed, 54 insertions(+), 59 deletions(-) diff --git a/client/src/main/java/org/elasticsearch/client/ElasticsearchResponseException.java b/client/src/main/java/org/elasticsearch/client/ElasticsearchResponseException.java index 291b2290e46e2..98945b9a726c2 100644 --- a/client/src/main/java/org/elasticsearch/client/ElasticsearchResponseException.java +++ b/client/src/main/java/org/elasticsearch/client/ElasticsearchResponseException.java @@ -21,7 +21,7 @@ import org.apache.http.HttpHost; import org.apache.http.RequestLine; -import org.apache.http.client.methods.CloseableHttpResponse; +import org.apache.http.StatusLine; import java.io.IOException; @@ -32,26 +32,19 @@ public class ElasticsearchResponseException extends IOException { private final HttpHost host; private final RequestLine requestLine; - private final CloseableHttpResponse response; + private final StatusLine statusLine; + private final String responseBody; - public ElasticsearchResponseException(RequestLine requestLine, HttpHost host, CloseableHttpResponse response) { - super(buildMessage(requestLine, host, response)); + public ElasticsearchResponseException(RequestLine requestLine, HttpHost host, StatusLine statusLine, String responseBody) { + super(buildMessage(requestLine, host, statusLine)); this.host = host; this.requestLine = requestLine; - this.response = response; + this.responseBody = responseBody; + this.statusLine = statusLine; } - private static String 
buildMessage(RequestLine requestLine, HttpHost host, CloseableHttpResponse response) { - return requestLine.getMethod() + " " + host + requestLine.getUri() + ": " + response.getStatusLine().toString(); - } - - /** - * Returns whether the error is recoverable or not, hence whether the same request should be retried on other nodes or not - */ - public boolean isRecoverable() { - int statusCode = response.getStatusLine().getStatusCode(); - //clients don't retry on 500 because elasticsearch still misuses it instead of 400 in some places - return statusCode >= 502 && statusCode <= 504; + private static String buildMessage(RequestLine requestLine, HttpHost host, StatusLine statusLine) { + return requestLine.getMethod() + " " + host + requestLine.getUri() + ": " + statusLine.toString(); } /** @@ -68,10 +61,11 @@ public RequestLine getRequestLine() { return requestLine; } - /** - * Returns the {@link CloseableHttpResponse} that was returned by elasticsearch - */ - public CloseableHttpResponse getResponse() { - return response; + public StatusLine getStatusLine() { + return statusLine; + } + + public String getResponseBody() { + return responseBody; } } diff --git a/client/src/main/java/org/elasticsearch/client/RestClient.java b/client/src/main/java/org/elasticsearch/client/RestClient.java index 560c285b417c1..9409a4b7ba36c 100644 --- a/client/src/main/java/org/elasticsearch/client/RestClient.java +++ b/client/src/main/java/org/elasticsearch/client/RestClient.java @@ -21,7 +21,6 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.http.HttpEntity; -import org.apache.http.StatusLine; import org.apache.http.client.methods.CloseableHttpResponse; import org.apache.http.client.methods.HttpEntityEnclosingRequestBase; import org.apache.http.client.methods.HttpHead; @@ -30,6 +29,7 @@ import org.apache.http.client.methods.HttpRequestBase; import org.apache.http.client.utils.URIBuilder; import 
org.apache.http.impl.client.CloseableHttpClient; +import org.apache.http.util.EntityUtils; import java.io.Closeable; import java.io.IOException; @@ -94,50 +94,46 @@ private ElasticsearchResponse performRequest(HttpRequestBase request, Iterator sniffNodes(HttpHost host) throws IOException { StatusLine statusLine = response.getStatusLine(); if (statusLine.getStatusCode() >= 300) { RequestLogger.log(logger, "sniff failed", httpGet.getRequestLine(), host, statusLine); - throw new ElasticsearchResponseException(httpGet.getRequestLine(), host, response); + String responseBody = null; + if (response.getEntity() != null) { + responseBody = EntityUtils.toString(response.getEntity()); + } + throw new ElasticsearchResponseException(httpGet.getRequestLine(), host, response.getStatusLine(), responseBody); } else { List nodes = readHosts(response.getEntity()); RequestLogger.log(logger, "sniff succeeded", httpGet.getRequestLine(), host, statusLine); diff --git a/client/src/test/java/org/elasticsearch/client/sniff/SnifferTests.java b/client/src/test/java/org/elasticsearch/client/sniff/SnifferTests.java index ea5e4ba22434e..6cb7905151fce 100644 --- a/client/src/test/java/org/elasticsearch/client/sniff/SnifferTests.java +++ b/client/src/test/java/org/elasticsearch/client/sniff/SnifferTests.java @@ -89,7 +89,7 @@ public void stopMockWebServer() throws IOException { public void testSniffNodes() throws IOException, URISyntaxException { CloseableHttpClient client = HttpClientBuilder.create().build(); - Sniffer sniffer = new Sniffer(client, RequestConfig.DEFAULT, sniffRequestTimeout, scheme.toString()); + Sniffer sniffer = new Sniffer(client, RequestConfig.DEFAULT, sniffRequestTimeout, scheme); HttpHost httpHost = new HttpHost(server.getHostName(), server.getPort()); try { List sniffedHosts = sniffer.sniffNodes(httpHost); @@ -107,10 +107,10 @@ public void testSniffNodes() throws IOException, URISyntaxException { "/_nodes/http?timeout=" + sniffRequestTimeout)); assertThat(e.getMessage(), 
containsString(Integer.toString(sniffResponse.nodesInfoResponseCode))); assertThat(e.getHost(), equalTo(httpHost)); - assertThat(e.getResponse().getStatusLine().getStatusCode(), equalTo(sniffResponse.nodesInfoResponseCode)); + assertThat(e.getStatusLine().getStatusCode(), equalTo(sniffResponse.nodesInfoResponseCode)); assertThat(e.getRequestLine().toString(), equalTo("GET /_nodes/http?timeout=" + sniffRequestTimeout + "ms HTTP/1.1")); } else { - fail("sniffNodes should have succeeded: " + e.getResponse().getStatusLine()); + fail("sniffNodes should have succeeded: " + e.getStatusLine()); } } } @@ -170,7 +170,7 @@ private static SniffResponse buildSniffResponse(String scheme) throws IOExceptio if (isHttpEnabled) { String host = "host" + i; int port = RandomInts.randomIntBetween(random(), 9200, 9299); - HttpHost httpHost = new HttpHost(host, port, scheme.toString()); + HttpHost httpHost = new HttpHost(host, port, scheme); hosts.add(httpHost); generator.writeObjectFieldStart("http"); if (random().nextBoolean()) { From e7fe397c39712731e54fd4b35182e45010344b87 Mon Sep 17 00:00:00 2001 From: javanna Date: Fri, 6 May 2016 15:53:30 +0200 Subject: [PATCH 017/103] add missing parentheses --- client/src/main/java/org/elasticsearch/client/RestClient.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/client/src/main/java/org/elasticsearch/client/RestClient.java b/client/src/main/java/org/elasticsearch/client/RestClient.java index 9409a4b7ba36c..21ad8f7200bc5 100644 --- a/client/src/main/java/org/elasticsearch/client/RestClient.java +++ b/client/src/main/java/org/elasticsearch/client/RestClient.java @@ -107,7 +107,7 @@ private ElasticsearchResponse performRequest(HttpRequestBase request, Iterator Date: Mon, 9 May 2016 11:14:25 +0200 Subject: [PATCH 018/103] add curl format trace logging for requests and responses --- .../elasticsearch/client/RequestLogger.java | 94 +++++++++++++-- .../org/elasticsearch/client/RestClient.java | 6 +- 
.../elasticsearch/client/sniff/Sniffer.java | 6 +- .../client/RequestLoggerTests.java | 107 ++++++++++++++++++ 4 files changed, 200 insertions(+), 13 deletions(-) create mode 100644 client/src/test/java/org/elasticsearch/client/RequestLoggerTests.java diff --git a/client/src/main/java/org/elasticsearch/client/RequestLogger.java b/client/src/main/java/org/elasticsearch/client/RequestLogger.java index 99ce924c16456..ce2e82ea1e57c 100644 --- a/client/src/main/java/org/elasticsearch/client/RequestLogger.java +++ b/client/src/main/java/org/elasticsearch/client/RequestLogger.java @@ -20,31 +20,111 @@ package org.elasticsearch.client; import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.http.Header; +import org.apache.http.HttpEntity; +import org.apache.http.HttpEntityEnclosingRequest; import org.apache.http.HttpHost; -import org.apache.http.RequestLine; -import org.apache.http.StatusLine; +import org.apache.http.HttpResponse; +import org.apache.http.client.methods.HttpUriRequest; +import org.apache.http.entity.BufferedHttpEntity; +import org.apache.http.util.EntityUtils; +import java.io.BufferedReader; import java.io.IOException; +import java.io.InputStreamReader; /** - * Helper class that exposes static methods to unify the way requests are logged + * Helper class that exposes static methods to unify the way requests are logged. + * Includes trace logging to log complete requests and responses in curl format. 
*/ public final class RequestLogger { + private static final Log tracer = LogFactory.getLog("tracer"); + private RequestLogger() { } /** * Logs a request that yielded a response */ - public static void log(Log logger, String message, RequestLine requestLine, HttpHost host, StatusLine statusLine) { - logger.debug(message + " [" + requestLine.getMethod() + " " + host + requestLine.getUri() + "] [" + statusLine + "]"); + public static void log(Log logger, String message, HttpUriRequest request, HttpHost host, HttpResponse httpResponse) { + logger.debug(message + " [" + request.getMethod() + " " + host + request.getRequestLine().getUri() + + "] [" + httpResponse.getStatusLine() + "]"); + + if (tracer.isTraceEnabled()) { + String requestLine; + try { + requestLine = buildTraceRequest(request, host); + } catch(IOException e) { + requestLine = ""; + tracer.trace("error while reading request for trace purposes", e); + } + String responseLine; + try { + responseLine = buildTraceResponse(httpResponse); + } catch(IOException e) { + responseLine = ""; + tracer.trace("error while reading response for trace purposes", e); + } + tracer.trace(requestLine + '\n' + responseLine); + } } /** * Logs a request that failed */ - public static void log(Log logger, String message, RequestLine requestLine, HttpHost host, IOException e) { - logger.debug(message + " [" + requestLine.getMethod() + " " + host + requestLine.getUri() + "]", e); + public static void log(Log logger, String message, HttpUriRequest request, HttpHost host, IOException e) { + logger.debug(message + " [" + request.getMethod() + " " + host + request.getRequestLine().getUri() + "]", e); + if (logger.isTraceEnabled()) { + String traceRequest; + try { + traceRequest = buildTraceRequest(request, host); + } catch (IOException e1) { + tracer.trace("error while reading request for trace purposes", e); + traceRequest = ""; + } + tracer.trace(traceRequest); + } + } + + /** + * Creates curl output for given request + */ + static 
String buildTraceRequest(HttpUriRequest request, HttpHost host) throws IOException { + String requestLine = "curl -iX " + request.getMethod() + " '" + host + request.getRequestLine().getUri() + "'"; + if (request instanceof HttpEntityEnclosingRequest) { + HttpEntityEnclosingRequest enclosingRequest = (HttpEntityEnclosingRequest) request; + if (enclosingRequest.getEntity() != null) { + requestLine += " -d '"; + HttpEntity entity = new BufferedHttpEntity(enclosingRequest.getEntity()); + enclosingRequest.setEntity(entity); + requestLine += EntityUtils.toString(entity) + "'"; + } + } + return requestLine; + } + + /** + * Creates curl output for given response + */ + static String buildTraceResponse(HttpResponse httpResponse) throws IOException { + String responseLine = "# " + httpResponse.getStatusLine().toString(); + for (Header header : httpResponse.getAllHeaders()) { + responseLine += "\n# " + header.getName() + ": " + header.getValue(); + } + responseLine += "\n#"; + HttpEntity entity = httpResponse.getEntity(); + if (entity != null) { + entity = new BufferedHttpEntity(entity); + httpResponse.setEntity(entity); + try (BufferedReader reader = new BufferedReader(new InputStreamReader(entity.getContent()))) { + String line; + while( (line = reader.readLine()) != null) { + responseLine += "\n# " + line; + } + } + } + return responseLine; } } diff --git a/client/src/main/java/org/elasticsearch/client/RestClient.java b/client/src/main/java/org/elasticsearch/client/RestClient.java index 21ad8f7200bc5..945934d5ffe0f 100644 --- a/client/src/main/java/org/elasticsearch/client/RestClient.java +++ b/client/src/main/java/org/elasticsearch/client/RestClient.java @@ -98,7 +98,7 @@ private ElasticsearchResponse performRequest(HttpRequestBase request, Iterator sniffNodes(HttpHost host) throws IOException { try (CloseableHttpResponse response = client.execute(host, httpGet)) { StatusLine statusLine = response.getStatusLine(); if (statusLine.getStatusCode() >= 300) { - 
RequestLogger.log(logger, "sniff failed", httpGet.getRequestLine(), host, statusLine); + RequestLogger.log(logger, "sniff failed", httpGet, host, response); String responseBody = null; if (response.getEntity() != null) { responseBody = EntityUtils.toString(response.getEntity()); @@ -85,11 +85,11 @@ List sniffNodes(HttpHost host) throws IOException { throw new ElasticsearchResponseException(httpGet.getRequestLine(), host, response.getStatusLine(), responseBody); } else { List nodes = readHosts(response.getEntity()); - RequestLogger.log(logger, "sniff succeeded", httpGet.getRequestLine(), host, statusLine); + RequestLogger.log(logger, "sniff succeeded", httpGet, host, response); return nodes; } } catch(IOException e) { - RequestLogger.log(logger, "sniff failed", httpGet.getRequestLine(), host, e); + RequestLogger.log(logger, "sniff failed", httpGet, host, e); throw e; } } diff --git a/client/src/test/java/org/elasticsearch/client/RequestLoggerTests.java b/client/src/test/java/org/elasticsearch/client/RequestLoggerTests.java new file mode 100644 index 0000000000000..84bc3c032d5d2 --- /dev/null +++ b/client/src/test/java/org/elasticsearch/client/RequestLoggerTests.java @@ -0,0 +1,107 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.client; + +import com.carrotsearch.randomizedtesting.generators.RandomInts; +import org.apache.commons.codec.Charsets; +import org.apache.http.HttpEntityEnclosingRequest; +import org.apache.http.HttpHost; +import org.apache.http.ProtocolVersion; +import org.apache.http.client.methods.HttpHead; +import org.apache.http.client.methods.HttpPost; +import org.apache.http.client.methods.HttpPut; +import org.apache.http.client.methods.HttpRequestBase; +import org.apache.http.entity.StringEntity; +import org.apache.http.message.BasicHttpResponse; +import org.apache.http.message.BasicStatusLine; +import org.apache.lucene.util.LuceneTestCase; + +import java.io.IOException; +import java.net.URI; +import java.net.URISyntaxException; + +import static org.hamcrest.CoreMatchers.equalTo; + +public class RequestLoggerTests extends LuceneTestCase { + + public void testTraceRequest() throws IOException, URISyntaxException { + HttpHost host = new HttpHost("localhost", 9200, random().nextBoolean() ? 
"http" : "https"); + URI uri = new URI("/index/type/_api"); + + HttpRequestBase request; + int requestType = RandomInts.randomIntBetween(random(), 0, 4); + switch(requestType) { + case 0: + request = new HttpGetWithEntity(uri); + break; + case 1: + request = new HttpPost(uri); + break; + case 2: + request = new HttpPut(uri); + break; + case 3: + request = new HttpDeleteWithEntity(uri); + break; + case 4: + request = new HttpHead(uri); + break; + default: + throw new UnsupportedOperationException(); + } + + String expected = "curl -iX " + request.getMethod() + " '" + host + uri + "'"; + + if (request instanceof HttpEntityEnclosingRequest && random().nextBoolean()) { + HttpEntityEnclosingRequest enclosingRequest = (HttpEntityEnclosingRequest) request; + String requestBody = "{ \"field\": \"value\" }"; + enclosingRequest.setEntity(new StringEntity(requestBody, Charsets.UTF_8)); + expected += " -d '" + requestBody + "'"; + } + + String traceRequest = RequestLogger.buildTraceRequest(request, host); + assertThat(traceRequest, equalTo(expected)); + } + + public void testTraceResponse() throws IOException { + ProtocolVersion protocolVersion = new ProtocolVersion("HTTP", 1, 1); + int statusCode = RandomInts.randomIntBetween(random(), 200, 599); + String reasonPhrase = "REASON"; + BasicStatusLine statusLine = new BasicStatusLine(protocolVersion, statusCode, reasonPhrase); + String expected = "# " + statusLine.toString(); + BasicHttpResponse httpResponse = new BasicHttpResponse(statusLine); + int numHeaders = RandomInts.randomIntBetween(random(), 0, 3); + for (int i = 0; i < numHeaders; i++) { + httpResponse.setHeader("header" + i, "value"); + expected += "\n# header" + i + ": value"; + } + expected += "\n#"; + if (random().nextBoolean()) { + String responseBody = "{\n \"field\": \"value\"\n}"; + httpResponse.setEntity(new StringEntity(responseBody, Charsets.UTF_8)); + expected += "\n# {"; + expected += "\n# \"field\": \"value\""; + expected += "\n# }"; + } + + String 
traceResponse = RequestLogger.buildTraceResponse(httpResponse); + assertThat(traceResponse, equalTo(expected)); + } +} From e040d2fc779fcf3ed12cdcb29f92e5a3e5acc0f3 Mon Sep 17 00:00:00 2001 From: javanna Date: Mon, 9 May 2016 12:28:47 +0200 Subject: [PATCH 019/103] remove ConnectionPool interface --- .../client/AbstractStaticConnectionPool.java | 103 ------------------ .../elasticsearch/client/ConnectionPool.java | 76 +++++++++++-- .../client/StaticConnectionPool.java | 2 +- .../client/sniff/SniffingConnectionPool.java | 4 +- .../elasticsearch/client/RestClientTests.java | 12 +- 5 files changed, 76 insertions(+), 121 deletions(-) delete mode 100644 client/src/main/java/org/elasticsearch/client/AbstractStaticConnectionPool.java diff --git a/client/src/main/java/org/elasticsearch/client/AbstractStaticConnectionPool.java b/client/src/main/java/org/elasticsearch/client/AbstractStaticConnectionPool.java deleted file mode 100644 index 3fe4ead4bbe89..0000000000000 --- a/client/src/main/java/org/elasticsearch/client/AbstractStaticConnectionPool.java +++ /dev/null @@ -1,103 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.client; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.http.HttpHost; - -import java.io.IOException; -import java.util.ArrayList; -import java.util.Collections; -import java.util.List; -import java.util.Objects; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.stream.Stream; - -/** - * Base static connection pool implementation that marks connections as dead/alive when needed. - * Provides a stream of alive connections or dead ones that should be retried for each {@link #nextConnection()} call. - * In case the returned stream is empty a last resort dead connection should be retrieved by calling {@link #lastResortConnection()} - * and resurrected so that a last resort request attempt can be performed. - * The {@link #onSuccess(Connection)} method marks the connection provided as an argument alive. - * The {@link #onFailure(Connection)} method marks the connection provided as an argument dead. - * This base implementation doesn't define the list implementation that stores connections, so that concurrency can be - * handled in subclasses depending on the usecase (e.g. defining the list volatile or final when needed). - */ -public abstract class AbstractStaticConnectionPool implements ConnectionPool { - - private static final Log logger = LogFactory.getLog(AbstractStaticConnectionPool.class); - - private final AtomicInteger lastConnectionIndex = new AtomicInteger(0); - - /** - * Allows to retrieve the concrete list of connections. Not defined directly as a member - * of this class as subclasses may need to handle concurrency if the list can change, for - * instance defining the field as volatile. On the other hand static implementations - * can just make the list final instead. 
- */ - protected abstract List getConnections(); - - @Override - public final Stream nextConnection() { - List connections = getConnections(); - if (connections.isEmpty()) { - throw new IllegalStateException("no connections available in the connection pool"); - } - - List sortedConnections = new ArrayList<>(connections); - //TODO is it possible to make this O(1)? (rotate is O(n)) - Collections.rotate(sortedConnections, sortedConnections.size() - lastConnectionIndex.getAndIncrement()); - return sortedConnections.stream().filter(connection -> connection.isAlive() || connection.shouldBeRetried()); - } - - /** - * Helper method to be used by subclasses when needing to create a new list - * of connections given their corresponding hosts - */ - protected List createConnections(HttpHost... hosts) { - List connections = new ArrayList<>(); - for (HttpHost host : hosts) { - Objects.requireNonNull(host, "host cannot be null"); - connections.add(new Connection(host)); - } - return Collections.unmodifiableList(connections); - } - - @Override - public Connection lastResortConnection() { - Connection Connection = getConnections().stream() - .sorted((o1, o2) -> Long.compare(o1.getDeadUntil(), o2.getDeadUntil())).findFirst().get(); - Connection.markResurrected(); - return Connection; - } - - @Override - public void onSuccess(Connection connection) { - connection.markAlive(); - logger.trace("marked connection alive for " + connection.getHost()); - } - - @Override - public void onFailure(Connection connection) throws IOException { - connection.markDead(); - logger.debug("marked connection dead for " + connection.getHost()); - } -} diff --git a/client/src/main/java/org/elasticsearch/client/ConnectionPool.java b/client/src/main/java/org/elasticsearch/client/ConnectionPool.java index b76376abf107d..b0be3a27075db 100644 --- a/client/src/main/java/org/elasticsearch/client/ConnectionPool.java +++ b/client/src/main/java/org/elasticsearch/client/ConnectionPool.java @@ -19,16 +19,44 @@ 
package org.elasticsearch.client; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.http.HttpHost; + import java.io.Closeable; import java.io.IOException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.Objects; +import java.util.concurrent.atomic.AtomicInteger; import java.util.stream.Stream; /** * Pool of connections to the different hosts that belong to an elasticsearch cluster. * It keeps track of the different hosts to communicate with and allows to retrieve a stream of connections to be used - * for each request. Exposes the needed hooks to be able to eventually mark connections dead or alive. + * for each request. Marks connections as dead/alive when needed. + * Provides a stream of alive connections or dead ones that should be retried for each {@link #nextConnection()} call. + * In case the returned stream is empty a last resort dead connection should be retrieved by calling {@link #lastResortConnection()} + * and resurrected so that a last resort request attempt can be performed. + * The {@link #onSuccess(Connection)} method marks the connection provided as an argument alive. + * The {@link #onFailure(Connection)} method marks the connection provided as an argument dead. + * This base implementation doesn't define the list implementation that stores connections, so that concurrency can be + * handled in subclasses depending on the usecase (e.g. defining the list volatile or final when needed). */ -public interface ConnectionPool extends Closeable { +public abstract class ConnectionPool implements Closeable { + + private static final Log logger = LogFactory.getLog(ConnectionPool.class); + + private final AtomicInteger lastConnectionIndex = new AtomicInteger(0); + + /** + * Allows to retrieve the concrete list of connections. 
Not defined directly as a member + * of this class as subclasses may need to handle concurrency if the list can change, for + * instance defining the field as volatile. On the other hand static implementations + * can just make the list final instead. + */ + protected abstract List getConnections(); /** * Returns a stream of connections that should be used for a request call. @@ -38,23 +66,57 @@ public interface ConnectionPool extends Closeable { * It may happen that the stream is empty, in which case it means that there aren't healthy connections to use. * Then {@link #lastResortConnection()} should be called to retrieve a non healthy connection and try it. */ - Stream nextConnection(); + public final Stream nextConnection() { + List connections = getConnections(); + if (connections.isEmpty()) { + throw new IllegalStateException("no connections available in the connection pool"); + } + + List sortedConnections = new ArrayList<>(connections); + //TODO is it possible to make this O(1)? (rotate is O(n)) + Collections.rotate(sortedConnections, sortedConnections.size() - lastConnectionIndex.getAndIncrement()); + return sortedConnections.stream().filter(connection -> connection.isAlive() || connection.shouldBeRetried()); + } + + /** + * Helper method to be used by subclasses when needing to create a new list + * of connections given their corresponding hosts + */ + protected final List createConnections(HttpHost... hosts) { + List connections = new ArrayList<>(); + for (HttpHost host : hosts) { + Objects.requireNonNull(host, "host cannot be null"); + connections.add(new Connection(host)); + } + return Collections.unmodifiableList(connections); + } /** * Returns a connection that is not necessarily healthy, but can be used for a request attempt. To be called as last resort - * only in case {@link #nextConnection()} returns an empty stream + * only in case {@link #nextConnection()} returns an empty stream. 
*/ - Connection lastResortConnection(); + public final Connection lastResortConnection() { + Connection Connection = getConnections().stream() + .sorted((o1, o2) -> Long.compare(o1.getDeadUntil(), o2.getDeadUntil())).findFirst().get(); + Connection.markResurrected(); + return Connection; + } /** * Called after each successful request call. * Receives as an argument the connection that was used for the successful request. */ - void onSuccess(Connection connection); + public void onSuccess(Connection connection) { + connection.markAlive(); + logger.trace("marked connection alive for " + connection.getHost()); + } /** * Called after each failed attempt. * Receives as an argument the connection that was used for the failed attempt. */ - void onFailure(Connection connection) throws IOException; + public void onFailure(Connection connection) throws IOException { + connection.markDead(); + logger.debug("marked connection dead for " + connection.getHost()); + } } diff --git a/client/src/main/java/org/elasticsearch/client/StaticConnectionPool.java b/client/src/main/java/org/elasticsearch/client/StaticConnectionPool.java index 289f14ce6efe0..4ec0bcb3c3924 100644 --- a/client/src/main/java/org/elasticsearch/client/StaticConnectionPool.java +++ b/client/src/main/java/org/elasticsearch/client/StaticConnectionPool.java @@ -27,7 +27,7 @@ /** * Static implementation of {@link ConnectionPool}. Its underlying list of connections is immutable. 
*/ -public class StaticConnectionPool extends AbstractStaticConnectionPool { +public class StaticConnectionPool extends ConnectionPool { private final List connections; diff --git a/client/src/main/java/org/elasticsearch/client/sniff/SniffingConnectionPool.java b/client/src/main/java/org/elasticsearch/client/sniff/SniffingConnectionPool.java index 934352deee0e5..a2973638ad4d2 100644 --- a/client/src/main/java/org/elasticsearch/client/sniff/SniffingConnectionPool.java +++ b/client/src/main/java/org/elasticsearch/client/sniff/SniffingConnectionPool.java @@ -24,7 +24,7 @@ import org.apache.http.HttpHost; import org.apache.http.client.config.RequestConfig; import org.apache.http.impl.client.CloseableHttpClient; -import org.elasticsearch.client.AbstractStaticConnectionPool; +import org.elasticsearch.client.ConnectionPool; import org.elasticsearch.client.Connection; import java.io.IOException; @@ -43,7 +43,7 @@ * Connection pool implementation that sniffs nodes from elasticsearch at regular intervals. * Can optionally sniff nodes on each failure as well. 
*/ -public class SniffingConnectionPool extends AbstractStaticConnectionPool { +public class SniffingConnectionPool extends ConnectionPool { private static final Log logger = LogFactory.getLog(SniffingConnectionPool.class); diff --git a/client/src/test/java/org/elasticsearch/client/RestClientTests.java b/client/src/test/java/org/elasticsearch/client/RestClientTests.java index 6c3850fafcf24..0d9930d9c9f67 100644 --- a/client/src/test/java/org/elasticsearch/client/RestClientTests.java +++ b/client/src/test/java/org/elasticsearch/client/RestClientTests.java @@ -25,8 +25,9 @@ import org.apache.lucene.util.LuceneTestCase; import java.io.IOException; +import java.util.Collections; +import java.util.List; import java.util.logging.LogManager; -import java.util.stream.Stream; public class RestClientTests extends LuceneTestCase { @@ -38,13 +39,8 @@ public void testConstructor() throws IOException { CloseableHttpClient httpClient = HttpClientBuilder.create().build(); ConnectionPool connectionPool = new ConnectionPool() { @Override - public Stream nextConnection() { - return null; - } - - @Override - public Connection lastResortConnection() { - return null; + protected List getConnections() { + return Collections.emptyList(); } @Override From 9569ebc262a20f629a9953f3da2724f7bc0c3841 Mon Sep 17 00:00:00 2001 From: javanna Date: Mon, 9 May 2016 16:37:39 +0200 Subject: [PATCH 020/103] add builders for simple creation of RestClient and SniffingConnectionPool instances We have quite some constructor parameters and some defaults should be applied, builders help simplifying creation of objects for users. 
--- .../org/elasticsearch/client/RestClient.java | 109 +++++++++++- .../elasticsearch/client/sniff/Sniffer.java | 7 - .../client/sniff/SniffingConnectionPool.java | 152 +++++++++++++++-- .../client/RestClientBuilderTests.java | 102 ++++++++++++ .../elasticsearch/client/RestClientTests.java | 87 ---------- .../SniffingConnectionPoolBuilderTests.java | 138 ++++++++++++++++ .../sniff/SniffingConnectionPoolTests.java | 156 ------------------ 7 files changed, 476 insertions(+), 275 deletions(-) create mode 100644 client/src/test/java/org/elasticsearch/client/RestClientBuilderTests.java delete mode 100644 client/src/test/java/org/elasticsearch/client/RestClientTests.java create mode 100644 client/src/test/java/org/elasticsearch/client/sniff/SniffingConnectionPoolBuilderTests.java delete mode 100644 client/src/test/java/org/elasticsearch/client/sniff/SniffingConnectionPoolTests.java diff --git a/client/src/main/java/org/elasticsearch/client/RestClient.java b/client/src/main/java/org/elasticsearch/client/RestClient.java index 945934d5ffe0f..7fdc8e57e3a99 100644 --- a/client/src/main/java/org/elasticsearch/client/RestClient.java +++ b/client/src/main/java/org/elasticsearch/client/RestClient.java @@ -21,6 +21,8 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.http.HttpEntity; +import org.apache.http.HttpHost; +import org.apache.http.client.config.RequestConfig; import org.apache.http.client.methods.CloseableHttpResponse; import org.apache.http.client.methods.HttpEntityEnclosingRequestBase; import org.apache.http.client.methods.HttpHead; @@ -29,6 +31,8 @@ import org.apache.http.client.methods.HttpRequestBase; import org.apache.http.client.utils.URIBuilder; import org.apache.http.impl.client.CloseableHttpClient; +import org.apache.http.impl.client.HttpClientBuilder; +import org.apache.http.impl.conn.PoolingHttpClientConnectionManager; import org.apache.http.util.EntityUtils; import java.io.Closeable; @@ -38,7 +42,6 @@ 
import java.util.Iterator; import java.util.Locale; import java.util.Map; -import java.util.Objects; import java.util.concurrent.TimeUnit; import java.util.stream.Stream; @@ -50,12 +53,7 @@ public final class RestClient implements Closeable { private final ConnectionPool connectionPool; private final long maxRetryTimeout; - public RestClient(CloseableHttpClient client, ConnectionPool connectionPool, long maxRetryTimeout) { - Objects.requireNonNull(client, "client cannot be null"); - Objects.requireNonNull(connectionPool, "connectionPool cannot be null"); - if (maxRetryTimeout <= 0) { - throw new IllegalArgumentException("maxRetryTimeout must be greater than 0"); - } + private RestClient(CloseableHttpClient client, ConnectionPool connectionPool, long maxRetryTimeout) { this.client = client; this.connectionPool = connectionPool; this.maxRetryTimeout = maxRetryTimeout; @@ -192,4 +190,101 @@ public void close() throws IOException { connectionPool.close(); client.close(); } + + /** + * Returns a new {@link Builder} to help with {@link RestClient} creation. + */ + public static Builder builder() { + return new Builder(); + } + + /** + * Rest client builder. Helps creating a new {@link RestClient}. + */ + public static final class Builder { + private static final int DEFAULT_MAX_RETRY_TIMEOUT = 10000; + + private ConnectionPool connectionPool; + private CloseableHttpClient httpClient; + private int maxRetryTimeout = DEFAULT_MAX_RETRY_TIMEOUT; + private HttpHost[] hosts; + + private Builder() { + + } + + /** + * Sets the connection pool. {@link StaticConnectionPool} will be used if not specified. + * @see ConnectionPool + */ + public Builder setConnectionPool(ConnectionPool connectionPool) { + this.connectionPool = connectionPool; + return this; + } + + /** + * Sets the http client. A new default one will be created if not specified, by calling {@link #createDefaultHttpClient()}. 
+ * @see CloseableHttpClient + */ + public Builder setHttpClient(CloseableHttpClient httpClient) { + this.httpClient = httpClient; + return this; + } + + /** + * Sets the maximum timeout to honour in case of multiple retries of the same request. + * {@link #DEFAULT_MAX_RETRY_TIMEOUT} if not specified. + * @throws IllegalArgumentException if maxRetryTimeout is not greater than 0 + */ + public Builder setMaxRetryTimeout(int maxRetryTimeout) { + if (maxRetryTimeout <= 0) { + throw new IllegalArgumentException("maxRetryTimeout must be greater than 0"); + } + this.maxRetryTimeout = maxRetryTimeout; + return this; + } + + /** + * Sets the hosts that the client will send requests to. Mandatory if no connection pool is specified, + * as the provided hosts will be used to create the default static connection pool. + */ + public Builder setHosts(HttpHost... hosts) { + if (hosts == null || hosts.length == 0) { + throw new IllegalArgumentException("no hosts provided"); + } + this.hosts = hosts; + return this; + } + + /** + * Creates a new {@link RestClient} based on the provided configuration. 
+ */ + public RestClient build() { + if (httpClient == null) { + httpClient = createDefaultHttpClient(); + } + if (connectionPool == null) { + connectionPool = new StaticConnectionPool(hosts); + } + return new RestClient(httpClient, connectionPool, maxRetryTimeout); + } + + /** + * Creates an http client with default settings + * + * @see CloseableHttpClient + */ + public static CloseableHttpClient createDefaultHttpClient() { + PoolingHttpClientConnectionManager connectionManager = new PoolingHttpClientConnectionManager(); + //default settings may be too constraining + connectionManager.setDefaultMaxPerRoute(10); + connectionManager.setMaxTotal(30); + + //default timeouts are all infinite + RequestConfig requestConfig = RequestConfig.custom().setConnectTimeout(500).setSocketTimeout(10000) + .setConnectionRequestTimeout(500).build(); + + return HttpClientBuilder.create().setConnectionManager(connectionManager).setDefaultRequestConfig(requestConfig).build(); + } + } } diff --git a/client/src/main/java/org/elasticsearch/client/sniff/Sniffer.java b/client/src/main/java/org/elasticsearch/client/sniff/Sniffer.java index 7aa4e5f5f3571..09a72fedb624e 100644 --- a/client/src/main/java/org/elasticsearch/client/sniff/Sniffer.java +++ b/client/src/main/java/org/elasticsearch/client/sniff/Sniffer.java @@ -40,7 +40,6 @@ import java.net.URI; import java.util.ArrayList; import java.util.List; -import java.util.Objects; /** * Calls nodes info api and returns a list of http hosts extracted from it. 
@@ -57,12 +56,6 @@ final class Sniffer { private final JsonFactory jsonFactory; Sniffer(CloseableHttpClient client, RequestConfig sniffRequestConfig, int sniffRequestTimeout, String scheme) { - Objects.requireNonNull(client, "client cannot be null"); - Objects.requireNonNull(sniffRequestConfig, "sniffRequestConfig cannot be null"); - if (sniffRequestTimeout <=0) { - throw new IllegalArgumentException("sniffRequestTimeout must be greater than 0"); - } - Objects.requireNonNull(scheme, "scheme cannot be null"); this.client = client; this.sniffRequestConfig = sniffRequestConfig; this.sniffRequestTimeout = sniffRequestTimeout; diff --git a/client/src/main/java/org/elasticsearch/client/sniff/SniffingConnectionPool.java b/client/src/main/java/org/elasticsearch/client/sniff/SniffingConnectionPool.java index a2973638ad4d2..525645e9598dd 100644 --- a/client/src/main/java/org/elasticsearch/client/sniff/SniffingConnectionPool.java +++ b/client/src/main/java/org/elasticsearch/client/sniff/SniffingConnectionPool.java @@ -24,8 +24,9 @@ import org.apache.http.HttpHost; import org.apache.http.client.config.RequestConfig; import org.apache.http.impl.client.CloseableHttpClient; -import org.elasticsearch.client.ConnectionPool; import org.elasticsearch.client.Connection; +import org.elasticsearch.client.ConnectionPool; +import org.elasticsearch.client.RestClient; import java.io.IOException; import java.util.Iterator; @@ -52,23 +53,8 @@ public class SniffingConnectionPool extends ConnectionPool { private volatile List connections; private final SnifferTask snifferTask; - //TODO do we still need the sniff request timeout? or should we just use a low connect timeout? - public SniffingConnectionPool(int sniffInterval, boolean sniffOnFailure, int sniffAfterFailureDelay, - CloseableHttpClient client, RequestConfig sniffRequestConfig, int sniffRequestTimeout, String scheme, - HttpHost... 
hosts) { - if (sniffInterval <= 0) { - throw new IllegalArgumentException("sniffInterval must be greater than 0"); - } - if (sniffAfterFailureDelay <= 0) { - throw new IllegalArgumentException("sniffAfterFailureDelay must be greater than 0"); - } - Objects.requireNonNull(scheme, "scheme cannot be null"); - if (scheme.equals("http") == false && scheme.equals("https") == false) { - throw new IllegalArgumentException("scheme must be either http or https"); - } - if (hosts == null || hosts.length == 0) { - throw new IllegalArgumentException("no hosts provided"); - } + private SniffingConnectionPool(int sniffInterval, boolean sniffOnFailure, int sniffAfterFailureDelay, CloseableHttpClient client, + RequestConfig sniffRequestConfig, int sniffRequestTimeout, String scheme, HttpHost... hosts) { this.sniffOnFailure = sniffOnFailure; this.sniffer = new Sniffer(client, sniffRequestConfig, sniffRequestTimeout, scheme); this.connections = createConnections(hosts); @@ -188,4 +174,134 @@ void shutdown() { scheduledExecutorService.shutdownNow(); } } + + /** + * Returns a new {@link Builder} to help with {@link SniffingConnectionPool} creation. + */ + public static Builder builder() { + return new Builder(); + } + + /** + * Sniffing connection pool builder. Helps creating a new {@link SniffingConnectionPool}. + */ + public static final class Builder { + private int sniffInterval = 5 * 1000 * 60; + private boolean sniffOnFailure = true; + private int sniffAfterFailureDelay = 60000; + private CloseableHttpClient httpClient; + private RequestConfig sniffRequestConfig; + private int sniffRequestTimeout = 1000; + private String scheme = "http"; + private HttpHost[] hosts; + + private Builder() { + + } + + /** + * Sets the interval between consecutive ordinary sniff executions. Will be honoured when sniffOnFailure is disabled or + * when there are no failures between consecutive sniff executions. 
+ * @throws IllegalArgumentException if sniffInterval is not greater than 0 + */ + public Builder setSniffInterval(int sniffInterval) { + if (sniffInterval <= 0) { + throw new IllegalArgumentException("sniffInterval must be greater than 0"); + } + this.sniffInterval = sniffInterval; + return this; + } + + /** + * Enables/disables sniffing on failure. If enabled, at each failure nodes will be reloaded, and a new sniff execution will + * be scheduled after a shorter time than usual (sniffAfterFailureDelay). + */ + public Builder setSniffOnFailure(boolean sniffOnFailure) { + this.sniffOnFailure = sniffOnFailure; + return this; + } + + /** + * Sets the delay of a sniff execution scheduled after a failure. + */ + public Builder setSniffAfterFailureDelay(int sniffAfterFailureDelay) { + if (sniffAfterFailureDelay <= 0) { + throw new IllegalArgumentException("sniffAfterFailureDelay must be greater than 0"); + } + this.sniffAfterFailureDelay = sniffAfterFailureDelay; + return this; + } + + /** + * Sets the http client. Mandatory argument. Best practice is to use the same client used + * within {@link org.elasticsearch.client.RestClient} which can be created manually or + * through {@link RestClient.Builder#createDefaultHttpClient()}. + * @see CloseableHttpClient + */ + public Builder setHttpClient(CloseableHttpClient httpClient) { + this.httpClient = httpClient; + return this; + } + + /** + * Sets the configuration to be used for each sniff request. Useful as sniff can have + * different timeouts compared to ordinary requests. + * @see RequestConfig + */ + public Builder setSniffRequestConfig(RequestConfig sniffRequestConfig) { + this.sniffRequestConfig = sniffRequestConfig; + return this; + } + + /** + * Sets the sniff request timeout to be passed in as a query string parameter to elasticsearch. + * Allows to halt the request without any failure, as only the nodes that have responded + * within this timeout will be returned. 
+ */ + public Builder setSniffRequestTimeout(int sniffRequestTimeout) { + if (sniffRequestTimeout <=0) { + throw new IllegalArgumentException("sniffRequestTimeout must be greater than 0"); + } + this.sniffRequestTimeout = sniffRequestTimeout; + return this; + } + + /** + * Sets the scheme to be used for sniffed nodes. This information is not returned by elasticsearch, + * default is http but should be customized if https is needed/enabled. + */ + public Builder setScheme(String scheme) { + Objects.requireNonNull(scheme, "scheme cannot be null"); + if (scheme.equals("http") == false && scheme.equals("https") == false) { + throw new IllegalArgumentException("scheme must be either http or https"); + } + this.scheme = scheme; + return this; + } + + /** + * Sets the hosts that the client will send requests to. + */ + public Builder setHosts(HttpHost... hosts) { + this.hosts = hosts; + return this; + } + + /** + * Creates the {@link SniffingConnectionPool} based on the provided configuration. + */ + public SniffingConnectionPool build() { + Objects.requireNonNull(httpClient, "httpClient cannot be null"); + if (hosts == null || hosts.length == 0) { + throw new IllegalArgumentException("no hosts provided"); + } + + if (sniffRequestConfig == null) { + sniffRequestConfig = RequestConfig.custom().setConnectTimeout(500).setSocketTimeout(1000) + .setConnectionRequestTimeout(500).build(); + } + return new SniffingConnectionPool(sniffInterval, sniffOnFailure, sniffAfterFailureDelay, httpClient, sniffRequestConfig, + sniffRequestTimeout, scheme, hosts); + } + } } diff --git a/client/src/test/java/org/elasticsearch/client/RestClientBuilderTests.java b/client/src/test/java/org/elasticsearch/client/RestClientBuilderTests.java new file mode 100644 index 0000000000000..11008d52b190b --- /dev/null +++ b/client/src/test/java/org/elasticsearch/client/RestClientBuilderTests.java @@ -0,0 +1,102 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. 
See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client; + +import com.carrotsearch.randomizedtesting.generators.RandomInts; +import org.apache.http.HttpHost; +import org.apache.http.impl.client.HttpClientBuilder; +import org.apache.lucene.util.LuceneTestCase; + +import java.io.IOException; +import java.util.Collections; +import java.util.List; +import java.util.logging.LogManager; + +public class RestClientBuilderTests extends LuceneTestCase { + + static { + LogManager.getLogManager().reset(); + } + + public void testBuild() throws IOException { + try { + RestClient.builder().setMaxRetryTimeout(RandomInts.randomIntBetween(random(), Integer.MIN_VALUE, 0)); + fail("should have failed"); + } catch(IllegalArgumentException e) { + assertEquals(e.getMessage(), "maxRetryTimeout must be greater than 0"); + } + + try { + RestClient.builder().setHosts((HttpHost[])null); + fail("should have failed"); + } catch(IllegalArgumentException e) { + assertEquals(e.getMessage(), "no hosts provided"); + } + + try { + RestClient.builder().setHosts(); + fail("should have failed"); + } catch(IllegalArgumentException e) { + assertEquals(e.getMessage(), "no hosts provided"); + } + + RestClient.Builder builder = RestClient.builder(); + if (random().nextBoolean()) { + ConnectionPool connectionPool = new 
ConnectionPool() { + @Override + protected List getConnections() { + return Collections.emptyList(); + } + + @Override + public void onSuccess(Connection connection) { + + } + + @Override + public void onFailure(Connection connection) throws IOException { + + } + + @Override + public void close() throws IOException { + + } + }; + builder.setConnectionPool(connectionPool); + } else { + int numNodes = RandomInts.randomIntBetween(random(), 1, 5); + HttpHost[] hosts = new HttpHost[numNodes]; + for (int i = 0; i < numNodes; i++) { + hosts[i] = new HttpHost("localhost", 9200 + i); + } + builder.setHosts(hosts); + } + if (random().nextBoolean()) { + builder.setHttpClient(HttpClientBuilder.create().build()); + } + if (random().nextBoolean()) { + builder.setMaxRetryTimeout(RandomInts.randomIntBetween(random(), 1, Integer.MAX_VALUE)); + } + try (RestClient restClient = builder.build()) { + assertNotNull(restClient); + } + } +} diff --git a/client/src/test/java/org/elasticsearch/client/RestClientTests.java b/client/src/test/java/org/elasticsearch/client/RestClientTests.java deleted file mode 100644 index 0d9930d9c9f67..0000000000000 --- a/client/src/test/java/org/elasticsearch/client/RestClientTests.java +++ /dev/null @@ -1,87 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. 
See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.client; - -import com.carrotsearch.randomizedtesting.generators.RandomInts; -import org.apache.http.impl.client.CloseableHttpClient; -import org.apache.http.impl.client.HttpClientBuilder; -import org.apache.lucene.util.LuceneTestCase; - -import java.io.IOException; -import java.util.Collections; -import java.util.List; -import java.util.logging.LogManager; - -public class RestClientTests extends LuceneTestCase { - - static { - LogManager.getLogManager().reset(); - } - - public void testConstructor() throws IOException { - CloseableHttpClient httpClient = HttpClientBuilder.create().build(); - ConnectionPool connectionPool = new ConnectionPool() { - @Override - protected List getConnections() { - return Collections.emptyList(); - } - - @Override - public void onSuccess(Connection connection) { - - } - - @Override - public void onFailure(Connection connection) throws IOException { - - } - - @Override - public void close() throws IOException { - - } - }; - - try { - new RestClient(null, connectionPool, RandomInts.randomIntBetween(random(), 1, Integer.MAX_VALUE)); - fail("transport creation should have failed"); - } catch(NullPointerException e) { - assertEquals(e.getMessage(), "client cannot be null"); - } - - try { - new RestClient(httpClient, null, RandomInts.randomIntBetween(random(), 1, Integer.MAX_VALUE)); - fail("transport creation should have failed"); - } catch(NullPointerException e) { - assertEquals(e.getMessage(), "connectionPool cannot be null"); - } - - try { - new RestClient(httpClient, connectionPool, RandomInts.randomIntBetween(random(), Integer.MIN_VALUE, 0)); - fail("transport creation should have failed"); - } catch(IllegalArgumentException e) { - assertEquals(e.getMessage(), "maxRetryTimeout must be greater than 0"); - } - - try(RestClient client = new RestClient(httpClient, connectionPool, 
RandomInts.randomIntBetween(random(), 1, Integer.MAX_VALUE))) { - assertNotNull(client); - } - } -} diff --git a/client/src/test/java/org/elasticsearch/client/sniff/SniffingConnectionPoolBuilderTests.java b/client/src/test/java/org/elasticsearch/client/sniff/SniffingConnectionPoolBuilderTests.java new file mode 100644 index 0000000000000..b5b4eaa3ce1bf --- /dev/null +++ b/client/src/test/java/org/elasticsearch/client/sniff/SniffingConnectionPoolBuilderTests.java @@ -0,0 +1,138 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.client.sniff; + +import com.carrotsearch.randomizedtesting.generators.RandomInts; +import com.carrotsearch.randomizedtesting.generators.RandomPicks; +import org.apache.http.HttpHost; +import org.apache.http.client.config.RequestConfig; +import org.apache.http.impl.client.HttpClientBuilder; +import org.apache.lucene.util.LuceneTestCase; + +import java.util.Arrays; +import java.util.logging.LogManager; + +public class SniffingConnectionPoolBuilderTests extends LuceneTestCase { + + static { + LogManager.getLogManager().reset(); + } + + public void testBuild() throws Exception { + + try { + SniffingConnectionPool.builder().setScheme(null); + fail("should have failed"); + } catch(NullPointerException e) { + assertEquals(e.getMessage(), "scheme cannot be null"); + } + + try { + SniffingConnectionPool.builder().setScheme("whatever"); + fail("should have failed"); + } catch(IllegalArgumentException e) { + assertEquals(e.getMessage(), "scheme must be either http or https"); + } + + try { + SniffingConnectionPool.builder().setSniffInterval(RandomInts.randomIntBetween(random(), Integer.MIN_VALUE, 0)); + fail("should have failed"); + } catch(IllegalArgumentException e) { + assertEquals(e.getMessage(), "sniffInterval must be greater than 0"); + } + + try { + SniffingConnectionPool.builder().setSniffRequestTimeout(RandomInts.randomIntBetween(random(), Integer.MIN_VALUE, 0)); + fail("should have failed"); + } catch(IllegalArgumentException e) { + assertEquals(e.getMessage(), "sniffRequestTimeout must be greater than 0"); + } + + try { + SniffingConnectionPool.builder().setSniffAfterFailureDelay(RandomInts.randomIntBetween(random(), Integer.MIN_VALUE, 0)); + fail("should have failed"); + } catch(IllegalArgumentException e) { + assertEquals(e.getMessage(), "sniffAfterFailureDelay must be greater than 0"); + } + + try { + SniffingConnectionPool.builder().build(); + fail("should have failed"); + } catch(NullPointerException e) { + 
assertEquals(e.getMessage(), "httpClient cannot be null"); + } + + try { + SniffingConnectionPool.builder().setHttpClient(HttpClientBuilder.create().build()).build(); + fail("should have failed"); + } catch(IllegalArgumentException e) { + assertEquals(e.getMessage(), "no hosts provided"); + } + + try { + SniffingConnectionPool.builder().setHttpClient(HttpClientBuilder.create().build()).setHosts((HttpHost[])null).build(); + fail("should have failed"); + } catch(IllegalArgumentException e) { + assertEquals(e.getMessage(), "no hosts provided"); + } + + try { + SniffingConnectionPool.builder().setHttpClient(HttpClientBuilder.create().build()).setHosts().build(); + fail("should have failed"); + } catch(IllegalArgumentException e) { + assertEquals(e.getMessage(), "no hosts provided"); + } + + int numNodes = RandomInts.randomIntBetween(random(), 1, 5); + HttpHost[] hosts = new HttpHost[numNodes]; + for (int i = 0; i < numNodes; i++) { + hosts[i] = new HttpHost("localhost", 9200 + i); + } + + try (SniffingConnectionPool connectionPool = SniffingConnectionPool.builder() + .setHttpClient(HttpClientBuilder.create().build()).setHosts(hosts).build()) { + assertNotNull(connectionPool); + } + + SniffingConnectionPool.Builder builder = SniffingConnectionPool.builder() + .setHttpClient(HttpClientBuilder.create().build()).setHosts(hosts); + if (random().nextBoolean()) { + builder.setScheme(RandomPicks.randomFrom(random(), Arrays.asList("http", "https"))); + } + if (random().nextBoolean()) { + builder.setSniffInterval(RandomInts.randomIntBetween(random(), 1, Integer.MAX_VALUE)); + } + if (random().nextBoolean()) { + builder.setSniffAfterFailureDelay(RandomInts.randomIntBetween(random(), 1, Integer.MAX_VALUE)); + } + if (random().nextBoolean()) { + builder.setSniffRequestTimeout(RandomInts.randomIntBetween(random(), 1, Integer.MAX_VALUE)); + } + if (random().nextBoolean()) { + builder.setSniffOnFailure(random().nextBoolean()); + } + if (random().nextBoolean()) { + 
builder.setSniffRequestConfig(RequestConfig.DEFAULT); + } + try (SniffingConnectionPool connectionPool = builder.build()) { + assertNotNull(connectionPool); + } + } +} diff --git a/client/src/test/java/org/elasticsearch/client/sniff/SniffingConnectionPoolTests.java b/client/src/test/java/org/elasticsearch/client/sniff/SniffingConnectionPoolTests.java deleted file mode 100644 index 4e54f8074dba8..0000000000000 --- a/client/src/test/java/org/elasticsearch/client/sniff/SniffingConnectionPoolTests.java +++ /dev/null @@ -1,156 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.client.sniff; - -import com.carrotsearch.randomizedtesting.generators.RandomInts; -import com.carrotsearch.randomizedtesting.generators.RandomPicks; -import org.apache.http.HttpHost; -import org.apache.http.client.config.RequestConfig; -import org.apache.http.impl.client.CloseableHttpClient; -import org.apache.http.impl.client.HttpClientBuilder; -import org.apache.lucene.util.LuceneTestCase; - -import java.util.logging.LogManager; - -public class SniffingConnectionPoolTests extends LuceneTestCase { - - static { - LogManager.getLogManager().reset(); - } - - public void testConstructor() throws Exception { - CloseableHttpClient httpClient = HttpClientBuilder.create().build(); - String[] schemes = new String[]{"http", "https"}; - int numNodes = RandomInts.randomIntBetween(random(), 1, 5); - HttpHost[] hosts = new HttpHost[numNodes]; - for (int i = 0; i < numNodes; i++) { - hosts[i] = new HttpHost("localhost", 9200); - } - - try (SniffingConnectionPool connectionPool = new SniffingConnectionPool( - RandomInts.randomIntBetween(random(), Integer.MIN_VALUE, 0), random().nextBoolean(), - RandomInts.randomIntBetween(random(), 1, Integer.MAX_VALUE), httpClient, RequestConfig.DEFAULT, - RandomInts.randomIntBetween(random(), 1, Integer.MAX_VALUE), - RandomPicks.randomFrom(random(), schemes), hosts)) { - - fail("pool creation should have failed " + connectionPool); - } catch(IllegalArgumentException e) { - assertEquals(e.getMessage(), "sniffInterval must be greater than 0"); - } - - try (SniffingConnectionPool connectionPool = new SniffingConnectionPool( - RandomInts.randomIntBetween(random(), 1, Integer.MAX_VALUE), random().nextBoolean(), - RandomInts.randomIntBetween(random(), Integer.MIN_VALUE, 0), httpClient, RequestConfig.DEFAULT, - RandomInts.randomIntBetween(random(), 1, Integer.MAX_VALUE), - RandomPicks.randomFrom(random(), schemes), hosts)) { - fail("pool creation should have failed " + connectionPool); - } 
catch(IllegalArgumentException e) { - assertEquals(e.getMessage(), "sniffAfterFailureDelay must be greater than 0"); - } - - try (SniffingConnectionPool connectionPool = new SniffingConnectionPool( - RandomInts.randomIntBetween(random(), 1, Integer.MAX_VALUE), random().nextBoolean(), - RandomInts.randomIntBetween(random(), 1, Integer.MAX_VALUE), null, RequestConfig.DEFAULT, - RandomInts.randomIntBetween(random(), 1, Integer.MAX_VALUE), - RandomPicks.randomFrom(random(), schemes), hosts)) { - fail("pool creation should have failed " + connectionPool); - } catch(NullPointerException e) { - assertEquals(e.getMessage(), "client cannot be null"); - } - - try (SniffingConnectionPool connectionPool = new SniffingConnectionPool( - RandomInts.randomIntBetween(random(), 1, Integer.MAX_VALUE), random().nextBoolean(), - RandomInts.randomIntBetween(random(), 1, Integer.MAX_VALUE), httpClient, null, - RandomInts.randomIntBetween(random(), 1, Integer.MAX_VALUE), - RandomPicks.randomFrom(random(), schemes), hosts)) { - fail("pool creation should have failed " + connectionPool); - } catch(NullPointerException e) { - assertEquals(e.getMessage(), "sniffRequestConfig cannot be null"); - } - - try (SniffingConnectionPool connectionPool = new SniffingConnectionPool( - RandomInts.randomIntBetween(random(), 1, Integer.MAX_VALUE), random().nextBoolean(), - RandomInts.randomIntBetween(random(), 1, Integer.MAX_VALUE), httpClient, RequestConfig.DEFAULT, - RandomInts.randomIntBetween(random(), Integer.MIN_VALUE, 0), - RandomPicks.randomFrom(random(), schemes), hosts)) { - fail("pool creation should have failed " + connectionPool); - } catch(IllegalArgumentException e) { - assertEquals(e.getMessage(), "sniffRequestTimeout must be greater than 0"); - } - - try (SniffingConnectionPool connectionPool = new SniffingConnectionPool( - RandomInts.randomIntBetween(random(), 1, Integer.MAX_VALUE), random().nextBoolean(), - RandomInts.randomIntBetween(random(), 1, Integer.MAX_VALUE), httpClient, 
RequestConfig.DEFAULT, - RandomInts.randomIntBetween(random(), Integer.MIN_VALUE, 0), - null, hosts)) { - fail("pool creation should have failed " + connectionPool); - } catch(NullPointerException e) { - assertEquals(e.getMessage(), "scheme cannot be null"); - } - - try (SniffingConnectionPool connectionPool = new SniffingConnectionPool( - RandomInts.randomIntBetween(random(), 1, Integer.MAX_VALUE), random().nextBoolean(), - RandomInts.randomIntBetween(random(), 1, Integer.MAX_VALUE), httpClient, RequestConfig.DEFAULT, - RandomInts.randomIntBetween(random(), Integer.MIN_VALUE, 0), - "whatever", hosts)) { - fail("pool creation should have failed " + connectionPool); - } catch(IllegalArgumentException e) { - assertEquals(e.getMessage(), "scheme must be either http or https"); - } - - try (SniffingConnectionPool connectionPool = new SniffingConnectionPool( - RandomInts.randomIntBetween(random(), 1, Integer.MAX_VALUE), random().nextBoolean(), - RandomInts.randomIntBetween(random(), 1, Integer.MAX_VALUE), httpClient, RequestConfig.DEFAULT, - RandomInts.randomIntBetween(random(), 1, Integer.MAX_VALUE), - RandomPicks.randomFrom(random(), schemes), (HttpHost[])null)) { - fail("pool creation should have failed " + connectionPool); - } catch(IllegalArgumentException e) { - assertEquals(e.getMessage(), "no hosts provided"); - } - - try (SniffingConnectionPool connectionPool = new SniffingConnectionPool( - RandomInts.randomIntBetween(random(), 1, Integer.MAX_VALUE), random().nextBoolean(), - RandomInts.randomIntBetween(random(), 1, Integer.MAX_VALUE), httpClient, RequestConfig.DEFAULT, - RandomInts.randomIntBetween(random(), 1, Integer.MAX_VALUE), - RandomPicks.randomFrom(random(), schemes), (HttpHost) null)) { - fail("pool creation should have failed " + connectionPool); - } catch(NullPointerException e) { - assertEquals(e.getMessage(), "host cannot be null"); - } - - try (SniffingConnectionPool connectionPool = new SniffingConnectionPool( - 
RandomInts.randomIntBetween(random(), 1, Integer.MAX_VALUE), random().nextBoolean(), - RandomInts.randomIntBetween(random(), 1, Integer.MAX_VALUE), httpClient, RequestConfig.DEFAULT, - RandomInts.randomIntBetween(random(), 1, Integer.MAX_VALUE), - RandomPicks.randomFrom(random(), schemes))) { - fail("pool creation should have failed " + connectionPool); - } catch(IllegalArgumentException e) { - assertEquals(e.getMessage(), "no hosts provided"); - } - - try (SniffingConnectionPool sniffingConnectionPool = new SniffingConnectionPool( - RandomInts.randomIntBetween(random(), 1, Integer.MAX_VALUE), random().nextBoolean(), - RandomInts.randomIntBetween(random(), 1, Integer.MAX_VALUE), httpClient, RequestConfig.DEFAULT, - RandomInts.randomIntBetween(random(), 1, Integer.MAX_VALUE), - RandomPicks.randomFrom(random(), schemes), hosts)) { - assertNotNull(sniffingConnectionPool); - } - } -} From 599dad560c3297c5bc24b9bf33284ba7f0a9bd5e Mon Sep 17 00:00:00 2001 From: javanna Date: Mon, 9 May 2016 19:45:14 +0200 Subject: [PATCH 021/103] remove streams and java 8 only api, build with source and target 1.7 --- client/build.gradle | 11 +++++-- .../elasticsearch/client/ConnectionPool.java | 30 ++++++++++++++----- .../org/elasticsearch/client/RestClient.java | 6 ++-- .../client/sniff/SniffingConnectionPool.java | 24 +++++++-------- .../client/sniff/SnifferTests.java | 4 +-- 5 files changed, 48 insertions(+), 27 deletions(-) diff --git a/client/build.gradle b/client/build.gradle index e19e860c35690..35af3d0979b03 100644 --- a/client/build.gradle +++ b/client/build.gradle @@ -17,11 +17,15 @@ * under the License. 
*/ -import org.elasticsearch.gradle.precommit.PrecommitTasks; +import org.elasticsearch.gradle.precommit.PrecommitTasks +import org.gradle.api.JavaVersion group = 'org.elasticsearch.client' apply plugin: 'elasticsearch.build' +targetCompatibility = JavaVersion.VERSION_1_7 +sourceCompatibility = JavaVersion.VERSION_1_7 + dependencies { // TODO once we got rid of the client in the test framework we should use a version variable here compile "org.apache.httpcomponents:httpclient:4.5.2" @@ -47,8 +51,9 @@ dependencies { testCompile "org.bouncycastle:bcprov-jdk15on:1.54" } -compileJava.options.compilerArgs << '-Xlint:-cast,-rawtypes,-try,-unchecked' -compileTestJava.options.compilerArgs << '-Xlint:-rawtypes' +//TODO compiling from 1.8 with target 1.7 and source 1.7 is best effort, not enough to ensure we are java 7 compatible +compileJava.options.compilerArgs << '-target' << '1.7' << '-source' << '1.7' << '-Xlint:all,-path,-serial,-options' +compileTestJava.options.compilerArgs << '-target' << '1.7' << '-source' << '1.7' forbiddenApisMain { //client does not depend on core, so only jdk signatures should be checked diff --git a/client/src/main/java/org/elasticsearch/client/ConnectionPool.java b/client/src/main/java/org/elasticsearch/client/ConnectionPool.java index b0be3a27075db..89b714f2eeed7 100644 --- a/client/src/main/java/org/elasticsearch/client/ConnectionPool.java +++ b/client/src/main/java/org/elasticsearch/client/ConnectionPool.java @@ -27,10 +27,11 @@ import java.io.IOException; import java.util.ArrayList; import java.util.Collections; +import java.util.Comparator; +import java.util.Iterator; import java.util.List; import java.util.Objects; import java.util.concurrent.atomic.AtomicInteger; -import java.util.stream.Stream; /** * Pool of connections to the different hosts that belong to an elasticsearch cluster. 
@@ -66,7 +67,7 @@ public abstract class ConnectionPool implements Closeable { * It may happen that the stream is empty, in which case it means that there aren't healthy connections to use. * Then {@link #lastResortConnection()} should be called to retrieve a non healthy connection and try it. */ - public final Stream nextConnection() { + public final Iterator nextConnection() { List connections = getConnections(); if (connections.isEmpty()) { throw new IllegalStateException("no connections available in the connection pool"); @@ -75,7 +76,14 @@ public final Stream nextConnection() { List sortedConnections = new ArrayList<>(connections); //TODO is it possible to make this O(1)? (rotate is O(n)) Collections.rotate(sortedConnections, sortedConnections.size() - lastConnectionIndex.getAndIncrement()); - return sortedConnections.stream().filter(connection -> connection.isAlive() || connection.shouldBeRetried()); + Iterator connectionIterator = sortedConnections.iterator(); + while (connectionIterator.hasNext()) { + Connection connection = connectionIterator.next(); + if (connection.isAlive() == false && connection.shouldBeRetried() == false) { + connectionIterator.remove(); + } + } + return connectionIterator; } /** @@ -96,10 +104,18 @@ protected final List createConnections(HttpHost... hosts) { * only in case {@link #nextConnection()} returns an empty stream. 
*/ public final Connection lastResortConnection() { - Connection Connection = getConnections().stream() - .sorted((o1, o2) -> Long.compare(o1.getDeadUntil(), o2.getDeadUntil())).findFirst().get(); - Connection.markResurrected(); - return Connection; + List connections = getConnections(); + if (connections.isEmpty()) { + throw new IllegalStateException("no connections available in the connection pool"); + } + List sortedConnections = new ArrayList<>(connections); + Collections.sort(sortedConnections, new Comparator() { + @Override + public int compare(Connection o1, Connection o2) { + return Long.compare(o1.getDeadUntil(), o2.getDeadUntil()); + } + }); + return sortedConnections.get(0); } /** diff --git a/client/src/main/java/org/elasticsearch/client/RestClient.java b/client/src/main/java/org/elasticsearch/client/RestClient.java index 7fdc8e57e3a99..f13be9e4e49a1 100644 --- a/client/src/main/java/org/elasticsearch/client/RestClient.java +++ b/client/src/main/java/org/elasticsearch/client/RestClient.java @@ -39,11 +39,11 @@ import java.io.IOException; import java.net.URI; import java.net.URISyntaxException; +import java.util.Collections; import java.util.Iterator; import java.util.Locale; import java.util.Map; import java.util.concurrent.TimeUnit; -import java.util.stream.Stream; public final class RestClient implements Closeable { @@ -63,11 +63,11 @@ public ElasticsearchResponse performRequest(String method, String endpoint, Map< throws IOException { URI uri = buildUri(endpoint, params); HttpRequestBase request = createHttpRequest(method, uri, entity); - Iterator connectionIterator = connectionPool.nextConnection().iterator(); + Iterator connectionIterator = connectionPool.nextConnection(); if (connectionIterator.hasNext() == false) { Connection connection = connectionPool.lastResortConnection(); logger.info("no healthy nodes available, trying " + connection.getHost()); - return performRequest(request, Stream.of(connection).iterator()); + return 
performRequest(request, Collections.singleton(connection).iterator()); } return performRequest(request, connectionIterator); } diff --git a/client/src/main/java/org/elasticsearch/client/sniff/SniffingConnectionPool.java b/client/src/main/java/org/elasticsearch/client/sniff/SniffingConnectionPool.java index 525645e9598dd..6bc7ad84e366d 100644 --- a/client/src/main/java/org/elasticsearch/client/sniff/SniffingConnectionPool.java +++ b/client/src/main/java/org/elasticsearch/client/sniff/SniffingConnectionPool.java @@ -29,6 +29,7 @@ import org.elasticsearch.client.RestClient; import java.io.IOException; +import java.util.Collections; import java.util.Iterator; import java.util.List; import java.util.Objects; @@ -37,8 +38,6 @@ import java.util.concurrent.ScheduledFuture; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicBoolean; -import java.util.function.Predicate; -import java.util.stream.Stream; /** * Connection pool implementation that sniffs nodes from elasticsearch at regular intervals. 
@@ -97,25 +96,25 @@ private SnifferTask(int sniffInterval, int sniffAfterFailureDelay) { @Override public void run() { - sniff(node -> true); + sniff(null); } void sniffOnFailure(HttpHost failedHost) { //sync sniff straightaway on failure failure = true; - sniff(host -> host.equals(failedHost) == false); + sniff(failedHost); } - void sniff(Predicate hostFilter) { + void sniff(HttpHost excludeHost) { if (running.compareAndSet(false, true)) { try { - Iterator connectionIterator = nextConnection().iterator(); + Iterator connectionIterator = nextConnection(); if (connectionIterator.hasNext()) { - sniff(connectionIterator, hostFilter); + sniff(connectionIterator, excludeHost); } else { Connection connection = lastResortConnection(); logger.info("no healthy nodes available, trying " + connection.getHost()); - sniff(Stream.of(connection).iterator(), hostFilter); + sniff(Collections.singleton(connection).iterator(), excludeHost); } } catch (Throwable t) { logger.error("error while sniffing nodes", t); @@ -139,15 +138,16 @@ void sniff(Predicate hostFilter) { } } - void sniff(Iterator connectionIterator, Predicate hostFilter) throws IOException { + void sniff(Iterator connectionIterator, HttpHost excludeHost) throws IOException { IOException lastSeenException = null; while (connectionIterator.hasNext()) { Connection connection = connectionIterator.next(); try { List sniffedNodes = sniffer.sniffNodes(connection.getHost()); - HttpHost[] filteredNodes = sniffedNodes.stream().filter(hostFilter).toArray(HttpHost[]::new); - logger.debug("adding " + filteredNodes.length + " nodes out of " + sniffedNodes.size() + " sniffed nodes"); - connections = createConnections(filteredNodes); + if (excludeHost != null) { + sniffedNodes.remove(excludeHost); + } + connections = createConnections(sniffedNodes.toArray(new HttpHost[sniffedNodes.size()])); onSuccess(connection); return; } catch (IOException e) { diff --git a/client/src/test/java/org/elasticsearch/client/sniff/SnifferTests.java 
b/client/src/test/java/org/elasticsearch/client/sniff/SnifferTests.java index 6cb7905151fce..2fa9f7b322584 100644 --- a/client/src/test/java/org/elasticsearch/client/sniff/SnifferTests.java +++ b/client/src/test/java/org/elasticsearch/client/sniff/SnifferTests.java @@ -115,7 +115,7 @@ public void testSniffNodes() throws IOException, URISyntaxException { } } - private static MockWebServer buildMockWebServer(SniffResponse sniffResponse, int sniffTimeout) throws UnsupportedEncodingException { + private static MockWebServer buildMockWebServer(final SniffResponse sniffResponse, final int sniffTimeout) throws UnsupportedEncodingException { MockWebServer server = new MockWebServer(); final Dispatcher dispatcher = new Dispatcher() { @Override @@ -244,7 +244,7 @@ private static class SniffResponse { } static SniffResponse buildFailure() { - return new SniffResponse("", Collections.emptyList(), true); + return new SniffResponse("", Collections.emptyList(), true); } static SniffResponse buildResponse(String nodesInfoBody, List hosts) { From b38ef345e2e8eac35ae2727008f15ed26210ba31 Mon Sep 17 00:00:00 2001 From: javanna Date: Tue, 10 May 2016 10:50:53 +0200 Subject: [PATCH 022/103] remove streams leftover Given that we don't use streams anymore, we can check straightaway if the connection iterator is empty before returning it and resurrect a connection when needed directly in the connection pool, no lastResortConnection method required. 
--- .../elasticsearch/client/ConnectionPool.java | 59 ++++++++----------- .../org/elasticsearch/client/RestClient.java | 9 +-- .../client/sniff/SniffingConnectionPool.java | 10 +--- 3 files changed, 28 insertions(+), 50 deletions(-) diff --git a/client/src/main/java/org/elasticsearch/client/ConnectionPool.java b/client/src/main/java/org/elasticsearch/client/ConnectionPool.java index 89b714f2eeed7..55e9554684eb9 100644 --- a/client/src/main/java/org/elasticsearch/client/ConnectionPool.java +++ b/client/src/main/java/org/elasticsearch/client/ConnectionPool.java @@ -35,11 +35,9 @@ /** * Pool of connections to the different hosts that belong to an elasticsearch cluster. - * It keeps track of the different hosts to communicate with and allows to retrieve a stream of connections to be used + * It keeps track of the different hosts to communicate with and allows to retrieve an iterator of connections to be used * for each request. Marks connections as dead/alive when needed. - * Provides a stream of alive connections or dead ones that should be retried for each {@link #nextConnection()} call. - * In case the returned stream is empty a last resort dead connection should be retrieved by calling {@link #lastResortConnection()} - * and resurrected so that a last resort request attempt can be performed. + * Provides an iterator of connections to be used at each {@link #nextConnection()} call. * The {@link #onSuccess(Connection)} method marks the connection provided as an argument alive. * The {@link #onFailure(Connection)} method marks the connection provided as an argument dead. * This base implementation doesn't define the list implementation that stores connections, so that concurrency can be @@ -60,12 +58,13 @@ public abstract class ConnectionPool implements Closeable { protected abstract List getConnections(); /** - * Returns a stream of connections that should be used for a request call. 
- * Ideally, the first connection is retrieved from the stream and used successfully for the request. - * Otherwise, after each failure the next connection should be retrieved from the stream so that the request can be retried. - * The maximum total of attempts is equal to the number of connections that are available in the stream. - * It may happen that the stream is empty, in which case it means that there aren't healthy connections to use. - * Then {@link #lastResortConnection()} should be called to retrieve a non healthy connection and try it. + * Returns an iterator of connections that should be used for a request call. + * Ideally, the first connection is retrieved from the iterator and used successfully for the request. + * Otherwise, after each failure the next connection should be retrieved from the iterator so that the request can be retried. + * The maximum total of attempts is equal to the number of connections that are available in the iterator. + * The iterator returned will never be empty, rather an {@link IllegalStateException} will be thrown in that case. + * In case there are no alive connections available, or dead ones that should be retried, one dead connection + * gets resurrected and returned. */ public final Iterator nextConnection() { List connections = getConnections(); @@ -73,17 +72,30 @@ public final Iterator nextConnection() { throw new IllegalStateException("no connections available in the connection pool"); } - List sortedConnections = new ArrayList<>(connections); + List rotatedConnections = new ArrayList<>(connections); //TODO is it possible to make this O(1)? 
(rotate is O(n)) - Collections.rotate(sortedConnections, sortedConnections.size() - lastConnectionIndex.getAndIncrement()); - Iterator connectionIterator = sortedConnections.iterator(); + Collections.rotate(rotatedConnections, rotatedConnections.size() - lastConnectionIndex.getAndIncrement()); + Iterator connectionIterator = rotatedConnections.iterator(); while (connectionIterator.hasNext()) { Connection connection = connectionIterator.next(); if (connection.isAlive() == false && connection.shouldBeRetried() == false) { connectionIterator.remove(); } } - return connectionIterator; + if (rotatedConnections.isEmpty()) { + List sortedConnections = new ArrayList<>(connections); + Collections.sort(sortedConnections, new Comparator() { + @Override + public int compare(Connection o1, Connection o2) { + return Long.compare(o1.getDeadUntil(), o2.getDeadUntil()); + } + }); + Connection connection = sortedConnections.get(0); + connection.markResurrected(); + logger.trace("marked connection resurrected for " + connection.getHost()); + return Collections.singleton(connection).iterator(); + } + return rotatedConnections.iterator(); } /** @@ -99,25 +111,6 @@ protected final List createConnections(HttpHost... hosts) { return Collections.unmodifiableList(connections); } - /** - * Returns a connection that is not necessarily healthy, but can be used for a request attempt. To be called as last resort - * only in case {@link #nextConnection()} returns an empty stream. 
- */ - public final Connection lastResortConnection() { - List connections = getConnections(); - if (connections.isEmpty()) { - throw new IllegalStateException("no connections available in the connection pool"); - } - List sortedConnections = new ArrayList<>(connections); - Collections.sort(sortedConnections, new Comparator() { - @Override - public int compare(Connection o1, Connection o2) { - return Long.compare(o1.getDeadUntil(), o2.getDeadUntil()); - } - }); - return sortedConnections.get(0); - } - /** * Called after each successful request call. * Receives as an argument the connection that was used for the successful request. diff --git a/client/src/main/java/org/elasticsearch/client/RestClient.java b/client/src/main/java/org/elasticsearch/client/RestClient.java index f13be9e4e49a1..c4d6ede623e85 100644 --- a/client/src/main/java/org/elasticsearch/client/RestClient.java +++ b/client/src/main/java/org/elasticsearch/client/RestClient.java @@ -39,7 +39,6 @@ import java.io.IOException; import java.net.URI; import java.net.URISyntaxException; -import java.util.Collections; import java.util.Iterator; import java.util.Locale; import java.util.Map; @@ -63,13 +62,7 @@ public ElasticsearchResponse performRequest(String method, String endpoint, Map< throws IOException { URI uri = buildUri(endpoint, params); HttpRequestBase request = createHttpRequest(method, uri, entity); - Iterator connectionIterator = connectionPool.nextConnection(); - if (connectionIterator.hasNext() == false) { - Connection connection = connectionPool.lastResortConnection(); - logger.info("no healthy nodes available, trying " + connection.getHost()); - return performRequest(request, Collections.singleton(connection).iterator()); - } - return performRequest(request, connectionIterator); + return performRequest(request, connectionPool.nextConnection()); } private ElasticsearchResponse performRequest(HttpRequestBase request, Iterator connectionIterator) throws IOException { diff --git 
a/client/src/main/java/org/elasticsearch/client/sniff/SniffingConnectionPool.java b/client/src/main/java/org/elasticsearch/client/sniff/SniffingConnectionPool.java index 6bc7ad84e366d..6fd98666e8c9d 100644 --- a/client/src/main/java/org/elasticsearch/client/sniff/SniffingConnectionPool.java +++ b/client/src/main/java/org/elasticsearch/client/sniff/SniffingConnectionPool.java @@ -29,7 +29,6 @@ import org.elasticsearch.client.RestClient; import java.io.IOException; -import java.util.Collections; import java.util.Iterator; import java.util.List; import java.util.Objects; @@ -108,14 +107,7 @@ void sniffOnFailure(HttpHost failedHost) { void sniff(HttpHost excludeHost) { if (running.compareAndSet(false, true)) { try { - Iterator connectionIterator = nextConnection(); - if (connectionIterator.hasNext()) { - sniff(connectionIterator, excludeHost); - } else { - Connection connection = lastResortConnection(); - logger.info("no healthy nodes available, trying " + connection.getHost()); - sniff(Collections.singleton(connection).iterator(), excludeHost); - } + sniff(nextConnection(), excludeHost); } catch (Throwable t) { logger.error("error while sniffing nodes", t); } finally { From cdffc3d15b10eb5b40f6a56d5d01f92982dc6fc4 Mon Sep 17 00:00:00 2001 From: javanna Date: Fri, 13 May 2016 11:55:18 +0200 Subject: [PATCH 023/103] remove notion of connection pool, turn around dependency between RestClient and Sniffer RestClient exposes a setNodes method, which Sniffer can call to update its nodes. 
--- .../org/elasticsearch/client/Connection.java | 1 - .../elasticsearch/client/ConnectionPool.java | 131 -------- .../org/elasticsearch/client/RestClient.java | 151 ++++++--- .../client/StaticConnectionPool.java | 50 --- .../client/sniff/HostsSniffer.java | 126 ++++++++ .../elasticsearch/client/sniff/Sniffer.java | 290 +++++++++++------ .../client/sniff/SniffingConnectionPool.java | 299 ------------------ .../client/RestClientBuilderTests.java | 45 +-- .../client/StaticConnectionPoolTests.java | 62 ---- ...ifferTests.java => HostsSnifferTests.java} | 54 ++-- .../client/sniff/SnifferBuilderTests.java | 115 +++++++ .../SniffingConnectionPoolBuilderTests.java | 138 -------- 12 files changed, 590 insertions(+), 872 deletions(-) delete mode 100644 client/src/main/java/org/elasticsearch/client/ConnectionPool.java delete mode 100644 client/src/main/java/org/elasticsearch/client/StaticConnectionPool.java create mode 100644 client/src/main/java/org/elasticsearch/client/sniff/HostsSniffer.java delete mode 100644 client/src/main/java/org/elasticsearch/client/sniff/SniffingConnectionPool.java delete mode 100644 client/src/test/java/org/elasticsearch/client/StaticConnectionPoolTests.java rename client/src/test/java/org/elasticsearch/client/sniff/{SnifferTests.java => HostsSnifferTests.java} (84%) create mode 100644 client/src/test/java/org/elasticsearch/client/sniff/SnifferBuilderTests.java delete mode 100644 client/src/test/java/org/elasticsearch/client/sniff/SniffingConnectionPoolBuilderTests.java diff --git a/client/src/main/java/org/elasticsearch/client/Connection.java b/client/src/main/java/org/elasticsearch/client/Connection.java index 3d48a4eeae1f4..f17ff69e14c08 100644 --- a/client/src/main/java/org/elasticsearch/client/Connection.java +++ b/client/src/main/java/org/elasticsearch/client/Connection.java @@ -29,7 +29,6 @@ * Any change to the state of a connection should be made through the connection pool. 
*/ public class Connection { - //TODO make these values configurable through the connection pool? private static final long DEFAULT_CONNECTION_TIMEOUT_MILLIS = TimeUnit.MINUTES.toMillis(1); private static final long MAX_CONNECTION_TIMEOUT_MILLIS = TimeUnit.MINUTES.toMillis(30); private final HttpHost host; diff --git a/client/src/main/java/org/elasticsearch/client/ConnectionPool.java b/client/src/main/java/org/elasticsearch/client/ConnectionPool.java deleted file mode 100644 index 55e9554684eb9..0000000000000 --- a/client/src/main/java/org/elasticsearch/client/ConnectionPool.java +++ /dev/null @@ -1,131 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.client; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.http.HttpHost; - -import java.io.Closeable; -import java.io.IOException; -import java.util.ArrayList; -import java.util.Collections; -import java.util.Comparator; -import java.util.Iterator; -import java.util.List; -import java.util.Objects; -import java.util.concurrent.atomic.AtomicInteger; - -/** - * Pool of connections to the different hosts that belong to an elasticsearch cluster. 
- * It keeps track of the different hosts to communicate with and allows to retrieve an iterator of connections to be used - * for each request. Marks connections as dead/alive when needed. - * Provides an iterator of connections to be used at each {@link #nextConnection()} call. - * The {@link #onSuccess(Connection)} method marks the connection provided as an argument alive. - * The {@link #onFailure(Connection)} method marks the connection provided as an argument dead. - * This base implementation doesn't define the list implementation that stores connections, so that concurrency can be - * handled in subclasses depending on the usecase (e.g. defining the list volatile or final when needed). - */ -public abstract class ConnectionPool implements Closeable { - - private static final Log logger = LogFactory.getLog(ConnectionPool.class); - - private final AtomicInteger lastConnectionIndex = new AtomicInteger(0); - - /** - * Allows to retrieve the concrete list of connections. Not defined directly as a member - * of this class as subclasses may need to handle concurrency if the list can change, for - * instance defining the field as volatile. On the other hand static implementations - * can just make the list final instead. - */ - protected abstract List getConnections(); - - /** - * Returns an iterator of connections that should be used for a request call. - * Ideally, the first connection is retrieved from the iterator and used successfully for the request. - * Otherwise, after each failure the next connection should be retrieved from the iterator so that the request can be retried. - * The maximum total of attempts is equal to the number of connections that are available in the iterator. - * The iterator returned will never be empty, rather an {@link IllegalStateException} will be thrown in that case. - * In case there are no alive connections available, or dead ones that should be retried, one dead connection - * gets resurrected and returned. 
- */ - public final Iterator nextConnection() { - List connections = getConnections(); - if (connections.isEmpty()) { - throw new IllegalStateException("no connections available in the connection pool"); - } - - List rotatedConnections = new ArrayList<>(connections); - //TODO is it possible to make this O(1)? (rotate is O(n)) - Collections.rotate(rotatedConnections, rotatedConnections.size() - lastConnectionIndex.getAndIncrement()); - Iterator connectionIterator = rotatedConnections.iterator(); - while (connectionIterator.hasNext()) { - Connection connection = connectionIterator.next(); - if (connection.isAlive() == false && connection.shouldBeRetried() == false) { - connectionIterator.remove(); - } - } - if (rotatedConnections.isEmpty()) { - List sortedConnections = new ArrayList<>(connections); - Collections.sort(sortedConnections, new Comparator() { - @Override - public int compare(Connection o1, Connection o2) { - return Long.compare(o1.getDeadUntil(), o2.getDeadUntil()); - } - }); - Connection connection = sortedConnections.get(0); - connection.markResurrected(); - logger.trace("marked connection resurrected for " + connection.getHost()); - return Collections.singleton(connection).iterator(); - } - return rotatedConnections.iterator(); - } - - /** - * Helper method to be used by subclasses when needing to create a new list - * of connections given their corresponding hosts - */ - protected final List createConnections(HttpHost... hosts) { - List connections = new ArrayList<>(); - for (HttpHost host : hosts) { - Objects.requireNonNull(host, "host cannot be null"); - connections.add(new Connection(host)); - } - return Collections.unmodifiableList(connections); - } - - /** - * Called after each successful request call. - * Receives as an argument the connection that was used for the successful request. 
- */ - public void onSuccess(Connection connection) { - connection.markAlive(); - logger.trace("marked connection alive for " + connection.getHost()); - } - - /** - * Called after each failed attempt. - * Receives as an argument the connection that was used for the failed attempt. - */ - public void onFailure(Connection connection) throws IOException { - connection.markDead(); - logger.debug("marked connection dead for " + connection.getHost()); - } -} diff --git a/client/src/main/java/org/elasticsearch/client/RestClient.java b/client/src/main/java/org/elasticsearch/client/RestClient.java index c4d6ede623e85..f578d7ce304f5 100644 --- a/client/src/main/java/org/elasticsearch/client/RestClient.java +++ b/client/src/main/java/org/elasticsearch/client/RestClient.java @@ -39,38 +39,51 @@ import java.io.IOException; import java.net.URI; import java.net.URISyntaxException; +import java.util.ArrayList; +import java.util.Collections; +import java.util.Comparator; import java.util.Iterator; +import java.util.List; import java.util.Locale; import java.util.Map; +import java.util.Objects; import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicInteger; public final class RestClient implements Closeable { private static final Log logger = LogFactory.getLog(RestClient.class); private final CloseableHttpClient client; - private final ConnectionPool connectionPool; private final long maxRetryTimeout; + private final AtomicInteger lastConnectionIndex = new AtomicInteger(0); + private volatile List connections; + private volatile FailureListener failureListener = new FailureListener(); - private RestClient(CloseableHttpClient client, ConnectionPool connectionPool, long maxRetryTimeout) { + private RestClient(CloseableHttpClient client, long maxRetryTimeout, HttpHost... hosts) { this.client = client; - this.connectionPool = connectionPool; this.maxRetryTimeout = maxRetryTimeout; + setNodes(hosts); + } + + public synchronized void setNodes(HttpHost... 
hosts) { + List connections = new ArrayList<>(hosts.length); + for (HttpHost host : hosts) { + Objects.requireNonNull(host, "host cannot be null"); + connections.add(new Connection(host)); + } + this.connections = Collections.unmodifiableList(connections); } public ElasticsearchResponse performRequest(String method, String endpoint, Map params, HttpEntity entity) throws IOException { URI uri = buildUri(endpoint, params); HttpRequestBase request = createHttpRequest(method, uri, entity); - return performRequest(request, connectionPool.nextConnection()); - } - - private ElasticsearchResponse performRequest(HttpRequestBase request, Iterator connectionIterator) throws IOException { //we apply a soft margin so that e.g. if a request took 59 seconds and timeout is set to 60 we don't do another attempt long retryTimeout = Math.round(this.maxRetryTimeout / (float)100 * 98); IOException lastSeenException = null; long startTime = System.nanoTime(); - + Iterator connectionIterator = nextConnection(); while (connectionIterator.hasNext()) { Connection connection = connectionIterator.next(); @@ -83,6 +96,7 @@ private ElasticsearchResponse performRequest(HttpRequestBase request, Iterator nextConnection() { + if (this.connections.isEmpty()) { + throw new IllegalStateException("no connections available in the connection pool"); + } + + List rotatedConnections = new ArrayList<>(connections); + //TODO is it possible to make this O(1)? 
(rotate is O(n)) + Collections.rotate(rotatedConnections, rotatedConnections.size() - lastConnectionIndex.getAndIncrement()); + Iterator connectionIterator = rotatedConnections.iterator(); + while (connectionIterator.hasNext()) { + Connection connection = connectionIterator.next(); + if (connection.isAlive() == false && connection.shouldBeRetried() == false) { + connectionIterator.remove(); + } + } + if (rotatedConnections.isEmpty()) { + List sortedConnections = new ArrayList<>(connections); + Collections.sort(sortedConnections, new Comparator() { + @Override + public int compare(Connection o1, Connection o2) { + return Long.compare(o1.getDeadUntil(), o2.getDeadUntil()); + } + }); + Connection connection = sortedConnections.get(0); + connection.markResurrected(); + logger.trace("marked connection resurrected for " + connection.getHost()); + return Collections.singleton(connection).iterator(); + } + return rotatedConnections.iterator(); + } + + /** + * Called after each successful request call. + * Receives as an argument the connection that was used for the successful request. + */ + public void onSuccess(Connection connection) { + connection.markAlive(); + logger.trace("marked connection alive for " + connection.getHost()); + } + + /** + * Called after each failed attempt. + * Receives as an argument the connection that was used for the failed attempt. 
+ */ + private void onFailure(Connection connection) throws IOException { + connection.markDead(); + logger.debug("marked connection dead for " + connection.getHost()); + failureListener.onFailure(connection); + } + + public synchronized void setFailureListener(FailureListener failureListener) { + this.failureListener = failureListener; + } + + @Override + public void close() throws IOException { + client.close(); + } + private static IOException addSuppressedException(IOException suppressedException, IOException currentException) { if (suppressedException != null) { currentException.addSuppressed(suppressedException); @@ -178,12 +258,6 @@ private static URI buildUri(String path, Map params) { } } - @Override - public void close() throws IOException { - connectionPool.close(); - client.close(); - } - /** * Returns a new {@link Builder} to help with {@link RestClient} creation. */ @@ -195,9 +269,11 @@ public static Builder builder() { * Rest client builder. Helps creating a new {@link RestClient}. */ public static final class Builder { - private static final int DEFAULT_MAX_RETRY_TIMEOUT = 10000; + public static final int DEFAULT_CONNECT_TIMEOUT = 500; + public static final int DEFAULT_SOCKET_TIMEOUT = 5000; + public static final int DEFAULT_MAX_RETRY_TIMEOUT = DEFAULT_SOCKET_TIMEOUT; + public static final int DEFAULT_CONNECTION_REQUEST_TIMEOUT = 500; - private ConnectionPool connectionPool; private CloseableHttpClient httpClient; private int maxRetryTimeout = DEFAULT_MAX_RETRY_TIMEOUT; private HttpHost[] hosts; @@ -206,17 +282,9 @@ private Builder() { } - /** - * Sets the connection pool. {@link StaticConnectionPool} will be used if not specified. - * @see ConnectionPool - */ - public Builder setConnectionPool(ConnectionPool connectionPool) { - this.connectionPool = connectionPool; - return this; - } - /** * Sets the http client. A new default one will be created if not specified, by calling {@link #createDefaultHttpClient()}. 
+ * * @see CloseableHttpClient */ public Builder setHttpClient(CloseableHttpClient httpClient) { @@ -227,6 +295,7 @@ public Builder setHttpClient(CloseableHttpClient httpClient) { /** * Sets the maximum timeout to honour in case of multiple retries of the same request. * {@link #DEFAULT_MAX_RETRY_TIMEOUT} if not specified. + * * @throws IllegalArgumentException if maxRetryTimeout is not greater than 0 */ public Builder setMaxRetryTimeout(int maxRetryTimeout) { @@ -256,10 +325,7 @@ public RestClient build() { if (httpClient == null) { httpClient = createDefaultHttpClient(); } - if (connectionPool == null) { - connectionPool = new StaticConnectionPool(hosts); - } - return new RestClient(httpClient, connectionPool, maxRetryTimeout); + return new RestClient(httpClient, maxRetryTimeout, hosts); } /** @@ -274,10 +340,21 @@ public static CloseableHttpClient createDefaultHttpClient() { connectionManager.setMaxTotal(30); //default timeouts are all infinite - RequestConfig requestConfig = RequestConfig.custom().setConnectTimeout(500).setSocketTimeout(10000) - .setConnectionRequestTimeout(500).build(); + RequestConfig requestConfig = RequestConfig.custom().setConnectTimeout(DEFAULT_CONNECT_TIMEOUT) + .setSocketTimeout(DEFAULT_SOCKET_TIMEOUT) + .setConnectionRequestTimeout(DEFAULT_CONNECTION_REQUEST_TIMEOUT).build(); return HttpClientBuilder.create().setConnectionManager(connectionManager).setDefaultRequestConfig(requestConfig).build(); } } + + /** + * Listener that allows to be notified whenever a failure happens. Useful when sniffing is enabled, so that we can sniff on failure. + * The default implementation is a no-op. 
+ */ + public static class FailureListener { + public void onFailure(Connection connection) throws IOException { + + } + } } diff --git a/client/src/main/java/org/elasticsearch/client/StaticConnectionPool.java b/client/src/main/java/org/elasticsearch/client/StaticConnectionPool.java deleted file mode 100644 index 4ec0bcb3c3924..0000000000000 --- a/client/src/main/java/org/elasticsearch/client/StaticConnectionPool.java +++ /dev/null @@ -1,50 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.client; - -import org.apache.http.HttpHost; - -import java.io.IOException; -import java.util.List; - -/** - * Static implementation of {@link ConnectionPool}. Its underlying list of connections is immutable. - */ -public class StaticConnectionPool extends ConnectionPool { - - private final List connections; - - public StaticConnectionPool(HttpHost... 
hosts) { - if (hosts == null || hosts.length == 0) { - throw new IllegalArgumentException("no hosts provided"); - } - this.connections = createConnections(hosts); - } - - @Override - protected List getConnections() { - return connections; - } - - @Override - public void close() throws IOException { - //no-op nothing to close - } -} diff --git a/client/src/main/java/org/elasticsearch/client/sniff/HostsSniffer.java b/client/src/main/java/org/elasticsearch/client/sniff/HostsSniffer.java new file mode 100644 index 0000000000000..29117fcd856cb --- /dev/null +++ b/client/src/main/java/org/elasticsearch/client/sniff/HostsSniffer.java @@ -0,0 +1,126 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.client.sniff; + +import com.fasterxml.jackson.core.JsonFactory; +import com.fasterxml.jackson.core.JsonParser; +import com.fasterxml.jackson.core.JsonToken; +import org.apache.commons.logging.Log; +import org.apache.commons.logging.LogFactory; +import org.apache.http.HttpEntity; +import org.apache.http.HttpHost; +import org.elasticsearch.client.ElasticsearchResponse; +import org.elasticsearch.client.RestClient; + +import java.io.IOException; +import java.io.InputStream; +import java.net.URI; +import java.util.ArrayList; +import java.util.Collections; +import java.util.List; +import java.util.Map; + +/** + * Class responsible for sniffing the http hosts from elasticsearch through the nodes info api and returning them back + */ +public class HostsSniffer { + + private static final Log logger = LogFactory.getLog(HostsSniffer.class); + + private final RestClient restClient; + private final Map sniffRequestParams; + private final String scheme; + private final JsonFactory jsonFactory; + + public HostsSniffer(RestClient restClient, int sniffRequestTimeout, String scheme) { + this.restClient = restClient; + this.sniffRequestParams = Collections.singletonMap("timeout", sniffRequestTimeout + "ms"); + this.scheme = scheme; + this.jsonFactory = new JsonFactory(); + } + + /** + * Calls the elasticsearch nodes info api, parses the response and returns all the found http hosts + */ + public List sniffHosts() throws IOException { + try (ElasticsearchResponse response = restClient.performRequest("get", "/_nodes/http", sniffRequestParams, null)) { + return readHosts(response.getEntity()); + } + } + + private List readHosts(HttpEntity entity) throws IOException { + try (InputStream inputStream = entity.getContent()) { + JsonParser parser = jsonFactory.createParser(inputStream); + if (parser.nextToken() != JsonToken.START_OBJECT) { + throw new IOException("expected data to start with an object"); + } + List hosts = new ArrayList<>(); + while 
(parser.nextToken() != JsonToken.END_OBJECT) { + if (parser.getCurrentToken() == JsonToken.START_OBJECT) { + if ("nodes".equals(parser.getCurrentName())) { + while (parser.nextToken() != JsonToken.END_OBJECT) { + JsonToken token = parser.nextToken(); + assert token == JsonToken.START_OBJECT; + String nodeId = parser.getCurrentName(); + HttpHost sniffedHost = readHost(nodeId, parser, this.scheme); + if (sniffedHost != null) { + logger.trace("adding node [" + nodeId + "]"); + hosts.add(sniffedHost); + } + } + } else { + parser.skipChildren(); + } + } + } + return hosts; + } + } + + private static HttpHost readHost(String nodeId, JsonParser parser, String scheme) throws IOException { + HttpHost httpHost = null; + String fieldName = null; + while (parser.nextToken() != JsonToken.END_OBJECT) { + if (parser.getCurrentToken() == JsonToken.FIELD_NAME) { + fieldName = parser.getCurrentName(); + } else if (parser.getCurrentToken() == JsonToken.START_OBJECT) { + if ("http".equals(fieldName)) { + while (parser.nextToken() != JsonToken.END_OBJECT) { + if (parser.getCurrentToken() == JsonToken.VALUE_STRING && "publish_address".equals(parser.getCurrentName())) { + URI boundAddressAsURI = URI.create(scheme + "://" + parser.getValueAsString()); + httpHost = new HttpHost(boundAddressAsURI.getHost(), boundAddressAsURI.getPort(), + boundAddressAsURI.getScheme()); + } else if (parser.getCurrentToken() == JsonToken.START_OBJECT) { + parser.skipChildren(); + } + } + } else { + parser.skipChildren(); + } + } + } + //http section is not present if http is not enabled on the node, ignore such nodes + if (httpHost == null) { + logger.debug("skipping node [" + nodeId + "] with http disabled"); + return null; + } + return httpHost; + } +} diff --git a/client/src/main/java/org/elasticsearch/client/sniff/Sniffer.java b/client/src/main/java/org/elasticsearch/client/sniff/Sniffer.java index 09a72fedb624e..43ad143738a0b 100644 --- a/client/src/main/java/org/elasticsearch/client/sniff/Sniffer.java 
+++ b/client/src/main/java/org/elasticsearch/client/sniff/Sniffer.java @@ -19,130 +19,228 @@ package org.elasticsearch.client.sniff; -import com.fasterxml.jackson.core.JsonFactory; -import com.fasterxml.jackson.core.JsonParser; -import com.fasterxml.jackson.core.JsonToken; import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; -import org.apache.http.HttpEntity; import org.apache.http.HttpHost; -import org.apache.http.StatusLine; -import org.apache.http.client.config.RequestConfig; -import org.apache.http.client.methods.CloseableHttpResponse; -import org.apache.http.client.methods.HttpGet; import org.apache.http.impl.client.CloseableHttpClient; -import org.apache.http.util.EntityUtils; -import org.elasticsearch.client.ElasticsearchResponseException; -import org.elasticsearch.client.RequestLogger; +import org.elasticsearch.client.Connection; +import org.elasticsearch.client.RestClient; +import java.io.Closeable; import java.io.IOException; -import java.io.InputStream; -import java.net.URI; -import java.util.ArrayList; import java.util.List; +import java.util.Objects; +import java.util.concurrent.Executors; +import java.util.concurrent.ScheduledExecutorService; +import java.util.concurrent.ScheduledFuture; +import java.util.concurrent.TimeUnit; +import java.util.concurrent.atomic.AtomicBoolean; /** * Calls nodes info api and returns a list of http hosts extracted from it. 
*/ //TODO This could potentially be using _cat/nodes which wouldn't require jackson as a dependency, but we'd have bw comp problems with 2.x -final class Sniffer { +public final class Sniffer extends RestClient.FailureListener implements Closeable { private static final Log logger = LogFactory.getLog(Sniffer.class); - private final CloseableHttpClient client; - private final RequestConfig sniffRequestConfig; - private final int sniffRequestTimeout; - private final String scheme; - private final JsonFactory jsonFactory; - - Sniffer(CloseableHttpClient client, RequestConfig sniffRequestConfig, int sniffRequestTimeout, String scheme) { - this.client = client; - this.sniffRequestConfig = sniffRequestConfig; - this.sniffRequestTimeout = sniffRequestTimeout; - this.scheme = scheme; - this.jsonFactory = new JsonFactory(); + private final boolean sniffOnFailure; + private final Task task; + + public Sniffer(RestClient restClient, int sniffRequestTimeout, String scheme, int sniffInterval, + boolean sniffOnFailure, int sniffAfterFailureDelay) { + HostsSniffer hostsSniffer = new HostsSniffer(restClient, sniffRequestTimeout, scheme); + this.task = new Task(hostsSniffer, restClient, sniffInterval, sniffAfterFailureDelay); + this.sniffOnFailure = sniffOnFailure; + restClient.setFailureListener(this); } - List sniffNodes(HttpHost host) throws IOException { - HttpGet httpGet = new HttpGet("/_nodes/http?timeout=" + sniffRequestTimeout + "ms"); - httpGet.setConfig(sniffRequestConfig); - - try (CloseableHttpResponse response = client.execute(host, httpGet)) { - StatusLine statusLine = response.getStatusLine(); - if (statusLine.getStatusCode() >= 300) { - RequestLogger.log(logger, "sniff failed", httpGet, host, response); - String responseBody = null; - if (response.getEntity() != null) { - responseBody = EntityUtils.toString(response.getEntity()); - } - throw new ElasticsearchResponseException(httpGet.getRequestLine(), host, response.getStatusLine(), responseBody); - } else { - List 
nodes = readHosts(response.getEntity()); - RequestLogger.log(logger, "sniff succeeded", httpGet, host, response); - return nodes; - } - } catch(IOException e) { - RequestLogger.log(logger, "sniff failed", httpGet, host, e); - throw e; + @Override + public void onFailure(Connection connection) throws IOException { + if (sniffOnFailure) { + //re-sniff immediately but take out the node that failed + task.sniffOnFailure(connection.getHost()); } } - private List readHosts(HttpEntity entity) throws IOException { - try (InputStream inputStream = entity.getContent()) { - JsonParser parser = jsonFactory.createParser(inputStream); - if (parser.nextToken() != JsonToken.START_OBJECT) { - throw new IOException("expected data to start with an object"); - } - List hosts = new ArrayList<>(); - while (parser.nextToken() != JsonToken.END_OBJECT) { - if (parser.getCurrentToken() == JsonToken.START_OBJECT) { - if ("nodes".equals(parser.getCurrentName())) { - while (parser.nextToken() != JsonToken.END_OBJECT) { - JsonToken token = parser.nextToken(); - assert token == JsonToken.START_OBJECT; - String nodeId = parser.getCurrentName(); - HttpHost sniffedHost = readNode(nodeId, parser, this.scheme); - if (sniffedHost != null) { - logger.trace("adding node [" + nodeId + "]"); - hosts.add(sniffedHost); - } - } - } else { - parser.skipChildren(); + @Override + public void close() throws IOException { + task.shutdown(); + } + + private static class Task implements Runnable { + private final HostsSniffer hostsSniffer; + private final RestClient restClient; + + private final int sniffInterval; + private final int sniffAfterFailureDelay; + private final ScheduledExecutorService scheduledExecutorService; + private final AtomicBoolean running = new AtomicBoolean(false); + private volatile int nextSniffDelay; + private volatile ScheduledFuture scheduledFuture; + + private Task(HostsSniffer hostsSniffer, RestClient restClient, int sniffInterval, int sniffAfterFailureDelay) { + this.hostsSniffer = 
hostsSniffer; + this.restClient = restClient; + this.sniffInterval = sniffInterval; + this.sniffAfterFailureDelay = sniffAfterFailureDelay; + this.scheduledExecutorService = Executors.newScheduledThreadPool(1); + this.scheduledFuture = this.scheduledExecutorService.schedule(this, 0, TimeUnit.MILLISECONDS); + this.nextSniffDelay = sniffInterval; + } + + @Override + public void run() { + sniff(null); + } + + void sniffOnFailure(HttpHost failedHost) { + this.nextSniffDelay = sniffAfterFailureDelay; + sniff(failedHost); + } + + void sniff(HttpHost excludeHost) { + if (running.compareAndSet(false, true)) { + try { + List sniffedNodes = hostsSniffer.sniffHosts(); + if (excludeHost != null) { + sniffedNodes.remove(excludeHost); + } + logger.debug("sniffed nodes: " + sniffedNodes); + this.restClient.setNodes(sniffedNodes.toArray(new HttpHost[sniffedNodes.size()])); + } catch (Throwable t) { + logger.error("error while sniffing nodes", t); + } finally { + try { + //regardless of whether and when the next sniff is scheduled, cancel it and schedule a new one with updated delay + this.scheduledFuture.cancel(false); + logger.debug("scheduling next sniff in " + nextSniffDelay + " ms"); + this.scheduledFuture = this.scheduledExecutorService.schedule(this, nextSniffDelay, TimeUnit.MILLISECONDS); + } catch (Throwable t) { + logger.error("error while scheduling next sniffer task", t); + } finally { + this.nextSniffDelay = sniffInterval; + running.set(false); } } } - return hosts; } - } - private static HttpHost readNode(String nodeId, JsonParser parser, String scheme) throws IOException { - HttpHost httpHost = null; - String fieldName = null; - while (parser.nextToken() != JsonToken.END_OBJECT) { - if (parser.getCurrentToken() == JsonToken.FIELD_NAME) { - fieldName = parser.getCurrentName(); - } else if (parser.getCurrentToken() == JsonToken.START_OBJECT) { - if ("http".equals(fieldName)) { - while (parser.nextToken() != JsonToken.END_OBJECT) { - if (parser.getCurrentToken() == 
JsonToken.VALUE_STRING && "publish_address".equals(parser.getCurrentName())) { - URI boundAddressAsURI = URI.create(scheme + "://" + parser.getValueAsString()); - httpHost = new HttpHost(boundAddressAsURI.getHost(), boundAddressAsURI.getPort(), - boundAddressAsURI.getScheme()); - } else if (parser.getCurrentToken() == JsonToken.START_OBJECT) { - parser.skipChildren(); - } - } - } else { - parser.skipChildren(); + void shutdown() { + scheduledExecutorService.shutdown(); + try { + if (scheduledExecutorService.awaitTermination(1000, TimeUnit.MILLISECONDS)) { + return; } + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + } + scheduledExecutorService.shutdownNow(); + } + } + + /** + * Returns a new {@link Builder} to help with {@link Sniffer} creation. + */ + public static Builder builder() { + return new Builder(); + } + + /** + * Sniffer builder. Helps creating a new {@link Sniffer}. + */ + public static final class Builder { + public static final int DEFAULT_SNIFF_INTERVAL = 60000 * 5; //5 minutes + public static final int DEFAULT_SNIFF_AFTER_FAILURE_DELAY = 60000; //1 minute + public static final int DEFAULT_SNIFF_REQUEST_TIMEOUT = 1000; //1 second + + private int sniffRequestTimeout = DEFAULT_SNIFF_REQUEST_TIMEOUT; + private int sniffInterval = DEFAULT_SNIFF_INTERVAL; + private boolean sniffOnFailure = true; + private int sniffAfterFailureDelay = DEFAULT_SNIFF_AFTER_FAILURE_DELAY; + private String scheme = "http"; + private RestClient restClient; + + private Builder() { + + } + + /** + * Sets the interval between consecutive ordinary sniff executions. Will be honoured when sniffOnFailure is disabled or + * when there are no failures between consecutive sniff executions. 
+ * @throws IllegalArgumentException if sniffInterval is not greater than 0 + */ + public Builder setSniffInterval(int sniffInterval) { + if (sniffInterval <= 0) { + throw new IllegalArgumentException("sniffInterval must be greater than 0"); } + this.sniffInterval = sniffInterval; + return this; + } + + /** + * Enables/disables sniffing on failure. If enabled, at each failure nodes will be reloaded, and a new sniff execution will + * be scheduled after a shorter time than usual (sniffAfterFailureDelay). + */ + public Builder setSniffOnFailure(boolean sniffOnFailure) { + this.sniffOnFailure = sniffOnFailure; + return this; } - //http section is not present if http is not enabled on the node, ignore such nodes - if (httpHost == null) { - logger.debug("skipping node [" + nodeId + "] with http disabled"); - return null; + + /** + * Sets the delay of a sniff execution scheduled after a failure. + */ + public Builder setSniffAfterFailureDelay(int sniffAfterFailureDelay) { + if (sniffAfterFailureDelay <= 0) { + throw new IllegalArgumentException("sniffAfterFailureDelay must be greater than 0"); + } + this.sniffAfterFailureDelay = sniffAfterFailureDelay; + return this; + } + + /** + * Sets the http client. Mandatory argument. Best practice is to use the same client used + * within {@link org.elasticsearch.client.RestClient} which can be created manually or + * through {@link RestClient.Builder#createDefaultHttpClient()}. + * @see CloseableHttpClient + */ + public Builder setRestClient(RestClient restClient) { + this.restClient = restClient; + return this; + } + + /** + * Sets the sniff request timeout to be passed in as a query string parameter to elasticsearch. + * Allows to halt the request without any failure, as only the nodes that have responded + * within this timeout will be returned. 
+ */ + public Builder setSniffRequestTimeout(int sniffRequestTimeout) { + if (sniffRequestTimeout <=0) { + throw new IllegalArgumentException("sniffRequestTimeout must be greater than 0"); + } + this.sniffRequestTimeout = sniffRequestTimeout; + return this; + } + + /** + * Sets the scheme to be used for sniffed nodes. This information is not returned by elasticsearch, + * default is http but should be customized if https is needed/enabled. + */ + public Builder setScheme(String scheme) { + Objects.requireNonNull(scheme, "scheme cannot be null"); + if (scheme.equals("http") == false && scheme.equals("https") == false) { + throw new IllegalArgumentException("scheme must be either http or https"); + } + this.scheme = scheme; + return this; + } + + /** + * Creates the {@link Sniffer} based on the provided configuration. + */ + public Sniffer build() { + Objects.requireNonNull(restClient, "restClient cannot be null"); + return new Sniffer(restClient, sniffRequestTimeout, scheme, sniffInterval, sniffOnFailure, sniffAfterFailureDelay); } - return httpHost; } } diff --git a/client/src/main/java/org/elasticsearch/client/sniff/SniffingConnectionPool.java b/client/src/main/java/org/elasticsearch/client/sniff/SniffingConnectionPool.java deleted file mode 100644 index 6fd98666e8c9d..0000000000000 --- a/client/src/main/java/org/elasticsearch/client/sniff/SniffingConnectionPool.java +++ /dev/null @@ -1,299 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.client.sniff; - -import org.apache.commons.logging.Log; -import org.apache.commons.logging.LogFactory; -import org.apache.http.HttpHost; -import org.apache.http.client.config.RequestConfig; -import org.apache.http.impl.client.CloseableHttpClient; -import org.elasticsearch.client.Connection; -import org.elasticsearch.client.ConnectionPool; -import org.elasticsearch.client.RestClient; - -import java.io.IOException; -import java.util.Iterator; -import java.util.List; -import java.util.Objects; -import java.util.concurrent.Executors; -import java.util.concurrent.ScheduledExecutorService; -import java.util.concurrent.ScheduledFuture; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicBoolean; - -/** - * Connection pool implementation that sniffs nodes from elasticsearch at regular intervals. - * Can optionally sniff nodes on each failure as well. - */ -public class SniffingConnectionPool extends ConnectionPool { - - private static final Log logger = LogFactory.getLog(SniffingConnectionPool.class); - - private final boolean sniffOnFailure; - private final Sniffer sniffer; - private volatile List connections; - private final SnifferTask snifferTask; - - private SniffingConnectionPool(int sniffInterval, boolean sniffOnFailure, int sniffAfterFailureDelay, CloseableHttpClient client, - RequestConfig sniffRequestConfig, int sniffRequestTimeout, String scheme, HttpHost... 
hosts) { - this.sniffOnFailure = sniffOnFailure; - this.sniffer = new Sniffer(client, sniffRequestConfig, sniffRequestTimeout, scheme); - this.connections = createConnections(hosts); - this.snifferTask = new SnifferTask(sniffInterval, sniffAfterFailureDelay); - } - - @Override - protected List getConnections() { - return this.connections; - } - - @Override - public void onFailure(Connection connection) throws IOException { - super.onFailure(connection); - if (sniffOnFailure) { - //re-sniff immediately but take out the node that failed - snifferTask.sniffOnFailure(connection.getHost()); - } - } - - @Override - public void close() throws IOException { - snifferTask.shutdown(); - } - - private class SnifferTask implements Runnable { - private final int sniffInterval; - private final int sniffAfterFailureDelay; - private final ScheduledExecutorService scheduledExecutorService; - private final AtomicBoolean running = new AtomicBoolean(false); - private volatile boolean failure = false; - private volatile ScheduledFuture scheduledFuture; - - private SnifferTask(int sniffInterval, int sniffAfterFailureDelay) { - this.sniffInterval = sniffInterval; - this.sniffAfterFailureDelay = sniffAfterFailureDelay; - this.scheduledExecutorService = Executors.newScheduledThreadPool(1); - this.scheduledFuture = this.scheduledExecutorService.schedule(this, 0, TimeUnit.MILLISECONDS); - } - - @Override - public void run() { - sniff(null); - } - - void sniffOnFailure(HttpHost failedHost) { - //sync sniff straightaway on failure - failure = true; - sniff(failedHost); - } - - void sniff(HttpHost excludeHost) { - if (running.compareAndSet(false, true)) { - try { - sniff(nextConnection(), excludeHost); - } catch (Throwable t) { - logger.error("error while sniffing nodes", t); - } finally { - try { - //regardless of whether and when the next sniff is scheduled, cancel it and schedule a new one with updated delay - this.scheduledFuture.cancel(false); - if (this.failure) { - this.scheduledFuture = 
this.scheduledExecutorService.schedule(this, - sniffAfterFailureDelay, TimeUnit.MILLISECONDS); - this.failure = false; - } else { - this.scheduledFuture = this.scheduledExecutorService.schedule(this, sniffInterval, TimeUnit.MILLISECONDS); - } - } catch (Throwable t) { - logger.error("error while scheduling next sniffer task", t); - } finally { - running.set(false); - } - } - } - } - - void sniff(Iterator connectionIterator, HttpHost excludeHost) throws IOException { - IOException lastSeenException = null; - while (connectionIterator.hasNext()) { - Connection connection = connectionIterator.next(); - try { - List sniffedNodes = sniffer.sniffNodes(connection.getHost()); - if (excludeHost != null) { - sniffedNodes.remove(excludeHost); - } - connections = createConnections(sniffedNodes.toArray(new HttpHost[sniffedNodes.size()])); - onSuccess(connection); - return; - } catch (IOException e) { - //here we have control over the request, if it fails something is really wrong, always call onFailure - onFailure(connection); - if (lastSeenException != null) { - e.addSuppressed(lastSeenException); - } - lastSeenException = e; - } - } - logger.warn("failed to sniff nodes", lastSeenException); - } - - void shutdown() { - scheduledExecutorService.shutdown(); - try { - if (scheduledExecutorService.awaitTermination(1000, TimeUnit.MILLISECONDS)) { - return; - } - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - } - scheduledExecutorService.shutdownNow(); - } - } - - /** - * Returns a new {@link Builder} to help with {@link SniffingConnectionPool} creation. - */ - public static Builder builder() { - return new Builder(); - } - - /** - * Sniffing connection pool builder. Helps creating a new {@link SniffingConnectionPool}. 
- */ - public static final class Builder { - private int sniffInterval = 5 * 1000 * 60; - private boolean sniffOnFailure = true; - private int sniffAfterFailureDelay = 60000; - private CloseableHttpClient httpClient; - private RequestConfig sniffRequestConfig; - private int sniffRequestTimeout = 1000; - private String scheme = "http"; - private HttpHost[] hosts; - - private Builder() { - - } - - /** - * Sets the interval between consecutive ordinary sniff executions. Will be honoured when sniffOnFailure is disabled or - * when there are no failures between consecutive sniff executions. - * @throws IllegalArgumentException if sniffInterval is not greater than 0 - */ - public Builder setSniffInterval(int sniffInterval) { - if (sniffInterval <= 0) { - throw new IllegalArgumentException("sniffInterval must be greater than 0"); - } - this.sniffInterval = sniffInterval; - return this; - } - - /** - * Enables/disables sniffing on failure. If enabled, at each failure nodes will be reloaded, and a new sniff execution will - * be scheduled after a shorter time than usual (sniffAfterFailureDelay). - */ - public Builder setSniffOnFailure(boolean sniffOnFailure) { - this.sniffOnFailure = sniffOnFailure; - return this; - } - - /** - * Sets the delay of a sniff execution scheduled after a failure. - */ - public Builder setSniffAfterFailureDelay(int sniffAfterFailureDelay) { - if (sniffAfterFailureDelay <= 0) { - throw new IllegalArgumentException("sniffAfterFailureDelay must be greater than 0"); - } - this.sniffAfterFailureDelay = sniffAfterFailureDelay; - return this; - } - - /** - * Sets the http client. Mandatory argument. Best practice is to use the same client used - * within {@link org.elasticsearch.client.RestClient} which can be created manually or - * through {@link RestClient.Builder#createDefaultHttpClient()}. 
- * @see CloseableHttpClient - */ - public Builder setHttpClient(CloseableHttpClient httpClient) { - this.httpClient = httpClient; - return this; - } - - /** - * Sets the configuration to be used for each sniff request. Useful as sniff can have - * different timeouts compared to ordinary requests. - * @see RequestConfig - */ - public Builder setSniffRequestConfig(RequestConfig sniffRequestConfig) { - this.sniffRequestConfig = sniffRequestConfig; - return this; - } - - /** - * Sets the sniff request timeout to be passed in as a query string parameter to elasticsearch. - * Allows to halt the request without any failure, as only the nodes that have responded - * within this timeout will be returned. - */ - public Builder setSniffRequestTimeout(int sniffRequestTimeout) { - if (sniffRequestTimeout <=0) { - throw new IllegalArgumentException("sniffRequestTimeout must be greater than 0"); - } - this.sniffRequestTimeout = sniffRequestTimeout; - return this; - } - - /** - * Sets the scheme to be used for sniffed nodes. This information is not returned by elasticsearch, - * default is http but should be customized if https is needed/enabled. - */ - public Builder setScheme(String scheme) { - Objects.requireNonNull(scheme, "scheme cannot be null"); - if (scheme.equals("http") == false && scheme.equals("https") == false) { - throw new IllegalArgumentException("scheme must be either http or https"); - } - this.scheme = scheme; - return this; - } - - /** - * Sets the hosts that the client will send requests to. - */ - public Builder setHosts(HttpHost... hosts) { - this.hosts = hosts; - return this; - } - - /** - * Creates the {@link SniffingConnectionPool} based on the provided configuration. 
- */ - public SniffingConnectionPool build() { - Objects.requireNonNull(httpClient, "httpClient cannot be null"); - if (hosts == null || hosts.length == 0) { - throw new IllegalArgumentException("no hosts provided"); - } - - if (sniffRequestConfig == null) { - sniffRequestConfig = RequestConfig.custom().setConnectTimeout(500).setSocketTimeout(1000) - .setConnectionRequestTimeout(500).build(); - } - return new SniffingConnectionPool(sniffInterval, sniffOnFailure, sniffAfterFailureDelay, httpClient, sniffRequestConfig, - sniffRequestTimeout, scheme, hosts); - } - } -} diff --git a/client/src/test/java/org/elasticsearch/client/RestClientBuilderTests.java b/client/src/test/java/org/elasticsearch/client/RestClientBuilderTests.java index 11008d52b190b..e0828f2bd1fcc 100644 --- a/client/src/test/java/org/elasticsearch/client/RestClientBuilderTests.java +++ b/client/src/test/java/org/elasticsearch/client/RestClientBuilderTests.java @@ -25,8 +25,6 @@ import org.apache.lucene.util.LuceneTestCase; import java.io.IOException; -import java.util.Collections; -import java.util.List; import java.util.logging.LogManager; public class RestClientBuilderTests extends LuceneTestCase { @@ -57,38 +55,23 @@ public void testBuild() throws IOException { assertEquals(e.getMessage(), "no hosts provided"); } - RestClient.Builder builder = RestClient.builder(); - if (random().nextBoolean()) { - ConnectionPool connectionPool = new ConnectionPool() { - @Override - protected List getConnections() { - return Collections.emptyList(); - } - - @Override - public void onSuccess(Connection connection) { - - } - - @Override - public void onFailure(Connection connection) throws IOException { + try { + RestClient.builder(); + fail("should have failed"); + } catch(IllegalArgumentException e) { + assertEquals(e.getMessage(), "no hosts provided"); + } - } + RestClient.Builder builder = RestClient.builder(); + int numNodes = RandomInts.randomIntBetween(random(), 1, 5); + HttpHost[] hosts = new 
HttpHost[numNodes]; + for (int i = 0; i < numNodes; i++) { + hosts[i] = new HttpHost("localhost", 9200 + i); + } + builder.setHosts(hosts); - @Override - public void close() throws IOException { + //TODO test one host is null among others - } - }; - builder.setConnectionPool(connectionPool); - } else { - int numNodes = RandomInts.randomIntBetween(random(), 1, 5); - HttpHost[] hosts = new HttpHost[numNodes]; - for (int i = 0; i < numNodes; i++) { - hosts[i] = new HttpHost("localhost", 9200 + i); - } - builder.setHosts(hosts); - } if (random().nextBoolean()) { builder.setHttpClient(HttpClientBuilder.create().build()); } diff --git a/client/src/test/java/org/elasticsearch/client/StaticConnectionPoolTests.java b/client/src/test/java/org/elasticsearch/client/StaticConnectionPoolTests.java deleted file mode 100644 index c777666e98bdf..0000000000000 --- a/client/src/test/java/org/elasticsearch/client/StaticConnectionPoolTests.java +++ /dev/null @@ -1,62 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.client; - -import com.carrotsearch.randomizedtesting.generators.RandomInts; -import org.apache.http.HttpHost; -import org.apache.lucene.util.LuceneTestCase; - -import java.util.logging.LogManager; - -public class StaticConnectionPoolTests extends LuceneTestCase { - - static { - LogManager.getLogManager().reset(); - } - - public void testConstructor() { - int numNodes = RandomInts.randomIntBetween(random(), 1, 5); - HttpHost[] hosts = new HttpHost[numNodes]; - for (int i = 0; i < numNodes; i++) { - hosts[i] = new HttpHost("localhost", 9200); - } - - try { - new StaticConnectionPool((HttpHost) null); - } catch(NullPointerException e) { - assertEquals(e.getMessage(), "host cannot be null"); - } - - try { - new StaticConnectionPool((HttpHost[])null); - } catch(IllegalArgumentException e) { - assertEquals(e.getMessage(), "no hosts provided"); - } - - try { - new StaticConnectionPool(); - } catch(IllegalArgumentException e) { - assertEquals(e.getMessage(), "no hosts provided"); - } - - StaticConnectionPool staticConnectionPool = new StaticConnectionPool(hosts); - assertNotNull(staticConnectionPool); - } -} diff --git a/client/src/test/java/org/elasticsearch/client/sniff/SnifferTests.java b/client/src/test/java/org/elasticsearch/client/sniff/HostsSnifferTests.java similarity index 84% rename from client/src/test/java/org/elasticsearch/client/sniff/SnifferTests.java rename to client/src/test/java/org/elasticsearch/client/sniff/HostsSnifferTests.java index 2fa9f7b322584..c9e1b48a79195 100644 --- a/client/src/test/java/org/elasticsearch/client/sniff/SnifferTests.java +++ b/client/src/test/java/org/elasticsearch/client/sniff/HostsSnifferTests.java @@ -29,11 +29,9 @@ import okhttp3.mockwebserver.MockWebServer; import okhttp3.mockwebserver.RecordedRequest; import org.apache.http.HttpHost; -import org.apache.http.client.config.RequestConfig; -import org.apache.http.impl.client.CloseableHttpClient; -import 
org.apache.http.impl.client.HttpClientBuilder; import org.apache.lucene.util.LuceneTestCase; import org.elasticsearch.client.ElasticsearchResponseException; +import org.elasticsearch.client.RestClient; import org.junit.After; import org.junit.Before; @@ -57,7 +55,7 @@ import static org.hamcrest.CoreMatchers.containsString; import static org.hamcrest.CoreMatchers.equalTo; -public class SnifferTests extends LuceneTestCase { +public class HostsSnifferTests extends LuceneTestCase { static { //prevent MockWebServer from logging to stdout and stderr @@ -88,34 +86,36 @@ public void stopMockWebServer() throws IOException { } public void testSniffNodes() throws IOException, URISyntaxException { - CloseableHttpClient client = HttpClientBuilder.create().build(); - Sniffer sniffer = new Sniffer(client, RequestConfig.DEFAULT, sniffRequestTimeout, scheme); HttpHost httpHost = new HttpHost(server.getHostName(), server.getPort()); - try { - List sniffedHosts = sniffer.sniffNodes(httpHost); - if (sniffResponse.isFailure) { - fail("sniffNodes should have failed"); - } - assertThat(sniffedHosts.size(), equalTo(sniffResponse.hosts.size())); - Iterator responseHostsIterator = sniffResponse.hosts.iterator(); - for (HttpHost sniffedHost : sniffedHosts) { - assertEquals(sniffedHost, responseHostsIterator.next()); - } - } catch(ElasticsearchResponseException e) { - if (sniffResponse.isFailure) { - assertThat(e.getMessage(), containsString("GET http://localhost:" + server.getPort() + - "/_nodes/http?timeout=" + sniffRequestTimeout)); - assertThat(e.getMessage(), containsString(Integer.toString(sniffResponse.nodesInfoResponseCode))); - assertThat(e.getHost(), equalTo(httpHost)); - assertThat(e.getStatusLine().getStatusCode(), equalTo(sniffResponse.nodesInfoResponseCode)); - assertThat(e.getRequestLine().toString(), equalTo("GET /_nodes/http?timeout=" + sniffRequestTimeout + "ms HTTP/1.1")); - } else { - fail("sniffNodes should have succeeded: " + e.getStatusLine()); + try (RestClient 
restClient = RestClient.builder().setHosts(httpHost).build()) { + HostsSniffer sniffer = new HostsSniffer(restClient, sniffRequestTimeout, scheme); + try { + List sniffedHosts = sniffer.sniffHosts(); + if (sniffResponse.isFailure) { + fail("sniffNodes should have failed"); + } + assertThat(sniffedHosts.size(), equalTo(sniffResponse.hosts.size())); + Iterator responseHostsIterator = sniffResponse.hosts.iterator(); + for (HttpHost sniffedHost : sniffedHosts) { + assertEquals(sniffedHost, responseHostsIterator.next()); + } + } catch(ElasticsearchResponseException e) { + if (sniffResponse.isFailure) { + assertThat(e.getMessage(), containsString("GET http://localhost:" + server.getPort() + + "/_nodes/http?timeout=" + sniffRequestTimeout)); + assertThat(e.getMessage(), containsString(Integer.toString(sniffResponse.nodesInfoResponseCode))); + assertThat(e.getHost(), equalTo(httpHost)); + assertThat(e.getStatusLine().getStatusCode(), equalTo(sniffResponse.nodesInfoResponseCode)); + assertThat(e.getRequestLine().toString(), equalTo("GET /_nodes/http?timeout=" + sniffRequestTimeout + "ms HTTP/1.1")); + } else { + fail("sniffNodes should have succeeded: " + e.getStatusLine()); + } } } } - private static MockWebServer buildMockWebServer(final SniffResponse sniffResponse, final int sniffTimeout) throws UnsupportedEncodingException { + private static MockWebServer buildMockWebServer(final SniffResponse sniffResponse, final int sniffTimeout) + throws UnsupportedEncodingException { MockWebServer server = new MockWebServer(); final Dispatcher dispatcher = new Dispatcher() { @Override diff --git a/client/src/test/java/org/elasticsearch/client/sniff/SnifferBuilderTests.java b/client/src/test/java/org/elasticsearch/client/sniff/SnifferBuilderTests.java new file mode 100644 index 0000000000000..6373fae00d586 --- /dev/null +++ b/client/src/test/java/org/elasticsearch/client/sniff/SnifferBuilderTests.java @@ -0,0 +1,115 @@ +/* + * Licensed to Elasticsearch under one or more contributor + 
* license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.sniff; + +import com.carrotsearch.randomizedtesting.generators.RandomInts; +import com.carrotsearch.randomizedtesting.generators.RandomPicks; +import org.apache.http.HttpHost; +import org.apache.lucene.util.LuceneTestCase; +import org.elasticsearch.client.RestClient; + +import java.util.Arrays; +import java.util.logging.LogManager; + +public class SnifferBuilderTests extends LuceneTestCase { + + static { + LogManager.getLogManager().reset(); + } + + public void testBuild() throws Exception { + + try { + Sniffer.builder().setScheme(null); + fail("should have failed"); + } catch(NullPointerException e) { + assertEquals(e.getMessage(), "scheme cannot be null"); + } + + try { + Sniffer.builder().setScheme("whatever"); + fail("should have failed"); + } catch(IllegalArgumentException e) { + assertEquals(e.getMessage(), "scheme must be either http or https"); + } + + try { + Sniffer.builder().setSniffInterval(RandomInts.randomIntBetween(random(), Integer.MIN_VALUE, 0)); + fail("should have failed"); + } catch(IllegalArgumentException e) { + assertEquals(e.getMessage(), "sniffInterval must be greater than 0"); + } + + try { + Sniffer.builder().setSniffRequestTimeout(RandomInts.randomIntBetween(random(), 
Integer.MIN_VALUE, 0)); + fail("should have failed"); + } catch(IllegalArgumentException e) { + assertEquals(e.getMessage(), "sniffRequestTimeout must be greater than 0"); + } + + try { + Sniffer.builder().setSniffAfterFailureDelay(RandomInts.randomIntBetween(random(), Integer.MIN_VALUE, 0)); + fail("should have failed"); + } catch(IllegalArgumentException e) { + assertEquals(e.getMessage(), "sniffAfterFailureDelay must be greater than 0"); + } + + try { + Sniffer.builder().build(); + fail("should have failed"); + } catch(NullPointerException e) { + assertEquals(e.getMessage(), "restClient cannot be null"); + } + + int numNodes = RandomInts.randomIntBetween(random(), 1, 5); + HttpHost[] hosts = new HttpHost[numNodes]; + for (int i = 0; i < numNodes; i++) { + hosts[i] = new HttpHost("localhost", 9200 + i); + } + + try (RestClient client = RestClient.builder().setHosts(hosts).build()) { + try (Sniffer sniffer = Sniffer.builder().setRestClient(client).build()) { + assertNotNull(sniffer); + } + } + + try (RestClient client = RestClient.builder().setHosts(hosts).build()) { + Sniffer.Builder builder = Sniffer.builder().setRestClient(client); + if (random().nextBoolean()) { + builder.setScheme(RandomPicks.randomFrom(random(), Arrays.asList("http", "https"))); + } + if (random().nextBoolean()) { + builder.setSniffInterval(RandomInts.randomIntBetween(random(), 1, Integer.MAX_VALUE)); + } + if (random().nextBoolean()) { + builder.setSniffAfterFailureDelay(RandomInts.randomIntBetween(random(), 1, Integer.MAX_VALUE)); + } + if (random().nextBoolean()) { + builder.setSniffRequestTimeout(RandomInts.randomIntBetween(random(), 1, Integer.MAX_VALUE)); + } + if (random().nextBoolean()) { + builder.setSniffOnFailure(random().nextBoolean()); + } + try (Sniffer connectionPool = builder.build()) { + assertNotNull(connectionPool); + } + } + } +} diff --git a/client/src/test/java/org/elasticsearch/client/sniff/SniffingConnectionPoolBuilderTests.java 
b/client/src/test/java/org/elasticsearch/client/sniff/SniffingConnectionPoolBuilderTests.java deleted file mode 100644 index b5b4eaa3ce1bf..0000000000000 --- a/client/src/test/java/org/elasticsearch/client/sniff/SniffingConnectionPoolBuilderTests.java +++ /dev/null @@ -1,138 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.client.sniff; - -import com.carrotsearch.randomizedtesting.generators.RandomInts; -import com.carrotsearch.randomizedtesting.generators.RandomPicks; -import org.apache.http.HttpHost; -import org.apache.http.client.config.RequestConfig; -import org.apache.http.impl.client.HttpClientBuilder; -import org.apache.lucene.util.LuceneTestCase; - -import java.util.Arrays; -import java.util.logging.LogManager; - -public class SniffingConnectionPoolBuilderTests extends LuceneTestCase { - - static { - LogManager.getLogManager().reset(); - } - - public void testBuild() throws Exception { - - try { - SniffingConnectionPool.builder().setScheme(null); - fail("should have failed"); - } catch(NullPointerException e) { - assertEquals(e.getMessage(), "scheme cannot be null"); - } - - try { - SniffingConnectionPool.builder().setScheme("whatever"); - fail("should have failed"); - } catch(IllegalArgumentException e) { - assertEquals(e.getMessage(), "scheme must be either http or https"); - } - - try { - SniffingConnectionPool.builder().setSniffInterval(RandomInts.randomIntBetween(random(), Integer.MIN_VALUE, 0)); - fail("should have failed"); - } catch(IllegalArgumentException e) { - assertEquals(e.getMessage(), "sniffInterval must be greater than 0"); - } - - try { - SniffingConnectionPool.builder().setSniffRequestTimeout(RandomInts.randomIntBetween(random(), Integer.MIN_VALUE, 0)); - fail("should have failed"); - } catch(IllegalArgumentException e) { - assertEquals(e.getMessage(), "sniffRequestTimeout must be greater than 0"); - } - - try { - SniffingConnectionPool.builder().setSniffAfterFailureDelay(RandomInts.randomIntBetween(random(), Integer.MIN_VALUE, 0)); - fail("should have failed"); - } catch(IllegalArgumentException e) { - assertEquals(e.getMessage(), "sniffAfterFailureDelay must be greater than 0"); - } - - try { - SniffingConnectionPool.builder().build(); - fail("should have failed"); - } catch(NullPointerException e) { - 
assertEquals(e.getMessage(), "httpClient cannot be null"); - } - - try { - SniffingConnectionPool.builder().setHttpClient(HttpClientBuilder.create().build()).build(); - fail("should have failed"); - } catch(IllegalArgumentException e) { - assertEquals(e.getMessage(), "no hosts provided"); - } - - try { - SniffingConnectionPool.builder().setHttpClient(HttpClientBuilder.create().build()).setHosts((HttpHost[])null).build(); - fail("should have failed"); - } catch(IllegalArgumentException e) { - assertEquals(e.getMessage(), "no hosts provided"); - } - - try { - SniffingConnectionPool.builder().setHttpClient(HttpClientBuilder.create().build()).setHosts().build(); - fail("should have failed"); - } catch(IllegalArgumentException e) { - assertEquals(e.getMessage(), "no hosts provided"); - } - - int numNodes = RandomInts.randomIntBetween(random(), 1, 5); - HttpHost[] hosts = new HttpHost[numNodes]; - for (int i = 0; i < numNodes; i++) { - hosts[i] = new HttpHost("localhost", 9200 + i); - } - - try (SniffingConnectionPool connectionPool = SniffingConnectionPool.builder() - .setHttpClient(HttpClientBuilder.create().build()).setHosts(hosts).build()) { - assertNotNull(connectionPool); - } - - SniffingConnectionPool.Builder builder = SniffingConnectionPool.builder() - .setHttpClient(HttpClientBuilder.create().build()).setHosts(hosts); - if (random().nextBoolean()) { - builder.setScheme(RandomPicks.randomFrom(random(), Arrays.asList("http", "https"))); - } - if (random().nextBoolean()) { - builder.setSniffInterval(RandomInts.randomIntBetween(random(), 1, Integer.MAX_VALUE)); - } - if (random().nextBoolean()) { - builder.setSniffAfterFailureDelay(RandomInts.randomIntBetween(random(), 1, Integer.MAX_VALUE)); - } - if (random().nextBoolean()) { - builder.setSniffRequestTimeout(RandomInts.randomIntBetween(random(), 1, Integer.MAX_VALUE)); - } - if (random().nextBoolean()) { - builder.setSniffOnFailure(random().nextBoolean()); - } - if (random().nextBoolean()) { - 
builder.setSniffRequestConfig(RequestConfig.DEFAULT); - } - try (SniffingConnectionPool connectionPool = builder.build()) { - assertNotNull(connectionPool); - } - } -} From 85a7721185739ad8cedac3065424c42e29ea0418 Mon Sep 17 00:00:00 2001 From: javanna Date: Fri, 13 May 2016 12:19:45 +0200 Subject: [PATCH 024/103] simplify Connection class The connection class can be greatly simplified now that we don't ping anymore. Pings required a special initial state (UNKNOWN) for connections, to indicate that they require pinging although they are not dead. At this point we don't need the State enum anymore, as connections can only be dead or alive based on the number of failed attempts. markResurrected is also not needed, as it was again a way to make pings required. RestClient can simply pick a dead connection now and use it, no need to change its state when picking the connection. --- .../org/elasticsearch/client/Connection.java | 54 ++++--------------- .../org/elasticsearch/client/RestClient.java | 10 ++-- 2 files changed, 15 insertions(+), 49 deletions(-) diff --git a/client/src/main/java/org/elasticsearch/client/Connection.java b/client/src/main/java/org/elasticsearch/client/Connection.java index f17ff69e14c08..1f60448360142 100644 --- a/client/src/main/java/org/elasticsearch/client/Connection.java +++ b/client/src/main/java/org/elasticsearch/client/Connection.java @@ -24,16 +24,13 @@ import java.util.concurrent.TimeUnit; /** - * Represents a connection to a host. It holds the host that the connection points to. - * Allows the transport to deal with very simple connection objects that are immutable. - * Any change to the state of a connection should be made through the connection pool. + * Represents a connection to a host. It holds the host that the connection points to and the state of the connection to it. 
*/ public class Connection { private static final long DEFAULT_CONNECTION_TIMEOUT_MILLIS = TimeUnit.MINUTES.toMillis(1); private static final long MAX_CONNECTION_TIMEOUT_MILLIS = TimeUnit.MINUTES.toMillis(30); private final HttpHost host; - private volatile State state = State.UNKNOWN; - private volatile int failedAttempts = -1; + private volatile int failedAttempts = 0; private volatile long deadUntil = -1; /** @@ -53,7 +50,7 @@ public HttpHost getHost() { /** * Marks connection as dead. Should be called in case the corresponding node is not responding or caused failures. * Once marked dead, the number of failed attempts will be incremented on each call to this method. A dead connection - * should be retried once {@link #shouldBeRetried()} returns true, which depends on the number of previous failed attempts + * should be retried once {@link #isBlacklisted()} returns true, which depends on the number of previous failed attempts * and when the last failure was registered. */ void markDead() { @@ -63,64 +60,35 @@ void markDead() { MAX_CONNECTION_TIMEOUT_MILLIS); this.deadUntil = System.nanoTime() + TimeUnit.MILLISECONDS.toNanos(timeoutMillis); this.failedAttempts = ++failedAttempts; - this.state = State.DEAD; } } /** * Marks this connection alive. Should be called when the corresponding node is working properly. - * Will reset the number of failed attempts that were counted in case the connection was previously dead, - * as well as its dead timeout. + * Will reset the number of failed attempts that were counted in case the connection was previously dead, as well as its timeout. */ void markAlive() { - if (this.state != State.ALIVE) { + if (this.failedAttempts > 0) { synchronized (this) { this.deadUntil = -1; this.failedAttempts = 0; - this.state = State.ALIVE; } } } /** - * Resets the connection to its initial state, so it will be retried. To be called when all the connections in the pool - * are dead, so that one connection can be retried. 
Note that calling this method only changes the state of the connection, - * it doesn't reset its failed attempts and dead until timestamp. That way if the connection goes back to dead straightaway - * all of its previous failed attempts are taken into account. - */ - void markResurrected() { - if (this.state == State.DEAD) { - synchronized (this) { - this.state = State.UNKNOWN; - } - } - } - - /** - * Returns the timestamp till the connection is supposed to stay dead till it can be retried + * Returns the timestamp till the connection is supposed to stay dead. After that moment the connection should be retried */ public long getDeadUntil() { return deadUntil; } /** - * Returns true if the connection is alive, false otherwise. + * Returns true when the connection should be skipped due to previous failures, false in case the connection is alive + * or dead but ready to be retried. When the connection is dead, returns false when it is time to retry it, depending + * on how many failed attempts were registered and when the last failure happened (minimum 1 minute, maximum 30 minutes). */ - public boolean isAlive() { - return state == State.ALIVE; - } - - /** - * Returns true in case the connection is not alive but should be used/retried, false otherwise. - * Returns true in case the connection is in unknown state (never used before) or resurrected. When the connection is dead, - * returns true when it is time to retry it, depending on how many failed attempts were registered and when the last failure - * happened (minimum 1 minute, maximum 30 minutes). 
- */ - public boolean shouldBeRetried() { - return state == State.UNKNOWN || (state == State.DEAD && System.nanoTime() - deadUntil >= 0); - } - - private enum State { - UNKNOWN, DEAD, ALIVE + public boolean isBlacklisted() { + return failedAttempts > 0 && System.nanoTime() - deadUntil < 0; } } diff --git a/client/src/main/java/org/elasticsearch/client/RestClient.java b/client/src/main/java/org/elasticsearch/client/RestClient.java index f578d7ce304f5..030f67c99527b 100644 --- a/client/src/main/java/org/elasticsearch/client/RestClient.java +++ b/client/src/main/java/org/elasticsearch/client/RestClient.java @@ -148,7 +148,7 @@ public ElasticsearchResponse performRequest(String method, String endpoint, Map< */ private Iterator nextConnection() { if (this.connections.isEmpty()) { - throw new IllegalStateException("no connections available in the connection pool"); + throw new IllegalStateException("no connections available"); } List rotatedConnections = new ArrayList<>(connections); @@ -157,7 +157,7 @@ private Iterator nextConnection() { Iterator connectionIterator = rotatedConnections.iterator(); while (connectionIterator.hasNext()) { Connection connection = connectionIterator.next(); - if (connection.isAlive() == false && connection.shouldBeRetried() == false) { + if (connection.isBlacklisted()) { connectionIterator.remove(); } } @@ -170,8 +170,7 @@ public int compare(Connection o1, Connection o2) { } }); Connection connection = sortedConnections.get(0); - connection.markResurrected(); - logger.trace("marked connection resurrected for " + connection.getHost()); + logger.trace("trying to resurrect connection for " + connection.getHost()); return Collections.singleton(connection).iterator(); } return rotatedConnections.iterator(); @@ -307,8 +306,7 @@ public Builder setMaxRetryTimeout(int maxRetryTimeout) { } /** - * Sets the hosts that the client will send requests to. 
Mandatory if no connection pool is specified, - * as the provided hosts will be used to create the default static connection pool. + * Sets the hosts that the client will send requests to. */ public Builder setHosts(HttpHost... hosts) { if (hosts == null || hosts.length == 0) { From c0a72c1686689417374eeea46a19c3b26cf0edda Mon Sep 17 00:00:00 2001 From: javanna Date: Fri, 13 May 2016 15:20:21 +0200 Subject: [PATCH 025/103] add support for headers: default ones and per request --- .../org/elasticsearch/client/RestClient.java | 42 ++++++++++++++++--- .../elasticsearch/client/sniff/Sniffer.java | 3 +- .../client/RestClientBuilderTests.java | 34 +++++++++++++-- 3 files changed, 69 insertions(+), 10 deletions(-) diff --git a/client/src/main/java/org/elasticsearch/client/RestClient.java b/client/src/main/java/org/elasticsearch/client/RestClient.java index 030f67c99527b..e27949a975141 100644 --- a/client/src/main/java/org/elasticsearch/client/RestClient.java +++ b/client/src/main/java/org/elasticsearch/client/RestClient.java @@ -20,6 +20,7 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; +import org.apache.http.Header; import org.apache.http.HttpEntity; import org.apache.http.HttpHost; import org.apache.http.client.config.RequestConfig; @@ -40,6 +41,7 @@ import java.net.URI; import java.net.URISyntaxException; import java.util.ArrayList; +import java.util.Collection; import java.util.Collections; import java.util.Comparator; import java.util.Iterator; @@ -75,10 +77,15 @@ public synchronized void setNodes(HttpHost... hosts) { this.connections = Collections.unmodifiableList(connections); } - public ElasticsearchResponse performRequest(String method, String endpoint, Map params, HttpEntity entity) - throws IOException { + public ElasticsearchResponse performRequest(String method, String endpoint, Map params, + HttpEntity entity, Header... 
headers) throws IOException { URI uri = buildUri(endpoint, params); HttpRequestBase request = createHttpRequest(method, uri, entity); + if (headers.length > 0) { + for (Header header : headers) { + request.addHeader(header); + } + } //we apply a soft margin so that e.g. if a request took 59 seconds and timeout is set to 60 we don't do another attempt long retryTimeout = Math.round(this.maxRetryTimeout / (float)100 * 98); IOException lastSeenException = null; @@ -276,13 +283,15 @@ public static final class Builder { private CloseableHttpClient httpClient; private int maxRetryTimeout = DEFAULT_MAX_RETRY_TIMEOUT; private HttpHost[] hosts; + private Collection defaultHeaders; private Builder() { } /** - * Sets the http client. A new default one will be created if not specified, by calling {@link #createDefaultHttpClient()}. + * Sets the http client. A new default one will be created if not + * specified, by calling {@link #createDefaultHttpClient(Collection)}. * * @see CloseableHttpClient */ @@ -316,12 +325,29 @@ public Builder setHosts(HttpHost... hosts) { return this; } + /** + * Sets the default request headers, to be used when creating the default http client instance. + * In case the http client is set through {@link #setHttpClient(CloseableHttpClient)}, the default headers need to be + * set to it externally during http client construction. + */ + public Builder setDefaultHeaders(Collection defaultHeaders) { + this.defaultHeaders = defaultHeaders; + return this; + } + /** * Creates a new {@link RestClient} based on the provided configuration. 
*/ public RestClient build() { if (httpClient == null) { - httpClient = createDefaultHttpClient(); + httpClient = createDefaultHttpClient(defaultHeaders); + } else { + if (defaultHeaders != null) { + throw new IllegalArgumentException("defaultHeaders need to be set to the HttpClient directly when manually provided"); + } + } + if (hosts == null || hosts.length == 0) { + throw new IllegalArgumentException("no hosts provided"); } return new RestClient(httpClient, maxRetryTimeout, hosts); } @@ -331,7 +357,7 @@ public RestClient build() { * * @see CloseableHttpClient */ - public static CloseableHttpClient createDefaultHttpClient() { + public static CloseableHttpClient createDefaultHttpClient(Collection defaultHeaders) { PoolingHttpClientConnectionManager connectionManager = new PoolingHttpClientConnectionManager(); //default settings may be too constraining connectionManager.setDefaultMaxPerRoute(10); @@ -342,7 +368,11 @@ public static CloseableHttpClient createDefaultHttpClient() { .setSocketTimeout(DEFAULT_SOCKET_TIMEOUT) .setConnectionRequestTimeout(DEFAULT_CONNECTION_REQUEST_TIMEOUT).build(); - return HttpClientBuilder.create().setConnectionManager(connectionManager).setDefaultRequestConfig(requestConfig).build(); + HttpClientBuilder httpClientBuilder = HttpClientBuilder.create(); + if (defaultHeaders != null) { + httpClientBuilder.setDefaultHeaders(defaultHeaders); + } + return httpClientBuilder.setConnectionManager(connectionManager).setDefaultRequestConfig(requestConfig).build(); } } diff --git a/client/src/main/java/org/elasticsearch/client/sniff/Sniffer.java b/client/src/main/java/org/elasticsearch/client/sniff/Sniffer.java index 43ad143738a0b..ea9fc9a26abd7 100644 --- a/client/src/main/java/org/elasticsearch/client/sniff/Sniffer.java +++ b/client/src/main/java/org/elasticsearch/client/sniff/Sniffer.java @@ -28,6 +28,7 @@ import java.io.Closeable; import java.io.IOException; +import java.util.Collection; import java.util.List; import java.util.Objects; import 
java.util.concurrent.Executors; @@ -201,7 +202,7 @@ public Builder setSniffAfterFailureDelay(int sniffAfterFailureDelay) { /** * Sets the http client. Mandatory argument. Best practice is to use the same client used * within {@link org.elasticsearch.client.RestClient} which can be created manually or - * through {@link RestClient.Builder#createDefaultHttpClient()}. + * through {@link RestClient.Builder#createDefaultHttpClient(Collection)}. * @see CloseableHttpClient */ public Builder setRestClient(RestClient restClient) { diff --git a/client/src/test/java/org/elasticsearch/client/RestClientBuilderTests.java b/client/src/test/java/org/elasticsearch/client/RestClientBuilderTests.java index e0828f2bd1fcc..44909336dd687 100644 --- a/client/src/test/java/org/elasticsearch/client/RestClientBuilderTests.java +++ b/client/src/test/java/org/elasticsearch/client/RestClientBuilderTests.java @@ -21,10 +21,15 @@ import com.carrotsearch.randomizedtesting.generators.RandomInts; import org.apache.http.HttpHost; +import org.apache.http.impl.client.CloseableHttpClient; import org.apache.http.impl.client.HttpClientBuilder; +import org.apache.http.message.BasicHeader; import org.apache.lucene.util.LuceneTestCase; import java.io.IOException; +import java.util.ArrayList; +import java.util.Collection; +import java.util.Collections; import java.util.logging.LogManager; public class RestClientBuilderTests extends LuceneTestCase { @@ -56,12 +61,27 @@ public void testBuild() throws IOException { } try { - RestClient.builder(); + RestClient.builder().build(); fail("should have failed"); } catch(IllegalArgumentException e) { assertEquals(e.getMessage(), "no hosts provided"); } + try { + RestClient.builder().setHosts(new HttpHost[]{new HttpHost("localhost", 9200), null}).build(); + fail("should have failed"); + } catch(NullPointerException e) { + assertEquals(e.getMessage(), "host cannot be null"); + } + + try (CloseableHttpClient httpClient = HttpClientBuilder.create().build()) { + 
RestClient.builder().setHttpClient(httpClient) + .setDefaultHeaders(Collections.singleton(new BasicHeader("header", "value"))).build(); + fail("should have failed"); + } catch(IllegalArgumentException e) { + assertEquals(e.getMessage(), "defaultHeaders need to be set to the HttpClient directly when manually provided"); + } + RestClient.Builder builder = RestClient.builder(); int numNodes = RandomInts.randomIntBetween(random(), 1, 5); HttpHost[] hosts = new HttpHost[numNodes]; @@ -70,11 +90,19 @@ public void testBuild() throws IOException { } builder.setHosts(hosts); - //TODO test one host is null among others - if (random().nextBoolean()) { builder.setHttpClient(HttpClientBuilder.create().build()); + } else { + if (random().nextBoolean()) { + int numHeaders = RandomInts.randomIntBetween(random(), 1, 5); + Collection headers = new ArrayList<>(numHeaders); + for (int i = 0; i < numHeaders; i++) { + headers.add(new BasicHeader("header" + i, "value")); + } + builder.setDefaultHeaders(headers); + } } + if (random().nextBoolean()) { builder.setMaxRetryTimeout(RandomInts.randomIntBetween(random(), 1, Integer.MAX_VALUE)); } From 2d7a781195c8b6436efd5bb3f355e77586dca559 Mon Sep 17 00:00:00 2001 From: javanna Date: Tue, 17 May 2016 15:23:47 +0200 Subject: [PATCH 026/103] fix usage of deprecated apis --- .../java/org/elasticsearch/client/RequestLogger.java | 10 +++++++++- .../org/elasticsearch/client/RequestLoggerTests.java | 6 +++--- 2 files changed, 12 insertions(+), 4 deletions(-) diff --git a/client/src/main/java/org/elasticsearch/client/RequestLogger.java b/client/src/main/java/org/elasticsearch/client/RequestLogger.java index ce2e82ea1e57c..dd9fb269717d0 100644 --- a/client/src/main/java/org/elasticsearch/client/RequestLogger.java +++ b/client/src/main/java/org/elasticsearch/client/RequestLogger.java @@ -28,11 +28,14 @@ import org.apache.http.HttpResponse; import org.apache.http.client.methods.HttpUriRequest; import org.apache.http.entity.BufferedHttpEntity; +import 
org.apache.http.entity.ContentType; import org.apache.http.util.EntityUtils; import java.io.BufferedReader; import java.io.IOException; import java.io.InputStreamReader; +import java.nio.charset.Charset; +import java.nio.charset.StandardCharsets; /** * Helper class that exposes static methods to unify the way requests are logged. @@ -118,7 +121,12 @@ static String buildTraceResponse(HttpResponse httpResponse) throws IOException { if (entity != null) { entity = new BufferedHttpEntity(entity); httpResponse.setEntity(entity); - try (BufferedReader reader = new BufferedReader(new InputStreamReader(entity.getContent()))) { + ContentType contentType = ContentType.get(entity); + Charset charset = StandardCharsets.UTF_8; + if (contentType != null) { + charset = contentType.getCharset(); + } + try (BufferedReader reader = new BufferedReader(new InputStreamReader(entity.getContent(), charset))) { String line; while( (line = reader.readLine()) != null) { responseLine += "\n# " + line; diff --git a/client/src/test/java/org/elasticsearch/client/RequestLoggerTests.java b/client/src/test/java/org/elasticsearch/client/RequestLoggerTests.java index 84bc3c032d5d2..89fa30c7e0867 100644 --- a/client/src/test/java/org/elasticsearch/client/RequestLoggerTests.java +++ b/client/src/test/java/org/elasticsearch/client/RequestLoggerTests.java @@ -20,7 +20,6 @@ package org.elasticsearch.client; import com.carrotsearch.randomizedtesting.generators.RandomInts; -import org.apache.commons.codec.Charsets; import org.apache.http.HttpEntityEnclosingRequest; import org.apache.http.HttpHost; import org.apache.http.ProtocolVersion; @@ -36,6 +35,7 @@ import java.io.IOException; import java.net.URI; import java.net.URISyntaxException; +import java.nio.charset.StandardCharsets; import static org.hamcrest.CoreMatchers.equalTo; @@ -72,7 +72,7 @@ public void testTraceRequest() throws IOException, URISyntaxException { if (request instanceof HttpEntityEnclosingRequest && random().nextBoolean()) { 
HttpEntityEnclosingRequest enclosingRequest = (HttpEntityEnclosingRequest) request; String requestBody = "{ \"field\": \"value\" }"; - enclosingRequest.setEntity(new StringEntity(requestBody, Charsets.UTF_8)); + enclosingRequest.setEntity(new StringEntity(requestBody, StandardCharsets.UTF_8)); expected += " -d '" + requestBody + "'"; } @@ -95,7 +95,7 @@ public void testTraceResponse() throws IOException { expected += "\n#"; if (random().nextBoolean()) { String responseBody = "{\n \"field\": \"value\"\n}"; - httpResponse.setEntity(new StringEntity(responseBody, Charsets.UTF_8)); + httpResponse.setEntity(new StringEntity(responseBody, StandardCharsets.UTF_8)); expected += "\n# {"; expected += "\n# \"field\": \"value\""; expected += "\n# }"; From 2735897b36226f36723dc1a4bf27e53d76b70fdc Mon Sep 17 00:00:00 2001 From: javanna Date: Tue, 17 May 2016 15:24:26 +0200 Subject: [PATCH 027/103] use versions from versions.properties --- client/build.gradle | 14 +++++--------- 1 file changed, 5 insertions(+), 9 deletions(-) diff --git a/client/build.gradle b/client/build.gradle index 35af3d0979b03..e24aa9878e312 100644 --- a/client/build.gradle +++ b/client/build.gradle @@ -20,22 +20,18 @@ import org.elasticsearch.gradle.precommit.PrecommitTasks import org.gradle.api.JavaVersion -group = 'org.elasticsearch.client' apply plugin: 'elasticsearch.build' targetCompatibility = JavaVersion.VERSION_1_7 sourceCompatibility = JavaVersion.VERSION_1_7 dependencies { - // TODO once we got rid of the client in the test framework we should use a version variable here - compile "org.apache.httpcomponents:httpclient:4.5.2" - compile "org.apache.httpcomponents:httpcore:4.4.4" - //compile "org.apache.httpcomponents:httpcore-nio:4.4.4" - //compile "org.apache.httpcomponents:httpasyncclient:4.1.1" - compile "commons-codec:commons-codec:1.9" - compile "commons-logging:commons-logging:1.2" + compile "org.apache.httpcomponents:httpclient:${versions.httpclient}" + compile 
"org.apache.httpcomponents:httpcore:${versions.httpcore}" + compile "commons-codec:commons-codec:${versions.commonscodec}" + compile "commons-logging:commons-logging:${versions.commonslogging}" //jackson is only needed in the sniff package - compile "com.fasterxml.jackson.core:jackson-core:2.7.3" + compile "com.fasterxml.jackson.core:jackson-core:${versions.jackson}" testCompile "com.carrotsearch.randomizedtesting:randomizedtesting-runner:${versions.randomizedrunner}" testCompile "junit:junit:${versions.junit}" From 3890842fd4cacd9085dee3c8544c57fea5f06b24 Mon Sep 17 00:00:00 2001 From: javanna Date: Tue, 17 May 2016 15:25:10 +0200 Subject: [PATCH 028/103] add project substitution for org.elasticsearch:client so that we can add it as a dep in other projects --- build.gradle | 1 + 1 file changed, 1 insertion(+) diff --git a/build.gradle b/build.gradle index 5c6ec36693047..af18424944b66 100644 --- a/build.gradle +++ b/build.gradle @@ -162,6 +162,7 @@ subprojects { ext.projectSubstitutions = [ "org.elasticsearch:rest-api-spec:${version}": ':rest-api-spec', "org.elasticsearch:elasticsearch:${version}": ':core', + "org.elasticsearch:client:${version}": ':client', "org.elasticsearch.test:framework:${version}": ':test:framework', "org.elasticsearch.distribution.integ-test-zip:elasticsearch:${version}": ':distribution:integ-test-zip', "org.elasticsearch.distribution.zip:elasticsearch:${version}": ':distribution:zip', From 1d06916b07b1f6a59e57b4d3d1ded9ce426bfefe Mon Sep 17 00:00:00 2001 From: javanna Date: Wed, 18 May 2016 11:39:59 +0200 Subject: [PATCH 029/103] adapt params argument, they can only be Strings in performRequest method --- .../main/java/org/elasticsearch/client/RestClient.java | 8 ++++---- .../java/org/elasticsearch/client/sniff/HostsSniffer.java | 4 ++-- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/client/src/main/java/org/elasticsearch/client/RestClient.java b/client/src/main/java/org/elasticsearch/client/RestClient.java index 
e27949a975141..23540049fb5fa 100644 --- a/client/src/main/java/org/elasticsearch/client/RestClient.java +++ b/client/src/main/java/org/elasticsearch/client/RestClient.java @@ -77,7 +77,7 @@ public synchronized void setNodes(HttpHost... hosts) { this.connections = Collections.unmodifiableList(connections); } - public ElasticsearchResponse performRequest(String method, String endpoint, Map params, + public ElasticsearchResponse performRequest(String method, String endpoint, Map params, HttpEntity entity, Header... headers) throws IOException { URI uri = buildUri(endpoint, params); HttpRequestBase request = createHttpRequest(method, uri, entity); @@ -252,11 +252,11 @@ private static void addRequestBody(HttpEntityEnclosingRequestBase httpRequest, H } } - private static URI buildUri(String path, Map params) { + private static URI buildUri(String path, Map params) { try { URIBuilder uriBuilder = new URIBuilder(path); - for (Map.Entry param : params.entrySet()) { - uriBuilder.addParameter(param.getKey(), param.getValue().toString()); + for (Map.Entry param : params.entrySet()) { + uriBuilder.addParameter(param.getKey(), param.getValue()); } return uriBuilder.build(); } catch(URISyntaxException e) { diff --git a/client/src/main/java/org/elasticsearch/client/sniff/HostsSniffer.java b/client/src/main/java/org/elasticsearch/client/sniff/HostsSniffer.java index 29117fcd856cb..86dca173a6714 100644 --- a/client/src/main/java/org/elasticsearch/client/sniff/HostsSniffer.java +++ b/client/src/main/java/org/elasticsearch/client/sniff/HostsSniffer.java @@ -45,13 +45,13 @@ public class HostsSniffer { private static final Log logger = LogFactory.getLog(HostsSniffer.class); private final RestClient restClient; - private final Map sniffRequestParams; + private final Map sniffRequestParams; private final String scheme; private final JsonFactory jsonFactory; public HostsSniffer(RestClient restClient, int sniffRequestTimeout, String scheme) { this.restClient = restClient; - 
this.sniffRequestParams = Collections.singletonMap("timeout", sniffRequestTimeout + "ms"); + this.sniffRequestParams = Collections.singletonMap("timeout", sniffRequestTimeout + "ms"); this.scheme = scheme; this.jsonFactory = new JsonFactory(); } From 16ab491016368c04cb70bb9e5858f6a35280f380 Mon Sep 17 00:00:00 2001 From: javanna Date: Wed, 18 May 2016 11:40:20 +0200 Subject: [PATCH 030/103] add getFirstHeader method to ElasticsearchResponse --- .../elasticsearch/client/ElasticsearchResponse.java | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/client/src/main/java/org/elasticsearch/client/ElasticsearchResponse.java b/client/src/main/java/org/elasticsearch/client/ElasticsearchResponse.java index 124a233dea7ff..304d10e2b94da 100644 --- a/client/src/main/java/org/elasticsearch/client/ElasticsearchResponse.java +++ b/client/src/main/java/org/elasticsearch/client/ElasticsearchResponse.java @@ -77,6 +77,19 @@ public Header[] getHeaders() { return response.getAllHeaders(); } + /** + * Returns the value of the first header with a specified name of this message. + * If there is more than one matching header in the message the first element is returned. + * If there is no matching header in the message null is returned. + */ + public String getFirstHeader(String name) { + Header header = response.getFirstHeader(name); + if (header == null) { + return null; + } + return header.getValue(); + } + /** * Returns the response body available, null otherwise * @see HttpEntity From 9a38d81bec9549a7505c03001c2d9e8251e3edab Mon Sep 17 00:00:00 2001 From: javanna Date: Wed, 18 May 2016 13:51:38 +0200 Subject: [PATCH 031/103] Expose whole ElasticsearchResponse within ElasticsearchResponseException The only small problem is that the response gets closed straightaway and its body read immediately into a string. Should be ok to load it all into memory eagerly though in case of errors. Otherwise it becomes cumbersome to have an exception implement Closeable... 
--- .../ElasticsearchResponseException.java | 41 +++++++++---------- .../org/elasticsearch/client/RestClient.java | 12 ++---- .../client/sniff/HostsSnifferTests.java | 11 +++-- 3 files changed, 31 insertions(+), 33 deletions(-) diff --git a/client/src/main/java/org/elasticsearch/client/ElasticsearchResponseException.java b/client/src/main/java/org/elasticsearch/client/ElasticsearchResponseException.java index 98945b9a726c2..ad63009db3460 100644 --- a/client/src/main/java/org/elasticsearch/client/ElasticsearchResponseException.java +++ b/client/src/main/java/org/elasticsearch/client/ElasticsearchResponseException.java @@ -22,6 +22,7 @@ import org.apache.http.HttpHost; import org.apache.http.RequestLine; import org.apache.http.StatusLine; +import org.apache.http.util.EntityUtils; import java.io.IOException; @@ -30,17 +31,21 @@ */ public class ElasticsearchResponseException extends IOException { - private final HttpHost host; - private final RequestLine requestLine; - private final StatusLine statusLine; + private ElasticsearchResponse elasticsearchResponse; private final String responseBody; - public ElasticsearchResponseException(RequestLine requestLine, HttpHost host, StatusLine statusLine, String responseBody) { - super(buildMessage(requestLine, host, statusLine)); - this.host = host; - this.requestLine = requestLine; - this.responseBody = responseBody; - this.statusLine = statusLine; + public ElasticsearchResponseException(ElasticsearchResponse elasticsearchResponse) throws IOException { + super(buildMessage(elasticsearchResponse.getRequestLine(), elasticsearchResponse.getHost(), elasticsearchResponse.getStatusLine())); + this.elasticsearchResponse = elasticsearchResponse; + try { + if (elasticsearchResponse.getEntity() == null) { + this.responseBody = null; + } else { + this.responseBody = EntityUtils.toString(elasticsearchResponse.getEntity()); + } + } finally { + elasticsearchResponse.close(); + } } private static String buildMessage(RequestLine requestLine, 
HttpHost host, StatusLine statusLine) { @@ -48,23 +53,17 @@ private static String buildMessage(RequestLine requestLine, HttpHost host, Statu } /** - * Returns the {@link HttpHost} that returned the error + * Returns the {@link ElasticsearchResponse} that caused this exception to be thrown */ - public HttpHost getHost() { - return host; + public ElasticsearchResponse getElasticsearchResponse() { + return elasticsearchResponse; } /** - * Returns the {@link RequestLine} that triggered the error + * Returns the response body as a string or null if there wasn't any. + * The body is eagerly consumed when an ElasticsearchResponseException gets created, and its corresponding ElasticsearchResponse + * gets closed straightaway so this method is the only way to get back the response body that was returned. */ - public RequestLine getRequestLine() { - return requestLine; - } - - public StatusLine getStatusLine() { - return statusLine; - } - public String getResponseBody() { return responseBody; } diff --git a/client/src/main/java/org/elasticsearch/client/RestClient.java b/client/src/main/java/org/elasticsearch/client/RestClient.java index 23540049fb5fa..4e414033d4a80 100644 --- a/client/src/main/java/org/elasticsearch/client/RestClient.java +++ b/client/src/main/java/org/elasticsearch/client/RestClient.java @@ -34,7 +34,6 @@ import org.apache.http.impl.client.CloseableHttpClient; import org.apache.http.impl.client.HttpClientBuilder; import org.apache.http.impl.conn.PoolingHttpClientConnectionManager; -import org.apache.http.util.EntityUtils; import java.io.Closeable; import java.io.IOException; @@ -115,20 +114,17 @@ public ElasticsearchResponse performRequest(String method, String endpoint, Map< lastSeenException = addSuppressedException(lastSeenException, e); continue; } + ElasticsearchResponse elasticsearchResponse = new ElasticsearchResponse(request.getRequestLine(), + connection.getHost(), response); int statusCode = response.getStatusLine().getStatusCode(); //TODO make 
ignore status code configurable. rest-spec and tests support that parameter (ignore_missing) if (statusCode < 300 || (request.getMethod().equals(HttpHead.METHOD_NAME) && statusCode == 404) ) { RequestLogger.log(logger, "request succeeded", request, connection.getHost(), response); onSuccess(connection); - return new ElasticsearchResponse(request.getRequestLine(), connection.getHost(), response); + return elasticsearchResponse; } else { RequestLogger.log(logger, "request failed", request, connection.getHost(), response); - String responseBody = null; - if (response.getEntity() != null) { - responseBody = EntityUtils.toString(response.getEntity()); - } - ElasticsearchResponseException elasticsearchResponseException = new ElasticsearchResponseException( - request.getRequestLine(), connection.getHost(), response.getStatusLine(), responseBody); + ElasticsearchResponseException elasticsearchResponseException = new ElasticsearchResponseException(elasticsearchResponse); lastSeenException = addSuppressedException(lastSeenException, elasticsearchResponseException); //clients don't retry on 500 because elasticsearch still misuses it instead of 400 in some places if (statusCode == 502 || statusCode == 503 || statusCode == 504) { diff --git a/client/src/test/java/org/elasticsearch/client/sniff/HostsSnifferTests.java b/client/src/test/java/org/elasticsearch/client/sniff/HostsSnifferTests.java index c9e1b48a79195..e2b7a6d7c1051 100644 --- a/client/src/test/java/org/elasticsearch/client/sniff/HostsSnifferTests.java +++ b/client/src/test/java/org/elasticsearch/client/sniff/HostsSnifferTests.java @@ -30,6 +30,7 @@ import okhttp3.mockwebserver.RecordedRequest; import org.apache.http.HttpHost; import org.apache.lucene.util.LuceneTestCase; +import org.elasticsearch.client.ElasticsearchResponse; import org.elasticsearch.client.ElasticsearchResponseException; import org.elasticsearch.client.RestClient; import org.junit.After; @@ -100,15 +101,17 @@ public void testSniffNodes() throws 
IOException, URISyntaxException { assertEquals(sniffedHost, responseHostsIterator.next()); } } catch(ElasticsearchResponseException e) { + ElasticsearchResponse response = e.getElasticsearchResponse(); if (sniffResponse.isFailure) { assertThat(e.getMessage(), containsString("GET http://localhost:" + server.getPort() + "/_nodes/http?timeout=" + sniffRequestTimeout)); assertThat(e.getMessage(), containsString(Integer.toString(sniffResponse.nodesInfoResponseCode))); - assertThat(e.getHost(), equalTo(httpHost)); - assertThat(e.getStatusLine().getStatusCode(), equalTo(sniffResponse.nodesInfoResponseCode)); - assertThat(e.getRequestLine().toString(), equalTo("GET /_nodes/http?timeout=" + sniffRequestTimeout + "ms HTTP/1.1")); + assertThat(response.getHost(), equalTo(httpHost)); + assertThat(response.getStatusLine().getStatusCode(), equalTo(sniffResponse.nodesInfoResponseCode)); + assertThat(response.getRequestLine().toString(), + equalTo("GET /_nodes/http?timeout=" + sniffRequestTimeout + "ms HTTP/1.1")); } else { - fail("sniffNodes should have succeeded: " + e.getStatusLine()); + fail("sniffNodes should have succeeded: " + response.getStatusLine()); } } } From 6d3f6c7fafd5b5df98c142e04a85d98a434bee37 Mon Sep 17 00:00:00 2001 From: javanna Date: Wed, 18 May 2016 13:52:33 +0200 Subject: [PATCH 032/103] support missing OPTIONS method, it is supported in elasticsearch core --- .../src/main/java/org/elasticsearch/client/RestClient.java | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/client/src/main/java/org/elasticsearch/client/RestClient.java b/client/src/main/java/org/elasticsearch/client/RestClient.java index 4e414033d4a80..6c37fd58ae171 100644 --- a/client/src/main/java/org/elasticsearch/client/RestClient.java +++ b/client/src/main/java/org/elasticsearch/client/RestClient.java @@ -27,6 +27,7 @@ import org.apache.http.client.methods.CloseableHttpResponse; import org.apache.http.client.methods.HttpEntityEnclosingRequestBase; import 
org.apache.http.client.methods.HttpHead; +import org.apache.http.client.methods.HttpOptions; import org.apache.http.client.methods.HttpPost; import org.apache.http.client.methods.HttpPut; import org.apache.http.client.methods.HttpRequestBase; @@ -229,6 +230,11 @@ private static HttpRequestBase createHttpRequest(String method, URI uri, HttpEnt throw new UnsupportedOperationException("HEAD with body is not supported"); } return new HttpHead(uri); + case HttpOptions.METHOD_NAME: + if (entity != null) { + throw new UnsupportedOperationException("OPTIONS with body is not supported"); + } + return new HttpOptions(uri); case HttpPost.METHOD_NAME: HttpPost httpPost = new HttpPost(uri); addRequestBody(httpPost, entity); From 325b72393077e7985975515a8c9a8b29c468da16 Mon Sep 17 00:00:00 2001 From: javanna Date: Wed, 18 May 2016 15:49:28 +0200 Subject: [PATCH 033/103] [TEST] add rest client test dependency and replace usage of HttpRequestBuilder with RestClient in integration tests --- .../resources/checkstyle_suppressions.xml | 1 - .../org/elasticsearch/client/RestClient.java | 3 + .../http/netty/NettyHttpCompressionIT.java | 82 ++++++-------- .../DetailedErrorsDisabledIT.java | 29 +++-- .../DetailedErrorsEnabledIT.java | 49 +++++---- .../plugins/ResponseHeaderPluginIT.java | 26 +++-- .../org/elasticsearch/rest/CorsNotSetIT.java | 32 +++--- .../org/elasticsearch/rest/CorsRegexIT.java | 103 ++++++++++-------- .../rest/action/main/RestMainActionIT.java | 23 ++-- .../ContextAndHeaderTransportIT.java | 40 +++---- test/framework/build.gradle | 5 +- .../elasticsearch/test/ESIntegTestCase.java | 34 ++++-- .../hamcrest/ElasticsearchAssertions.java | 8 +- .../test/hamcrest/ElasticsearchMatchers.java | 16 +-- 14 files changed, 242 insertions(+), 209 deletions(-) diff --git a/buildSrc/src/main/resources/checkstyle_suppressions.xml b/buildSrc/src/main/resources/checkstyle_suppressions.xml index c77ef0f9d5b73..d6a38e76e3ec3 100644 --- 
a/buildSrc/src/main/resources/checkstyle_suppressions.xml +++ b/buildSrc/src/main/resources/checkstyle_suppressions.xml @@ -1049,7 +1049,6 @@ - diff --git a/client/src/main/java/org/elasticsearch/client/RestClient.java b/client/src/main/java/org/elasticsearch/client/RestClient.java index 6c37fd58ae171..31914f46282ff 100644 --- a/client/src/main/java/org/elasticsearch/client/RestClient.java +++ b/client/src/main/java/org/elasticsearch/client/RestClient.java @@ -20,6 +20,7 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; +import org.apache.http.Consts; import org.apache.http.Header; import org.apache.http.HttpEntity; import org.apache.http.HttpHost; @@ -32,6 +33,7 @@ import org.apache.http.client.methods.HttpPut; import org.apache.http.client.methods.HttpRequestBase; import org.apache.http.client.utils.URIBuilder; +import org.apache.http.entity.ContentType; import org.apache.http.impl.client.CloseableHttpClient; import org.apache.http.impl.client.HttpClientBuilder; import org.apache.http.impl.conn.PoolingHttpClientConnectionManager; @@ -55,6 +57,7 @@ public final class RestClient implements Closeable { private static final Log logger = LogFactory.getLog(RestClient.class); + public static ContentType JSON_CONTENT_TYPE = ContentType.create("application/json", Consts.UTF_8); private final CloseableHttpClient client; private final long maxRetryTimeout; diff --git a/core/src/test/java/org/elasticsearch/http/netty/NettyHttpCompressionIT.java b/core/src/test/java/org/elasticsearch/http/netty/NettyHttpCompressionIT.java index bb7a78ab43f30..9e995f0a010d7 100644 --- a/core/src/test/java/org/elasticsearch/http/netty/NettyHttpCompressionIT.java +++ b/core/src/test/java/org/elasticsearch/http/netty/NettyHttpCompressionIT.java @@ -22,27 +22,31 @@ import org.apache.http.HttpException; import org.apache.http.HttpHeaders; import org.apache.http.HttpResponseInterceptor; +import org.apache.http.entity.StringEntity; import 
org.apache.http.impl.client.CloseableHttpClient; import org.apache.http.impl.client.HttpClients; +import org.apache.http.message.BasicHeader; import org.apache.http.protocol.HttpContext; +import org.elasticsearch.client.ElasticsearchResponse; +import org.elasticsearch.client.RestClient; import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.http.HttpTransportSettings; import org.elasticsearch.test.ESIntegTestCase; -import org.elasticsearch.test.rest.client.http.HttpResponse; import java.io.IOException; +import java.util.Collections; @ESIntegTestCase.ClusterScope(scope = ESIntegTestCase.Scope.SUITE, numDataNodes = 1, numClientNodes = 1) public class NettyHttpCompressionIT extends ESIntegTestCase { private static final String GZIP_ENCODING = "gzip"; - private static final String SAMPLE_DOCUMENT = "{\n" + + private static final StringEntity SAMPLE_DOCUMENT = new StringEntity("{\n" + " \"name\": {\n" + " \"first name\": \"Steve\",\n" + " \"last name\": \"Jobs\"\n" + " }\n" + - "}"; + "}", RestClient.JSON_CONTENT_TYPE); @Override protected Settings nodeSettings(int nodeOrdinal) { @@ -55,69 +59,49 @@ protected Settings nodeSettings(int nodeOrdinal) { public void testCompressesResponseIfRequested() throws Exception { ensureGreen(); - // we need to intercept early, otherwise internal logic in HttpClient will just remove the header and we cannot verify it ContentEncodingHeaderExtractor headerExtractor = new ContentEncodingHeaderExtractor(); - CloseableHttpClient internalClient = HttpClients.custom().addInterceptorFirst(headerExtractor).build(); - - HttpResponse response = httpClient(internalClient).path("/").addHeader(HttpHeaders.ACCEPT_ENCODING, GZIP_ENCODING).execute(); - assertEquals(200, response.getStatusCode()); - assertTrue(headerExtractor.hasContentEncodingHeader()); - assertEquals(GZIP_ENCODING, headerExtractor.getContentEncodingHeader().getValue()); + try (RestClient client = 
restClient(HttpClients.custom().addInterceptorFirst(headerExtractor).build())) { + ElasticsearchResponse response = client.performRequest("GET", "/", Collections.emptyMap(), null, + new BasicHeader(HttpHeaders.ACCEPT_ENCODING, GZIP_ENCODING)); + assertEquals(200, response.getStatusLine().getStatusCode()); + assertTrue(headerExtractor.hasContentEncodingHeader()); + assertEquals(GZIP_ENCODING, headerExtractor.getContentEncodingHeader().getValue()); + } } public void testUncompressedResponseByDefault() throws Exception { ensureGreen(); - ContentEncodingHeaderExtractor headerExtractor = new ContentEncodingHeaderExtractor(); - CloseableHttpClient internalClient = HttpClients - .custom() - .disableContentCompression() - .addInterceptorFirst(headerExtractor) - .build(); - - HttpResponse response = httpClient(internalClient).path("/").execute(); - assertEquals(200, response.getStatusCode()); - assertFalse(headerExtractor.hasContentEncodingHeader()); + CloseableHttpClient httpClient = HttpClients.custom().disableContentCompression().addInterceptorFirst(headerExtractor).build(); + try (RestClient client = restClient(httpClient)) { + ElasticsearchResponse response = client.performRequest("GET", "/", Collections.emptyMap(), null); + assertEquals(200, response.getStatusLine().getStatusCode()); + assertFalse(headerExtractor.hasContentEncodingHeader()); + } } public void testCanInterpretUncompressedRequest() throws Exception { ensureGreen(); - ContentEncodingHeaderExtractor headerExtractor = new ContentEncodingHeaderExtractor(); - CloseableHttpClient internalClient = HttpClients - .custom() - // this disable content compression in both directions (request and response) - .disableContentCompression() - .addInterceptorFirst(headerExtractor) - .build(); - - HttpResponse response = httpClient(internalClient) - .path("/company/employees/1") - .method("POST") - .body(SAMPLE_DOCUMENT) - .execute(); - - assertEquals(201, response.getStatusCode()); - 
assertFalse(headerExtractor.hasContentEncodingHeader()); + // this disable content compression in both directions (request and response) + CloseableHttpClient httpClient = HttpClients.custom().disableContentCompression().addInterceptorFirst(headerExtractor).build(); + try (RestClient client = restClient(httpClient)) { + ElasticsearchResponse response = client.performRequest("POST", "/company/employees/1", Collections.emptyMap(), SAMPLE_DOCUMENT); + assertEquals(201, response.getStatusLine().getStatusCode()); + assertFalse(headerExtractor.hasContentEncodingHeader()); + } } public void testCanInterpretCompressedRequest() throws Exception { ensureGreen(); - ContentEncodingHeaderExtractor headerExtractor = new ContentEncodingHeaderExtractor(); // we don't call #disableContentCompression() hence the client will send the content compressed - CloseableHttpClient internalClient = HttpClients.custom().addInterceptorFirst(headerExtractor).build(); - - HttpResponse response = httpClient(internalClient) - .path("/company/employees/2") - .method("POST") - .body(SAMPLE_DOCUMENT) - .execute(); - - assertEquals(201, response.getStatusCode()); - assertTrue(headerExtractor.hasContentEncodingHeader()); - assertEquals(GZIP_ENCODING, headerExtractor.getContentEncodingHeader().getValue()); + try (RestClient client = restClient(HttpClients.custom().addInterceptorFirst(headerExtractor).build())) { + ElasticsearchResponse response = client.performRequest("POST", "/company/employees/2", Collections.emptyMap(), SAMPLE_DOCUMENT); + assertEquals(201, response.getStatusLine().getStatusCode()); + assertEquals(GZIP_ENCODING, headerExtractor.getContentEncodingHeader().getValue()); + } } private static class ContentEncodingHeaderExtractor implements HttpResponseInterceptor { @@ -141,6 +125,4 @@ public Header getContentEncodingHeader() { return contentEncodingHeader; } } - - } diff --git a/core/src/test/java/org/elasticsearch/options/detailederrors/DetailedErrorsDisabledIT.java 
b/core/src/test/java/org/elasticsearch/options/detailederrors/DetailedErrorsDisabledIT.java index 683ae71a11234..ba8840bbc2c21 100644 --- a/core/src/test/java/org/elasticsearch/options/detailederrors/DetailedErrorsDisabledIT.java +++ b/core/src/test/java/org/elasticsearch/options/detailederrors/DetailedErrorsDisabledIT.java @@ -19,17 +19,17 @@ package org.elasticsearch.options.detailederrors; -import org.apache.http.impl.client.HttpClients; +import org.elasticsearch.client.ElasticsearchResponse; +import org.elasticsearch.client.ElasticsearchResponseException; +import org.elasticsearch.client.RestClient; import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.http.HttpServerTransport; import org.elasticsearch.http.HttpTransportSettings; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.ESIntegTestCase.ClusterScope; import org.elasticsearch.test.ESIntegTestCase.Scope; -import org.elasticsearch.test.rest.client.http.HttpDeleteWithEntity; -import org.elasticsearch.test.rest.client.http.HttpRequestBuilder; -import org.elasticsearch.test.rest.client.http.HttpResponse; + +import java.util.Collections; import static org.hamcrest.Matchers.is; @@ -49,15 +49,14 @@ protected Settings nodeSettings(int nodeOrdinal) { } public void testThatErrorTraceParamReturns400() throws Exception { - // Make the HTTP request - HttpResponse response = new HttpRequestBuilder(HttpClients.createDefault()) - .httpTransport(internalCluster().getDataNodeInstance(HttpServerTransport.class)) - .addParam("error_trace", "true") - .method(HttpDeleteWithEntity.METHOD_NAME) - .execute(); - - assertThat(response.getHeaders().get("Content-Type"), is("application/json")); - assertThat(response.getBody(), is("{\"error\":\"error traces in responses are disabled.\"}")); - assertThat(response.getStatusCode(), is(400)); + try (RestClient restClient = restClient()) { + restClient.performRequest("DELETE", "/", 
Collections.singletonMap("error_trace", "true"), null); + fail("request should have failed"); + } catch(ElasticsearchResponseException e) { + ElasticsearchResponse response = e.getElasticsearchResponse(); + assertThat(response.getFirstHeader("Content-Type"), is("application/json")); + assertThat(e.getResponseBody(), is("{\"error\":\"error traces in responses are disabled.\"}")); + assertThat(response.getStatusLine().getStatusCode(), is(400)); + } } } diff --git a/core/src/test/java/org/elasticsearch/options/detailederrors/DetailedErrorsEnabledIT.java b/core/src/test/java/org/elasticsearch/options/detailederrors/DetailedErrorsEnabledIT.java index d98db83dddc4d..a040c31299da9 100644 --- a/core/src/test/java/org/elasticsearch/options/detailederrors/DetailedErrorsEnabledIT.java +++ b/core/src/test/java/org/elasticsearch/options/detailederrors/DetailedErrorsEnabledIT.java @@ -19,16 +19,16 @@ package org.elasticsearch.options.detailederrors; -import org.apache.http.impl.client.HttpClients; +import org.elasticsearch.client.ElasticsearchResponse; +import org.elasticsearch.client.ElasticsearchResponseException; +import org.elasticsearch.client.RestClient; import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.common.settings.Settings; -import org.elasticsearch.http.HttpServerTransport; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.ESIntegTestCase.ClusterScope; import org.elasticsearch.test.ESIntegTestCase.Scope; -import org.elasticsearch.test.rest.client.http.HttpDeleteWithEntity; -import org.elasticsearch.test.rest.client.http.HttpRequestBuilder; -import org.elasticsearch.test.rest.client.http.HttpResponse; + +import java.util.Collections; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.not; @@ -47,25 +47,26 @@ protected Settings nodeSettings(int nodeOrdinal) { } public void testThatErrorTraceWorksByDefault() throws Exception { - // Make the HTTP request - HttpResponse response 
= new HttpRequestBuilder(HttpClients.createDefault()) - .httpTransport(internalCluster().getDataNodeInstance(HttpServerTransport.class)) - .path("/") - .addParam("error_trace", "true") - .method(HttpDeleteWithEntity.METHOD_NAME) - .execute(); - - assertThat(response.getHeaders().get("Content-Type"), containsString("application/json")); - assertThat(response.getBody(), containsString("\"stack_trace\":\"[Validation Failed: 1: index / indices is missing;]; nested: ActionRequestValidationException[Validation Failed: 1:")); - - // Make the HTTP request - response = new HttpRequestBuilder(HttpClients.createDefault()) - .httpTransport(internalCluster().getDataNodeInstance(HttpServerTransport.class)) - .path("/") - .method(HttpDeleteWithEntity.METHOD_NAME) - .execute(); + try (RestClient restClient = restClient()) { + try { + restClient.performRequest("DELETE", "/", Collections.singletonMap("error_trace", "true"), null); + fail("request should have failed"); + } catch(ElasticsearchResponseException e) { + ElasticsearchResponse response = e.getElasticsearchResponse(); + assertThat(response.getFirstHeader("Content-Type"), containsString("application/json")); + assertThat(e.getResponseBody(), containsString("\"stack_trace\":\"[Validation Failed: 1: index / indices is missing;]; " + + "nested: ActionRequestValidationException[Validation Failed: 1:")); + } - assertThat(response.getHeaders().get("Content-Type"), containsString("application/json")); - assertThat(response.getBody(), not(containsString("\"stack_trace\":\"[Validation Failed: 1: index / indices is missing;]; nested: ActionRequestValidationException[Validation Failed: 1:"))); + try { + restClient.performRequest("DELETE", "/", Collections.emptyMap(), null); + fail("request should have failed"); + } catch(ElasticsearchResponseException e) { + ElasticsearchResponse response = e.getElasticsearchResponse(); + assertThat(response.getFirstHeader("Content-Type"), containsString("application/json")); + 
assertThat(e.getResponseBody(), not(containsString("\"stack_trace\":\"[Validation Failed: 1: index / indices is missing;]; " + + "nested: ActionRequestValidationException[Validation Failed: 1:"))); + } + } } } diff --git a/core/src/test/java/org/elasticsearch/plugins/ResponseHeaderPluginIT.java b/core/src/test/java/org/elasticsearch/plugins/ResponseHeaderPluginIT.java index ac6d8fddd89e6..4ae9e3912b28d 100644 --- a/core/src/test/java/org/elasticsearch/plugins/ResponseHeaderPluginIT.java +++ b/core/src/test/java/org/elasticsearch/plugins/ResponseHeaderPluginIT.java @@ -18,14 +18,18 @@ */ package org.elasticsearch.plugins; +import org.apache.http.message.BasicHeader; +import org.elasticsearch.client.ElasticsearchResponse; +import org.elasticsearch.client.ElasticsearchResponseException; +import org.elasticsearch.client.RestClient; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.plugins.responseheader.TestResponseHeaderPlugin; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.ESIntegTestCase.ClusterScope; import org.elasticsearch.test.ESIntegTestCase.Scope; -import org.elasticsearch.test.rest.client.http.HttpResponse; import java.util.Collection; +import java.util.Collections; import static org.elasticsearch.rest.RestStatus.OK; import static org.elasticsearch.rest.RestStatus.UNAUTHORIZED; @@ -52,12 +56,20 @@ protected Collection> nodePlugins() { public void testThatSettingHeadersWorks() throws Exception { ensureGreen(); - HttpResponse response = httpClient().method("GET").path("/_protected").execute(); - assertThat(response, hasStatus(UNAUTHORIZED)); - assertThat(response.getHeaders().get("Secret"), equalTo("required")); + try (RestClient client = restClient()) { + try { + client.performRequest("GET", "/_protected", Collections.emptyMap(), null); + fail("request should have failed"); + } catch(ElasticsearchResponseException e) { + ElasticsearchResponse response = e.getElasticsearchResponse(); + assertThat(response, 
hasStatus(UNAUTHORIZED)); + assertThat(response.getFirstHeader("Secret"), equalTo("required")); + } - HttpResponse authResponse = httpClient().method("GET").path("/_protected").addHeader("Secret", "password").execute(); - assertThat(authResponse, hasStatus(OK)); - assertThat(authResponse.getHeaders().get("Secret"), equalTo("granted")); + ElasticsearchResponse authResponse = client.performRequest("GET", "/_protected", Collections.emptyMap(), null, + new BasicHeader("Secret", "password")); + assertThat(authResponse, hasStatus(OK)); + assertThat(authResponse.getFirstHeader("Secret"), equalTo("granted")); + } } } diff --git a/core/src/test/java/org/elasticsearch/rest/CorsNotSetIT.java b/core/src/test/java/org/elasticsearch/rest/CorsNotSetIT.java index dfdd88d198745..e407bccf872be 100644 --- a/core/src/test/java/org/elasticsearch/rest/CorsNotSetIT.java +++ b/core/src/test/java/org/elasticsearch/rest/CorsNotSetIT.java @@ -19,15 +19,18 @@ package org.elasticsearch.rest; +import org.apache.http.message.BasicHeader; +import org.elasticsearch.client.ElasticsearchResponse; +import org.elasticsearch.client.RestClient; import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.ESIntegTestCase.ClusterScope; -import org.elasticsearch.test.rest.client.http.HttpResponse; -import static org.hamcrest.Matchers.hasKey; +import java.util.Collections; + import static org.hamcrest.Matchers.is; -import static org.hamcrest.Matchers.not; +import static org.hamcrest.Matchers.nullValue; /** * @@ -44,18 +47,21 @@ protected Settings nodeSettings(int nodeOrdinal) { public void testCorsSettingDefaultBehaviourDoesNotReturnAnything() throws Exception { String corsValue = "http://localhost:9200"; - HttpResponse response = httpClient().method("GET").path("/").addHeader("User-Agent", "Mozilla Bar").addHeader("Origin", corsValue).execute(); - - assertThat(response.getStatusCode(), 
is(200)); - assertThat(response.getHeaders(), not(hasKey("Access-Control-Allow-Origin"))); - assertThat(response.getHeaders(), not(hasKey("Access-Control-Allow-Credentials"))); + try (RestClient restClient = restClient()) { + ElasticsearchResponse response = restClient.performRequest("GET", "/", Collections.emptyMap(), null, + new BasicHeader("User-Agent", "Mozilla Bar"), new BasicHeader("Origin", corsValue)); + assertThat(response.getStatusLine().getStatusCode(), is(200)); + assertThat(response.getFirstHeader("Access-Control-Allow-Origin"), nullValue()); + assertThat(response.getFirstHeader("Access-Control-Allow-Credentials"), nullValue()); + } } public void testThatOmittingCorsHeaderDoesNotReturnAnything() throws Exception { - HttpResponse response = httpClient().method("GET").path("/").execute(); - - assertThat(response.getStatusCode(), is(200)); - assertThat(response.getHeaders(), not(hasKey("Access-Control-Allow-Origin"))); - assertThat(response.getHeaders(), not(hasKey("Access-Control-Allow-Credentials"))); + try (RestClient restClient = restClient()) { + ElasticsearchResponse response = restClient.performRequest("GET", "/", Collections.emptyMap(), null); + assertThat(response.getStatusLine().getStatusCode(), is(200)); + assertThat(response.getFirstHeader("Access-Control-Allow-Origin"), nullValue()); + assertThat(response.getFirstHeader("Access-Control-Allow-Credentials"), nullValue()); + } } } diff --git a/core/src/test/java/org/elasticsearch/rest/CorsRegexIT.java b/core/src/test/java/org/elasticsearch/rest/CorsRegexIT.java index 18351dcb294da..729fed8e96d86 100644 --- a/core/src/test/java/org/elasticsearch/rest/CorsRegexIT.java +++ b/core/src/test/java/org/elasticsearch/rest/CorsRegexIT.java @@ -18,6 +18,10 @@ */ package org.elasticsearch.rest; +import org.apache.http.message.BasicHeader; +import org.elasticsearch.client.ElasticsearchResponse; +import org.elasticsearch.client.ElasticsearchResponseException; +import org.elasticsearch.client.RestClient; 
import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.network.NetworkModule; @@ -25,16 +29,16 @@ import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.ESIntegTestCase.ClusterScope; import org.elasticsearch.test.ESIntegTestCase.Scope; -import org.elasticsearch.test.rest.client.http.HttpResponse; import org.jboss.netty.handler.codec.http.HttpHeaders; +import java.util.Collections; + import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ALLOW_CREDENTIALS; import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ALLOW_METHODS; import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ALLOW_ORIGIN; import static org.elasticsearch.http.HttpTransportSettings.SETTING_CORS_ENABLED; -import static org.hamcrest.Matchers.hasKey; import static org.hamcrest.Matchers.is; -import static org.hamcrest.Matchers.not; +import static org.hamcrest.Matchers.nullValue; /** * Test CORS where the allow origin value is a regular expression. 
@@ -58,64 +62,77 @@ protected Settings nodeSettings(int nodeOrdinal) { public void testThatRegularExpressionWorksOnMatch() throws Exception { String corsValue = "http://localhost:9200"; - HttpResponse response = httpClient().method("GET").path("/").addHeader("User-Agent", "Mozilla Bar").addHeader("Origin", corsValue).execute(); - assertResponseWithOriginheader(response, corsValue); + try (RestClient client = restClient()) { + ElasticsearchResponse response = client.performRequest("GET", "/", Collections.emptyMap(), null, + new BasicHeader("User-Agent", "Mozilla Bar"), new BasicHeader("Origin", corsValue)); + assertResponseWithOriginheader(response, corsValue); - corsValue = "https://localhost:9200"; - response = httpClient().method("GET").path("/").addHeader("User-Agent", "Mozilla Bar").addHeader("Origin", corsValue).execute(); - assertResponseWithOriginheader(response, corsValue); - assertThat(response.getHeaders(), hasKey("Access-Control-Allow-Credentials")); - assertThat(response.getHeaders().get("Access-Control-Allow-Credentials"), is("true")); + corsValue = "https://localhost:9200"; + response = client.performRequest("GET", "/", Collections.emptyMap(), null, + new BasicHeader("User-Agent", "Mozilla Bar"), new BasicHeader("Origin", corsValue)); + assertResponseWithOriginheader(response, corsValue); + assertThat(response.getFirstHeader("Access-Control-Allow-Credentials"), is("true")); + } } public void testThatRegularExpressionReturnsForbiddenOnNonMatch() throws Exception { - HttpResponse response = httpClient().method("GET").path("/").addHeader("User-Agent", "Mozilla Bar").addHeader("Origin", "http://evil-host:9200").execute(); - // a rejected origin gets a FORBIDDEN - 403 - assertThat(response.getStatusCode(), is(403)); - assertThat(response.getHeaders(), not(hasKey("Access-Control-Allow-Origin"))); + try (RestClient client = restClient()) { + client.performRequest("GET", "/", Collections.emptyMap(), null, new BasicHeader("User-Agent", "Mozilla Bar"), + new 
BasicHeader("Origin", "http://evil-host:9200")); + fail("request should have failed"); + } catch(ElasticsearchResponseException e) { + ElasticsearchResponse response = e.getElasticsearchResponse(); + // a rejected origin gets a FORBIDDEN - 403 + assertThat(response.getStatusLine().getStatusCode(), is(403)); + assertThat(response.getFirstHeader("Access-Control-Allow-Origin"), nullValue()); + } } public void testThatSendingNoOriginHeaderReturnsNoAccessControlHeader() throws Exception { - HttpResponse response = httpClient().method("GET").path("/").addHeader("User-Agent", "Mozilla Bar").execute(); - assertThat(response.getStatusCode(), is(200)); - assertThat(response.getHeaders(), not(hasKey("Access-Control-Allow-Origin"))); + try (RestClient client = restClient()) { + ElasticsearchResponse response = client.performRequest("GET", "/", Collections.emptyMap(), null, + new BasicHeader("User-Agent", "Mozilla Bar")); + assertThat(response.getStatusLine().getStatusCode(), is(200)); + assertThat(response.getFirstHeader("Access-Control-Allow-Origin"), nullValue()); + } } public void testThatRegularExpressionIsNotAppliedWithoutCorrectBrowserOnMatch() throws Exception { - HttpResponse response = httpClient().method("GET").path("/").execute(); - assertThat(response.getStatusCode(), is(200)); - assertThat(response.getHeaders(), not(hasKey("Access-Control-Allow-Origin"))); + try (RestClient client = restClient()) { + ElasticsearchResponse response = client.performRequest("GET", "/", Collections.emptyMap(), null); + assertThat(response.getStatusLine().getStatusCode(), is(200)); + assertThat(response.getFirstHeader("Access-Control-Allow-Origin"), nullValue()); + } } public void testThatPreFlightRequestWorksOnMatch() throws Exception { String corsValue = "http://localhost:9200"; - HttpResponse response = httpClient().method("OPTIONS") - .path("/") - .addHeader("User-Agent", "Mozilla Bar") - .addHeader("Origin", corsValue) - .addHeader(HttpHeaders.Names.ACCESS_CONTROL_REQUEST_METHOD, 
"GET") - .execute(); - assertResponseWithOriginheader(response, corsValue); - assertThat(response.getHeaders(), hasKey("Access-Control-Allow-Methods")); + try (RestClient client = restClient()) { + ElasticsearchResponse response = client.performRequest("OPTIONS", "/", Collections.emptyMap(), null, + new BasicHeader("User-Agent", "Mozilla Bar"), new BasicHeader("Origin", corsValue), + new BasicHeader(HttpHeaders.Names.ACCESS_CONTROL_REQUEST_METHOD, "GET")); + assertResponseWithOriginheader(response, corsValue); + assertNotNull(response.getFirstHeader("Access-Control-Allow-Methods")); + } } public void testThatPreFlightRequestReturnsNullOnNonMatch() throws Exception { - HttpResponse response = httpClient().method("OPTIONS") - .path("/") - .addHeader("User-Agent", "Mozilla Bar") - .addHeader("Origin", "http://evil-host:9200") - .addHeader(HttpHeaders.Names.ACCESS_CONTROL_REQUEST_METHOD, "GET") - .execute(); - // a rejected origin gets a FORBIDDEN - 403 - assertThat(response.getStatusCode(), is(403)); - assertThat(response.getHeaders(), not(hasKey("Access-Control-Allow-Origin"))); - assertThat(response.getHeaders(), not(hasKey("Access-Control-Allow-Methods"))); + try (RestClient client = restClient()) { + client.performRequest("OPTIONS", "/", Collections.emptyMap(), null, new BasicHeader("User-Agent", "Mozilla Bar"), + new BasicHeader("Origin", "http://evil-host:9200"), + new BasicHeader(HttpHeaders.Names.ACCESS_CONTROL_REQUEST_METHOD, "GET")); + fail("request should have failed"); + } catch(ElasticsearchResponseException e) { + ElasticsearchResponse response = e.getElasticsearchResponse(); + // a rejected origin gets a FORBIDDEN - 403 + assertThat(response.getStatusLine().getStatusCode(), is(403)); + assertThat(response.getFirstHeader("Access-Control-Allow-Origin"), nullValue()); + assertThat(response.getFirstHeader("Access-Control-Allow-Methods"), nullValue()); + } } - protected static void assertResponseWithOriginheader(HttpResponse response, String 
expectedCorsHeader) { - assertThat(response.getStatusCode(), is(200)); - assertThat(response.getHeaders(), hasKey("Access-Control-Allow-Origin")); - assertThat(response.getHeaders().get("Access-Control-Allow-Origin"), is(expectedCorsHeader)); + protected static void assertResponseWithOriginheader(ElasticsearchResponse response, String expectedCorsHeader) { + assertThat(response.getStatusLine().getStatusCode(), is(200)); + assertThat(response.getFirstHeader("Access-Control-Allow-Origin"), is(expectedCorsHeader)); } - } diff --git a/core/src/test/java/org/elasticsearch/rest/action/main/RestMainActionIT.java b/core/src/test/java/org/elasticsearch/rest/action/main/RestMainActionIT.java index 0ad40f84cf760..9c2b0284ef7ee 100644 --- a/core/src/test/java/org/elasticsearch/rest/action/main/RestMainActionIT.java +++ b/core/src/test/java/org/elasticsearch/rest/action/main/RestMainActionIT.java @@ -18,16 +18,18 @@ */ package org.elasticsearch.rest.action.main; +import org.apache.http.util.EntityUtils; +import org.elasticsearch.client.ElasticsearchResponse; +import org.elasticsearch.client.RestClient; import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.test.ESIntegTestCase; -import org.elasticsearch.test.rest.client.http.HttpResponse; import java.io.IOException; +import java.util.Collections; import static org.hamcrest.Matchers.containsString; import static org.hamcrest.Matchers.equalTo; -import static org.hamcrest.Matchers.nullValue; public class RestMainActionIT extends ESIntegTestCase { @Override @@ -39,14 +41,19 @@ protected Settings nodeSettings(int nodeOrdinal) { } public void testHeadRequest() throws IOException { - final HttpResponse response = httpClient().method("HEAD").path("/").execute(); - assertThat(response.getStatusCode(), equalTo(200)); - assertThat(response.getBody(), nullValue()); + try (RestClient client = restClient()) { + ElasticsearchResponse response = 
client.performRequest("HEAD", "/", Collections.emptyMap(), null); + assertThat(response.getStatusLine().getStatusCode(), equalTo(200)); + assertNull(response.getEntity()); + } } public void testGetRequest() throws IOException { - final HttpResponse response = httpClient().path("/").execute(); - assertThat(response.getStatusCode(), equalTo(200)); - assertThat(response.getBody(), containsString("cluster_name")); + try (RestClient client = restClient()) { + ElasticsearchResponse response = client.performRequest("GET", "/", Collections.emptyMap(), null); + assertThat(response.getStatusLine().getStatusCode(), equalTo(200)); + assertNotNull(response.getEntity()); + assertThat(EntityUtils.toString(response.getEntity()), containsString("cluster_name")); + } } } diff --git a/core/src/test/java/org/elasticsearch/transport/ContextAndHeaderTransportIT.java b/core/src/test/java/org/elasticsearch/transport/ContextAndHeaderTransportIT.java index eb13de34858a2..aae15dc778a68 100644 --- a/core/src/test/java/org/elasticsearch/transport/ContextAndHeaderTransportIT.java +++ b/core/src/test/java/org/elasticsearch/transport/ContextAndHeaderTransportIT.java @@ -19,8 +19,7 @@ package org.elasticsearch.transport; -import org.apache.http.impl.client.CloseableHttpClient; -import org.apache.http.impl.client.HttpClients; +import org.apache.http.message.BasicHeader; import org.elasticsearch.action.ActionListener; import org.elasticsearch.action.ActionModule; import org.elasticsearch.action.ActionRequest; @@ -33,12 +32,13 @@ import org.elasticsearch.action.support.ActionFilter; import org.elasticsearch.action.termvectors.MultiTermVectorsRequest; import org.elasticsearch.client.Client; +import org.elasticsearch.client.ElasticsearchResponse; +import org.elasticsearch.client.RestClient; import org.elasticsearch.common.inject.AbstractModule; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.inject.Module; import org.elasticsearch.common.network.NetworkModule; import 
org.elasticsearch.common.settings.Settings; -import org.elasticsearch.http.HttpServerTransport; import org.elasticsearch.index.query.BoolQueryBuilder; import org.elasticsearch.index.query.GeoShapeQueryBuilder; import org.elasticsearch.index.query.MoreLikeThisQueryBuilder; @@ -50,8 +50,6 @@ import org.elasticsearch.rest.RestController; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.ESIntegTestCase.ClusterScope; -import org.elasticsearch.test.rest.client.http.HttpRequestBuilder; -import org.elasticsearch.test.rest.client.http.HttpResponse; import org.elasticsearch.threadpool.ThreadPool; import org.junit.After; import org.junit.Before; @@ -217,26 +215,22 @@ public void testThatMoreLikeThisQueryMultiTermVectorRequestContainsContextAndHea } public void testThatRelevantHttpHeadersBecomeRequestHeaders() throws Exception { - String releventHeaderName = "relevant_" + randomHeaderKey; - for (RestController restController : internalCluster().getDataNodeInstances(RestController.class)) { - restController.registerRelevantHeaders(releventHeaderName); + String relevantHeaderName = "relevant_" + randomHeaderKey; + for (RestController restController : internalCluster().getInstances(RestController.class)) { + restController.registerRelevantHeaders(relevantHeaderName); } - CloseableHttpClient httpClient = HttpClients.createDefault(); - HttpResponse response = new HttpRequestBuilder(httpClient) - .httpTransport(internalCluster().getDataNodeInstance(HttpServerTransport.class)) - .addHeader(randomHeaderKey, randomHeaderValue) - .addHeader(releventHeaderName, randomHeaderValue) - .path("/" + queryIndex + "/_search") - .execute(); - - assertThat(response, hasStatus(OK)); - List searchRequests = getRequests(SearchRequest.class); - assertThat(searchRequests, hasSize(greaterThan(0))); - for (RequestAndHeaders requestAndHeaders : searchRequests) { - assertThat(requestAndHeaders.headers.containsKey(releventHeaderName), is(true)); - // was not specified, thus is 
not included - assertThat(requestAndHeaders.headers.containsKey(randomHeaderKey), is(false)); + try (RestClient client = restClient()) { + ElasticsearchResponse response = client.performRequest("GET", "/" + queryIndex + "/_search", Collections.emptyMap(), null, + new BasicHeader(randomHeaderKey, randomHeaderValue), new BasicHeader(relevantHeaderName, randomHeaderValue)); + assertThat(response, hasStatus(OK)); + List searchRequests = getRequests(SearchRequest.class); + assertThat(searchRequests, hasSize(greaterThan(0))); + for (RequestAndHeaders requestAndHeaders : searchRequests) { + assertThat(requestAndHeaders.headers.containsKey(relevantHeaderName), is(true)); + // was not specified, thus is not included + assertThat(requestAndHeaders.headers.containsKey(randomHeaderKey), is(false)); + } } } diff --git a/test/framework/build.gradle b/test/framework/build.gradle index af65c9ff7c9d2..078511611feca 100644 --- a/test/framework/build.gradle +++ b/test/framework/build.gradle @@ -23,14 +23,15 @@ dependencies { compile "org.elasticsearch:elasticsearch:${version}" compile "com.carrotsearch.randomizedtesting:randomizedtesting-runner:${versions.randomizedrunner}" compile "junit:junit:${versions.junit}" - compile 'org.hamcrest:hamcrest-all:1.3' + compile "org.hamcrest:hamcrest-all:1.3" compile "org.apache.lucene:lucene-test-framework:${versions.lucene}" compile "org.apache.lucene:lucene-codecs:${versions.lucene}" + compile "org.elasticsearch:client:${version}" compile "org.apache.httpcomponents:httpclient:${versions.httpclient}" compile "org.apache.httpcomponents:httpcore:${versions.httpcore}" compile "commons-logging:commons-logging:${versions.commonslogging}" compile "commons-codec:commons-codec:${versions.commonscodec}" - compile 'org.elasticsearch:securemock:1.2' + compile "org.elasticsearch:securemock:1.2" } compileJava.options.compilerArgs << '-Xlint:-cast,-rawtypes,-try,-unchecked' diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java 
b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java index 2a3eecf4cc819..67f2a65492867 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java @@ -22,8 +22,8 @@ import com.carrotsearch.randomizedtesting.annotations.TestGroup; import com.carrotsearch.randomizedtesting.generators.RandomInts; import com.carrotsearch.randomizedtesting.generators.RandomPicks; +import org.apache.http.HttpHost; import org.apache.http.impl.client.CloseableHttpClient; -import org.apache.http.impl.client.HttpClients; import org.apache.lucene.util.IOUtils; import org.apache.lucene.util.LuceneTestCase; import org.apache.lucene.util.TestUtil; @@ -57,6 +57,7 @@ import org.elasticsearch.client.AdminClient; import org.elasticsearch.client.Client; import org.elasticsearch.client.Requests; +import org.elasticsearch.client.RestClient; import org.elasticsearch.cluster.ClusterState; import org.elasticsearch.cluster.health.ClusterHealthStatus; import org.elasticsearch.cluster.metadata.IndexMetaData; @@ -118,7 +119,6 @@ import org.elasticsearch.search.SearchHit; import org.elasticsearch.test.client.RandomizingClient; import org.elasticsearch.test.disruption.ServiceDisruptionScheme; -import org.elasticsearch.test.rest.client.http.HttpRequestBuilder; import org.elasticsearch.test.store.MockFSIndexStore; import org.elasticsearch.test.transport.AssertingLocalTransport; import org.elasticsearch.test.transport.MockTransportService; @@ -2033,20 +2033,34 @@ protected Settings prepareBackwardsDataDir(Path backwardsIndex, Object... 
settin return builder.build(); } - protected HttpRequestBuilder httpClient() { - return httpClient(HttpClients.createDefault()); + protected static RestClient restClient() { + return restClient(null); } - protected HttpRequestBuilder httpClient(CloseableHttpClient httpClient) { + protected static RestClient restClient(CloseableHttpClient httpClient) { + return restClient(httpClient, "http"); + } + + protected static RestClient restClient(CloseableHttpClient httpClient, String protocol) { final NodesInfoResponse nodeInfos = client().admin().cluster().prepareNodesInfo().get(); final List nodes = nodeInfos.getNodes(); assertFalse(nodeInfos.hasFailures()); - TransportAddress publishAddress = randomFrom(nodes).getHttp().address().publishAddress(); - assertEquals(1, publishAddress.uniqueAddressTypeId()); - InetSocketAddress address = ((InetSocketTransportAddress) publishAddress).address(); - return new HttpRequestBuilder(httpClient).host(NetworkAddress.format(address.getAddress())).port(address.getPort()); - } + List hosts = new ArrayList<>(); + for (NodeInfo node : nodes) { + if (node.getHttp() != null) { + TransportAddress publishAddress = node.getHttp().address().publishAddress(); + assertEquals(1, publishAddress.uniqueAddressTypeId()); + InetSocketAddress address = ((InetSocketTransportAddress) publishAddress).address(); + hosts.add(new HttpHost(NetworkAddress.format(address.getAddress()), address.getPort(), protocol)); + } + } + RestClient.Builder builder = RestClient.builder().setHosts(hosts.toArray(new HttpHost[hosts.size()])); + if (httpClient != null) { + builder.setHttpClient(httpClient); + } + return builder.build(); + } /** * This method is executed iff the test is annotated with {@link SuiteScopeTestCase} diff --git a/test/framework/src/main/java/org/elasticsearch/test/hamcrest/ElasticsearchAssertions.java b/test/framework/src/main/java/org/elasticsearch/test/hamcrest/ElasticsearchAssertions.java index d10c113b59018..0fe65ccf7520e 100644 --- 
a/test/framework/src/main/java/org/elasticsearch/test/hamcrest/ElasticsearchAssertions.java +++ b/test/framework/src/main/java/org/elasticsearch/test/hamcrest/ElasticsearchAssertions.java @@ -42,6 +42,7 @@ import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.action.support.master.AcknowledgedRequestBuilder; import org.elasticsearch.action.support.master.AcknowledgedResponse; +import org.elasticsearch.client.ElasticsearchResponse; import org.elasticsearch.cluster.block.ClusterBlock; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.metadata.IndexTemplateMetaData; @@ -60,7 +61,6 @@ import org.elasticsearch.search.suggest.Suggest; import org.elasticsearch.test.ESIntegTestCase; import org.elasticsearch.test.VersionUtils; -import org.elasticsearch.test.rest.client.http.HttpResponse; import org.hamcrest.CoreMatchers; import org.hamcrest.Matcher; import org.hamcrest.Matchers; @@ -82,7 +82,6 @@ import static org.hamcrest.CoreMatchers.equalTo; import static org.hamcrest.CoreMatchers.is; import static org.hamcrest.MatcherAssert.assertThat; -import static org.hamcrest.Matchers.contains; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.greaterThanOrEqualTo; import static org.hamcrest.Matchers.hasItem; @@ -90,7 +89,6 @@ import static org.hamcrest.Matchers.instanceOf; import static org.hamcrest.Matchers.not; import static org.hamcrest.Matchers.notNullValue; -import static org.hamcrest.Matchers.nullValue; import static org.junit.Assert.assertArrayEquals; import static org.junit.Assert.assertEquals; import static org.junit.Assert.assertFalse; @@ -488,8 +486,8 @@ public static Matcher hasScore(final float score) { return new ElasticsearchMatchers.SearchHitHasScoreMatcher(score); } - public static Matcher hasStatus(RestStatus restStatus) { - return new ElasticsearchMatchers.HttpResponseHasStatusMatcher(restStatus); + public static Matcher hasStatus(RestStatus 
restStatus) { + return new ElasticsearchMatchers.ElasticsearchResponseHasStatusMatcher(restStatus); } public static T assertBooleanSubQuery(Query query, Class subqueryType, int i) { diff --git a/test/framework/src/main/java/org/elasticsearch/test/hamcrest/ElasticsearchMatchers.java b/test/framework/src/main/java/org/elasticsearch/test/hamcrest/ElasticsearchMatchers.java index d86791fa6bdad..c40e434a5a773 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/hamcrest/ElasticsearchMatchers.java +++ b/test/framework/src/main/java/org/elasticsearch/test/hamcrest/ElasticsearchMatchers.java @@ -18,9 +18,9 @@ */ package org.elasticsearch.test.hamcrest; +import org.elasticsearch.client.ElasticsearchResponse; import org.elasticsearch.rest.RestStatus; import org.elasticsearch.search.SearchHit; -import org.elasticsearch.test.rest.client.http.HttpResponse; import org.hamcrest.Description; import org.hamcrest.TypeSafeMatcher; @@ -118,27 +118,27 @@ public void describeTo(final Description description) { } } - public static class HttpResponseHasStatusMatcher extends TypeSafeMatcher { + public static class ElasticsearchResponseHasStatusMatcher extends TypeSafeMatcher { private RestStatus restStatus; - public HttpResponseHasStatusMatcher(RestStatus restStatus) { + public ElasticsearchResponseHasStatusMatcher(RestStatus restStatus) { this.restStatus = restStatus; } @Override - protected boolean matchesSafely(HttpResponse response) { - return response.getStatusCode() == restStatus.getStatus(); + protected boolean matchesSafely(ElasticsearchResponse response) { + return response.getStatusLine().getStatusCode() == restStatus.getStatus(); } @Override - public void describeMismatchSafely(final HttpResponse response, final Description mismatchDescription) { - mismatchDescription.appendText(" was ").appendValue(response.getStatusCode()); + public void describeMismatchSafely(final ElasticsearchResponse response, final Description mismatchDescription) { + 
mismatchDescription.appendText(" was ").appendValue(response.getStatusLine().getStatusCode()); } @Override public void describeTo(final Description description) { - description.appendText("HTTP response status code should be ").appendValue(restStatus.getStatus()); + description.appendText("Elasticsearch response status code should be ").appendValue(restStatus.getStatus()); } } } From eae914ae8e768ba57a10ecda41cf9b64c372c68b Mon Sep 17 00:00:00 2001 From: javanna Date: Thu, 19 May 2016 15:18:17 +0200 Subject: [PATCH 034/103] Replace rest test client with low level RestClient We still have a wrapper called RestTestClient that is very specific to Rest tests, as well as RestTestResponse etc. but all the low level bits around http connections etc. are now handled by RestClient. --- .../resources/checkstyle_suppressions.xml | 4 - .../org/elasticsearch/client/RestClient.java | 1 - .../ExceptionSerializationTests.java | 1 - .../test/rest/ESRestTestCase.java | 67 ++--- .../test/rest/RestTestExecutionContext.java | 37 ++- .../org/elasticsearch/test/rest/Stash.java | 16 +- .../test/rest/client/RestException.java | 41 --- .../{RestClient.java => RestTestClient.java} | 266 +++++++++--------- ...estResponse.java => RestTestResponse.java} | 56 ++-- .../client/http/HttpDeleteWithEntity.java | 40 --- .../rest/client/http/HttpGetWithEntity.java | 40 --- .../rest/client/http/HttpRequestBuilder.java | 250 ---------------- .../test/rest/client/http/HttpResponse.java | 108 ------- .../test/rest/section/DoSection.java | 33 ++- 14 files changed, 253 insertions(+), 707 deletions(-) delete mode 100644 test/framework/src/main/java/org/elasticsearch/test/rest/client/RestException.java rename test/framework/src/main/java/org/elasticsearch/test/rest/client/{RestClient.java => RestTestClient.java} (52%) rename test/framework/src/main/java/org/elasticsearch/test/rest/client/{RestResponse.java => RestTestResponse.java} (62%) delete mode 100644 
test/framework/src/main/java/org/elasticsearch/test/rest/client/http/HttpDeleteWithEntity.java delete mode 100644 test/framework/src/main/java/org/elasticsearch/test/rest/client/http/HttpGetWithEntity.java delete mode 100644 test/framework/src/main/java/org/elasticsearch/test/rest/client/http/HttpRequestBuilder.java delete mode 100644 test/framework/src/main/java/org/elasticsearch/test/rest/client/http/HttpResponse.java diff --git a/buildSrc/src/main/resources/checkstyle_suppressions.xml b/buildSrc/src/main/resources/checkstyle_suppressions.xml index d6a38e76e3ec3..985ffefa41154 100644 --- a/buildSrc/src/main/resources/checkstyle_suppressions.xml +++ b/buildSrc/src/main/resources/checkstyle_suppressions.xml @@ -1311,9 +1311,6 @@ - - - @@ -1321,7 +1318,6 @@ - diff --git a/client/src/main/java/org/elasticsearch/client/RestClient.java b/client/src/main/java/org/elasticsearch/client/RestClient.java index 31914f46282ff..b366d98f77a04 100644 --- a/client/src/main/java/org/elasticsearch/client/RestClient.java +++ b/client/src/main/java/org/elasticsearch/client/RestClient.java @@ -121,7 +121,6 @@ public ElasticsearchResponse performRequest(String method, String endpoint, Map< ElasticsearchResponse elasticsearchResponse = new ElasticsearchResponse(request.getRequestLine(), connection.getHost(), response); int statusCode = response.getStatusLine().getStatusCode(); - //TODO make ignore status code configurable. 
rest-spec and tests support that parameter (ignore_missing) if (statusCode < 300 || (request.getMethod().equals(HttpHead.METHOD_NAME) && statusCode == 404) ) { RequestLogger.log(logger, "request succeeded", request, connection.getHost(), response); onSuccess(connection); diff --git a/core/src/test/java/org/elasticsearch/ExceptionSerializationTests.java b/core/src/test/java/org/elasticsearch/ExceptionSerializationTests.java index 682eed35f2eab..d10fc89084890 100644 --- a/core/src/test/java/org/elasticsearch/ExceptionSerializationTests.java +++ b/core/src/test/java/org/elasticsearch/ExceptionSerializationTests.java @@ -121,7 +121,6 @@ public void testExceptionRegistration() .resolve("org").resolve("elasticsearch"); final Set> ignore = Sets.newHashSet( org.elasticsearch.test.rest.parser.RestTestParseException.class, - org.elasticsearch.test.rest.client.RestException.class, CancellableThreadsTests.CustomException.class, org.elasticsearch.rest.BytesRestResponseTests.WithHeadersException.class, AbstractClientHeadersTestCase.InternalException.class); diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java index ab2c1d2afbe33..720408637047a 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java @@ -19,34 +19,16 @@ package org.elasticsearch.test.rest; -import java.io.IOException; -import java.io.InputStream; -import java.net.URI; -import java.net.URISyntaxException; -import java.net.URL; -import java.nio.file.FileSystem; -import java.nio.file.FileSystems; -import java.nio.file.Files; -import java.nio.file.Path; -import java.nio.file.StandardCopyOption; -import java.util.ArrayList; -import java.util.Collections; -import java.util.Comparator; -import java.util.HashMap; -import java.util.HashSet; -import java.util.List; -import java.util.Map; -import 
java.util.Set; - +import com.carrotsearch.randomizedtesting.RandomizedTest; import org.apache.lucene.util.IOUtils; import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksAction; +import org.elasticsearch.client.ElasticsearchResponseException; import org.elasticsearch.common.Strings; import org.elasticsearch.common.SuppressForbidden; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentHelper; import org.elasticsearch.test.ESTestCase; -import org.elasticsearch.test.rest.client.RestException; -import org.elasticsearch.test.rest.client.RestResponse; +import org.elasticsearch.test.rest.client.RestTestResponse; import org.elasticsearch.test.rest.parser.RestTestParseException; import org.elasticsearch.test.rest.parser.RestTestSuiteParser; import org.elasticsearch.test.rest.section.DoSection; @@ -62,7 +44,24 @@ import org.junit.Before; import org.junit.BeforeClass; -import com.carrotsearch.randomizedtesting.RandomizedTest; +import java.io.IOException; +import java.io.InputStream; +import java.net.URI; +import java.net.URISyntaxException; +import java.net.URL; +import java.nio.file.FileSystem; +import java.nio.file.FileSystems; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.StandardCopyOption; +import java.util.ArrayList; +import java.util.Collections; +import java.util.Comparator; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; import static java.util.Collections.emptyList; import static java.util.Collections.emptyMap; @@ -98,8 +97,8 @@ public abstract class ESRestTestCase extends ESTestCase { private static final String DEFAULT_SPEC_PATH = "/rest-api-spec/api"; /** - * This separator pattern matches ',' except it is preceded by a '\'. This allows us to support ',' within paths when it is escaped with - * a slash. + * This separator pattern matches ',' except it is preceded by a '\'. 
+ * This allows us to support ',' within paths when it is escaped with a slash. * * For example, the path string "/a/b/c\,d/e/f,/foo/bar,/baz" is separated to "/a/b/c\,d/e/f", "/foo/bar" and "/baz". * @@ -233,7 +232,7 @@ static FileSystem getFileSystem() throws IOException { } @BeforeClass - public static void initExecutionContext() throws IOException, RestException { + public static void initExecutionContext() throws IOException { String[] specPaths = resolvePathsProperty(REST_TESTS_SPEC, DEFAULT_SPEC_PATH); RestSpec restSpec = null; FileSystem fileSystem = getFileSystem(); @@ -277,9 +276,9 @@ public void wipeCluster() throws Exception { deleteIndicesArgs.put("index", "*"); try { adminExecutionContext.callApi("indices.delete", deleteIndicesArgs, Collections.emptyList(), Collections.emptyMap()); - } catch (RestException e) { + } catch (ElasticsearchResponseException e) { // 404 here just means we had no indexes - if (e.statusCode() != 404) { + if (e.getElasticsearchResponse().getStatusLine().getStatusCode() != 404) { throw e; } } @@ -300,8 +299,8 @@ public void wipeCluster() throws Exception { * other tests. 
*/ @After - public void logIfThereAreRunningTasks() throws InterruptedException, IOException, RestException { - RestResponse tasks = adminExecutionContext.callApi("tasks.list", emptyMap(), emptyList(), emptyMap()); + public void logIfThereAreRunningTasks() throws InterruptedException, IOException { + RestTestResponse tasks = adminExecutionContext.callApi("tasks.list", emptyMap(), emptyList(), emptyMap()); Set runningTasks = runningTasks(tasks); // Ignore the task list API - it doens't count against us runningTasks.remove(ListTasksAction.NAME); @@ -347,7 +346,7 @@ protected URL[] getClusterUrls() { } @Before - public void reset() throws IOException, RestException { + public void reset() throws IOException { // admin context must be available for @After always, regardless of whether the test was blacklisted adminExecutionContext.initClient(clusterUrls, restAdminSettings()); adminExecutionContext.clear(); @@ -355,7 +354,8 @@ public void reset() throws IOException, RestException { //skip test if it matches one of the blacklist globs for (BlacklistedPathPatternMatcher blacklistedPathMatcher : blacklistPathMatchers) { String testPath = testCandidate.getSuitePath() + "/" + testCandidate.getTestSection().getName(); - assumeFalse("[" + testCandidate.getTestPath() + "] skipped, reason: blacklisted", blacklistedPathMatcher.isSuffixMatch(testPath)); + assumeFalse("[" + testCandidate.getTestPath() + "] skipped, reason: blacklisted", blacklistedPathMatcher + .isSuffixMatch(testPath)); } //The client needs non static info to get initialized, therefore it can't be initialized in the before class restTestExecutionContext.initClient(clusterUrls, restClientSettings()); @@ -374,7 +374,8 @@ private static String buildSkipMessage(String description, SkipSection skipSecti if (skipSection.isVersionCheck()) { messageBuilder.append("[").append(description).append("] skipped, reason: [").append(skipSection.getReason()).append("] "); } else { - 
messageBuilder.append("[").append(description).append("] skipped, reason: features ").append(skipSection.getFeatures()).append(" not supported"); + messageBuilder.append("[").append(description).append("] skipped, reason: features ") + .append(skipSection.getFeatures()).append(" not supported"); } return messageBuilder.toString(); } @@ -401,7 +402,7 @@ public void test() throws IOException { } @SuppressWarnings("unchecked") - public Set runningTasks(RestResponse response) throws IOException { + public Set runningTasks(RestTestResponse response) throws IOException { Set runningTasks = new HashSet<>(); Map nodes = (Map) response.evaluate("nodes"); for (Map.Entry node : nodes.entrySet()) { diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/RestTestExecutionContext.java b/test/framework/src/main/java/org/elasticsearch/test/rest/RestTestExecutionContext.java index 4a7f83953def2..1fa3dba5d83ef 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/RestTestExecutionContext.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/RestTestExecutionContext.java @@ -19,18 +19,17 @@ package org.elasticsearch.test.rest; import org.elasticsearch.Version; +import org.elasticsearch.client.ElasticsearchResponseException; import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.xcontent.XContentFactory; -import org.elasticsearch.test.rest.client.RestClient; -import org.elasticsearch.test.rest.client.RestException; -import org.elasticsearch.test.rest.client.RestResponse; +import org.elasticsearch.test.rest.client.RestTestClient; +import org.elasticsearch.test.rest.client.RestTestResponse; import org.elasticsearch.test.rest.spec.RestSpec; import java.io.Closeable; import java.io.IOException; -import java.net.InetSocketAddress; import java.net.URL; import java.util.HashMap; import java.util.List; @@ -50,9 +49,9 @@ 
public class RestTestExecutionContext implements Closeable { private final RestSpec restSpec; - private RestClient restClient; + private RestTestClient restTestClient; - private RestResponse response; + private RestTestResponse response; public RestTestExecutionContext(RestSpec restSpec) { this.restSpec = restSpec; @@ -61,10 +60,9 @@ public RestTestExecutionContext(RestSpec restSpec) { /** * Calls an elasticsearch api with the parameters and request body provided as arguments. * Saves the obtained response in the execution context. - * @throws RestException if the returned status code is non ok */ - public RestResponse callApi(String apiName, Map params, List> bodies, - Map headers) throws IOException, RestException { + public RestTestResponse callApi(String apiName, Map params, List> bodies, + Map headers) throws IOException { //makes a copy of the parameters before modifying them for this specific request HashMap requestParams = new HashMap<>(params); for (Map.Entry entry : requestParams.entrySet()) { @@ -80,8 +78,8 @@ public RestResponse callApi(String apiName, Map params, List body) throws IOException { return XContentFactory.jsonBuilder().map(body).string(); } - private RestResponse callApiInternal(String apiName, Map params, String body, Map headers) throws IOException, RestException { - return restClient.callApi(apiName, params, body, headers); + private RestTestResponse callApiInternal(String apiName, Map params, String body, Map headers) + throws IOException { + return restTestClient.callApi(apiName, params, body, headers); } /** @@ -120,9 +119,9 @@ public Object response(String path) throws IOException { /** * Creates the embedded REST client when needed. Needs to be called before each test. 
*/ - public void initClient(URL[] urls, Settings settings) throws IOException, RestException { - if (restClient == null) { - restClient = new RestClient(restSpec, settings, urls); + public void initClient(URL[] urls, Settings settings) throws IOException { + if (restTestClient == null) { + restTestClient = new RestTestClient(restSpec, settings, urls); } } @@ -143,7 +142,7 @@ public Stash stash() { * Returns the current es version as a string */ public Version esVersion() { - return restClient.getEsVersion(); + return restTestClient.getEsVersion(); } /** @@ -151,8 +150,8 @@ public Version esVersion() { */ @Override public void close() { - if (restClient != null) { - restClient.close(); + if (restTestClient != null) { + restTestClient.close(); } } } diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/Stash.java b/test/framework/src/main/java/org/elasticsearch/test/rest/Stash.java index 52b531e5cd1c1..885df395c2bf1 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/Stash.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/Stash.java @@ -19,17 +19,17 @@ package org.elasticsearch.test.rest; -import java.io.IOException; -import java.util.HashMap; -import java.util.List; -import java.util.Map; - import org.elasticsearch.common.Strings; import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.xcontent.ToXContent; import org.elasticsearch.common.xcontent.XContentBuilder; -import org.elasticsearch.test.rest.client.RestResponse; +import org.elasticsearch.test.rest.client.RestTestResponse; + +import java.io.IOException; +import java.util.HashMap; +import java.util.List; +import java.util.Map; /** * Allows to cache the last obtained test response and or part of it within variables @@ -42,7 +42,7 @@ public class Stash implements ToXContent { public static final Stash EMPTY = new Stash(); private final Map stash = new HashMap<>(); - private RestResponse 
response; + private RestTestResponse response; /** * Allows to saved a specific field in the stash as key-value pair @@ -55,7 +55,7 @@ public void stashValue(String key, Object value) { } } - public void stashResponse(RestResponse response) throws IOException { + public void stashResponse(RestTestResponse response) throws IOException { // TODO we can almost certainly save time by lazily evaluating the body stashValue("body", response.getBody()); this.response = response; diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/client/RestException.java b/test/framework/src/main/java/org/elasticsearch/test/rest/client/RestException.java deleted file mode 100644 index 2236134837bae..0000000000000 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/client/RestException.java +++ /dev/null @@ -1,41 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -package org.elasticsearch.test.rest.client; - -/** - * Thrown when a status code that holds an error is received (unless needs to be ignored) - * Holds the original {@link RestResponse} - */ -public class RestException extends Exception { - - private final RestResponse restResponse; - - public RestException(String message, RestResponse restResponse) { - super(message); - this.restResponse = restResponse; - } - - public RestResponse restResponse() { - return restResponse; - } - - public int statusCode() { - return restResponse.getStatusCode(); - } -} diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/client/RestClient.java b/test/framework/src/main/java/org/elasticsearch/test/rest/client/RestTestClient.java similarity index 52% rename from test/framework/src/main/java/org/elasticsearch/test/rest/client/RestClient.java rename to test/framework/src/main/java/org/elasticsearch/test/rest/client/RestTestClient.java index cb35653b1035b..3018af869b340 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/client/RestClient.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/client/RestTestClient.java @@ -19,17 +19,25 @@ package org.elasticsearch.test.rest.client; import com.carrotsearch.randomizedtesting.RandomizedTest; +import org.apache.http.Header; +import org.apache.http.HttpEntity; +import org.apache.http.HttpHost; import org.apache.http.config.Registry; import org.apache.http.config.RegistryBuilder; import org.apache.http.conn.socket.ConnectionSocketFactory; import org.apache.http.conn.socket.PlainConnectionSocketFactory; import org.apache.http.conn.ssl.SSLConnectionSocketFactory; +import org.apache.http.conn.ssl.SSLContexts; +import org.apache.http.entity.StringEntity; import org.apache.http.impl.client.CloseableHttpClient; -import org.apache.http.impl.client.HttpClients; +import org.apache.http.impl.client.HttpClientBuilder; import org.apache.http.impl.conn.PoolingHttpClientConnectionManager; -import 
org.apache.http.ssl.SSLContexts; +import org.apache.http.message.BasicHeader; import org.apache.lucene.util.IOUtils; import org.elasticsearch.Version; +import org.elasticsearch.client.ElasticsearchResponse; +import org.elasticsearch.client.ElasticsearchResponseException; +import org.elasticsearch.client.RestClient; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.PathUtils; import org.elasticsearch.common.logging.ESLogger; @@ -37,8 +45,6 @@ import org.elasticsearch.common.settings.Settings; import org.elasticsearch.common.util.concurrent.ThreadContext; import org.elasticsearch.common.util.set.Sets; -import org.elasticsearch.test.rest.client.http.HttpRequestBuilder; -import org.elasticsearch.test.rest.client.http.HttpResponse; import org.elasticsearch.test.rest.spec.RestApi; import org.elasticsearch.test.rest.spec.RestSpec; @@ -46,6 +52,8 @@ import java.io.Closeable; import java.io.IOException; import java.io.InputStream; +import java.net.URI; +import java.net.URISyntaxException; import java.net.URL; import java.nio.file.Files; import java.nio.file.Path; @@ -55,61 +63,53 @@ import java.security.NoSuchAlgorithmException; import java.security.cert.CertificateException; import java.util.ArrayList; +import java.util.Collections; import java.util.HashMap; import java.util.List; import java.util.Map; +import java.util.Objects; import java.util.Set; -import java.util.concurrent.TimeUnit; - -import static java.util.Objects.requireNonNull; /** * REST client used to test the elasticsearch REST layer * Holds the {@link RestSpec} used to translate api calls into REST calls */ -public class RestClient implements Closeable { +public class RestTestClient implements Closeable { public static final String PROTOCOL = "protocol"; public static final String TRUSTSTORE_PATH = "truststore.path"; public static final String TRUSTSTORE_PASSWORD = "truststore.password"; - private static final ESLogger logger = Loggers.getLogger(RestClient.class); + + private static 
final ESLogger logger = Loggers.getLogger(RestTestClient.class); //query_string params that don't need to be declared in the spec, thay are supported by default private static final Set ALWAYS_ACCEPTED_QUERY_STRING_PARAMS = Sets.newHashSet("pretty", "source", "filter_path"); - private final String protocol; private final RestSpec restSpec; - private final CloseableHttpClient httpClient; - private final URL[] urls; + private final RestClient restClient; private final Version esVersion; - private final ThreadContext threadContext; - public RestClient(RestSpec restSpec, Settings settings, URL[] urls) throws IOException, RestException { + public RestTestClient(RestSpec restSpec, Settings settings, URL[] urls) throws IOException { assert urls.length > 0; this.restSpec = restSpec; - this.protocol = settings.get(PROTOCOL, "http"); - this.httpClient = createHttpClient(settings); - this.threadContext = new ThreadContext(settings); - this.urls = urls; - this.esVersion = readAndCheckVersion(); + this.restClient = createRestClient(urls, settings); + this.esVersion = readAndCheckVersion(urls); logger.info("REST client initialized {}, elasticsearch version: [{}]", urls, esVersion); } - private Version readAndCheckVersion() throws IOException, RestException { - //we make a manual call here without using callApi method, mainly because we are initializing - //and the randomized context doesn't exist for the current thread (would be used to choose the method otherwise) + private Version readAndCheckVersion(URL[] urls) throws IOException { RestApi restApi = restApi("info"); assert restApi.getPaths().size() == 1; assert restApi.getMethods().size() == 1; String version = null; - for (URL url : urls) { - RestResponse restResponse = new RestResponse(httpRequestBuilder(url) - .path(restApi.getPaths().get(0)) - .method(restApi.getMethods().get(0)).execute()); - checkStatusCode(restResponse); - - Object latestVersion = restResponse.evaluate("version.number"); + for (URL ignored : urls) { + 
//we don't really use the urls here, we rely on the client doing round-robin to touch all the nodes in the cluster + String method = restApi.getMethods().get(0); + String endpoint = restApi.getPaths().get(0); + ElasticsearchResponse elasticsearchResponse = restClient.performRequest(method, endpoint, Collections.emptyMap(), null); + RestTestResponse restTestResponse = new RestTestResponse(elasticsearchResponse); + Object latestVersion = restTestResponse.evaluate("version.number"); if (latestVersion == null) { throw new RuntimeException("elasticsearch version not found in the response"); } @@ -130,77 +130,41 @@ public Version getEsVersion() { /** * Calls an api with the provided parameters and body - * @throws RestException if the obtained status code is non ok, unless the specific error code needs to be ignored - * according to the ignore parameter received as input (which won't get sent to elasticsearch) */ - public RestResponse callApi(String apiName, Map params, String body, Map headers) - throws IOException, RestException { - - List ignores = new ArrayList<>(); - Map requestParams = null; - if (params != null) { - //makes a copy of the parameters before modifying them for this specific request - requestParams = new HashMap<>(params); - //ignore is a special parameter supported by the clients, shouldn't be sent to es - String ignoreString = requestParams.remove("ignore"); - if (Strings.hasLength(ignoreString)) { - try { - ignores.add(Integer.valueOf(ignoreString)); - } catch(NumberFormatException e) { - throw new IllegalArgumentException("ignore value should be a number, found [" + ignoreString + "] instead"); - } - } - } - - HttpRequestBuilder httpRequestBuilder = callApiBuilder(apiName, requestParams, body); - for (Map.Entry header : headers.entrySet()) { - logger.error("Adding header {}\n with value {}", header.getKey(), header.getValue()); - httpRequestBuilder.addHeader(header.getKey(), header.getValue()); - } - logger.debug("calling api [{}]", apiName); - 
HttpResponse httpResponse = httpRequestBuilder.execute(); - - // http HEAD doesn't support response body - // For the few api (exists class of api) that use it we need to accept 404 too - if (!httpResponse.supportsBody()) { - ignores.add(404); - } - - RestResponse restResponse = new RestResponse(httpResponse); - checkStatusCode(restResponse, ignores); - return restResponse; - } - - private void checkStatusCode(RestResponse restResponse, List ignores) throws RestException { - //ignore is a catch within the client, to prevent the client from throwing error if it gets non ok codes back - if (ignores.contains(restResponse.getStatusCode())) { - if (logger.isDebugEnabled()) { - logger.debug("ignored non ok status codes {} as requested", ignores); - } - return; - } - checkStatusCode(restResponse); - } + public RestTestResponse callApi(String apiName, Map params, String body, Map headers) + throws IOException { - private void checkStatusCode(RestResponse restResponse) throws RestException { - if (restResponse.isError()) { - throw new RestException("non ok status code [" + restResponse.getStatusCode() + "] returned", restResponse); - } - } - - private HttpRequestBuilder callApiBuilder(String apiName, Map params, String body) { if ("raw".equals(apiName)) { // Raw requests are bit simpler.... 
- HttpRequestBuilder httpRequestBuilder = httpRequestBuilder(); - httpRequestBuilder.method(requireNonNull(params.remove("method"), "Method must be set to use raw request")); - httpRequestBuilder.path("/"+ requireNonNull(params.remove("path"), "Path must be set to use raw request")); - httpRequestBuilder.body(body); - + HashMap queryStringParams = new HashMap<>(params); + String method = Objects.requireNonNull(queryStringParams.remove("method"), "Method must be set to use raw request"); + String path = "/"+ Objects.requireNonNull(queryStringParams.remove("path"), "Path must be set to use raw request"); + HttpEntity entity = null; + if (body != null && body.length() > 0) { + entity = new StringEntity(body, RestClient.JSON_CONTENT_TYPE); + } // And everything else is a url parameter! - for (Map.Entry entry : params.entrySet()) { - httpRequestBuilder.addParam(entry.getKey(), entry.getValue()); + ElasticsearchResponse response = restClient.performRequest(method, path, queryStringParams, entity); + return new RestTestResponse(response); + } + + List ignores = new ArrayList<>(); + Map requestParams; + if (params == null) { + requestParams = Collections.emptyMap(); + } else { + requestParams = new HashMap<>(params); + if (params.isEmpty() == false) { + //ignore is a special parameter supported by the clients, shouldn't be sent to es + String ignoreString = requestParams.remove("ignore"); + if (ignoreString != null) { + try { + ignores.add(Integer.valueOf(ignoreString)); + } catch (NumberFormatException e) { + throw new IllegalArgumentException("ignore value should be a number, found [" + ignoreString + "] instead"); + } + } } - return httpRequestBuilder; } //create doesn't exist in the spec but is supported in the clients (index with op_type=create) @@ -208,51 +172,91 @@ private HttpRequestBuilder callApiBuilder(String apiName, Map pa String api = indexCreateApi ? 
"index" : apiName; RestApi restApi = restApi(api); - HttpRequestBuilder httpRequestBuilder = httpRequestBuilder(); - //divide params between ones that go within query string and ones that go within path Map pathParts = new HashMap<>(); - if (params != null) { - for (Map.Entry entry : params.entrySet()) { - if (restApi.getPathParts().contains(entry.getKey())) { - pathParts.put(entry.getKey(), entry.getValue()); + Map queryStringParams = new HashMap<>(); + for (Map.Entry entry : requestParams.entrySet()) { + if (restApi.getPathParts().contains(entry.getKey())) { + pathParts.put(entry.getKey(), entry.getValue()); + } else { + if (restApi.getParams().contains(entry.getKey()) || ALWAYS_ACCEPTED_QUERY_STRING_PARAMS.contains(entry.getKey())) { + queryStringParams.put(entry.getKey(), entry.getValue()); } else { - if (restApi.getParams().contains(entry.getKey()) || ALWAYS_ACCEPTED_QUERY_STRING_PARAMS.contains(entry.getKey())) { - httpRequestBuilder.addParam(entry.getKey(), entry.getValue()); - } else { - throw new IllegalArgumentException("param [" + entry.getKey() + - "] not supported in [" + restApi.getName() + "] api"); - } + throw new IllegalArgumentException("param [" + entry.getKey() + "] not supported in [" + + restApi.getName() + "] " + "api"); } } } if (indexCreateApi) { - httpRequestBuilder.addParam("op_type", "create"); + queryStringParams.put("op_type", "create"); } List supportedMethods = restApi.getSupportedMethods(pathParts.keySet()); + String requestMethod; + StringEntity requestBody = null; if (Strings.hasLength(body)) { if (!restApi.isBodySupported()) { throw new IllegalArgumentException("body is not supported by [" + restApi.getName() + "] api"); } - //test the GET with source param instead of GET/POST with body + //randomly test the GET with source param instead of GET/POST with body if (supportedMethods.contains("GET") && RandomizedTest.rarely()) { logger.debug("sending the request body as source param with GET method"); - 
httpRequestBuilder.addParam("source", body).method("GET"); + queryStringParams.put("source", body); + requestMethod = "GET"; } else { - httpRequestBuilder.body(body).method(RandomizedTest.randomFrom(supportedMethods)); + requestMethod = RandomizedTest.randomFrom(supportedMethods); + requestBody = new StringEntity(body, RestClient.JSON_CONTENT_TYPE); } } else { if (restApi.isBodyRequired()) { throw new IllegalArgumentException("body is required by [" + restApi.getName() + "] api"); } - httpRequestBuilder.method(RandomizedTest.randomFrom(supportedMethods)); + requestMethod = RandomizedTest.randomFrom(supportedMethods); } //the rest path to use is randomized out of the matching ones (if more than one) RestPath restPath = RandomizedTest.randomFrom(restApi.getFinalPaths(pathParts)); - return httpRequestBuilder.pathParts(restPath.getPathParts()); + //Encode rules for path and query string parameters are different. We use URI to encode the path. + //We need to encode each path part separately, as each one might contain slashes that need to be escaped, which needs to + //be done manually. 
+ String requestPath; + if (restPath.getPathParts().length == 0) { + requestPath = "/"; + } else { + StringBuilder finalPath = new StringBuilder(); + for (String pathPart : restPath.getPathParts()) { + try { + finalPath.append('/'); + // We append "/" to the path part to handle parts that start with - or other invalid characters + URI uri = new URI(null, null, null, -1, "/" + pathPart, null, null); + //manually escape any slash that each part may contain + finalPath.append(uri.getRawPath().substring(1).replaceAll("/", "%2F")); + } catch (URISyntaxException e) { + throw new RuntimeException("unable to build uri", e); + } + } + requestPath = finalPath.toString(); + } + + Header[] requestHeaders = new Header[headers.size()]; + int index = 0; + for (Map.Entry header : headers.entrySet()) { + logger.info("Adding header {}\n with value {}", header.getKey(), header.getValue()); + requestHeaders[index++] = new BasicHeader(header.getKey(), header.getValue()); + } + + logger.debug("calling api [{}]", apiName); + try { + ElasticsearchResponse response = restClient.performRequest(requestMethod, requestPath, + queryStringParams, requestBody, requestHeaders); + return new RestTestResponse(response); + } catch(ElasticsearchResponseException e) { + if (ignores.contains(e.getElasticsearchResponse().getStatusLine().getStatusCode())) { + return new RestTestResponse(e); + } + throw e; + } } private RestApi restApi(String apiName) { @@ -263,21 +267,7 @@ private RestApi restApi(String apiName) { return restApi; } - protected HttpRequestBuilder httpRequestBuilder(URL url) { - return new HttpRequestBuilder(httpClient) - .addHeaders(threadContext.getHeaders()) - .protocol(protocol) - .host(url.getHost()) - .port(url.getPort()); - } - - protected HttpRequestBuilder httpRequestBuilder() { - //the address used is randomized between the available ones - URL url = RandomizedTest.randomFrom(urls); - return httpRequestBuilder(url); - } - - protected CloseableHttpClient createHttpClient(Settings 
settings) throws IOException { + protected RestClient createRestClient(URL[] urls, Settings settings) throws IOException { SSLConnectionSocketFactory sslsf; String keystorePath = settings.get(TRUSTSTORE_PATH); if (keystorePath != null) { @@ -307,8 +297,24 @@ protected CloseableHttpClient createHttpClient(Settings settings) throws IOExcep .register("http", PlainConnectionSocketFactory.getSocketFactory()) .register("https", sslsf) .build(); - return HttpClients.createMinimal( - new PoolingHttpClientConnectionManager(socketFactoryRegistry, null, null, null, 15, TimeUnit.SECONDS)); + + List
headers = new ArrayList<>(); + try (ThreadContext threadContext = new ThreadContext(settings)) { + for (Map.Entry entry : threadContext.getHeaders().entrySet()) { + headers.add(new BasicHeader(entry.getKey(), entry.getValue())); + } + } + + CloseableHttpClient httpClient = HttpClientBuilder.create().setDefaultHeaders(headers) + .setConnectionManager(new PoolingHttpClientConnectionManager(socketFactoryRegistry)).build(); + + String protocol = settings.get(PROTOCOL, "http"); + HttpHost[] hosts = new HttpHost[urls.length]; + for (int i = 0; i < hosts.length; i++) { + URL url = urls[i]; + hosts[i] = new HttpHost(url.getHost(), url.getPort(), protocol); + } + return RestClient.builder().setHttpClient(httpClient).setHosts(hosts).build(); } /** @@ -316,6 +322,6 @@ protected CloseableHttpClient createHttpClient(Settings settings) throws IOExcep */ @Override public void close() { - IOUtils.closeWhileHandlingException(httpClient); + IOUtils.closeWhileHandlingException(restClient); } } diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/client/RestResponse.java b/test/framework/src/main/java/org/elasticsearch/test/rest/client/RestTestResponse.java similarity index 62% rename from test/framework/src/main/java/org/elasticsearch/test/rest/client/RestResponse.java rename to test/framework/src/main/java/org/elasticsearch/test/rest/client/RestTestResponse.java index e0b2f3ab729e5..abcae6b26a2c3 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/client/RestResponse.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/client/RestTestResponse.java @@ -18,31 +18,54 @@ */ package org.elasticsearch.test.rest.client; +import org.apache.http.client.methods.HttpHead; +import org.apache.http.util.EntityUtils; +import org.apache.lucene.util.IOUtils; +import org.elasticsearch.client.ElasticsearchResponse; +import org.elasticsearch.client.ElasticsearchResponseException; import org.elasticsearch.test.rest.Stash; -import 
org.elasticsearch.test.rest.client.http.HttpResponse; import org.elasticsearch.test.rest.json.JsonPath; import java.io.IOException; +import java.nio.charset.StandardCharsets; /** - * Response obtained from a REST call - * Supports parsing the response body as json when needed and returning specific values extracted from it + * Response obtained from a REST call, eagerly reads the response body into a string for later optional parsing. + * Supports parsing the response body as json when needed and returning specific values extracted from it. */ -public class RestResponse { +public class RestTestResponse { - private final HttpResponse response; + private final ElasticsearchResponse response; + private final String body; private JsonPath parsedResponse; - public RestResponse(HttpResponse response) { + public RestTestResponse(ElasticsearchResponse response) { this.response = response; + if (response.getEntity() != null) { + try { + this.body = EntityUtils.toString(response.getEntity(), StandardCharsets.UTF_8); + } catch (IOException e) { + EntityUtils.consumeQuietly(response.getEntity()); + throw new RuntimeException(e); + } finally { + IOUtils.closeWhileHandlingException(response); + } + } else { + this.body = null; + } + } + + public RestTestResponse(ElasticsearchResponseException responseException) { + this.response = responseException.getElasticsearchResponse(); + this.body = responseException.getResponseBody(); } public int getStatusCode() { - return response.getStatusCode(); + return response.getStatusLine().getStatusCode(); } public String getReasonPhrase() { - return response.getReasonPhrase(); + return response.getStatusLine().getReasonPhrase(); } /** @@ -57,18 +80,18 @@ public Object getBody() throws IOException { } return parsedResponse.evaluate(""); } - return response.getBody(); + return body; } /** * Returns the body as a string */ public String getBodyAsString() { - return response.getBody(); + return body; } public boolean isError() { - return 
response.isError(); + return response.getStatusLine().getStatusCode() >= 400; } /** @@ -82,7 +105,6 @@ public Object evaluate(String path) throws IOException { * Parses the response body as json and extracts a specific value from it (identified by the provided path) */ public Object evaluate(String path, Stash stash) throws IOException { - if (response == null) { return null; } @@ -93,8 +115,8 @@ public Object evaluate(String path, Stash stash) throws IOException { //special case: api that don't support body (e.g. exists) return true if 200, false if 404, even if no body //is_true: '' means the response had no body but the client returned true (caused by 200) //is_false: '' means the response had no body but the client returned false (caused by 404) - if ("".equals(path) && !response.supportsBody()) { - return !response.isError(); + if ("".equals(path) && HttpHead.METHOD_NAME.equals(response.getRequestLine().getMethod())) { + return isError() == false; } return null; } @@ -103,7 +125,7 @@ public Object evaluate(String path, Stash stash) throws IOException { } private boolean isJson() { - String contentType = response.getHeaders().get("Content-Type"); + String contentType = response.getFirstHeader("Content-Type"); return contentType != null && contentType.contains("application/json"); } @@ -111,9 +133,9 @@ private JsonPath parsedResponse() throws IOException { if (parsedResponse != null) { return parsedResponse; } - if (response == null || !response.hasBody()) { + if (response == null || body == null) { return null; } - return parsedResponse = new JsonPath(response.getBody()); + return parsedResponse = new JsonPath(body); } } diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/client/http/HttpDeleteWithEntity.java b/test/framework/src/main/java/org/elasticsearch/test/rest/client/http/HttpDeleteWithEntity.java deleted file mode 100644 index 480fc7b2f011a..0000000000000 --- 
a/test/framework/src/main/java/org/elasticsearch/test/rest/client/http/HttpDeleteWithEntity.java +++ /dev/null @@ -1,40 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.elasticsearch.test.rest.client.http; - -import org.apache.http.client.methods.HttpEntityEnclosingRequestBase; - -import java.net.URI; - -/** - * Allows to send DELETE requests providing a body (not supported out of the box) - */ -public class HttpDeleteWithEntity extends HttpEntityEnclosingRequestBase { - - public final static String METHOD_NAME = "DELETE"; - - public HttpDeleteWithEntity(final URI uri) { - setURI(uri); - } - - @Override - public String getMethod() { - return METHOD_NAME; - } -} diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/client/http/HttpGetWithEntity.java b/test/framework/src/main/java/org/elasticsearch/test/rest/client/http/HttpGetWithEntity.java deleted file mode 100644 index aa0129f466049..0000000000000 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/client/http/HttpGetWithEntity.java +++ /dev/null @@ -1,40 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. 
See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.elasticsearch.test.rest.client.http; - -import org.apache.http.client.methods.HttpEntityEnclosingRequestBase; - -import java.net.URI; - -/** - * Allows to send GET requests providing a body (not supported out of the box) - */ -public class HttpGetWithEntity extends HttpEntityEnclosingRequestBase { - - public final static String METHOD_NAME = "GET"; - - public HttpGetWithEntity(final URI uri) { - setURI(uri); - } - - @Override - public String getMethod() { - return METHOD_NAME; - } -} \ No newline at end of file diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/client/http/HttpRequestBuilder.java b/test/framework/src/main/java/org/elasticsearch/test/rest/client/http/HttpRequestBuilder.java deleted file mode 100644 index 9f47111d49ad6..0000000000000 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/client/http/HttpRequestBuilder.java +++ /dev/null @@ -1,250 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. 
Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. - */ -package org.elasticsearch.test.rest.client.http; - -import org.apache.http.client.methods.CloseableHttpResponse; -import org.apache.http.client.methods.HttpEntityEnclosingRequestBase; -import org.apache.http.client.methods.HttpHead; -import org.apache.http.client.methods.HttpOptions; -import org.apache.http.client.methods.HttpPost; -import org.apache.http.client.methods.HttpPut; -import org.apache.http.client.methods.HttpUriRequest; -import org.apache.http.entity.StringEntity; -import org.apache.http.impl.client.CloseableHttpClient; -import org.elasticsearch.common.Strings; -import org.elasticsearch.common.logging.ESLogger; -import org.elasticsearch.common.logging.Loggers; -import org.elasticsearch.common.network.NetworkAddress; -import org.elasticsearch.common.transport.InetSocketTransportAddress; -import org.elasticsearch.http.HttpServerTransport; - -import java.io.IOException; -import java.io.UnsupportedEncodingException; -import java.net.URI; -import java.net.URISyntaxException; -import java.net.URLEncoder; -import java.nio.charset.Charset; -import java.util.HashMap; -import java.util.Map; -import java.util.stream.Collectors; - -/** - * Executable builder for an http request - * Holds an {@link org.apache.http.client.HttpClient} that is used to send the built http request - */ -public class HttpRequestBuilder { - - private static final ESLogger logger = 
Loggers.getLogger(HttpRequestBuilder.class); - - static final Charset DEFAULT_CHARSET = Charset.forName("utf-8"); - - private final CloseableHttpClient httpClient; - - private String protocol = "http"; - - private String host; - - private int port; - - private String path = ""; - - private final Map params = new HashMap<>(); - - private final Map headers = new HashMap<>(); - - private String method = HttpGetWithEntity.METHOD_NAME; - - private String body; - - public HttpRequestBuilder(CloseableHttpClient httpClient) { - this.httpClient = httpClient; - } - - public HttpRequestBuilder host(String host) { - this.host = host; - return this; - } - - public HttpRequestBuilder httpTransport(HttpServerTransport httpServerTransport) { - InetSocketTransportAddress transportAddress = (InetSocketTransportAddress) httpServerTransport.boundAddress().publishAddress(); - return host(NetworkAddress.format(transportAddress.address().getAddress())).port(transportAddress.address().getPort()); - } - - public HttpRequestBuilder port(int port) { - this.port = port; - return this; - } - - /** - * Sets the path to send the request to. Url encoding needs to be applied by the caller. - * Use {@link #pathParts(String...)} instead if the path needs to be encoded, part by part. - */ - public HttpRequestBuilder path(String path) { - this.path = path; - return this; - } - - /** - * Sets the path by providing the different parts (without slashes), which will be properly encoded. - */ - public HttpRequestBuilder pathParts(String... path) { - //encode rules for path and query string parameters are different. We use URI to encode the path, and URLEncoder for each query string parameter (see addParam). - //We need to encode each path part separately though, as each one might contain slashes that need to be escaped, which needs to be done manually. 
- if (path.length == 0) { - this.path = "/"; - return this; - } - StringBuilder finalPath = new StringBuilder(); - for (String pathPart : path) { - try { - finalPath.append('/'); - // We append "/" to the path part to handle parts that start with - or other invalid characters - URI uri = new URI(null, null, null, -1, "/" + pathPart, null, null); - //manually escape any slash that each part may contain - finalPath.append(uri.getRawPath().substring(1).replaceAll("/", "%2F")); - } catch(URISyntaxException e) { - throw new RuntimeException("unable to build uri", e); - } - } - this.path = finalPath.toString(); - return this; - } - - public HttpRequestBuilder addParam(String name, String value) { - try { - this.params.put(name, URLEncoder.encode(value, "utf-8")); - return this; - } catch (UnsupportedEncodingException e) { - throw new RuntimeException(e); - } - } - - public HttpRequestBuilder addHeaders(Map headers) { - this.headers.putAll(headers); - return this; - } - - public HttpRequestBuilder addHeader(String name, String value) { - this.headers.put(name, value); - return this; - } - - public HttpRequestBuilder protocol(String protocol) { - this.protocol = protocol; - return this; - } - - public HttpRequestBuilder method(String method) { - this.method = method; - return this; - } - - public HttpRequestBuilder body(String body) { - if (Strings.hasLength(body)) { - this.body = body; - } - return this; - } - - public HttpResponse execute() throws IOException { - HttpUriRequest httpUriRequest = buildRequest(); - if (logger.isTraceEnabled()) { - StringBuilder stringBuilder = new StringBuilder(httpUriRequest.getMethod()).append(" ").append(httpUriRequest.getURI()); - if (Strings.hasLength(body)) { - stringBuilder.append("\n").append(body); - } - logger.trace("sending request \n{}", stringBuilder.toString()); - } - for (Map.Entry entry : this.headers.entrySet()) { - logger.trace("adding header [{} => {}]", entry.getKey(), entry.getValue()); - 
httpUriRequest.addHeader(entry.getKey(), entry.getValue()); - } - try (CloseableHttpResponse closeableHttpResponse = httpClient.execute(httpUriRequest)) { - HttpResponse httpResponse = new HttpResponse(httpUriRequest, closeableHttpResponse); - logger.trace("got response \n{}\n{}", closeableHttpResponse, httpResponse.hasBody() ? httpResponse.getBody() : ""); - return httpResponse; - } - } - - private HttpUriRequest buildRequest() { - - if (HttpGetWithEntity.METHOD_NAME.equalsIgnoreCase(method)) { - return addOptionalBody(new HttpGetWithEntity(buildUri())); - } - - if (HttpHead.METHOD_NAME.equalsIgnoreCase(method)) { - checkBodyNotSupported(); - return new HttpHead(buildUri()); - } - - if (HttpOptions.METHOD_NAME.equalsIgnoreCase(method)) { - checkBodyNotSupported(); - return new HttpOptions(buildUri()); - } - - if (HttpDeleteWithEntity.METHOD_NAME.equalsIgnoreCase(method)) { - return addOptionalBody(new HttpDeleteWithEntity(buildUri())); - } - - if (HttpPut.METHOD_NAME.equalsIgnoreCase(method)) { - return addOptionalBody(new HttpPut(buildUri())); - } - - if (HttpPost.METHOD_NAME.equalsIgnoreCase(method)) { - return addOptionalBody(new HttpPost(buildUri())); - } - - throw new UnsupportedOperationException("method [" + method + "] not supported"); - } - - private URI buildUri() { - StringBuilder uriBuilder = new StringBuilder(protocol).append("://").append(host).append(":").append(port).append(path); - if (params.size() > 0) { - uriBuilder.append("?").append(params.entrySet().stream().map(e -> e.getKey() + "=" + e.getValue()).collect(Collectors.joining("&"))); - } - //using this constructor no url encoding happens, as we did everything upfront in addParam and pathPart methods - return URI.create(uriBuilder.toString()); - } - - private HttpEntityEnclosingRequestBase addOptionalBody(HttpEntityEnclosingRequestBase requestBase) { - if (Strings.hasText(body)) { - requestBase.setEntity(new StringEntity(body, DEFAULT_CHARSET)); - } - return requestBase; - } - - private void 
checkBodyNotSupported() { - if (Strings.hasText(body)) { - throw new IllegalArgumentException("request body not supported with head request"); - } - } - - @Override - public String toString() { - StringBuilder stringBuilder = new StringBuilder(method).append(" '") - .append(host).append(":").append(port).append(path).append("'"); - if (!params.isEmpty()) { - stringBuilder.append(", params=").append(params); - } - if (Strings.hasLength(body)) { - stringBuilder.append(", body=\n").append(body); - } - return stringBuilder.toString(); - } -} diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/client/http/HttpResponse.java b/test/framework/src/main/java/org/elasticsearch/test/rest/client/http/HttpResponse.java deleted file mode 100644 index 37fc163ac61dd..0000000000000 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/client/http/HttpResponse.java +++ /dev/null @@ -1,108 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ -package org.elasticsearch.test.rest.client.http; - -import org.apache.http.Header; -import org.apache.http.client.methods.CloseableHttpResponse; -import org.apache.http.client.methods.HttpHead; -import org.apache.http.client.methods.HttpUriRequest; -import org.apache.http.util.EntityUtils; -import org.elasticsearch.common.logging.ESLogger; -import org.elasticsearch.common.logging.Loggers; - -import java.io.IOException; -import java.util.HashMap; -import java.util.Map; - -/** - * Response obtained from an http request - * Always consumes the whole response body loading it entirely into a string - */ -public class HttpResponse { - - private static final ESLogger logger = Loggers.getLogger(HttpResponse.class); - - private final HttpUriRequest httpRequest; - private final int statusCode; - private final String reasonPhrase; - private final String body; - private final Map headers = new HashMap<>(); - - HttpResponse(HttpUriRequest httpRequest, CloseableHttpResponse httpResponse) { - this.httpRequest = httpRequest; - this.statusCode = httpResponse.getStatusLine().getStatusCode(); - this.reasonPhrase = httpResponse.getStatusLine().getReasonPhrase(); - for (Header header : httpResponse.getAllHeaders()) { - this.headers.put(header.getName(), header.getValue()); - } - if (httpResponse.getEntity() != null) { - try { - this.body = EntityUtils.toString(httpResponse.getEntity(), HttpRequestBuilder.DEFAULT_CHARSET); - } catch (IOException e) { - EntityUtils.consumeQuietly(httpResponse.getEntity()); - throw new RuntimeException(e); - } finally { - try { - httpResponse.close(); - } catch (IOException e) { - logger.error("Failed closing response", e); - } - } - } else { - this.body = null; - } - } - - public boolean isError() { - return statusCode >= 400; - } - - public int getStatusCode() { - return statusCode; - } - - public String getReasonPhrase() { - return reasonPhrase; - } - - public String getBody() { - return body; - } - - public boolean hasBody() { - return body != 
null; - } - - public boolean supportsBody() { - return !HttpHead.METHOD_NAME.equals(httpRequest.getMethod()); - } - - public Map getHeaders() { - return headers; - } - - @Override - public String toString() { - StringBuilder stringBuilder = new StringBuilder(statusCode).append(" ").append(reasonPhrase); - if (hasBody()) { - stringBuilder.append("\n").append(body); - } - return stringBuilder.toString(); - } -} diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/section/DoSection.java b/test/framework/src/main/java/org/elasticsearch/test/rest/section/DoSection.java index 7b392a4043e9d..76593d4af9daf 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/section/DoSection.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/section/DoSection.java @@ -18,13 +18,13 @@ */ package org.elasticsearch.test.rest.section; +import org.elasticsearch.client.ElasticsearchResponseException; import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.test.rest.RestTestExecutionContext; -import org.elasticsearch.test.rest.client.RestException; -import org.elasticsearch.test.rest.client.RestResponse; +import org.elasticsearch.test.rest.client.RestTestResponse; import java.io.IOException; import java.util.HashMap; @@ -89,7 +89,7 @@ public void execute(RestTestExecutionContext executionContext) throws IOExceptio } try { - RestResponse restResponse = executionContext.callApi(apiCallSection.getApi(), apiCallSection.getParams(), + RestTestResponse restTestResponse = executionContext.callApi(apiCallSection.getApi(), apiCallSection.getParams(), apiCallSection.getBodies(), apiCallSection.getHeaders()); if (Strings.hasLength(catchParam)) { String catchStatusCode; @@ -100,16 +100,18 @@ public void execute(RestTestExecutionContext executionContext) throws IOExceptio } else { throw new 
UnsupportedOperationException("catch value [" + catchParam + "] not supported"); } - fail(formatStatusCodeMessage(restResponse, catchStatusCode)); + fail(formatStatusCodeMessage(restTestResponse, catchStatusCode)); } - } catch(RestException e) { + } catch(ElasticsearchResponseException e) { + RestTestResponse restTestResponse = new RestTestResponse(e); if (!Strings.hasLength(catchParam)) { - fail(formatStatusCodeMessage(e.restResponse(), "2xx")); + fail(formatStatusCodeMessage(restTestResponse, "2xx")); } else if (catches.containsKey(catchParam)) { - assertStatusCode(e.restResponse()); + assertStatusCode(restTestResponse); } else if (catchParam.length() > 2 && catchParam.startsWith("/") && catchParam.endsWith("/")) { //the text of the error message matches regular expression - assertThat(formatStatusCodeMessage(e.restResponse(), "4xx|5xx"), e.statusCode(), greaterThanOrEqualTo(400)); + assertThat(formatStatusCodeMessage(restTestResponse, "4xx|5xx"), + e.getElasticsearchResponse().getStatusLine().getStatusCode(), greaterThanOrEqualTo(400)); Object error = executionContext.response("error"); assertThat("error was expected in the response", error, notNullValue()); //remove delimiters from regex @@ -122,19 +124,19 @@ public void execute(RestTestExecutionContext executionContext) throws IOExceptio } } - private void assertStatusCode(RestResponse restResponse) { + private void assertStatusCode(RestTestResponse restTestResponse) { Tuple> stringMatcherTuple = catches.get(catchParam); - assertThat(formatStatusCodeMessage(restResponse, stringMatcherTuple.v1()), - restResponse.getStatusCode(), stringMatcherTuple.v2()); + assertThat(formatStatusCodeMessage(restTestResponse, stringMatcherTuple.v1()), + restTestResponse.getStatusCode(), stringMatcherTuple.v2()); } - private String formatStatusCodeMessage(RestResponse restResponse, String expected) { + private String formatStatusCodeMessage(RestTestResponse restTestResponse, String expected) { String api = 
apiCallSection.getApi(); if ("raw".equals(api)) { api += "[method=" + apiCallSection.getParams().get("method") + " path=" + apiCallSection.getParams().get("path") + "]"; } - return "expected [" + expected + "] status code but api [" + api + "] returned [" - + restResponse.getStatusCode() + " " + restResponse.getReasonPhrase() + "] [" + restResponse.getBodyAsString() + "]"; + return "expected [" + expected + "] status code but api [" + api + "] returned [" + restTestResponse.getStatusCode() + + " " + restTestResponse.getReasonPhrase() + "] [" + restTestResponse.getBodyAsString() + "]"; } private static Map>> catches = new HashMap<>(); @@ -145,6 +147,7 @@ private String formatStatusCodeMessage(RestResponse restResponse, String expecte catches.put("forbidden", tuple("403", equalTo(403))); catches.put("request_timeout", tuple("408", equalTo(408))); catches.put("unavailable", tuple("503", equalTo(503))); - catches.put("request", tuple("4xx|5xx", allOf(greaterThanOrEqualTo(400), not(equalTo(404)), not(equalTo(408)), not(equalTo(409)), not(equalTo(403))))); + catches.put("request", tuple("4xx|5xx", + allOf(greaterThanOrEqualTo(400), not(equalTo(404)), not(equalTo(408)), not(equalTo(409)), not(equalTo(403))))); } } From c70e08c393943e136970a9ef5afde75334341ece Mon Sep 17 00:00:00 2001 From: javanna Date: Fri, 20 May 2016 11:03:43 +0200 Subject: [PATCH 035/103] include response body in ElasticsearchResponseException error message --- .../ElasticsearchResponseException.java | 36 +++++++++---------- .../org/elasticsearch/client/RestClient.java | 14 +++++++- 2 files changed, 29 insertions(+), 21 deletions(-) diff --git a/client/src/main/java/org/elasticsearch/client/ElasticsearchResponseException.java b/client/src/main/java/org/elasticsearch/client/ElasticsearchResponseException.java index ad63009db3460..12aef4f0d6580 100644 --- a/client/src/main/java/org/elasticsearch/client/ElasticsearchResponseException.java +++ 
b/client/src/main/java/org/elasticsearch/client/ElasticsearchResponseException.java @@ -19,41 +19,37 @@ package org.elasticsearch.client; -import org.apache.http.HttpHost; -import org.apache.http.RequestLine; -import org.apache.http.StatusLine; -import org.apache.http.util.EntityUtils; - import java.io.IOException; /** - * Exception thrown when an elasticsearch node responds to a request with a status code that indicates an error + * Exception thrown when an elasticsearch node responds to a request with a status code that indicates an error. + * Note that the response body gets passed in as a string and read eagerly, which means that the ElasticsearchResponse object + * is expected to be closed and available only to read metadata like status line, request line, response headers. */ public class ElasticsearchResponseException extends IOException { private ElasticsearchResponse elasticsearchResponse; private final String responseBody; - public ElasticsearchResponseException(ElasticsearchResponse elasticsearchResponse) throws IOException { - super(buildMessage(elasticsearchResponse.getRequestLine(), elasticsearchResponse.getHost(), elasticsearchResponse.getStatusLine())); + public ElasticsearchResponseException(ElasticsearchResponse elasticsearchResponse, String responseBody) throws IOException { + super(buildMessage(elasticsearchResponse,responseBody)); this.elasticsearchResponse = elasticsearchResponse; - try { - if (elasticsearchResponse.getEntity() == null) { - this.responseBody = null; - } else { - this.responseBody = EntityUtils.toString(elasticsearchResponse.getEntity()); - } - } finally { - elasticsearchResponse.close(); - } + this.responseBody = responseBody; } - private static String buildMessage(RequestLine requestLine, HttpHost host, StatusLine statusLine) { - return requestLine.getMethod() + " " + host + requestLine.getUri() + ": " + statusLine.toString(); + private static String buildMessage(ElasticsearchResponse response, String responseBody) { + String 
message = response.getRequestLine().getMethod() + " " + response.getHost() + response.getRequestLine().getUri() + + ": " + response.getStatusLine().toString(); + if (responseBody != null) { + message += "\n" + responseBody; + } + return message; } /** - * Returns the {@link ElasticsearchResponse} that caused this exception to be thrown + * Returns the {@link ElasticsearchResponse} that caused this exception to be thrown. + * Expected to be used only to read metadata like status line, request line, response headers. The response body should + * be retrieved using {@link #getResponseBody()} */ public ElasticsearchResponse getElasticsearchResponse() { return elasticsearchResponse; diff --git a/client/src/main/java/org/elasticsearch/client/RestClient.java b/client/src/main/java/org/elasticsearch/client/RestClient.java index b366d98f77a04..e115b8bc3b532 100644 --- a/client/src/main/java/org/elasticsearch/client/RestClient.java +++ b/client/src/main/java/org/elasticsearch/client/RestClient.java @@ -37,6 +37,7 @@ import org.apache.http.impl.client.CloseableHttpClient; import org.apache.http.impl.client.HttpClientBuilder; import org.apache.http.impl.conn.PoolingHttpClientConnectionManager; +import org.apache.http.util.EntityUtils; import java.io.Closeable; import java.io.IOException; @@ -127,7 +128,18 @@ public ElasticsearchResponse performRequest(String method, String endpoint, Map< return elasticsearchResponse; } else { RequestLogger.log(logger, "request failed", request, connection.getHost(), response); - ElasticsearchResponseException elasticsearchResponseException = new ElasticsearchResponseException(elasticsearchResponse); + String responseBody; + try { + if (elasticsearchResponse.getEntity() == null) { + responseBody = null; + } else { + responseBody = EntityUtils.toString(elasticsearchResponse.getEntity()); + } + } finally { + elasticsearchResponse.close(); + } + ElasticsearchResponseException elasticsearchResponseException = new ElasticsearchResponseException( + 
elasticsearchResponse, responseBody); lastSeenException = addSuppressedException(lastSeenException, elasticsearchResponseException); //clients don't retry on 500 because elasticsearch still misuses it instead of 400 in some places if (statusCode == 502 || statusCode == 503 || statusCode == 504) { From e81aad972a6921a98cce3f5e2354aaf17e8b828a Mon Sep 17 00:00:00 2001 From: javanna Date: Fri, 20 May 2016 13:30:30 +0200 Subject: [PATCH 036/103] remove usage of deprecated api --- .../java/org/elasticsearch/test/rest/client/RestTestClient.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/client/RestTestClient.java b/test/framework/src/main/java/org/elasticsearch/test/rest/client/RestTestClient.java index 3018af869b340..5baedf8d0603a 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/client/RestTestClient.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/client/RestTestClient.java @@ -27,12 +27,12 @@ import org.apache.http.conn.socket.ConnectionSocketFactory; import org.apache.http.conn.socket.PlainConnectionSocketFactory; import org.apache.http.conn.ssl.SSLConnectionSocketFactory; -import org.apache.http.conn.ssl.SSLContexts; import org.apache.http.entity.StringEntity; import org.apache.http.impl.client.CloseableHttpClient; import org.apache.http.impl.client.HttpClientBuilder; import org.apache.http.impl.conn.PoolingHttpClientConnectionManager; import org.apache.http.message.BasicHeader; +import org.apache.http.ssl.SSLContexts; import org.apache.lucene.util.IOUtils; import org.elasticsearch.Version; import org.elasticsearch.client.ElasticsearchResponse; From 6490355cb6585cac6f460d7c68417d1e757ef2c6 Mon Sep 17 00:00:00 2001 From: javanna Date: Wed, 25 May 2016 18:37:10 +0200 Subject: [PATCH 037/103] make host state immutable Instead of having a Connection mutable object that holds the state of the connection to each host, we now have immutable objects only. 
We keep two sets, one with all the hosts, one with the blacklisted ones. Once we blacklist a host we associate it with a DeadHostState which keeps track of the number of failed attempts and when the host should be retried. A new state object is created each and every time the state of the host needs to be updated. --- .../org/elasticsearch/client/Connection.java | 94 ------------ .../elasticsearch/client/DeadHostState.java | 58 ++++++++ .../org/elasticsearch/client/RestClient.java | 137 +++++++++++------- .../elasticsearch/client/sniff/Sniffer.java | 7 +- .../client/sniff/SnifferBuilderTests.java | 4 +- 5 files changed, 144 insertions(+), 156 deletions(-) delete mode 100644 client/src/main/java/org/elasticsearch/client/Connection.java create mode 100644 client/src/main/java/org/elasticsearch/client/DeadHostState.java diff --git a/client/src/main/java/org/elasticsearch/client/Connection.java b/client/src/main/java/org/elasticsearch/client/Connection.java deleted file mode 100644 index 1f60448360142..0000000000000 --- a/client/src/main/java/org/elasticsearch/client/Connection.java +++ /dev/null @@ -1,94 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. See the License for the - * specific language governing permissions and limitations - * under the License. 
- */ - -package org.elasticsearch.client; - -import org.apache.http.HttpHost; - -import java.util.concurrent.TimeUnit; - -/** - * Represents a connection to a host. It holds the host that the connection points to and the state of the connection to it. - */ -public class Connection { - private static final long DEFAULT_CONNECTION_TIMEOUT_MILLIS = TimeUnit.MINUTES.toMillis(1); - private static final long MAX_CONNECTION_TIMEOUT_MILLIS = TimeUnit.MINUTES.toMillis(30); - private final HttpHost host; - private volatile int failedAttempts = 0; - private volatile long deadUntil = -1; - - /** - * Creates a new connection pointing to the provided {@link HttpHost} argument - */ - public Connection(HttpHost host) { - this.host = host; - } - - /** - * Returns the {@link HttpHost} that the connection points to - */ - public HttpHost getHost() { - return host; - } - - /** - * Marks connection as dead. Should be called in case the corresponding node is not responding or caused failures. - * Once marked dead, the number of failed attempts will be incremented on each call to this method. A dead connection - * should be retried once {@link #isBlacklisted()} returns true, which depends on the number of previous failed attempts - * and when the last failure was registered. - */ - void markDead() { - synchronized (this) { - int failedAttempts = Math.max(this.failedAttempts, 0); - long timeoutMillis = (long)Math.min(DEFAULT_CONNECTION_TIMEOUT_MILLIS * 2 * Math.pow(2, failedAttempts * 0.5 - 1), - MAX_CONNECTION_TIMEOUT_MILLIS); - this.deadUntil = System.nanoTime() + TimeUnit.MILLISECONDS.toNanos(timeoutMillis); - this.failedAttempts = ++failedAttempts; - } - } - - /** - * Marks this connection alive. Should be called when the corresponding node is working properly. - * Will reset the number of failed attempts that were counted in case the connection was previously dead, as well as its timeout. 
- */ - void markAlive() { - if (this.failedAttempts > 0) { - synchronized (this) { - this.deadUntil = -1; - this.failedAttempts = 0; - } - } - } - - /** - * Returns the timestamp till the connection is supposed to stay dead. After that moment the connection should be retried - */ - public long getDeadUntil() { - return deadUntil; - } - - /** - * Returns true when the connection should be skipped due to previous failures, false in case the connection is alive - * or dead but ready to be retried. When the connection is dead, returns false when it is time to retry it, depending - * on how many failed attempts were registered and when the last failure happened (minimum 1 minute, maximum 30 minutes). - */ - public boolean isBlacklisted() { - return failedAttempts > 0 && System.nanoTime() - deadUntil < 0; - } -} diff --git a/client/src/main/java/org/elasticsearch/client/DeadHostState.java b/client/src/main/java/org/elasticsearch/client/DeadHostState.java new file mode 100644 index 0000000000000..30a24bc2d5d9d --- /dev/null +++ b/client/src/main/java/org/elasticsearch/client/DeadHostState.java @@ -0,0 +1,58 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.client; + +import java.util.concurrent.TimeUnit; + +/** + * Holds the state of a dead connection to a host. Keeps track of how many failed attempts were performed and + * when the host should be retried (based on number of previous failed attempts). + * Class is immutable, a new copy of it should be created each time the state has to be changed. + */ +class DeadHostState { + + private static final long MIN_CONNECTION_TIMEOUT_NANOS = TimeUnit.MINUTES.toNanos(1); + private static final long MAX_CONNECTION_TIMEOUT_NANOS = TimeUnit.MINUTES.toNanos(30); + + static final DeadHostState INITIAL_DEAD_STATE = new DeadHostState(); + + private final int failedAttempts; + private final long deadUntil; + + private DeadHostState() { + this.failedAttempts = 1; + this.deadUntil = System.nanoTime() + MIN_CONNECTION_TIMEOUT_NANOS; + } + + DeadHostState(DeadHostState previousDeadHostState) { + long timeoutNanos = (long)Math.min(MIN_CONNECTION_TIMEOUT_NANOS * 2 * Math.pow(2, previousDeadHostState.failedAttempts * 0.5 - 1), + MAX_CONNECTION_TIMEOUT_NANOS); + this.deadUntil = System.nanoTime() + timeoutNanos; + this.failedAttempts = previousDeadHostState.failedAttempts + 1; + } + + /** + * Returns the timestamp (nanos) till the host is supposed to stay dead without being retried. + * After that the host should be retried. 
+ */ + long getDeadUntil() { + return deadUntil; + } +} diff --git a/client/src/main/java/org/elasticsearch/client/RestClient.java b/client/src/main/java/org/elasticsearch/client/RestClient.java index e115b8bc3b532..4fc8d3c27416d 100644 --- a/client/src/main/java/org/elasticsearch/client/RestClient.java +++ b/client/src/main/java/org/elasticsearch/client/RestClient.java @@ -47,11 +47,15 @@ import java.util.Collection; import java.util.Collections; import java.util.Comparator; +import java.util.HashSet; import java.util.Iterator; import java.util.List; import java.util.Locale; import java.util.Map; import java.util.Objects; +import java.util.Set; +import java.util.concurrent.ConcurrentHashMap; +import java.util.concurrent.ConcurrentMap; import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; @@ -62,23 +66,25 @@ public final class RestClient implements Closeable { private final CloseableHttpClient client; private final long maxRetryTimeout; - private final AtomicInteger lastConnectionIndex = new AtomicInteger(0); - private volatile List connections; + private final AtomicInteger lastHostIndex = new AtomicInteger(0); + private volatile Set hosts; + private final ConcurrentMap blacklist = new ConcurrentHashMap<>(); private volatile FailureListener failureListener = new FailureListener(); private RestClient(CloseableHttpClient client, long maxRetryTimeout, HttpHost... hosts) { this.client = client; this.maxRetryTimeout = maxRetryTimeout; - setNodes(hosts); + setHosts(hosts); } - public synchronized void setNodes(HttpHost... hosts) { - List connections = new ArrayList<>(hosts.length); + public synchronized void setHosts(HttpHost... 
hosts) { + Set httpHosts = new HashSet<>(); for (HttpHost host : hosts) { Objects.requireNonNull(host, "host cannot be null"); - connections.add(new Connection(host)); + httpHosts.add(host); } - this.connections = Collections.unmodifiableList(connections); + this.hosts = Collections.unmodifiableSet(httpHosts); + this.blacklist.clear(); } public ElasticsearchResponse performRequest(String method, String endpoint, Map params, @@ -94,9 +100,9 @@ public ElasticsearchResponse performRequest(String method, String endpoint, Map< long retryTimeout = Math.round(this.maxRetryTimeout / (float)100 * 98); IOException lastSeenException = null; long startTime = System.nanoTime(); - Iterator connectionIterator = nextConnection(); - while (connectionIterator.hasNext()) { - Connection connection = connectionIterator.next(); + Iterator hostIterator = nextHost(); + while (hostIterator.hasNext()) { + HttpHost host = hostIterator.next(); if (lastSeenException != null) { long timeElapsed = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startTime); @@ -112,22 +118,22 @@ public ElasticsearchResponse performRequest(String method, String endpoint, Map< CloseableHttpResponse response; try { - response = client.execute(connection.getHost(), request); + response = client.execute(host, request); } catch(IOException e) { - RequestLogger.log(logger, "request failed", request, connection.getHost(), e); - onFailure(connection); + RequestLogger.log(logger, "request failed", request, host, e); + onFailure(host); lastSeenException = addSuppressedException(lastSeenException, e); continue; } ElasticsearchResponse elasticsearchResponse = new ElasticsearchResponse(request.getRequestLine(), - connection.getHost(), response); + host, response); int statusCode = response.getStatusLine().getStatusCode(); if (statusCode < 300 || (request.getMethod().equals(HttpHead.METHOD_NAME) && statusCode == 404) ) { - RequestLogger.log(logger, "request succeeded", request, connection.getHost(), response); - 
onSuccess(connection); + RequestLogger.log(logger, "request succeeded", request, host, response); + onSuccess(host); return elasticsearchResponse; } else { - RequestLogger.log(logger, "request failed", request, connection.getHost(), response); + RequestLogger.log(logger, "request failed", request, host, response); String responseBody; try { if (elasticsearchResponse.getEntity() == null) { @@ -143,10 +149,10 @@ public ElasticsearchResponse performRequest(String method, String endpoint, Map< lastSeenException = addSuppressedException(lastSeenException, elasticsearchResponseException); //clients don't retry on 500 because elasticsearch still misuses it instead of 400 in some places if (statusCode == 502 || statusCode == 503 || statusCode == 504) { - onFailure(connection); + onFailure(host); } else { //don't retry and call onSuccess as the error should be a request problem, the node is alive - onSuccess(connection); + onSuccess(host); break; } } @@ -156,63 +162,78 @@ public ElasticsearchResponse performRequest(String method, String endpoint, Map< } /** - * Returns an iterator of connections that should be used for a request call. - * Ideally, the first connection is retrieved from the iterator and used successfully for the request. - * Otherwise, after each failure the next connection should be retrieved from the iterator so that the request can be retried. - * The maximum total of attempts is equal to the number of connections that are available in the iterator. - * The iterator returned will never be empty, rather an {@link IllegalStateException} will be thrown in that case. - * In case there are no alive connections available, or dead ones that should be retried, one dead connection - * gets resurrected and returned. + * Returns an iterator of hosts to be used for a request call. + * Ideally, the first host is retrieved from the iterator and used successfully for the request. 
+ * Otherwise, after each failure the next host should be retrieved from the iterator so that the request can be retried till + * the iterator is exhausted. The maximum total of attempts is equal to the number of hosts that are available in the iterator. + * The iterator returned will never be empty, rather an {@link IllegalStateException} in case there are no hosts. + * In case there are no healthy hosts available, or dead ones to be be retried, one dead host gets returned. */ - private Iterator nextConnection() { - if (this.connections.isEmpty()) { - throw new IllegalStateException("no connections available"); + private Iterator nextHost() { + if (this.hosts.isEmpty()) { + throw new IllegalStateException("no hosts available"); } - List rotatedConnections = new ArrayList<>(connections); - //TODO is it possible to make this O(1)? (rotate is O(n)) - Collections.rotate(rotatedConnections, rotatedConnections.size() - lastConnectionIndex.getAndIncrement()); - Iterator connectionIterator = rotatedConnections.iterator(); - while (connectionIterator.hasNext()) { - Connection connection = connectionIterator.next(); - if (connection.isBlacklisted()) { - connectionIterator.remove(); + Set filteredHosts = new HashSet<>(hosts); + for (Map.Entry entry : blacklist.entrySet()) { + if (System.nanoTime() - entry.getValue().getDeadUntil() < 0) { + filteredHosts.remove(entry.getKey()); } } - if (rotatedConnections.isEmpty()) { - List sortedConnections = new ArrayList<>(connections); - Collections.sort(sortedConnections, new Comparator() { + + if (filteredHosts.isEmpty()) { + //last resort: if there are no good hosts to use, return a single dead one, the one that's closest to being retried + List> sortedHosts = new ArrayList<>(blacklist.entrySet()); + Collections.sort(sortedHosts, new Comparator>() { @Override - public int compare(Connection o1, Connection o2) { - return Long.compare(o1.getDeadUntil(), o2.getDeadUntil()); + public int compare(Map.Entry o1, Map.Entry o2) { + return 
Long.compare(o1.getValue().getDeadUntil(), o2.getValue().getDeadUntil()); } }); - Connection connection = sortedConnections.get(0); - logger.trace("trying to resurrect connection for " + connection.getHost()); - return Collections.singleton(connection).iterator(); + HttpHost deadHost = sortedHosts.get(0).getKey(); + logger.trace("resurrecting host [" + deadHost + "]"); + return Collections.singleton(deadHost).iterator(); } - return rotatedConnections.iterator(); + + List rotatedHosts = new ArrayList<>(filteredHosts); + //TODO is it possible to make this O(1)? (rotate is O(n)) + Collections.rotate(rotatedHosts, rotatedHosts.size() - lastHostIndex.getAndIncrement()); + return rotatedHosts.iterator(); } /** * Called after each successful request call. - * Receives as an argument the connection that was used for the successful request. + * Receives as an argument the host that was used for the successful request. */ - public void onSuccess(Connection connection) { - connection.markAlive(); - logger.trace("marked connection alive for " + connection.getHost()); + private void onSuccess(HttpHost host) { + DeadHostState removedHost = this.blacklist.remove(host); + if (logger.isDebugEnabled() && removedHost != null) { + logger.debug("removed host [" + host + "] from blacklist"); + } } /** * Called after each failed attempt. - * Receives as an argument the connection that was used for the failed attempt. + * Receives as an argument the host that was used for the failed attempt. 
*/ - private void onFailure(Connection connection) throws IOException { - connection.markDead(); - logger.debug("marked connection dead for " + connection.getHost()); - failureListener.onFailure(connection); + private void onFailure(HttpHost host) throws IOException { + while(true) { + DeadHostState previousDeadHostState = blacklist.putIfAbsent(host, DeadHostState.INITIAL_DEAD_STATE); + if (previousDeadHostState == null) { + logger.debug("added host [" + host + "] to blacklist"); + break; + } + if (blacklist.replace(host, previousDeadHostState, new DeadHostState(previousDeadHostState))) { + logger.debug("updated host [" + host + "] already in blacklist"); + break; + } + } + failureListener.onFailure(host); } + /** + * Sets a {@link FailureListener} to be notified each and every time a host fails + */ public synchronized void setFailureListener(FailureListener failureListener) { this.failureListener = failureListener; } @@ -397,7 +418,11 @@ public static CloseableHttpClient createDefaultHttpClient(Collection Date: Fri, 27 May 2016 13:42:50 +0200 Subject: [PATCH 038/103] make some classes and methods package private ElasticsearchResponseException, as well as ElasticsearchResponse, should only be created from o.e.client package. RequestLogger should only be used from this package too. 
--- .../client/ElasticsearchResponseException.java | 2 +- .../java/org/elasticsearch/client/HttpDeleteWithEntity.java | 2 +- .../java/org/elasticsearch/client/HttpGetWithEntity.java | 2 +- .../main/java/org/elasticsearch/client/RequestLogger.java | 6 +++--- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/client/src/main/java/org/elasticsearch/client/ElasticsearchResponseException.java b/client/src/main/java/org/elasticsearch/client/ElasticsearchResponseException.java index 12aef4f0d6580..2e9d42797de3c 100644 --- a/client/src/main/java/org/elasticsearch/client/ElasticsearchResponseException.java +++ b/client/src/main/java/org/elasticsearch/client/ElasticsearchResponseException.java @@ -31,7 +31,7 @@ public class ElasticsearchResponseException extends IOException { private ElasticsearchResponse elasticsearchResponse; private final String responseBody; - public ElasticsearchResponseException(ElasticsearchResponse elasticsearchResponse, String responseBody) throws IOException { + ElasticsearchResponseException(ElasticsearchResponse elasticsearchResponse, String responseBody) throws IOException { super(buildMessage(elasticsearchResponse,responseBody)); this.elasticsearchResponse = elasticsearchResponse; this.responseBody = responseBody; diff --git a/client/src/main/java/org/elasticsearch/client/HttpDeleteWithEntity.java b/client/src/main/java/org/elasticsearch/client/HttpDeleteWithEntity.java index 4f378f19c8f2c..85927b6f87176 100644 --- a/client/src/main/java/org/elasticsearch/client/HttpDeleteWithEntity.java +++ b/client/src/main/java/org/elasticsearch/client/HttpDeleteWithEntity.java @@ -30,7 +30,7 @@ final class HttpDeleteWithEntity extends HttpEntityEnclosingRequestBase { final static String METHOD_NAME = HttpDelete.METHOD_NAME; - public HttpDeleteWithEntity(final URI uri) { + HttpDeleteWithEntity(final URI uri) { setURI(uri); } diff --git a/client/src/main/java/org/elasticsearch/client/HttpGetWithEntity.java 
b/client/src/main/java/org/elasticsearch/client/HttpGetWithEntity.java index 18039ba9b3425..c25418387e45f 100644 --- a/client/src/main/java/org/elasticsearch/client/HttpGetWithEntity.java +++ b/client/src/main/java/org/elasticsearch/client/HttpGetWithEntity.java @@ -30,7 +30,7 @@ final class HttpGetWithEntity extends HttpEntityEnclosingRequestBase { final static String METHOD_NAME = HttpGet.METHOD_NAME; - public HttpGetWithEntity(final URI uri) { + HttpGetWithEntity(final URI uri) { setURI(uri); } diff --git a/client/src/main/java/org/elasticsearch/client/RequestLogger.java b/client/src/main/java/org/elasticsearch/client/RequestLogger.java index dd9fb269717d0..c15f6abf04b2e 100644 --- a/client/src/main/java/org/elasticsearch/client/RequestLogger.java +++ b/client/src/main/java/org/elasticsearch/client/RequestLogger.java @@ -41,7 +41,7 @@ * Helper class that exposes static methods to unify the way requests are logged. * Includes trace logging to log complete requests and responses in curl format. 
*/ -public final class RequestLogger { +final class RequestLogger { private static final Log tracer = LogFactory.getLog("tracer"); @@ -51,7 +51,7 @@ private RequestLogger() { /** * Logs a request that yielded a response */ - public static void log(Log logger, String message, HttpUriRequest request, HttpHost host, HttpResponse httpResponse) { + static void log(Log logger, String message, HttpUriRequest request, HttpHost host, HttpResponse httpResponse) { logger.debug(message + " [" + request.getMethod() + " " + host + request.getRequestLine().getUri() + "] [" + httpResponse.getStatusLine() + "]"); @@ -77,7 +77,7 @@ public static void log(Log logger, String message, HttpUriRequest request, HttpH /** * Logs a request that failed */ - public static void log(Log logger, String message, HttpUriRequest request, HttpHost host, IOException e) { + static void log(Log logger, String message, HttpUriRequest request, HttpHost host, IOException e) { logger.debug(message + " [" + request.getMethod() + " " + host + request.getRequestLine().getUri() + "]", e); if (logger.isTraceEnabled()) { String traceRequest; From 7f4807b29e46c7b294b1b8c15b16b444f0961e39 Mon Sep 17 00:00:00 2001 From: javanna Date: Fri, 27 May 2016 14:08:29 +0200 Subject: [PATCH 039/103] add some javadocs --- .../org/elasticsearch/client/RestClient.java | 35 +++++++++++++++++++ .../client/sniff/HostsSniffer.java | 1 + .../elasticsearch/client/sniff/Sniffer.java | 9 +++-- 3 files changed, 42 insertions(+), 3 deletions(-) diff --git a/client/src/main/java/org/elasticsearch/client/RestClient.java b/client/src/main/java/org/elasticsearch/client/RestClient.java index 4fc8d3c27416d..ab919ca3382fc 100644 --- a/client/src/main/java/org/elasticsearch/client/RestClient.java +++ b/client/src/main/java/org/elasticsearch/client/RestClient.java @@ -24,6 +24,7 @@ import org.apache.http.Header; import org.apache.http.HttpEntity; import org.apache.http.HttpHost; +import org.apache.http.client.ClientProtocolException; import 
org.apache.http.client.config.RequestConfig; import org.apache.http.client.methods.CloseableHttpResponse; import org.apache.http.client.methods.HttpEntityEnclosingRequestBase; @@ -59,6 +60,19 @@ import java.util.concurrent.TimeUnit; import java.util.concurrent.atomic.AtomicInteger; +/** + * Client that connects to an elasticsearch cluster through http. + * Must be created using {@link Builder}, which allows to set all the different options or just rely on defaults. + * The hosts that are part of the cluster need to be provided at creation time, but can also be replaced later + * by calling {@link #setHosts(HttpHost...)}. + * The method {@link #performRequest(String, String, Map, HttpEntity, Header...)} allows to send a request to the cluster. When + * sending a request, a host gets selected out of the provided ones in a round-robin fashion. Failing hosts are marked dead and + * retried after a certain amount of time (minimum 1 minute, maximum 30 minutes), depending on how many times they previously + * failed (the more failures, the later they will be retried). In case of failures all of the alive nodes (or dead nodes that + * deserve a retry) are retried till one responds or none of them does, in which case an {@link IOException} will be thrown. + * + * Requests can be traced by enabling trace logging for "tracer". The trace logger outputs requests and responses in curl format. + */ public final class RestClient implements Closeable { private static final Log logger = LogFactory.getLog(RestClient.class); @@ -77,6 +91,10 @@ private RestClient(CloseableHttpClient client, long maxRetryTimeout, HttpHost... setHosts(hosts); } + /** + * Replaces the hosts that the client communicates with. + * @see HttpHost + */ public synchronized void setHosts(HttpHost... hosts) { Set httpHosts = new HashSet<>(); for (HttpHost host : hosts) { @@ -87,6 +105,23 @@ public synchronized void setHosts(HttpHost... 
hosts) { this.blacklist.clear(); } + /** + * Sends a request to the elasticsearch cluster that the current client points to. + * Selects a host out of the provided ones in a round-robin fashion. Failing hosts are marked dead and retried after a certain + * amount of time (minimum 1 minute, maximum 30 minutes), depending on how many times they previously failed (the more failures, + * the later they will be retried). In case of failures all of the alive nodes (or dead nodes that deserve a retry) are retried + * till one responds or none of them does, in which case an {@link IOException} will be thrown. + * + * @param method the http method + * @param endpoint the path of the request (without host and port) + * @param params the query_string parameters + * @param entity the body of the request, null if not applicable + * @param headers the optional request headers + * @return the response returned by elasticsearch + * @throws IOException in case of a problem or the connection was aborted + * @throws ClientProtocolException in case of an http protocol error + * @throws ElasticsearchResponseException in case elasticsearch responded with a status code that indicated an error + */ public ElasticsearchResponse performRequest(String method, String endpoint, Map params, HttpEntity entity, Header... 
headers) throws IOException { URI uri = buildUri(endpoint, params); diff --git a/client/src/main/java/org/elasticsearch/client/sniff/HostsSniffer.java b/client/src/main/java/org/elasticsearch/client/sniff/HostsSniffer.java index 86dca173a6714..cf49cfbe68ab5 100644 --- a/client/src/main/java/org/elasticsearch/client/sniff/HostsSniffer.java +++ b/client/src/main/java/org/elasticsearch/client/sniff/HostsSniffer.java @@ -40,6 +40,7 @@ /** * Class responsible for sniffing the http hosts from elasticsearch through the nodes info api and returning them back */ +//TODO This could potentially be using _cat/nodes which wouldn't require jackson as a dependency, but we'd have bw comp problems with 2.x public class HostsSniffer { private static final Log logger = LogFactory.getLog(HostsSniffer.class); diff --git a/client/src/main/java/org/elasticsearch/client/sniff/Sniffer.java b/client/src/main/java/org/elasticsearch/client/sniff/Sniffer.java index ae5180413a07e..ed6f8284f1e3d 100644 --- a/client/src/main/java/org/elasticsearch/client/sniff/Sniffer.java +++ b/client/src/main/java/org/elasticsearch/client/sniff/Sniffer.java @@ -37,9 +37,12 @@ import java.util.concurrent.atomic.AtomicBoolean; /** - * Calls nodes info api and returns a list of http hosts extracted from it. + * Class responsible for sniffing nodes from an elasticsearch cluster and setting them to a provided instance of {@link RestClient}. + * Must be created via {@link Builder}, which allows to set all of the different options or rely on defaults. + * A background task fetches the nodes from elasticsearch and updates them periodically. + * Supports sniffing on failure, meaning that the client will notify the sniffer at each host failure, so that nodes can be updated + * straightaway. 
*/ -//TODO This could potentially be using _cat/nodes which wouldn't require jackson as a dependency, but we'd have bw comp problems with 2.x public final class Sniffer extends RestClient.FailureListener implements Closeable { private static final Log logger = LogFactory.getLog(Sniffer.class); @@ -47,7 +50,7 @@ public final class Sniffer extends RestClient.FailureListener implements Closeab private final boolean sniffOnFailure; private final Task task; - public Sniffer(RestClient restClient, int sniffRequestTimeout, String scheme, int sniffInterval, + private Sniffer(RestClient restClient, int sniffRequestTimeout, String scheme, int sniffInterval, boolean sniffOnFailure, int sniffAfterFailureDelay) { HostsSniffer hostsSniffer = new HostsSniffer(restClient, sniffRequestTimeout, scheme); this.task = new Task(hostsSniffer, restClient, sniffInterval, sniffAfterFailureDelay); From 044a97c7406392f37e5ad28021c5a74795b98022 Mon Sep 17 00:00:00 2001 From: javanna Date: Fri, 27 May 2016 16:01:33 +0200 Subject: [PATCH 040/103] move client sniffer to its own project Create a new subproject called client-sniffer that contains the o.e.client.sniff package. Since it is going to go to a separate jar, due to its additional functionalities and dependency on jackson, it makes sense to have it as a separate project that depends on client. This way we make sure that client doesn't depend on it etc. 
--- build.gradle | 1 + client-sniffer/build.gradle | 84 +++++++++++++++++++ .../client/sniff/HostsSniffer.java | 0 .../elasticsearch/client/sniff/Sniffer.java | 0 .../client/sniff/HostsSnifferTests.java | 0 .../client/sniff/SnifferBuilderTests.java | 0 client/build.gradle | 2 - settings.gradle | 1 + 8 files changed, 86 insertions(+), 2 deletions(-) create mode 100644 client-sniffer/build.gradle rename {client => client-sniffer}/src/main/java/org/elasticsearch/client/sniff/HostsSniffer.java (100%) rename {client => client-sniffer}/src/main/java/org/elasticsearch/client/sniff/Sniffer.java (100%) rename {client => client-sniffer}/src/test/java/org/elasticsearch/client/sniff/HostsSnifferTests.java (100%) rename {client => client-sniffer}/src/test/java/org/elasticsearch/client/sniff/SnifferBuilderTests.java (100%) diff --git a/build.gradle b/build.gradle index af18424944b66..bd36211b128f8 100644 --- a/build.gradle +++ b/build.gradle @@ -163,6 +163,7 @@ subprojects { "org.elasticsearch:rest-api-spec:${version}": ':rest-api-spec', "org.elasticsearch:elasticsearch:${version}": ':core', "org.elasticsearch:client:${version}": ':client', + "org.elasticsearch:client-sniffer:${version}": ':client-sniffer', "org.elasticsearch.test:framework:${version}": ':test:framework', "org.elasticsearch.distribution.integ-test-zip:elasticsearch:${version}": ':distribution:integ-test-zip', "org.elasticsearch.distribution.zip:elasticsearch:${version}": ':distribution:zip', diff --git a/client-sniffer/build.gradle b/client-sniffer/build.gradle new file mode 100644 index 0000000000000..d446ffc0113e9 --- /dev/null +++ b/client-sniffer/build.gradle @@ -0,0 +1,84 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. 
Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +import org.elasticsearch.gradle.precommit.PrecommitTasks +import org.gradle.api.JavaVersion + +apply plugin: 'elasticsearch.build' + +targetCompatibility = JavaVersion.VERSION_1_7 +sourceCompatibility = JavaVersion.VERSION_1_7 + +dependencies { + compile "org.elasticsearch:client:${version}" + compile "org.apache.httpcomponents:httpclient:${versions.httpclient}" + compile "org.apache.httpcomponents:httpcore:${versions.httpcore}" + compile "commons-codec:commons-codec:${versions.commonscodec}" + compile "commons-logging:commons-logging:${versions.commonslogging}" + compile "com.fasterxml.jackson.core:jackson-core:${versions.jackson}" + + testCompile "com.carrotsearch.randomizedtesting:randomizedtesting-runner:${versions.randomizedrunner}" + testCompile "junit:junit:${versions.junit}" + testCompile "org.hamcrest:hamcrest-all:1.3" + testCompile "org.apache.lucene:lucene-test-framework:${versions.lucene}" + testCompile "org.apache.lucene:lucene-core:${versions.lucene}" + testCompile "org.apache.lucene:lucene-codecs:${versions.lucene}" + //mock web server + testCompile "com.squareup.okhttp3:mockwebserver:3.2.0" + testCompile "com.squareup.okhttp3:okhttp:3.2.0" + testCompile "com.squareup.okhttp3:okhttp-ws:3.2.0" + testCompile "com.squareup.okio:okio:1.6.0" + testCompile "org.bouncycastle:bcprov-jdk15on:1.54" +} + +//TODO compiling from 1.8 with target 1.7 and source 1.7 is 
best effort, not enough to ensure we are java 7 compatible +compileJava.options.compilerArgs << '-target' << '1.7' << '-source' << '1.7' << '-Xlint:all,-path,-serial,-options' +compileTestJava.options.compilerArgs << '-target' << '1.7' << '-source' << '1.7' + +forbiddenApisMain { + //client does not depend on core, so only jdk signatures should be checked + signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt')] +} + +forbiddenApisTest { + //client does not depend on core, so only jdk signatures should be checked + signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt')] +} + +//TODO add licenses for dependencies and take care of distribution +//dependency license are currently checked in distribution +dependencyLicenses.enabled=false +//JarHell is part of es core, which we don't want to pull in +jarHell.enabled=false +//NamingConventionCheck is part of test-framework, which we don't want to pull in as it depends on es core +namingConventions.enabled=false + +thirdPartyAudit.excludes = [ + //commons-logging optional dependencies + 'org.apache.avalon.framework.logger.Logger', + 'org.apache.log.Hierarchy', + 'org.apache.log.Logger', + 'org.apache.log4j.Category', + 'org.apache.log4j.Level', + 'org.apache.log4j.Logger', + 'org.apache.log4j.Priority', + //commons-logging provided dependencies + 'javax.servlet.ServletContextEvent', + 'javax.servlet.ServletContextListener' +] diff --git a/client/src/main/java/org/elasticsearch/client/sniff/HostsSniffer.java b/client-sniffer/src/main/java/org/elasticsearch/client/sniff/HostsSniffer.java similarity index 100% rename from client/src/main/java/org/elasticsearch/client/sniff/HostsSniffer.java rename to client-sniffer/src/main/java/org/elasticsearch/client/sniff/HostsSniffer.java diff --git a/client/src/main/java/org/elasticsearch/client/sniff/Sniffer.java b/client-sniffer/src/main/java/org/elasticsearch/client/sniff/Sniffer.java similarity index 100% rename from 
client/src/main/java/org/elasticsearch/client/sniff/Sniffer.java rename to client-sniffer/src/main/java/org/elasticsearch/client/sniff/Sniffer.java diff --git a/client/src/test/java/org/elasticsearch/client/sniff/HostsSnifferTests.java b/client-sniffer/src/test/java/org/elasticsearch/client/sniff/HostsSnifferTests.java similarity index 100% rename from client/src/test/java/org/elasticsearch/client/sniff/HostsSnifferTests.java rename to client-sniffer/src/test/java/org/elasticsearch/client/sniff/HostsSnifferTests.java diff --git a/client/src/test/java/org/elasticsearch/client/sniff/SnifferBuilderTests.java b/client-sniffer/src/test/java/org/elasticsearch/client/sniff/SnifferBuilderTests.java similarity index 100% rename from client/src/test/java/org/elasticsearch/client/sniff/SnifferBuilderTests.java rename to client-sniffer/src/test/java/org/elasticsearch/client/sniff/SnifferBuilderTests.java diff --git a/client/build.gradle b/client/build.gradle index e24aa9878e312..8cb0018e9916f 100644 --- a/client/build.gradle +++ b/client/build.gradle @@ -30,8 +30,6 @@ dependencies { compile "org.apache.httpcomponents:httpcore:${versions.httpcore}" compile "commons-codec:commons-codec:${versions.commonscodec}" compile "commons-logging:commons-logging:${versions.commonslogging}" - //jackson is only needed in the sniff package - compile "com.fasterxml.jackson.core:jackson-core:${versions.jackson}" testCompile "com.carrotsearch.randomizedtesting:randomizedtesting-runner:${versions.randomizedrunner}" testCompile "junit:junit:${versions.junit}" diff --git a/settings.gradle b/settings.gradle index 831b2c5d2fb35..dbaf9fa8aa927 100644 --- a/settings.gradle +++ b/settings.gradle @@ -6,6 +6,7 @@ List projects = [ 'core', 'docs', 'client', + 'client-sniffer', 'distribution:integ-test-zip', 'distribution:zip', 'distribution:tar', From 3745305ffb785a331843e4bb94878efcb99c051f Mon Sep 17 00:00:00 2001 From: javanna Date: Mon, 30 May 2016 10:07:34 +0200 Subject: [PATCH 041/103] [TEST] be more 
specific around http method used for sniffing --- .../client/sniff/HostsSnifferTests.java | 24 ++++++++++--------- 1 file changed, 13 insertions(+), 11 deletions(-) diff --git a/client-sniffer/src/test/java/org/elasticsearch/client/sniff/HostsSnifferTests.java b/client-sniffer/src/test/java/org/elasticsearch/client/sniff/HostsSnifferTests.java index e2b7a6d7c1051..9d9c054fd068c 100644 --- a/client-sniffer/src/test/java/org/elasticsearch/client/sniff/HostsSnifferTests.java +++ b/client-sniffer/src/test/java/org/elasticsearch/client/sniff/HostsSnifferTests.java @@ -29,6 +29,7 @@ import okhttp3.mockwebserver.MockWebServer; import okhttp3.mockwebserver.RecordedRequest; import org.apache.http.HttpHost; +import org.apache.http.client.methods.HttpGet; import org.apache.lucene.util.LuceneTestCase; import org.elasticsearch.client.ElasticsearchResponse; import org.elasticsearch.client.ElasticsearchResponseException; @@ -123,18 +124,19 @@ private static MockWebServer buildMockWebServer(final SniffResponse sniffRespons final Dispatcher dispatcher = new Dispatcher() { @Override public MockResponse dispatch(RecordedRequest request) throws InterruptedException { - String decodedUrl; - try { - decodedUrl = URLDecoder.decode(request.getPath(), StandardCharsets.UTF_8.name()); - } catch (UnsupportedEncodingException e) { - throw new RuntimeException(e); - } - String sniffUrl = "/_nodes/http?timeout=" + sniffTimeout + "ms"; - if (sniffUrl.equals(decodedUrl)) { - return new MockResponse().setBody(sniffResponse.nodesInfoBody).setResponseCode(sniffResponse.nodesInfoResponseCode); - } else { - return new MockResponse().setResponseCode(404); + if (request.getMethod().equals(HttpGet.METHOD_NAME)) { + String decodedUrl; + try { + decodedUrl = URLDecoder.decode(request.getPath(), StandardCharsets.UTF_8.name()); + } catch (UnsupportedEncodingException e) { + throw new RuntimeException(e); + } + String sniffUrl = "/_nodes/http?timeout=" + sniffTimeout + "ms"; + if (sniffUrl.equals(decodedUrl)) 
{ + return new MockResponse().setBody(sniffResponse.nodesInfoBody).setResponseCode(sniffResponse.nodesInfoResponseCode); + } } + return new MockResponse().setResponseCode(404); } }; server.setDispatcher(dispatcher); From 51e487fa555b03d507c985bb4eb3d7501f51da98 Mon Sep 17 00:00:00 2001 From: javanna Date: Mon, 30 May 2016 17:08:20 +0200 Subject: [PATCH 042/103] [TEST] remove okhttp test dependency Use sun HttpServer instead and disable forbidden-apis for test classes. It turns out to be more flexible than okhttp as it allows get & delete with body. --- client-sniffer/build.gradle | 16 ++--- .../client/sniff/HostsSnifferTests.java | 70 ++++++++----------- .../client/sniff/SnifferBuilderTests.java | 5 -- client/build.gradle | 16 ++--- .../client/RestClientBuilderTests.java | 5 -- 5 files changed, 44 insertions(+), 68 deletions(-) diff --git a/client-sniffer/build.gradle b/client-sniffer/build.gradle index d446ffc0113e9..4055155673144 100644 --- a/client-sniffer/build.gradle +++ b/client-sniffer/build.gradle @@ -39,12 +39,6 @@ dependencies { testCompile "org.apache.lucene:lucene-test-framework:${versions.lucene}" testCompile "org.apache.lucene:lucene-core:${versions.lucene}" testCompile "org.apache.lucene:lucene-codecs:${versions.lucene}" - //mock web server - testCompile "com.squareup.okhttp3:mockwebserver:3.2.0" - testCompile "com.squareup.okhttp3:okhttp:3.2.0" - testCompile "com.squareup.okhttp3:okhttp-ws:3.2.0" - testCompile "com.squareup.okio:okio:1.6.0" - testCompile "org.bouncycastle:bcprov-jdk15on:1.54" } //TODO compiling from 1.8 with target 1.7 and source 1.7 is best effort, not enough to ensure we are java 7 compatible @@ -56,10 +50,14 @@ forbiddenApisMain { signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt')] } -forbiddenApisTest { +//TODO would be nice to just exclude the classes where we use com.sun.net.httpserver.* classes +//excludes don't seem to work though and we don't want to have our own @SuppressForbidden 
+forbiddenApisTest.enabled=false + +//forbiddenApisTest { //client does not depend on core, so only jdk signatures should be checked - signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt')] -} + //signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt')] +//} //TODO add licenses for dependencies and take care of distribution //dependency license are currently checked in distribution diff --git a/client-sniffer/src/test/java/org/elasticsearch/client/sniff/HostsSnifferTests.java b/client-sniffer/src/test/java/org/elasticsearch/client/sniff/HostsSnifferTests.java index 9d9c054fd068c..88f0480f92a9a 100644 --- a/client-sniffer/src/test/java/org/elasticsearch/client/sniff/HostsSnifferTests.java +++ b/client-sniffer/src/test/java/org/elasticsearch/client/sniff/HostsSnifferTests.java @@ -24,10 +24,10 @@ import com.carrotsearch.randomizedtesting.generators.RandomStrings; import com.fasterxml.jackson.core.JsonFactory; import com.fasterxml.jackson.core.JsonGenerator; -import okhttp3.mockwebserver.Dispatcher; -import okhttp3.mockwebserver.MockResponse; -import okhttp3.mockwebserver.MockWebServer; -import okhttp3.mockwebserver.RecordedRequest; +import com.sun.net.httpserver.HttpExchange; +import com.sun.net.httpserver.HttpHandler; +import com.sun.net.httpserver.HttpServer; +import org.apache.http.Consts; import org.apache.http.HttpHost; import org.apache.http.client.methods.HttpGet; import org.apache.lucene.util.LuceneTestCase; @@ -38,11 +38,10 @@ import org.junit.Before; import java.io.IOException; +import java.io.OutputStream; import java.io.StringWriter; -import java.io.UnsupportedEncodingException; +import java.net.InetSocketAddress; import java.net.URISyntaxException; -import java.net.URLDecoder; -import java.nio.charset.StandardCharsets; import java.util.ArrayList; import java.util.Arrays; import java.util.Collections; @@ -52,25 +51,19 @@ import java.util.List; import java.util.Map; import java.util.Set; -import 
java.util.logging.LogManager; import static org.hamcrest.CoreMatchers.containsString; import static org.hamcrest.CoreMatchers.equalTo; public class HostsSnifferTests extends LuceneTestCase { - static { - //prevent MockWebServer from logging to stdout and stderr - LogManager.getLogManager().reset(); - } - private int sniffRequestTimeout; private String scheme; private SniffResponse sniffResponse; - private MockWebServer server; + private HttpServer httpServer; @Before - public void startMockWebServer() throws IOException { + public void startHttpServer() throws IOException { this.sniffRequestTimeout = RandomInts.randomIntBetween(random(), 1000, 10000); this.scheme = RandomPicks.randomFrom(random(), Arrays.asList("http", "https")); if (rarely()) { @@ -78,17 +71,17 @@ public void startMockWebServer() throws IOException { } else { this.sniffResponse = buildSniffResponse(scheme); } - this.server = buildMockWebServer(sniffResponse, sniffRequestTimeout); - this.server.start(); + this.httpServer = createHttpServer(sniffResponse, sniffRequestTimeout); + this.httpServer.start(); } @After - public void stopMockWebServer() throws IOException { - server.shutdown(); + public void stopHttpServer() throws IOException { + httpServer.stop(0); } public void testSniffNodes() throws IOException, URISyntaxException { - HttpHost httpHost = new HttpHost(server.getHostName(), server.getPort()); + HttpHost httpHost = new HttpHost(httpServer.getAddress().getHostName(), httpServer.getAddress().getPort()); try (RestClient restClient = RestClient.builder().setHosts(httpHost).build()) { HostsSniffer sniffer = new HostsSniffer(restClient, sniffRequestTimeout, scheme); try { @@ -104,7 +97,7 @@ public void testSniffNodes() throws IOException, URISyntaxException { } catch(ElasticsearchResponseException e) { ElasticsearchResponse response = e.getElasticsearchResponse(); if (sniffResponse.isFailure) { - assertThat(e.getMessage(), containsString("GET http://localhost:" + server.getPort() + + 
assertThat(e.getMessage(), containsString("GET http://localhost:" + httpServer.getAddress().getPort() + "/_nodes/http?timeout=" + sniffRequestTimeout)); assertThat(e.getMessage(), containsString(Integer.toString(sniffResponse.nodesInfoResponseCode))); assertThat(response.getHost(), equalTo(httpHost)); @@ -118,29 +111,26 @@ public void testSniffNodes() throws IOException, URISyntaxException { } } - private static MockWebServer buildMockWebServer(final SniffResponse sniffResponse, final int sniffTimeout) - throws UnsupportedEncodingException { - MockWebServer server = new MockWebServer(); - final Dispatcher dispatcher = new Dispatcher() { + private static HttpServer createHttpServer(final SniffResponse sniffResponse, final int sniffTimeout) throws IOException { + HttpServer httpServer = HttpServer.create(new InetSocketAddress(0), 0); + httpServer.createContext("/_nodes/http", new HttpHandler() { @Override - public MockResponse dispatch(RecordedRequest request) throws InterruptedException { - if (request.getMethod().equals(HttpGet.METHOD_NAME)) { - String decodedUrl; - try { - decodedUrl = URLDecoder.decode(request.getPath(), StandardCharsets.UTF_8.name()); - } catch (UnsupportedEncodingException e) { - throw new RuntimeException(e); - } - String sniffUrl = "/_nodes/http?timeout=" + sniffTimeout + "ms"; - if (sniffUrl.equals(decodedUrl)) { - return new MockResponse().setBody(sniffResponse.nodesInfoBody).setResponseCode(sniffResponse.nodesInfoResponseCode); + public void handle(HttpExchange httpExchange) throws IOException { + if (httpExchange.getRequestMethod().equals(HttpGet.METHOD_NAME)) { + if (httpExchange.getRequestURI().getRawQuery().equals("timeout=" + sniffTimeout + "ms")) { + String nodesInfoBody = sniffResponse.nodesInfoBody; + httpExchange.sendResponseHeaders(sniffResponse.nodesInfoResponseCode, nodesInfoBody.length()); + try (OutputStream out = httpExchange.getResponseBody()) { + out.write(nodesInfoBody.getBytes(Consts.UTF_8)); + return; + } } } - return 
new MockResponse().setResponseCode(404); + httpExchange.sendResponseHeaders(404, 0); + httpExchange.close(); } - }; - server.setDispatcher(dispatcher); - return server; + }); + return httpServer; } private static SniffResponse buildSniffResponse(String scheme) throws IOException { diff --git a/client-sniffer/src/test/java/org/elasticsearch/client/sniff/SnifferBuilderTests.java b/client-sniffer/src/test/java/org/elasticsearch/client/sniff/SnifferBuilderTests.java index 868c57881200a..ff3cad34bfac1 100644 --- a/client-sniffer/src/test/java/org/elasticsearch/client/sniff/SnifferBuilderTests.java +++ b/client-sniffer/src/test/java/org/elasticsearch/client/sniff/SnifferBuilderTests.java @@ -26,14 +26,9 @@ import org.elasticsearch.client.RestClient; import java.util.Arrays; -import java.util.logging.LogManager; public class SnifferBuilderTests extends LuceneTestCase { - static { - LogManager.getLogManager().reset(); - } - public void testBuild() throws Exception { try { diff --git a/client/build.gradle b/client/build.gradle index 8cb0018e9916f..43979e845971d 100644 --- a/client/build.gradle +++ b/client/build.gradle @@ -37,12 +37,6 @@ dependencies { testCompile "org.apache.lucene:lucene-test-framework:${versions.lucene}" testCompile "org.apache.lucene:lucene-core:${versions.lucene}" testCompile "org.apache.lucene:lucene-codecs:${versions.lucene}" - //mock web server - testCompile "com.squareup.okhttp3:mockwebserver:3.2.0" - testCompile "com.squareup.okhttp3:okhttp:3.2.0" - testCompile "com.squareup.okhttp3:okhttp-ws:3.2.0" - testCompile "com.squareup.okio:okio:1.6.0" - testCompile "org.bouncycastle:bcprov-jdk15on:1.54" } //TODO compiling from 1.8 with target 1.7 and source 1.7 is best effort, not enough to ensure we are java 7 compatible @@ -54,10 +48,14 @@ forbiddenApisMain { signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt')] } -forbiddenApisTest { +//TODO would be nice to just exclude the classes where we use com.sun.net.httpserver.* classes 
+//excludes don't seem to work though and we don't want to have our own @SuppressForbidden +forbiddenApisTest.enabled=false + +//forbiddenApisTest { //client does not depend on core, so only jdk signatures should be checked - signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt')] -} + //signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt')] +//} //TODO add licenses for dependencies and take care of distribution //dependency license are currently checked in distribution diff --git a/client/src/test/java/org/elasticsearch/client/RestClientBuilderTests.java b/client/src/test/java/org/elasticsearch/client/RestClientBuilderTests.java index 44909336dd687..1d73db6ec379c 100644 --- a/client/src/test/java/org/elasticsearch/client/RestClientBuilderTests.java +++ b/client/src/test/java/org/elasticsearch/client/RestClientBuilderTests.java @@ -30,14 +30,9 @@ import java.util.ArrayList; import java.util.Collection; import java.util.Collections; -import java.util.logging.LogManager; public class RestClientBuilderTests extends LuceneTestCase { - static { - LogManager.getLogManager().reset(); - } - public void testBuild() throws IOException { try { RestClient.builder().setMaxRetryTimeout(RandomInts.randomIntBetween(random(), Integer.MIN_VALUE, 0)); From 83c6e736dedea2a28eac708acfbe5f88814495ce Mon Sep 17 00:00:00 2001 From: javanna Date: Tue, 31 May 2016 16:33:24 +0200 Subject: [PATCH 043/103] add support for PATCH and TRACE methods Although elasticsearch doesn't support these methods (RestController doesn't even allow to register handler for them), the RestClient should allow to send requests using them. 
--- .../org/elasticsearch/client/RestClient.java | 40 +++++++++---------- 1 file changed, 19 insertions(+), 21 deletions(-) diff --git a/client/src/main/java/org/elasticsearch/client/RestClient.java b/client/src/main/java/org/elasticsearch/client/RestClient.java index ab919ca3382fc..63ee118cc4bfa 100644 --- a/client/src/main/java/org/elasticsearch/client/RestClient.java +++ b/client/src/main/java/org/elasticsearch/client/RestClient.java @@ -30,9 +30,11 @@ import org.apache.http.client.methods.HttpEntityEnclosingRequestBase; import org.apache.http.client.methods.HttpHead; import org.apache.http.client.methods.HttpOptions; +import org.apache.http.client.methods.HttpPatch; import org.apache.http.client.methods.HttpPost; import org.apache.http.client.methods.HttpPut; import org.apache.http.client.methods.HttpRequestBase; +import org.apache.http.client.methods.HttpTrace; import org.apache.http.client.utils.URIBuilder; import org.apache.http.entity.ContentType; import org.apache.http.impl.client.CloseableHttpClient; @@ -160,8 +162,7 @@ public ElasticsearchResponse performRequest(String method, String endpoint, Map< lastSeenException = addSuppressedException(lastSeenException, e); continue; } - ElasticsearchResponse elasticsearchResponse = new ElasticsearchResponse(request.getRequestLine(), - host, response); + ElasticsearchResponse elasticsearchResponse = new ElasticsearchResponse(request.getRequestLine(), host, response); int statusCode = response.getStatusLine().getStatusCode(); if (statusCode < 300 || (request.getMethod().equals(HttpHead.METHOD_NAME) && statusCode == 404) ) { RequestLogger.log(logger, "request succeeded", request, host, response); @@ -288,40 +289,37 @@ private static IOException addSuppressedException(IOException suppressedExceptio private static HttpRequestBase createHttpRequest(String method, URI uri, HttpEntity entity) { switch(method.toUpperCase(Locale.ROOT)) { case HttpDeleteWithEntity.METHOD_NAME: - HttpDeleteWithEntity httpDeleteWithEntity = 
new HttpDeleteWithEntity(uri); - addRequestBody(httpDeleteWithEntity, entity); - return httpDeleteWithEntity; + return addRequestBody(new HttpDeleteWithEntity(uri), entity); case HttpGetWithEntity.METHOD_NAME: - HttpGetWithEntity httpGetWithEntity = new HttpGetWithEntity(uri); - addRequestBody(httpGetWithEntity, entity); - return httpGetWithEntity; + return addRequestBody(new HttpGetWithEntity(uri), entity); case HttpHead.METHOD_NAME: - if (entity != null) { - throw new UnsupportedOperationException("HEAD with body is not supported"); - } - return new HttpHead(uri); + return addRequestBody(new HttpHead(uri), entity); case HttpOptions.METHOD_NAME: - if (entity != null) { - throw new UnsupportedOperationException("OPTIONS with body is not supported"); - } - return new HttpOptions(uri); + return addRequestBody(new HttpOptions(uri), entity); + case HttpPatch.METHOD_NAME: + return addRequestBody(new HttpPatch(uri), entity); case HttpPost.METHOD_NAME: HttpPost httpPost = new HttpPost(uri); addRequestBody(httpPost, entity); return httpPost; case HttpPut.METHOD_NAME: - HttpPut httpPut = new HttpPut(uri); - addRequestBody(httpPut, entity); - return httpPut; + return addRequestBody(new HttpPut(uri), entity); + case HttpTrace.METHOD_NAME: + return addRequestBody(new HttpTrace(uri), entity); default: throw new UnsupportedOperationException("http method not supported: " + method); } } - private static void addRequestBody(HttpEntityEnclosingRequestBase httpRequest, HttpEntity entity) { + private static HttpRequestBase addRequestBody(HttpRequestBase httpRequest, HttpEntity entity) { if (entity != null) { - httpRequest.setEntity(entity); + if (httpRequest instanceof HttpEntityEnclosingRequestBase) { + ((HttpEntityEnclosingRequestBase)httpRequest).setEntity(entity); + } else { + throw new UnsupportedOperationException(httpRequest.getMethod() + " with body is not supported"); + } } + return httpRequest; } private static URI buildUri(String path, Map params) { From 
c9db111387b1808c9dfe945326fc497f54e82990 Mon Sep 17 00:00:00 2001 From: javanna Date: Wed, 1 Jun 2016 10:51:51 +0200 Subject: [PATCH 044/103] add javadocs on closing responses --- .../java/org/elasticsearch/client/ElasticsearchResponse.java | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/client/src/main/java/org/elasticsearch/client/ElasticsearchResponse.java b/client/src/main/java/org/elasticsearch/client/ElasticsearchResponse.java index 304d10e2b94da..2d25851b9696b 100644 --- a/client/src/main/java/org/elasticsearch/client/ElasticsearchResponse.java +++ b/client/src/main/java/org/elasticsearch/client/ElasticsearchResponse.java @@ -32,7 +32,8 @@ /** * Holds an elasticsearch response. It wraps the {@link CloseableHttpResponse} response and associates it with - * its corresponding {@link RequestLine} and {@link HttpHost} + * its corresponding {@link RequestLine} and {@link HttpHost}. + * It must be closed to free any resource held by it, as well as the corresponding connection in the connection pool. 
*/ public class ElasticsearchResponse implements Closeable { From 6d66fbd9c15caab1f3fe089fd7ba2fc4c5c8189d Mon Sep 17 00:00:00 2001 From: javanna Date: Wed, 1 Jun 2016 14:38:46 +0200 Subject: [PATCH 045/103] add toString to DeadHostState class --- .../main/java/org/elasticsearch/client/DeadHostState.java | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/client/src/main/java/org/elasticsearch/client/DeadHostState.java b/client/src/main/java/org/elasticsearch/client/DeadHostState.java index 30a24bc2d5d9d..52ef6e2bc6bf2 100644 --- a/client/src/main/java/org/elasticsearch/client/DeadHostState.java +++ b/client/src/main/java/org/elasticsearch/client/DeadHostState.java @@ -55,4 +55,12 @@ private DeadHostState() { long getDeadUntil() { return deadUntil; } + + @Override + public String toString() { + return "DeadHostState{" + + "failedAttempts=" + failedAttempts + + ", deadUntil=" + deadUntil + + '}'; + } } From 35dbdeeae5173bb3b0b53b93282185bf083ec2be Mon Sep 17 00:00:00 2001 From: javanna Date: Wed, 1 Jun 2016 23:38:26 +0200 Subject: [PATCH 046/103] check hosts is not null nor empty earlier, remove check from nextHost if we check at set time, we don't need to check each single time in nextHost --- .../src/main/java/org/elasticsearch/client/RestClient.java | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/client/src/main/java/org/elasticsearch/client/RestClient.java b/client/src/main/java/org/elasticsearch/client/RestClient.java index 63ee118cc4bfa..7d9cebcafca4d 100644 --- a/client/src/main/java/org/elasticsearch/client/RestClient.java +++ b/client/src/main/java/org/elasticsearch/client/RestClient.java @@ -98,6 +98,9 @@ private RestClient(CloseableHttpClient client, long maxRetryTimeout, HttpHost... * @see HttpHost */ public synchronized void setHosts(HttpHost... 
hosts) { + if (hosts == null || hosts.length == 0) { + throw new IllegalArgumentException("hosts must not be null nor empty"); + } Set httpHosts = new HashSet<>(); for (HttpHost host : hosts) { Objects.requireNonNull(host, "host cannot be null"); @@ -206,10 +209,6 @@ public ElasticsearchResponse performRequest(String method, String endpoint, Map< * In case there are no healthy hosts available, or dead ones to be be retried, one dead host gets returned. */ private Iterator nextHost() { - if (this.hosts.isEmpty()) { - throw new IllegalStateException("no hosts available"); - } - Set filteredHosts = new HashSet<>(hosts); for (Map.Entry entry : blacklist.entrySet()) { if (System.nanoTime() - entry.getValue().getDeadUntil() < 0) { From 47e52044e452073c3205475515bee87168360812 Mon Sep 17 00:00:00 2001 From: javanna Date: Thu, 2 Jun 2016 11:30:09 +0200 Subject: [PATCH 047/103] [TEST] add setHosts test and rename RestClientBuilderTests to RestClientTests --- ...BuilderTests.java => RestClientTests.java} | 31 ++++++++++++++++++- 1 file changed, 30 insertions(+), 1 deletion(-) rename client/src/test/java/org/elasticsearch/client/{RestClientBuilderTests.java => RestClientTests.java} (75%) diff --git a/client/src/test/java/org/elasticsearch/client/RestClientBuilderTests.java b/client/src/test/java/org/elasticsearch/client/RestClientTests.java similarity index 75% rename from client/src/test/java/org/elasticsearch/client/RestClientBuilderTests.java rename to client/src/test/java/org/elasticsearch/client/RestClientTests.java index 1d73db6ec379c..f6a297eb14567 100644 --- a/client/src/test/java/org/elasticsearch/client/RestClientBuilderTests.java +++ b/client/src/test/java/org/elasticsearch/client/RestClientTests.java @@ -31,7 +31,7 @@ import java.util.Collection; import java.util.Collections; -public class RestClientBuilderTests extends LuceneTestCase { +public class RestClientTests extends LuceneTestCase { public void testBuild() throws IOException { try { @@ -105,4 +105,33 @@ 
public void testBuild() throws IOException { assertNotNull(restClient); } } + + public void testSetNodes() throws IOException { + try (RestClient restClient = RestClient.builder().setHosts(new HttpHost("localhost", 9200)).build()) { + try { + restClient.setHosts((HttpHost[]) null); + fail("setHosts should have failed"); + } catch (IllegalArgumentException e) { + assertEquals("hosts must not be null nor empty", e.getMessage()); + } + try { + restClient.setHosts(); + fail("setHosts should have failed"); + } catch (IllegalArgumentException e) { + assertEquals("hosts must not be null nor empty", e.getMessage()); + } + try { + restClient.setHosts((HttpHost) null); + fail("setHosts should have failed"); + } catch (NullPointerException e) { + assertEquals("host cannot be null", e.getMessage()); + } + try { + restClient.setHosts(new HttpHost("localhost", 9200), null, new HttpHost("localhost", 9201)); + fail("setHosts should have failed"); + } catch (NullPointerException e) { + assertEquals("host cannot be null", e.getMessage()); + } + } + } } From 24ea585c9ee8ebd150fc87c3de403667ff1f76bf Mon Sep 17 00:00:00 2001 From: javanna Date: Fri, 3 Jun 2016 15:44:51 +0200 Subject: [PATCH 048/103] don't use setDefaultHeaders from HttpClient Store default headers ourselves instead, otherwise default ones cannot be replaced. Don't allow for multiple headers with same key, last one wins and replaces previous ones with same key. Also fail with null params or headers. 
--- .../elasticsearch/client/sniff/Sniffer.java | 3 +- .../org/elasticsearch/client/RestClient.java | 56 +++++++++++-------- 2 files changed, 33 insertions(+), 26 deletions(-) diff --git a/client-sniffer/src/main/java/org/elasticsearch/client/sniff/Sniffer.java b/client-sniffer/src/main/java/org/elasticsearch/client/sniff/Sniffer.java index ed6f8284f1e3d..855b15e1e4d95 100644 --- a/client-sniffer/src/main/java/org/elasticsearch/client/sniff/Sniffer.java +++ b/client-sniffer/src/main/java/org/elasticsearch/client/sniff/Sniffer.java @@ -27,7 +27,6 @@ import java.io.Closeable; import java.io.IOException; -import java.util.Collection; import java.util.List; import java.util.Objects; import java.util.concurrent.Executors; @@ -204,7 +203,7 @@ public Builder setSniffAfterFailureDelay(int sniffAfterFailureDelay) { /** * Sets the http client. Mandatory argument. Best practice is to use the same client used * within {@link org.elasticsearch.client.RestClient} which can be created manually or - * through {@link RestClient.Builder#createDefaultHttpClient(Collection)}. + * through {@link RestClient.Builder#createDefaultHttpClient()}. 
* @see CloseableHttpClient */ public Builder setRestClient(RestClient restClient) { diff --git a/client/src/main/java/org/elasticsearch/client/RestClient.java b/client/src/main/java/org/elasticsearch/client/RestClient.java index 7d9cebcafca4d..856a50c1834f0 100644 --- a/client/src/main/java/org/elasticsearch/client/RestClient.java +++ b/client/src/main/java/org/elasticsearch/client/RestClient.java @@ -24,6 +24,7 @@ import org.apache.http.Header; import org.apache.http.HttpEntity; import org.apache.http.HttpHost; +import org.apache.http.HttpRequest; import org.apache.http.client.ClientProtocolException; import org.apache.http.client.config.RequestConfig; import org.apache.http.client.methods.CloseableHttpResponse; @@ -47,7 +48,6 @@ import java.net.URI; import java.net.URISyntaxException; import java.util.ArrayList; -import java.util.Collection; import java.util.Collections; import java.util.Comparator; import java.util.HashSet; @@ -81,15 +81,19 @@ public final class RestClient implements Closeable { public static ContentType JSON_CONTENT_TYPE = ContentType.create("application/json", Consts.UTF_8); private final CloseableHttpClient client; + //we don't rely on default headers supported by HttpClient as those cannot be replaced, plus it would get hairy + //when we create the HttpClient instance on our own as there would be two different ways to set the default headers. + private final Header[] defaultHeaders; private final long maxRetryTimeout; private final AtomicInteger lastHostIndex = new AtomicInteger(0); private volatile Set hosts; private final ConcurrentMap blacklist = new ConcurrentHashMap<>(); private volatile FailureListener failureListener = new FailureListener(); - private RestClient(CloseableHttpClient client, long maxRetryTimeout, HttpHost... 
hosts) { + private RestClient(CloseableHttpClient client, long maxRetryTimeout, Header[] defaultHeaders, HttpHost[] hosts) { this.client = client; this.maxRetryTimeout = maxRetryTimeout; + this.defaultHeaders = defaultHeaders; setHosts(hosts); } @@ -131,11 +135,7 @@ public ElasticsearchResponse performRequest(String method, String endpoint, Map< HttpEntity entity, Header... headers) throws IOException { URI uri = buildUri(endpoint, params); HttpRequestBase request = createHttpRequest(method, uri, entity); - if (headers.length > 0) { - for (Header header : headers) { - request.addHeader(header); - } - } + setHeaders(request, headers); //we apply a soft margin so that e.g. if a request took 59 seconds and timeout is set to 60 we don't do another attempt long retryTimeout = Math.round(this.maxRetryTimeout / (float)100 * 98); IOException lastSeenException = null; @@ -200,6 +200,17 @@ public ElasticsearchResponse performRequest(String method, String endpoint, Map< throw lastSeenException; } + private void setHeaders(HttpRequest httpRequest, Header[] requestHeaders) { + Objects.requireNonNull(requestHeaders, "request headers must not be null"); + for (Header defaultHeader : defaultHeaders) { + httpRequest.setHeader(defaultHeader); + } + for (Header requestHeader : requestHeaders) { + Objects.requireNonNull(requestHeader, "request header must not be null"); + httpRequest.setHeader(requestHeader); + } + } + /** * Returns an iterator of hosts to be used for a request call. * Ideally, the first host is retrieved from the iterator and used successfully for the request. 
@@ -322,6 +333,7 @@ private static HttpRequestBase addRequestBody(HttpRequestBase httpRequest, HttpE } private static URI buildUri(String path, Map params) { + Objects.requireNonNull(params, "params must not be null"); try { URIBuilder uriBuilder = new URIBuilder(path); for (Map.Entry param : params.entrySet()) { @@ -349,10 +361,12 @@ public static final class Builder { public static final int DEFAULT_MAX_RETRY_TIMEOUT = DEFAULT_SOCKET_TIMEOUT; public static final int DEFAULT_CONNECTION_REQUEST_TIMEOUT = 500; + private static final Header[] EMPTY_HEADERS = new Header[0]; + private CloseableHttpClient httpClient; private int maxRetryTimeout = DEFAULT_MAX_RETRY_TIMEOUT; private HttpHost[] hosts; - private Collection defaultHeaders; + private Header[] defaultHeaders = EMPTY_HEADERS; private Builder() { @@ -360,7 +374,7 @@ private Builder() { /** * Sets the http client. A new default one will be created if not - * specified, by calling {@link #createDefaultHttpClient(Collection)}. + * specified, by calling {@link #createDefaultHttpClient()}. * * @see CloseableHttpClient */ @@ -399,7 +413,11 @@ public Builder setHosts(HttpHost... hosts) { * In case the http client is set through {@link #setHttpClient(CloseableHttpClient)}, the default headers need to be * set to it externally during http client construction. 
*/ - public Builder setDefaultHeaders(Collection defaultHeaders) { + public Builder setDefaultHeaders(Header[] defaultHeaders) { + Objects.requireNonNull(defaultHeaders, "default headers must not be null"); + for (Header defaultHeader : defaultHeaders) { + Objects.requireNonNull(defaultHeader, "default header must not be null"); + } this.defaultHeaders = defaultHeaders; return this; } @@ -409,16 +427,12 @@ public Builder setDefaultHeaders(Collection defaultHeaders) { */ public RestClient build() { if (httpClient == null) { - httpClient = createDefaultHttpClient(defaultHeaders); - } else { - if (defaultHeaders != null) { - throw new IllegalArgumentException("defaultHeaders need to be set to the HttpClient directly when manually provided"); - } + httpClient = createDefaultHttpClient(); } if (hosts == null || hosts.length == 0) { throw new IllegalArgumentException("no hosts provided"); } - return new RestClient(httpClient, maxRetryTimeout, hosts); + return new RestClient(httpClient, maxRetryTimeout, defaultHeaders, hosts); } /** @@ -426,7 +440,7 @@ public RestClient build() { * * @see CloseableHttpClient */ - public static CloseableHttpClient createDefaultHttpClient(Collection defaultHeaders) { + public static CloseableHttpClient createDefaultHttpClient() { PoolingHttpClientConnectionManager connectionManager = new PoolingHttpClientConnectionManager(); //default settings may be too constraining connectionManager.setDefaultMaxPerRoute(10); @@ -436,12 +450,7 @@ public static CloseableHttpClient createDefaultHttpClient(Collection Date: Fri, 3 Jun 2016 15:45:52 +0200 Subject: [PATCH 049/103] [TEST] rename restClientTests back to RestClientBuilderTests --- ...Tests.java => RestClientBuilderTests.java} | 69 ++++++------------- 1 file changed, 20 insertions(+), 49 deletions(-) rename client/src/test/java/org/elasticsearch/client/{RestClientTests.java => RestClientBuilderTests.java} (58%) diff --git a/client/src/test/java/org/elasticsearch/client/RestClientTests.java 
b/client/src/test/java/org/elasticsearch/client/RestClientBuilderTests.java similarity index 58% rename from client/src/test/java/org/elasticsearch/client/RestClientTests.java rename to client/src/test/java/org/elasticsearch/client/RestClientBuilderTests.java index f6a297eb14567..3ea0fe14ed28d 100644 --- a/client/src/test/java/org/elasticsearch/client/RestClientTests.java +++ b/client/src/test/java/org/elasticsearch/client/RestClientBuilderTests.java @@ -20,18 +20,15 @@ package org.elasticsearch.client; import com.carrotsearch.randomizedtesting.generators.RandomInts; +import org.apache.http.Header; import org.apache.http.HttpHost; -import org.apache.http.impl.client.CloseableHttpClient; import org.apache.http.impl.client.HttpClientBuilder; import org.apache.http.message.BasicHeader; import org.apache.lucene.util.LuceneTestCase; import java.io.IOException; -import java.util.ArrayList; -import java.util.Collection; -import java.util.Collections; -public class RestClientTests extends LuceneTestCase { +public class RestClientBuilderTests extends LuceneTestCase { public void testBuild() throws IOException { try { @@ -69,12 +66,18 @@ public void testBuild() throws IOException { assertEquals(e.getMessage(), "host cannot be null"); } - try (CloseableHttpClient httpClient = HttpClientBuilder.create().build()) { - RestClient.builder().setHttpClient(httpClient) - .setDefaultHeaders(Collections.singleton(new BasicHeader("header", "value"))).build(); + try { + RestClient.builder().setDefaultHeaders(null); fail("should have failed"); - } catch(IllegalArgumentException e) { - assertEquals(e.getMessage(), "defaultHeaders need to be set to the HttpClient directly when manually provided"); + } catch(NullPointerException e) { + assertEquals(e.getMessage(), "default headers must not be null"); + } + + try { + RestClient.builder().setDefaultHeaders(new Header[]{null}); + fail("should have failed"); + } catch(NullPointerException e) { + assertEquals(e.getMessage(), "default header must 
not be null"); } RestClient.Builder builder = RestClient.builder(); @@ -84,20 +87,17 @@ public void testBuild() throws IOException { hosts[i] = new HttpHost("localhost", 9200 + i); } builder.setHosts(hosts); - if (random().nextBoolean()) { builder.setHttpClient(HttpClientBuilder.create().build()); - } else { - if (random().nextBoolean()) { - int numHeaders = RandomInts.randomIntBetween(random(), 1, 5); - Collection headers = new ArrayList<>(numHeaders); - for (int i = 0; i < numHeaders; i++) { - headers.add(new BasicHeader("header" + i, "value")); - } - builder.setDefaultHeaders(headers); + } + if (random().nextBoolean()) { + int numHeaders = RandomInts.randomIntBetween(random(), 1, 5); + Header[] headers = new Header[numHeaders]; + for (int i = 0; i < numHeaders; i++) { + headers[i] = new BasicHeader("header" + i, "value"); } + builder.setDefaultHeaders(headers); } - if (random().nextBoolean()) { builder.setMaxRetryTimeout(RandomInts.randomIntBetween(random(), 1, Integer.MAX_VALUE)); } @@ -105,33 +105,4 @@ public void testBuild() throws IOException { assertNotNull(restClient); } } - - public void testSetNodes() throws IOException { - try (RestClient restClient = RestClient.builder().setHosts(new HttpHost("localhost", 9200)).build()) { - try { - restClient.setHosts((HttpHost[]) null); - fail("setHosts should have failed"); - } catch (IllegalArgumentException e) { - assertEquals("hosts must not be null nor empty", e.getMessage()); - } - try { - restClient.setHosts(); - fail("setHosts should have failed"); - } catch (IllegalArgumentException e) { - assertEquals("hosts must not be null nor empty", e.getMessage()); - } - try { - restClient.setHosts((HttpHost) null); - fail("setHosts should have failed"); - } catch (NullPointerException e) { - assertEquals("host cannot be null", e.getMessage()); - } - try { - restClient.setHosts(new HttpHost("localhost", 9200), null, new HttpHost("localhost", 9201)); - fail("setHosts should have failed"); - } catch (NullPointerException 
e) { - assertEquals("host cannot be null", e.getMessage()); - } - } - } } From 4572b69011bc8f057717a2c8d8d2fc861cebef82 Mon Sep 17 00:00:00 2001 From: javanna Date: Fri, 3 Jun 2016 15:55:44 +0200 Subject: [PATCH 050/103] [TEST] add RestClient unit tests Unit tests rely on mockito to mock the internal HttpClient instance. No http request is performed, we only simulate interaction between RestClient and its internal HttpClient. --- client-sniffer/build.gradle | 1 + client/build.gradle | 1 + .../client/CloseableBasicHttpResponse.java | 42 ++ .../client/RestClientMultipleHostsTests.java | 279 ++++++++++++ .../client/RestClientSingleHostTests.java | 421 ++++++++++++++++++ .../client/RestClientTestUtil.java | 84 ++++ .../client/TrackingFailureListener.java | 52 +++ 7 files changed, 880 insertions(+) create mode 100644 client/src/test/java/org/elasticsearch/client/CloseableBasicHttpResponse.java create mode 100644 client/src/test/java/org/elasticsearch/client/RestClientMultipleHostsTests.java create mode 100644 client/src/test/java/org/elasticsearch/client/RestClientSingleHostTests.java create mode 100644 client/src/test/java/org/elasticsearch/client/RestClientTestUtil.java create mode 100644 client/src/test/java/org/elasticsearch/client/TrackingFailureListener.java diff --git a/client-sniffer/build.gradle b/client-sniffer/build.gradle index 4055155673144..6542453fa4f92 100644 --- a/client-sniffer/build.gradle +++ b/client-sniffer/build.gradle @@ -39,6 +39,7 @@ dependencies { testCompile "org.apache.lucene:lucene-test-framework:${versions.lucene}" testCompile "org.apache.lucene:lucene-core:${versions.lucene}" testCompile "org.apache.lucene:lucene-codecs:${versions.lucene}" + testCompile "org.elasticsearch:securemock:1.2" } //TODO compiling from 1.8 with target 1.7 and source 1.7 is best effort, not enough to ensure we are java 7 compatible diff --git a/client/build.gradle b/client/build.gradle index 43979e845971d..6d2b92f0b0c96 100644 --- a/client/build.gradle +++ 
b/client/build.gradle @@ -37,6 +37,7 @@ dependencies { testCompile "org.apache.lucene:lucene-test-framework:${versions.lucene}" testCompile "org.apache.lucene:lucene-core:${versions.lucene}" testCompile "org.apache.lucene:lucene-codecs:${versions.lucene}" + testCompile "org.elasticsearch:securemock:1.2" } //TODO compiling from 1.8 with target 1.7 and source 1.7 is best effort, not enough to ensure we are java 7 compatible diff --git a/client/src/test/java/org/elasticsearch/client/CloseableBasicHttpResponse.java b/client/src/test/java/org/elasticsearch/client/CloseableBasicHttpResponse.java new file mode 100644 index 0000000000000..904cbe7cfeb35 --- /dev/null +++ b/client/src/test/java/org/elasticsearch/client/CloseableBasicHttpResponse.java @@ -0,0 +1,42 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client; + +import org.apache.http.StatusLine; +import org.apache.http.client.methods.CloseableHttpResponse; +import org.apache.http.message.BasicHttpResponse; + +import java.io.IOException; + +/** + * Simple {@link CloseableHttpResponse} impl needed to easily create http responses that are closeable given that + * org.apache.http.impl.execchain.HttpResponseProxy is not public. 
+ */ +class CloseableBasicHttpResponse extends BasicHttpResponse implements CloseableHttpResponse { + + public CloseableBasicHttpResponse(StatusLine statusline) { + super(statusline); + } + + @Override + public void close() throws IOException { + //nothing to close + } +} diff --git a/client/src/test/java/org/elasticsearch/client/RestClientMultipleHostsTests.java b/client/src/test/java/org/elasticsearch/client/RestClientMultipleHostsTests.java new file mode 100644 index 0000000000000..d78a03509d085 --- /dev/null +++ b/client/src/test/java/org/elasticsearch/client/RestClientMultipleHostsTests.java @@ -0,0 +1,279 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.client; + +import com.carrotsearch.randomizedtesting.generators.RandomInts; +import org.apache.http.HttpHost; +import org.apache.http.HttpRequest; +import org.apache.http.ProtocolVersion; +import org.apache.http.StatusLine; +import org.apache.http.client.methods.CloseableHttpResponse; +import org.apache.http.client.methods.HttpUriRequest; +import org.apache.http.conn.ConnectTimeoutException; +import org.apache.http.impl.client.CloseableHttpClient; +import org.apache.http.message.BasicStatusLine; +import org.apache.lucene.util.LuceneTestCase; +import org.junit.Before; +import org.mockito.invocation.InvocationOnMock; +import org.mockito.stubbing.Answer; + +import java.io.IOException; +import java.net.SocketTimeoutException; +import java.util.Collections; +import java.util.HashSet; +import java.util.Set; + +import static org.elasticsearch.client.RestClientTestUtil.randomErrorNoRetryStatusCode; +import static org.elasticsearch.client.RestClientTestUtil.randomErrorRetryStatusCode; +import static org.elasticsearch.client.RestClientTestUtil.randomHttpMethod; +import static org.elasticsearch.client.RestClientTestUtil.randomOkStatusCode; +import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.CoreMatchers.instanceOf; +import static org.mockito.Matchers.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.when; + +/** + * Tests for {@link RestClient} behaviour against multiple hosts: fail-over, blacklisting etc. + * Relies on a mock http client to intercept requests and return desired responses based on request path. 
+ */ +public class RestClientMultipleHostsTests extends LuceneTestCase { + + private RestClient restClient; + private HttpHost[] httpHosts; + private TrackingFailureListener failureListener; + + @Before + public void createRestClient() throws IOException { + CloseableHttpClient httpClient = mock(CloseableHttpClient.class); + when(httpClient.execute(any(HttpHost.class), any(HttpRequest.class))).thenAnswer(new Answer() { + @Override + public CloseableHttpResponse answer(InvocationOnMock invocationOnMock) throws Throwable { + HttpHost httpHost = (HttpHost) invocationOnMock.getArguments()[0]; + HttpUriRequest request = (HttpUriRequest) invocationOnMock.getArguments()[1]; + //return the desired status code or exception depending on the path + if (request.getURI().getPath().equals("/soe")) { + throw new SocketTimeoutException(httpHost.toString()); + } else if (request.getURI().getPath().equals("/coe")) { + throw new ConnectTimeoutException(httpHost.toString()); + } else if (request.getURI().getPath().equals("/ioe")) { + throw new IOException(httpHost.toString()); + } + int statusCode = Integer.parseInt(request.getURI().getPath().substring(1)); + StatusLine statusLine = new BasicStatusLine(new ProtocolVersion("http", 1, 1), statusCode, ""); + return new CloseableBasicHttpResponse(statusLine); + } + }); + + int numHosts = RandomInts.randomIntBetween(random(), 2, 5); + httpHosts = new HttpHost[numHosts]; + for (int i = 0; i < numHosts; i++) { + httpHosts[i] = new HttpHost("localhost", 9200 + i); + } + restClient = RestClient.builder().setHosts(httpHosts).setHttpClient(httpClient).build(); + failureListener = new TrackingFailureListener(); + restClient.setFailureListener(failureListener); + } + + /** + * Test that + */ + public void testRoundRobinOkStatusCodes() throws Exception { + int numIters = RandomInts.randomIntBetween(random(), 1, 5); + for (int i = 0; i < numIters; i++) { + Set hostsSet = new HashSet<>(); + Collections.addAll(hostsSet, httpHosts); + for (int j = 0; j 
< httpHosts.length; j++) { + int statusCode = randomOkStatusCode(random()); + try (ElasticsearchResponse response = restClient.performRequest(randomHttpMethod(random()), "/" + statusCode, + Collections.emptyMap(), null)) { + assertThat(response.getStatusLine().getStatusCode(), equalTo(statusCode)); + assertTrue("host not found: " + response.getHost(), hostsSet.remove(response.getHost())); + } + } + assertEquals("every host should have been used but some weren't: " + hostsSet, 0, hostsSet.size()); + } + failureListener.assertNotCalled(); + } + + public void testRoundRobinNoRetryErrors() throws Exception { + int numIters = RandomInts.randomIntBetween(random(), 1, 5); + for (int i = 0; i < numIters; i++) { + Set hostsSet = new HashSet<>(); + Collections.addAll(hostsSet, httpHosts); + for (int j = 0; j < httpHosts.length; j++) { + String method = randomHttpMethod(random()); + int statusCode = randomErrorNoRetryStatusCode(random()); + try (ElasticsearchResponse response = restClient.performRequest(method, "/" + statusCode, + Collections.emptyMap(), null)) { + if (method.equals("HEAD") && statusCode == 404) { + //no exception gets thrown although we got a 404 + assertThat(response.getStatusLine().getStatusCode(), equalTo(404)); + assertThat(response.getStatusLine().getStatusCode(), equalTo(statusCode)); + assertTrue("host not found: " + response.getHost(), hostsSet.remove(response.getHost())); + } else { + fail("request should have failed"); + } + } catch(ElasticsearchResponseException e) { + if (method.equals("HEAD") && statusCode == 404) { + throw e; + } + ElasticsearchResponse response = e.getElasticsearchResponse(); + assertThat(response.getStatusLine().getStatusCode(), equalTo(statusCode)); + assertTrue("host not found: " + response.getHost(), hostsSet.remove(response.getHost())); + assertEquals(0, e.getSuppressed().length); + } + } + assertEquals("every host should have been used but some weren't: " + hostsSet, 0, hostsSet.size()); + } + 
failureListener.assertNotCalled(); + } + + public void testRoundRobinRetryErrors() throws Exception { + String retryEndpoint = randomErrorRetryEndpoint(); + try { + restClient.performRequest(randomHttpMethod(random()), retryEndpoint, Collections.emptyMap(), null); + fail("request should have failed"); + } catch(ElasticsearchResponseException e) { + Set hostsSet = new HashSet<>(); + Collections.addAll(hostsSet, httpHosts); + //first request causes all the hosts to be blacklisted, the returned exception holds one suppressed exception each + failureListener.assertCalled(httpHosts); + do { + ElasticsearchResponse response = e.getElasticsearchResponse(); + assertThat(response.getStatusLine().getStatusCode(), equalTo(Integer.parseInt(retryEndpoint.substring(1)))); + assertTrue("host [" + response.getHost() + "] not found, most likely used multiple times", + hostsSet.remove(response.getHost())); + if (e.getSuppressed().length > 0) { + assertEquals(1, e.getSuppressed().length); + Throwable suppressed = e.getSuppressed()[0]; + assertThat(suppressed, instanceOf(ElasticsearchResponseException.class)); + e = (ElasticsearchResponseException)suppressed; + } else { + e = null; + } + } while(e != null); + assertEquals("every host should have been used but some weren't: " + hostsSet, 0, hostsSet.size()); + } catch(IOException e) { + Set hostsSet = new HashSet<>(); + Collections.addAll(hostsSet, httpHosts); + //first request causes all the hosts to be blacklisted, the returned exception holds one suppressed exception each + failureListener.assertCalled(httpHosts); + do { + HttpHost httpHost = HttpHost.create(e.getMessage()); + assertTrue("host [" + httpHost + "] not found, most likely used multiple times", hostsSet.remove(httpHost)); + if (e.getSuppressed().length > 0) { + assertEquals(1, e.getSuppressed().length); + Throwable suppressed = e.getSuppressed()[0]; + assertThat(suppressed, instanceOf(IOException.class)); + e = (IOException) suppressed; + } else { + e = null; + } + } 
while(e != null); + assertEquals("every host should have been used but some weren't: " + hostsSet, 0, hostsSet.size()); + } + + int numIters = RandomInts.randomIntBetween(random(), 2, 5); + for (int i = 1; i <= numIters; i++) { + //check that one different host is resurrected at each new attempt + Set hostsSet = new HashSet<>(); + Collections.addAll(hostsSet, httpHosts); + for (int j = 0; j < httpHosts.length; j++) { + retryEndpoint = randomErrorRetryEndpoint(); + try { + restClient.performRequest(randomHttpMethod(random()), retryEndpoint, Collections.emptyMap(), null); + fail("request should have failed"); + } catch(ElasticsearchResponseException e) { + ElasticsearchResponse response = e.getElasticsearchResponse(); + assertThat(response.getStatusLine().getStatusCode(), equalTo(Integer.parseInt(retryEndpoint.substring(1)))); + assertTrue("host [" + response.getHost() + "] not found, most likely used multiple times", + hostsSet.remove(response.getHost())); + //after the first request, all hosts are blacklisted, a single one gets resurrected each time + failureListener.assertCalled(response.getHost()); + assertEquals(0, e.getSuppressed().length); + } catch(IOException e) { + HttpHost httpHost = HttpHost.create(e.getMessage()); + assertTrue("host [" + httpHost + "] not found, most likely used multiple times", hostsSet.remove(httpHost)); + //after the first request, all hosts are blacklisted, a single one gets resurrected each time + failureListener.assertCalled(httpHost); + assertEquals(0, e.getSuppressed().length); + } + } + assertEquals("every host should have been used but some weren't: " + hostsSet, 0, hostsSet.size()); + if (random().nextBoolean()) { + //mark one host back alive through a successful request and check that all requests after that are sent to it + HttpHost selectedHost = null; + int iters = RandomInts.randomIntBetween(random(), 2, 10); + for (int y = 0; y < iters; y++) { + int statusCode = randomErrorNoRetryStatusCode(random()); + 
ElasticsearchResponse response; + try (ElasticsearchResponse esResponse = restClient.performRequest(randomHttpMethod(random()), "/" + statusCode, + Collections.emptyMap(), null)) { + response = esResponse; + } + catch(ElasticsearchResponseException e) { + response = e.getElasticsearchResponse(); + } + assertThat(response.getStatusLine().getStatusCode(), equalTo(statusCode)); + if (selectedHost == null) { + selectedHost = response.getHost(); + } else { + assertThat(response.getHost(), equalTo(selectedHost)); + } + } + failureListener.assertNotCalled(); + //let the selected host catch up on number of failures, it gets selected a consecutive number of times as it's the one + //selected to be retried earlier (due to lower number of failures) till all the hosts have the same number of failures + for (int y = 0; y < i + 1; y++) { + retryEndpoint = randomErrorRetryEndpoint(); + try { + restClient.performRequest(randomHttpMethod(random()), retryEndpoint, + Collections.emptyMap(), null); + fail("request should have failed"); + } catch(ElasticsearchResponseException e) { + ElasticsearchResponse response = e.getElasticsearchResponse(); + assertThat(response.getStatusLine().getStatusCode(), equalTo(Integer.parseInt(retryEndpoint.substring(1)))); + assertThat(response.getHost(), equalTo(selectedHost)); + failureListener.assertCalled(selectedHost); + } catch(IOException e) { + HttpHost httpHost = HttpHost.create(e.getMessage()); + assertThat(httpHost, equalTo(selectedHost)); + failureListener.assertCalled(selectedHost); + } + } + } + } + } + + private static String randomErrorRetryEndpoint() { + switch(RandomInts.randomIntBetween(random(), 0, 3)) { + case 0: + return "/" + randomErrorRetryStatusCode(random()); + case 1: + return "/coe"; + case 2: + return "/soe"; + case 3: + return "/ioe"; + } + throw new UnsupportedOperationException(); + } +} diff --git a/client/src/test/java/org/elasticsearch/client/RestClientSingleHostTests.java 
b/client/src/test/java/org/elasticsearch/client/RestClientSingleHostTests.java new file mode 100644 index 0000000000000..c3c08bee62886 --- /dev/null +++ b/client/src/test/java/org/elasticsearch/client/RestClientSingleHostTests.java @@ -0,0 +1,421 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.client; + +import com.carrotsearch.randomizedtesting.generators.RandomInts; +import com.carrotsearch.randomizedtesting.generators.RandomStrings; +import org.apache.http.Header; +import org.apache.http.HttpEntity; +import org.apache.http.HttpEntityEnclosingRequest; +import org.apache.http.HttpHost; +import org.apache.http.HttpRequest; +import org.apache.http.ProtocolVersion; +import org.apache.http.StatusLine; +import org.apache.http.client.methods.CloseableHttpResponse; +import org.apache.http.client.methods.HttpHead; +import org.apache.http.client.methods.HttpOptions; +import org.apache.http.client.methods.HttpPatch; +import org.apache.http.client.methods.HttpPost; +import org.apache.http.client.methods.HttpPut; +import org.apache.http.client.methods.HttpTrace; +import org.apache.http.client.methods.HttpUriRequest; +import org.apache.http.client.utils.URIBuilder; +import org.apache.http.conn.ConnectTimeoutException; +import org.apache.http.entity.StringEntity; +import org.apache.http.impl.client.CloseableHttpClient; +import org.apache.http.message.BasicHeader; +import org.apache.http.message.BasicStatusLine; +import org.apache.http.util.EntityUtils; +import org.apache.lucene.util.LuceneTestCase; +import org.junit.Before; +import org.mockito.ArgumentCaptor; +import org.mockito.invocation.InvocationOnMock; +import org.mockito.stubbing.Answer; + +import java.io.IOException; +import java.net.SocketTimeoutException; +import java.net.URI; +import java.net.URISyntaxException; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.Map; + +import static org.elasticsearch.client.RestClientTestUtil.getAllErrorStatusCodes; +import static org.elasticsearch.client.RestClientTestUtil.getHttpMethods; +import static org.elasticsearch.client.RestClientTestUtil.getOkStatusCodes; +import static org.elasticsearch.client.RestClientTestUtil.randomHttpMethod; +import static 
org.elasticsearch.client.RestClientTestUtil.randomStatusCode; +import static org.hamcrest.CoreMatchers.equalTo; +import static org.hamcrest.CoreMatchers.instanceOf; +import static org.mockito.Matchers.any; +import static org.mockito.Mockito.mock; +import static org.mockito.Mockito.times; +import static org.mockito.Mockito.verify; +import static org.mockito.Mockito.when; + +/** + * Tests for basic functionality of {@link RestClient} against one single host: tests http requests being sent, headers, + * body, different status codes and corresponding responses/exceptions. + * Relies on a mock http client to intercept requests and return desired responses based on request path. + */ +public class RestClientSingleHostTests extends LuceneTestCase { + + private RestClient restClient; + private Header[] defaultHeaders; + private HttpHost httpHost; + private CloseableHttpClient httpClient; + private TrackingFailureListener failureListener; + + @Before + public void createRestClient() throws IOException { + httpClient = mock(CloseableHttpClient.class); + when(httpClient.execute(any(HttpHost.class), any(HttpRequest.class))).thenAnswer(new Answer() { + @Override + public CloseableHttpResponse answer(InvocationOnMock invocationOnMock) throws Throwable { + HttpUriRequest request = (HttpUriRequest) invocationOnMock.getArguments()[1]; + //return the desired status code or exception depending on the path + if (request.getURI().getPath().equals("/soe")) { + throw new SocketTimeoutException(); + } else if (request.getURI().getPath().equals("/coe")) { + throw new ConnectTimeoutException(); + } + int statusCode = Integer.parseInt(request.getURI().getPath().substring(1)); + StatusLine statusLine = new BasicStatusLine(new ProtocolVersion("http", 1, 1), statusCode, ""); + + CloseableHttpResponse httpResponse = new CloseableBasicHttpResponse(statusLine); + //return the same body that was sent + if (request instanceof HttpEntityEnclosingRequest) { + HttpEntity entity = 
((HttpEntityEnclosingRequest) request).getEntity(); + if (entity != null) { + assertTrue("the entity is not repeatable, cannot set it to the response directly", entity.isRepeatable()); + httpResponse.setEntity(entity); + } + } + //return the same headers that were sent + httpResponse.setHeaders(request.getAllHeaders()); + return httpResponse; + } + }); + int numHeaders = RandomInts.randomIntBetween(random(), 0, 3); + defaultHeaders = new Header[numHeaders]; + for (int i = 0; i < numHeaders; i++) { + String headerName = "Header-default" + (random().nextBoolean() ? i : ""); + String headerValue = RandomStrings.randomAsciiOfLengthBetween(random(), 3, 10); + defaultHeaders[i] = new BasicHeader(headerName, headerValue); + } + httpHost = new HttpHost("localhost", 9200); + restClient = RestClient.builder().setHosts(httpHost).setHttpClient(httpClient).setDefaultHeaders(defaultHeaders).build(); + failureListener = new TrackingFailureListener(); + restClient.setFailureListener(failureListener); + } + + /** + * Verifies the content of the {@link HttpRequest} that's internally created and passed through to the http client + */ + public void testInternalHttpRequest() throws Exception { + ArgumentCaptor requestArgumentCaptor = ArgumentCaptor.forClass(HttpUriRequest.class); + int times = 0; + for (String httpMethod : getHttpMethods()) { + HttpUriRequest expectedRequest = performRandomRequest(httpMethod); + verify(httpClient, times(++times)).execute(any(HttpHost.class), requestArgumentCaptor.capture()); + HttpUriRequest actualRequest = requestArgumentCaptor.getValue(); + assertEquals(expectedRequest.getURI(), actualRequest.getURI()); + assertEquals(expectedRequest.getClass(), actualRequest.getClass()); + assertArrayEquals(expectedRequest.getAllHeaders(), actualRequest.getAllHeaders()); + if (expectedRequest instanceof HttpEntityEnclosingRequest) { + HttpEntity expectedEntity = ((HttpEntityEnclosingRequest) expectedRequest).getEntity(); + if (expectedEntity != null) { + HttpEntity 
actualEntity = ((HttpEntityEnclosingRequest) actualRequest).getEntity(); + assertEquals(EntityUtils.toString(expectedEntity), EntityUtils.toString(actualEntity)); + } + } + } + } + + public void testSetNodes() throws IOException { + try { + restClient.setHosts((HttpHost[]) null); + fail("setHosts should have failed"); + } catch (IllegalArgumentException e) { + assertEquals("hosts must not be null nor empty", e.getMessage()); + } + try { + restClient.setHosts(); + fail("setHosts should have failed"); + } catch (IllegalArgumentException e) { + assertEquals("hosts must not be null nor empty", e.getMessage()); + } + try { + restClient.setHosts((HttpHost) null); + fail("setHosts should have failed"); + } catch (NullPointerException e) { + assertEquals("host cannot be null", e.getMessage()); + } + try { + restClient.setHosts(new HttpHost("localhost", 9200), null, new HttpHost("localhost", 9201)); + fail("setHosts should have failed"); + } catch (NullPointerException e) { + assertEquals("host cannot be null", e.getMessage()); + } + } + + /** + * End to end test for ok status codes + */ + public void testOkStatusCodes() throws Exception { + for (String method : getHttpMethods()) { + for (int okStatusCode : getOkStatusCodes()) { + ElasticsearchResponse response = restClient.performRequest(method, "/" + okStatusCode, + Collections.emptyMap(), null); + assertThat(response.getStatusLine().getStatusCode(), equalTo(okStatusCode)); + } + } + failureListener.assertNotCalled(); + } + + /** + * End to end test for error status codes: they should cause an exception to be thrown, apart from 404 with HEAD requests + */ + public void testErrorStatusCodes() throws Exception { + for (String method : getHttpMethods()) { + //error status codes should cause an exception to be thrown + for (int errorStatusCode : getAllErrorStatusCodes()) { + try (ElasticsearchResponse response = restClient.performRequest(method, "/" + errorStatusCode, + Collections.emptyMap(), null)) { + if 
(method.equals("HEAD") && errorStatusCode == 404) { + //no exception gets thrown although we got a 404 + assertThat(response.getStatusLine().getStatusCode(), equalTo(errorStatusCode)); + } else { + fail("request should have failed"); + } + } catch(ElasticsearchResponseException e) { + if (method.equals("HEAD") && errorStatusCode == 404) { + throw e; + } + assertThat(e.getElasticsearchResponse().getStatusLine().getStatusCode(), equalTo(errorStatusCode)); + } + if (errorStatusCode <= 500) { + failureListener.assertNotCalled(); + } else { + failureListener.assertCalled(httpHost); + } + } + } + } + + public void testIOExceptions() throws IOException { + for (String method : getHttpMethods()) { + //IOExceptions should be let bubble up + try { + restClient.performRequest(method, "/coe", Collections.emptyMap(), null); + fail("request should have failed"); + } catch(IOException e) { + assertThat(e, instanceOf(ConnectTimeoutException.class)); + } + failureListener.assertCalled(httpHost); + try { + restClient.performRequest(method, "/soe", Collections.emptyMap(), null); + fail("request should have failed"); + } catch(IOException e) { + assertThat(e, instanceOf(SocketTimeoutException.class)); + } + failureListener.assertCalled(httpHost); + } + } + + /** + * End to end test for request and response body. Exercises the mock http client ability to send back + * whatever body it has received. 
+ */ + public void testBody() throws Exception { + String body = "{ \"field\": \"value\" }"; + StringEntity entity = new StringEntity(body); + for (String method : Arrays.asList("DELETE", "GET", "PATCH", "POST", "PUT")) { + for (int okStatusCode : getOkStatusCodes()) { + try (ElasticsearchResponse response = restClient.performRequest(method, "/" + okStatusCode, + Collections.emptyMap(), entity)) { + assertThat(response.getStatusLine().getStatusCode(), equalTo(okStatusCode)); + assertThat(EntityUtils.toString(response.getEntity()), equalTo(body)); + } + } + for (int errorStatusCode : getAllErrorStatusCodes()) { + try { + restClient.performRequest(method, "/" + errorStatusCode, Collections.emptyMap(), entity); + fail("request should have failed"); + } catch(ElasticsearchResponseException e) { + ElasticsearchResponse response = e.getElasticsearchResponse(); + assertThat(response.getStatusLine().getStatusCode(), equalTo(errorStatusCode)); + assertThat(EntityUtils.toString(response.getEntity()), equalTo(body)); + } + } + } + for (String method : Arrays.asList("HEAD", "OPTIONS", "TRACE")) { + try { + restClient.performRequest(method, "/" + randomStatusCode(random()), + Collections.emptyMap(), entity); + fail("request should have failed"); + } catch(UnsupportedOperationException e) { + assertThat(e.getMessage(), equalTo(method + " with body is not supported")); + } + } + } + + public void testNullHeaders() throws Exception { + String method = randomHttpMethod(random()); + int statusCode = randomStatusCode(random()); + try { + restClient.performRequest(method, "/" + statusCode, Collections.emptyMap(), null, (Header[])null); + fail("request should have failed"); + } catch(NullPointerException e) { + assertEquals("request headers must not be null", e.getMessage()); + } + try { + restClient.performRequest(method, "/" + statusCode, Collections.emptyMap(), null, (Header)null); + fail("request should have failed"); + } catch(NullPointerException e) { + assertEquals("request 
header must not be null", e.getMessage()); + } + } + + public void testNullParams() throws Exception { + String method = randomHttpMethod(random()); + int statusCode = randomStatusCode(random()); + try { + restClient.performRequest(method, "/" + statusCode, null, null); + fail("request should have failed"); + } catch(NullPointerException e) { + assertEquals("params must not be null", e.getMessage()); + } + } + + /** + * End to end test for request and response headers. Exercises the mock http client ability to send back + * whatever headers it has received. + */ + public void testHeaders() throws Exception { + for (String method : getHttpMethods()) { + Map expectedHeaders = new HashMap<>(); + for (Header defaultHeader : defaultHeaders) { + expectedHeaders.put(defaultHeader.getName(), defaultHeader.getValue()); + } + int numHeaders = RandomInts.randomIntBetween(random(), 1, 5); + Header[] headers = new Header[numHeaders]; + for (int i = 0; i < numHeaders; i++) { + String headerName = "Header" + (random().nextBoolean() ? 
i : ""); + String headerValue = RandomStrings.randomAsciiOfLengthBetween(random(), 3, 10); + headers[i] = new BasicHeader(headerName, headerValue); + expectedHeaders.put(headerName, headerValue); + } + + int statusCode = randomStatusCode(random()); + ElasticsearchResponse esResponse; + try (ElasticsearchResponse response = restClient.performRequest(method, "/" + statusCode, + Collections.emptyMap(), null, headers)) { + esResponse = response; + } catch(ElasticsearchResponseException e) { + esResponse = e.getElasticsearchResponse(); + } + assertThat(esResponse.getStatusLine().getStatusCode(), equalTo(statusCode)); + for (Header responseHeader : esResponse.getHeaders()) { + String headerValue = expectedHeaders.remove(responseHeader.getName()); + assertNotNull("found response header [" + responseHeader.getName() + "] that wasn't originally sent", headerValue); + } + assertEquals("some headers that were sent weren't returned " + expectedHeaders, 0, expectedHeaders.size()); + } + } + + private HttpUriRequest performRandomRequest(String method) throws IOException, URISyntaxException { + String uriAsString = "/" + randomStatusCode(random()); + URIBuilder uriBuilder = new URIBuilder(uriAsString); + Map params = Collections.emptyMap(); + if (random().nextBoolean()) { + int numParams = RandomInts.randomIntBetween(random(), 1, 3); + params = new HashMap<>(numParams); + for (int i = 0; i < numParams; i++) { + String paramKey = "param-" + i; + String paramValue = RandomStrings.randomAsciiOfLengthBetween(random(), 3, 10); + params.put(paramKey, paramValue); + uriBuilder.addParameter(paramKey, paramValue); + } + } + URI uri = uriBuilder.build(); + + HttpUriRequest request; + switch(method) { + case "DELETE": + request = new HttpDeleteWithEntity(uri); + break; + case "GET": + request = new HttpGetWithEntity(uri); + break; + case "HEAD": + request = new HttpHead(uri); + break; + case "OPTIONS": + request = new HttpOptions(uri); + break; + case "PATCH": + request = new 
HttpPatch(uri); + break; + case "POST": + request = new HttpPost(uri); + break; + case "PUT": + request = new HttpPut(uri); + break; + case "TRACE": + request = new HttpTrace(uri); + break; + default: + throw new UnsupportedOperationException("method not supported: " + method); + } + + HttpEntity entity = null; + if (request instanceof HttpEntityEnclosingRequest && random().nextBoolean()) { + entity = new StringEntity(RandomStrings.randomAsciiOfLengthBetween(random(), 10, 100)); + ((HttpEntityEnclosingRequest) request).setEntity(entity); + } + + Header[] headers = new Header[0]; + for (Header defaultHeader : defaultHeaders) { + //default headers are expected but not sent for each request + request.setHeader(defaultHeader); + } + if (random().nextBoolean()) { + int numHeaders = RandomInts.randomIntBetween(random(), 1, 5); + headers = new Header[numHeaders]; + for (int i = 0; i < numHeaders; i++) { + String headerName = "Header" + (random().nextBoolean() ? i : ""); + String headerValue = RandomStrings.randomAsciiOfLengthBetween(random(), 3, 10); + BasicHeader basicHeader = new BasicHeader(headerName, headerValue); + headers[i] = basicHeader; + request.setHeader(basicHeader); + } + } + + try { + restClient.performRequest(method, uriAsString, params, entity, headers); + } catch(ElasticsearchResponseException e) { + //all good + } + return request; + } +} diff --git a/client/src/test/java/org/elasticsearch/client/RestClientTestUtil.java b/client/src/test/java/org/elasticsearch/client/RestClientTestUtil.java new file mode 100644 index 0000000000000..4d4aa00f4929f --- /dev/null +++ b/client/src/test/java/org/elasticsearch/client/RestClientTestUtil.java @@ -0,0 +1,84 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. 
Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client; + +import com.carrotsearch.randomizedtesting.generators.RandomPicks; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.Random; + +final class RestClientTestUtil { + + private static final String[] HTTP_METHODS = new String[]{"DELETE", "HEAD", "GET", "OPTIONS", "PATCH", "POST", "PUT", "TRACE"}; + private static final List ALL_STATUS_CODES; + private static final List OK_STATUS_CODES = Arrays.asList(200, 201); + private static final List ALL_ERROR_STATUS_CODES; + private static List ERROR_NO_RETRY_STATUS_CODES = Arrays.asList(400, 401, 403, 404, 405, 500); + private static List ERROR_RETRY_STATUS_CODES = Arrays.asList(502, 503, 504); + + static { + ALL_ERROR_STATUS_CODES = new ArrayList<>(ERROR_RETRY_STATUS_CODES); + ALL_ERROR_STATUS_CODES.addAll(ERROR_NO_RETRY_STATUS_CODES); + ALL_STATUS_CODES = new ArrayList<>(ALL_ERROR_STATUS_CODES); + ALL_STATUS_CODES.addAll(OK_STATUS_CODES); + } + + private RestClientTestUtil() { + + } + + static String[] getHttpMethods() { + return HTTP_METHODS; + } + + static String randomHttpMethod(Random random) { + return RandomPicks.randomFrom(random, HTTP_METHODS); + } + + static int randomStatusCode(Random random) { + return RandomPicks.randomFrom(random, ALL_ERROR_STATUS_CODES); + } + + static int randomOkStatusCode(Random random) { + return 
RandomPicks.randomFrom(random, OK_STATUS_CODES); + } + + static int randomErrorNoRetryStatusCode(Random random) { + return RandomPicks.randomFrom(random, ERROR_NO_RETRY_STATUS_CODES); + } + + static int randomErrorRetryStatusCode(Random random) { + return RandomPicks.randomFrom(random, ERROR_RETRY_STATUS_CODES); + } + + static List getOkStatusCodes() { + return OK_STATUS_CODES; + } + + static List getAllErrorStatusCodes() { + return ALL_ERROR_STATUS_CODES; + } + + static List getAllStatusCodes() { + return ALL_STATUS_CODES; + } +} diff --git a/client/src/test/java/org/elasticsearch/client/TrackingFailureListener.java b/client/src/test/java/org/elasticsearch/client/TrackingFailureListener.java new file mode 100644 index 0000000000000..358428239237e --- /dev/null +++ b/client/src/test/java/org/elasticsearch/client/TrackingFailureListener.java @@ -0,0 +1,52 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.client; + +import org.apache.http.HttpHost; + +import java.io.IOException; +import java.util.HashSet; +import java.util.Set; + +import static org.hamcrest.Matchers.containsInAnyOrder; +import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertThat; + +/** + * {@link org.elasticsearch.client.RestClient.FailureListener} impl that allows to track when it gets called + */ +class TrackingFailureListener extends RestClient.FailureListener { + private Set hosts = new HashSet<>(); + + @Override + public void onFailure(HttpHost host) throws IOException { + hosts.add(host); + } + + void assertCalled(HttpHost... hosts) { + assertEquals(hosts.length, this.hosts.size()); + assertThat(this.hosts, containsInAnyOrder(hosts)); + this.hosts.clear(); + } + + void assertNotCalled() { + assertEquals(0, hosts.size()); + } +} \ No newline at end of file From 13a27a34f8bed4fe7243d0c8c2ead5169009a2a0 Mon Sep 17 00:00:00 2001 From: javanna Date: Fri, 3 Jun 2016 15:56:43 +0200 Subject: [PATCH 051/103] [TEST] add RestClient integration test Relies on real HttpServer to test the actual interaction between RestClient and HttpClient, and how requests get sent in real life. --- .../client/RestClientIntegTests.java | 204 ++++++++++++++++++ 1 file changed, 204 insertions(+) create mode 100644 client/src/test/java/org/elasticsearch/client/RestClientIntegTests.java diff --git a/client/src/test/java/org/elasticsearch/client/RestClientIntegTests.java b/client/src/test/java/org/elasticsearch/client/RestClientIntegTests.java new file mode 100644 index 0000000000000..420ff6ed7f7d5 --- /dev/null +++ b/client/src/test/java/org/elasticsearch/client/RestClientIntegTests.java @@ -0,0 +1,204 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. 
Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client; + +import com.carrotsearch.randomizedtesting.generators.RandomInts; +import com.carrotsearch.randomizedtesting.generators.RandomStrings; +import com.sun.net.httpserver.Headers; +import com.sun.net.httpserver.HttpExchange; +import com.sun.net.httpserver.HttpHandler; +import com.sun.net.httpserver.HttpServer; +import org.apache.http.Consts; +import org.apache.http.Header; +import org.apache.http.HttpHost; +import org.apache.http.entity.StringEntity; +import org.apache.http.message.BasicHeader; +import org.apache.http.util.EntityUtils; +import org.apache.lucene.util.LuceneTestCase; +import org.junit.AfterClass; +import org.junit.BeforeClass; + +import java.io.IOException; +import java.io.InputStreamReader; +import java.io.OutputStream; +import java.net.InetSocketAddress; +import java.util.Arrays; +import java.util.Collections; +import java.util.HashMap; +import java.util.HashSet; +import java.util.List; +import java.util.Map; +import java.util.Set; + +import static org.elasticsearch.client.RestClientTestUtil.getAllStatusCodes; +import static org.elasticsearch.client.RestClientTestUtil.getHttpMethods; +import static org.elasticsearch.client.RestClientTestUtil.randomStatusCode; +import static org.hamcrest.CoreMatchers.equalTo; + +/** + * Integration test to check interaction between {@link RestClient} and {@link 
org.apache.http.client.HttpClient}. + * Works against a real http server, one single host. + */ +public class RestClientIntegTests extends LuceneTestCase { + + private static HttpServer httpServer; + private static RestClient restClient; + private static Header[] defaultHeaders; + + @BeforeClass + public static void startHttpServer() throws Exception { + httpServer = HttpServer.create(new InetSocketAddress(0), 0); + httpServer.start(); + //returns a different status code depending on the path + for (int statusCode : getAllStatusCodes()) { + createStatusCodeContext(httpServer, statusCode); + } + int numHeaders = RandomInts.randomIntBetween(random(), 0, 3); + defaultHeaders = new Header[numHeaders]; + for (int i = 0; i < numHeaders; i++) { + String headerName = "Header-default" + (random().nextBoolean() ? i : ""); + String headerValue = RandomStrings.randomAsciiOfLengthBetween(random(), 3, 10); + defaultHeaders[i] = new BasicHeader(headerName, headerValue); + } + restClient = RestClient.builder().setDefaultHeaders(defaultHeaders) + .setHosts(new HttpHost(httpServer.getAddress().getHostName(), httpServer.getAddress().getPort())).build(); + } + + private static void createStatusCodeContext(HttpServer httpServer, final int statusCode) { + httpServer.createContext("/" + statusCode, new HttpHandler() { + @Override + public void handle(HttpExchange httpExchange) throws IOException { + StringBuilder body = new StringBuilder(); + try (InputStreamReader reader = new InputStreamReader(httpExchange.getRequestBody(), Consts.UTF_8)) { + char[] buffer = new char[256]; + int read; + while ((read = reader.read(buffer)) != -1) { + body.append(buffer, 0, read); + } + } + Headers requestHeaders = httpExchange.getRequestHeaders(); + Headers responseHeaders = httpExchange.getResponseHeaders(); + for (Map.Entry> header : requestHeaders.entrySet()) { + responseHeaders.put(header.getKey(), header.getValue()); + } + httpExchange.getRequestBody().close(); + 
httpExchange.sendResponseHeaders(statusCode, body.length() == 0 ? -1 : body.length()); + if (body.length() > 0) { + try (OutputStream out = httpExchange.getResponseBody()) { + out.write(body.toString().getBytes(Consts.UTF_8)); + } + } + httpExchange.close(); + } + }); + } + + @AfterClass + public static void stopHttpServers() throws IOException { + restClient.close(); + restClient = null; + httpServer.stop(0); + httpServer = null; + } + + /** + * End to end test for headers. We test it explicitly against a real http client as there are different ways + * to set/add headers to the {@link org.apache.http.client.HttpClient}. + * Exercises the test http server ability to send back whatever headers it received. + */ + public void testHeaders() throws Exception { + for (String method : getHttpMethods()) { + Set standardHeaders = new HashSet<>( + Arrays.asList("Accept-encoding", "Connection", "Host", "User-agent", "Date")); + if (method.equals("HEAD") == false) { + standardHeaders.add("Content-length"); + } + int numHeaders = RandomInts.randomIntBetween(random(), 1, 5); + Map expectedHeaders = new HashMap<>(); + for (Header defaultHeader : defaultHeaders) { + expectedHeaders.put(defaultHeader.getName(), defaultHeader.getValue()); + } + Header[] headers = new Header[numHeaders]; + for (int i = 0; i < numHeaders; i++) { + String headerName = "Header" + (random().nextBoolean() ? 
i : ""); + String headerValue = RandomStrings.randomAsciiOfLengthBetween(random(), 3, 10); + headers[i] = new BasicHeader(headerName, headerValue); + expectedHeaders.put(headerName, headerValue); + } + + int statusCode = randomStatusCode(random()); + ElasticsearchResponse esResponse; + try (ElasticsearchResponse response = restClient.performRequest(method, "/" + statusCode, + Collections.emptyMap(), null, headers)) { + esResponse = response; + } catch(ElasticsearchResponseException e) { + esResponse = e.getElasticsearchResponse(); + } + assertThat(esResponse.getStatusLine().getStatusCode(), equalTo(statusCode)); + for (Header responseHeader : esResponse.getHeaders()) { + if (responseHeader.getName().startsWith("Header")) { + String headerValue = expectedHeaders.remove(responseHeader.getName()); + assertNotNull("found response header [" + responseHeader.getName() + "] that wasn't originally sent", headerValue); + } else { + assertTrue("unknown header was returned " + responseHeader.getName(), + standardHeaders.remove(responseHeader.getName())); + } + } + assertEquals("some headers that were sent weren't returned: " + expectedHeaders, 0, expectedHeaders.size()); + assertEquals("some expected standard headers weren't returned: " + standardHeaders, 0, standardHeaders.size()); + } + } + + /** + * End to end test for delete with body. We test it explicitly as it is not supported + * out of the box by {@link org.apache.http.client.HttpClient}. + * Exercises the test http server ability to send back whatever body it received. + */ + public void testDeleteWithBody() throws Exception { + testBody("DELETE"); + } + + /** + * End to end test for get with body. We test it explicitly as it is not supported + * out of the box by {@link org.apache.http.client.HttpClient}. + * Exercises the test http server ability to send back whatever body it received. 
+ */ + public void testGetWithBody() throws Exception { + testBody("GET"); + } + + private void testBody(String method) throws Exception { + String requestBody = "{ \"field\": \"value\" }"; + StringEntity entity = new StringEntity(requestBody); + ElasticsearchResponse esResponse; + String responseBody; + int statusCode = randomStatusCode(random()); + try (ElasticsearchResponse response = restClient.performRequest(method, "/" + statusCode, + Collections.emptyMap(), entity)) { + responseBody = EntityUtils.toString(response.getEntity()); + esResponse = response; + } catch(ElasticsearchResponseException e) { + responseBody = e.getResponseBody(); + esResponse = e.getElasticsearchResponse(); + } + assertEquals(statusCode, esResponse.getStatusLine().getStatusCode()); + assertEquals(requestBody, responseBody); + } +} From 4fa824f891f5f19597c22557627fcc4bed2de98b Mon Sep 17 00:00:00 2001 From: javanna Date: Fri, 3 Jun 2016 16:42:07 +0200 Subject: [PATCH 052/103] [TEST] add a lot of forgotten try with resources to wrap ElasticsearchResponses --- .../http/netty/NettyHttpCompressionIT.java | 34 ++++++++------- .../plugins/ResponseHeaderPluginIT.java | 9 ++-- .../org/elasticsearch/rest/CorsNotSetIT.java | 20 +++++---- .../org/elasticsearch/rest/CorsRegexIT.java | 42 ++++++++++--------- .../rest/action/main/RestMainActionIT.java | 16 +++---- .../ContextAndHeaderTransportIT.java | 19 +++++---- 6 files changed, 78 insertions(+), 62 deletions(-) diff --git a/core/src/test/java/org/elasticsearch/http/netty/NettyHttpCompressionIT.java b/core/src/test/java/org/elasticsearch/http/netty/NettyHttpCompressionIT.java index 9e995f0a010d7..32a331d61f7b9 100644 --- a/core/src/test/java/org/elasticsearch/http/netty/NettyHttpCompressionIT.java +++ b/core/src/test/java/org/elasticsearch/http/netty/NettyHttpCompressionIT.java @@ -62,11 +62,12 @@ public void testCompressesResponseIfRequested() throws Exception { // we need to intercept early, otherwise internal logic in HttpClient will just remove 
the header and we cannot verify it ContentEncodingHeaderExtractor headerExtractor = new ContentEncodingHeaderExtractor(); try (RestClient client = restClient(HttpClients.custom().addInterceptorFirst(headerExtractor).build())) { - ElasticsearchResponse response = client.performRequest("GET", "/", Collections.emptyMap(), null, - new BasicHeader(HttpHeaders.ACCEPT_ENCODING, GZIP_ENCODING)); - assertEquals(200, response.getStatusLine().getStatusCode()); - assertTrue(headerExtractor.hasContentEncodingHeader()); - assertEquals(GZIP_ENCODING, headerExtractor.getContentEncodingHeader().getValue()); + try (ElasticsearchResponse response = client.performRequest("GET", "/", Collections.emptyMap(), null, + new BasicHeader(HttpHeaders.ACCEPT_ENCODING, GZIP_ENCODING))) { + assertEquals(200, response.getStatusLine().getStatusCode()); + assertTrue(headerExtractor.hasContentEncodingHeader()); + assertEquals(GZIP_ENCODING, headerExtractor.getContentEncodingHeader().getValue()); + } } } @@ -75,9 +76,10 @@ public void testUncompressedResponseByDefault() throws Exception { ContentEncodingHeaderExtractor headerExtractor = new ContentEncodingHeaderExtractor(); CloseableHttpClient httpClient = HttpClients.custom().disableContentCompression().addInterceptorFirst(headerExtractor).build(); try (RestClient client = restClient(httpClient)) { - ElasticsearchResponse response = client.performRequest("GET", "/", Collections.emptyMap(), null); - assertEquals(200, response.getStatusLine().getStatusCode()); - assertFalse(headerExtractor.hasContentEncodingHeader()); + try (ElasticsearchResponse response = client.performRequest("GET", "/", Collections.emptyMap(), null)) { + assertEquals(200, response.getStatusLine().getStatusCode()); + assertFalse(headerExtractor.hasContentEncodingHeader()); + } } } @@ -87,9 +89,11 @@ public void testCanInterpretUncompressedRequest() throws Exception { // this disable content compression in both directions (request and response) CloseableHttpClient httpClient = 
HttpClients.custom().disableContentCompression().addInterceptorFirst(headerExtractor).build(); try (RestClient client = restClient(httpClient)) { - ElasticsearchResponse response = client.performRequest("POST", "/company/employees/1", Collections.emptyMap(), SAMPLE_DOCUMENT); - assertEquals(201, response.getStatusLine().getStatusCode()); - assertFalse(headerExtractor.hasContentEncodingHeader()); + try (ElasticsearchResponse response = client.performRequest("POST", "/company/employees/1", + Collections.emptyMap(), SAMPLE_DOCUMENT)) { + assertEquals(201, response.getStatusLine().getStatusCode()); + assertFalse(headerExtractor.hasContentEncodingHeader()); + } } } @@ -98,9 +102,11 @@ public void testCanInterpretCompressedRequest() throws Exception { ContentEncodingHeaderExtractor headerExtractor = new ContentEncodingHeaderExtractor(); // we don't call #disableContentCompression() hence the client will send the content compressed try (RestClient client = restClient(HttpClients.custom().addInterceptorFirst(headerExtractor).build())) { - ElasticsearchResponse response = client.performRequest("POST", "/company/employees/2", Collections.emptyMap(), SAMPLE_DOCUMENT); - assertEquals(201, response.getStatusLine().getStatusCode()); - assertEquals(GZIP_ENCODING, headerExtractor.getContentEncodingHeader().getValue()); + try (ElasticsearchResponse response = client.performRequest("POST", "/company/employees/2", + Collections.emptyMap(), SAMPLE_DOCUMENT)) { + assertEquals(201, response.getStatusLine().getStatusCode()); + assertEquals(GZIP_ENCODING, headerExtractor.getContentEncodingHeader().getValue()); + } } } diff --git a/core/src/test/java/org/elasticsearch/plugins/ResponseHeaderPluginIT.java b/core/src/test/java/org/elasticsearch/plugins/ResponseHeaderPluginIT.java index 4ae9e3912b28d..4b905d7b1a80c 100644 --- a/core/src/test/java/org/elasticsearch/plugins/ResponseHeaderPluginIT.java +++ b/core/src/test/java/org/elasticsearch/plugins/ResponseHeaderPluginIT.java @@ -66,10 +66,11 
@@ public void testThatSettingHeadersWorks() throws Exception { assertThat(response.getFirstHeader("Secret"), equalTo("required")); } - ElasticsearchResponse authResponse = client.performRequest("GET", "/_protected", Collections.emptyMap(), null, - new BasicHeader("Secret", "password")); - assertThat(authResponse, hasStatus(OK)); - assertThat(authResponse.getFirstHeader("Secret"), equalTo("granted")); + try (ElasticsearchResponse authResponse = client.performRequest("GET", "/_protected", Collections.emptyMap(), null, + new BasicHeader("Secret", "password"))) { + assertThat(authResponse, hasStatus(OK)); + assertThat(authResponse.getFirstHeader("Secret"), equalTo("granted")); + } } } } diff --git a/core/src/test/java/org/elasticsearch/rest/CorsNotSetIT.java b/core/src/test/java/org/elasticsearch/rest/CorsNotSetIT.java index e407bccf872be..c6c23da102fee 100644 --- a/core/src/test/java/org/elasticsearch/rest/CorsNotSetIT.java +++ b/core/src/test/java/org/elasticsearch/rest/CorsNotSetIT.java @@ -48,20 +48,22 @@ protected Settings nodeSettings(int nodeOrdinal) { public void testCorsSettingDefaultBehaviourDoesNotReturnAnything() throws Exception { String corsValue = "http://localhost:9200"; try (RestClient restClient = restClient()) { - ElasticsearchResponse response = restClient.performRequest("GET", "/", Collections.emptyMap(), null, - new BasicHeader("User-Agent", "Mozilla Bar"), new BasicHeader("Origin", corsValue)); - assertThat(response.getStatusLine().getStatusCode(), is(200)); - assertThat(response.getFirstHeader("Access-Control-Allow-Origin"), nullValue()); - assertThat(response.getFirstHeader("Access-Control-Allow-Credentials"), nullValue()); + try (ElasticsearchResponse response = restClient.performRequest("GET", "/", Collections.emptyMap(), null, + new BasicHeader("User-Agent", "Mozilla Bar"), new BasicHeader("Origin", corsValue))) { + assertThat(response.getStatusLine().getStatusCode(), is(200)); + 
assertThat(response.getFirstHeader("Access-Control-Allow-Origin"), nullValue()); + assertThat(response.getFirstHeader("Access-Control-Allow-Credentials"), nullValue()); + } } } public void testThatOmittingCorsHeaderDoesNotReturnAnything() throws Exception { try (RestClient restClient = restClient()) { - ElasticsearchResponse response = restClient.performRequest("GET", "/", Collections.emptyMap(), null); - assertThat(response.getStatusLine().getStatusCode(), is(200)); - assertThat(response.getFirstHeader("Access-Control-Allow-Origin"), nullValue()); - assertThat(response.getFirstHeader("Access-Control-Allow-Credentials"), nullValue()); + try (ElasticsearchResponse response = restClient.performRequest("GET", "/", Collections.emptyMap(), null)) { + assertThat(response.getStatusLine().getStatusCode(), is(200)); + assertThat(response.getFirstHeader("Access-Control-Allow-Origin"), nullValue()); + assertThat(response.getFirstHeader("Access-Control-Allow-Credentials"), nullValue()); + } } } } diff --git a/core/src/test/java/org/elasticsearch/rest/CorsRegexIT.java b/core/src/test/java/org/elasticsearch/rest/CorsRegexIT.java index 729fed8e96d86..49bfd82e6136a 100644 --- a/core/src/test/java/org/elasticsearch/rest/CorsRegexIT.java +++ b/core/src/test/java/org/elasticsearch/rest/CorsRegexIT.java @@ -63,15 +63,16 @@ protected Settings nodeSettings(int nodeOrdinal) { public void testThatRegularExpressionWorksOnMatch() throws Exception { String corsValue = "http://localhost:9200"; try (RestClient client = restClient()) { - ElasticsearchResponse response = client.performRequest("GET", "/", Collections.emptyMap(), null, - new BasicHeader("User-Agent", "Mozilla Bar"), new BasicHeader("Origin", corsValue)); - assertResponseWithOriginheader(response, corsValue); - + try (ElasticsearchResponse response = client.performRequest("GET", "/", Collections.emptyMap(), null, + new BasicHeader("User-Agent", "Mozilla Bar"), new BasicHeader("Origin", corsValue))) { + 
assertResponseWithOriginheader(response, corsValue); + } corsValue = "https://localhost:9200"; - response = client.performRequest("GET", "/", Collections.emptyMap(), null, - new BasicHeader("User-Agent", "Mozilla Bar"), new BasicHeader("Origin", corsValue)); - assertResponseWithOriginheader(response, corsValue); - assertThat(response.getFirstHeader("Access-Control-Allow-Credentials"), is("true")); + try (ElasticsearchResponse response = client.performRequest("GET", "/", Collections.emptyMap(), null, + new BasicHeader("User-Agent", "Mozilla Bar"), new BasicHeader("Origin", corsValue));) { + assertResponseWithOriginheader(response, corsValue); + assertThat(response.getFirstHeader("Access-Control-Allow-Credentials"), is("true")); + } } } @@ -90,29 +91,32 @@ public void testThatRegularExpressionReturnsForbiddenOnNonMatch() throws Excepti public void testThatSendingNoOriginHeaderReturnsNoAccessControlHeader() throws Exception { try (RestClient client = restClient()) { - ElasticsearchResponse response = client.performRequest("GET", "/", Collections.emptyMap(), null, - new BasicHeader("User-Agent", "Mozilla Bar")); - assertThat(response.getStatusLine().getStatusCode(), is(200)); - assertThat(response.getFirstHeader("Access-Control-Allow-Origin"), nullValue()); + try (ElasticsearchResponse response = client.performRequest("GET", "/", Collections.emptyMap(), null, + new BasicHeader("User-Agent", "Mozilla Bar"))) { + assertThat(response.getStatusLine().getStatusCode(), is(200)); + assertThat(response.getFirstHeader("Access-Control-Allow-Origin"), nullValue()); + } } } public void testThatRegularExpressionIsNotAppliedWithoutCorrectBrowserOnMatch() throws Exception { try (RestClient client = restClient()) { - ElasticsearchResponse response = client.performRequest("GET", "/", Collections.emptyMap(), null); - assertThat(response.getStatusLine().getStatusCode(), is(200)); - assertThat(response.getFirstHeader("Access-Control-Allow-Origin"), nullValue()); + try 
(ElasticsearchResponse response = client.performRequest("GET", "/", Collections.emptyMap(), null)) { + assertThat(response.getStatusLine().getStatusCode(), is(200)); + assertThat(response.getFirstHeader("Access-Control-Allow-Origin"), nullValue()); + } } } public void testThatPreFlightRequestWorksOnMatch() throws Exception { String corsValue = "http://localhost:9200"; try (RestClient client = restClient()) { - ElasticsearchResponse response = client.performRequest("OPTIONS", "/", Collections.emptyMap(), null, + try (ElasticsearchResponse response = client.performRequest("OPTIONS", "/", Collections.emptyMap(), null, new BasicHeader("User-Agent", "Mozilla Bar"), new BasicHeader("Origin", corsValue), - new BasicHeader(HttpHeaders.Names.ACCESS_CONTROL_REQUEST_METHOD, "GET")); - assertResponseWithOriginheader(response, corsValue); - assertNotNull(response.getFirstHeader("Access-Control-Allow-Methods")); + new BasicHeader(HttpHeaders.Names.ACCESS_CONTROL_REQUEST_METHOD, "GET"));) { + assertResponseWithOriginheader(response, corsValue); + assertNotNull(response.getFirstHeader("Access-Control-Allow-Methods")); + } } } diff --git a/core/src/test/java/org/elasticsearch/rest/action/main/RestMainActionIT.java b/core/src/test/java/org/elasticsearch/rest/action/main/RestMainActionIT.java index 9c2b0284ef7ee..0723a9573b433 100644 --- a/core/src/test/java/org/elasticsearch/rest/action/main/RestMainActionIT.java +++ b/core/src/test/java/org/elasticsearch/rest/action/main/RestMainActionIT.java @@ -42,18 +42,20 @@ protected Settings nodeSettings(int nodeOrdinal) { public void testHeadRequest() throws IOException { try (RestClient client = restClient()) { - ElasticsearchResponse response = client.performRequest("HEAD", "/", Collections.emptyMap(), null); - assertThat(response.getStatusLine().getStatusCode(), equalTo(200)); - assertNull(response.getEntity()); + try (ElasticsearchResponse response = client.performRequest("HEAD", "/", Collections.emptyMap(), null)) { + 
assertThat(response.getStatusLine().getStatusCode(), equalTo(200)); + assertNull(response.getEntity()); + } } } public void testGetRequest() throws IOException { try (RestClient client = restClient()) { - ElasticsearchResponse response = client.performRequest("GET", "/", Collections.emptyMap(), null); - assertThat(response.getStatusLine().getStatusCode(), equalTo(200)); - assertNotNull(response.getEntity()); - assertThat(EntityUtils.toString(response.getEntity()), containsString("cluster_name")); + try (ElasticsearchResponse response = client.performRequest("GET", "/", Collections.emptyMap(), null)) { + assertThat(response.getStatusLine().getStatusCode(), equalTo(200)); + assertNotNull(response.getEntity()); + assertThat(EntityUtils.toString(response.getEntity()), containsString("cluster_name")); + } } } } diff --git a/core/src/test/java/org/elasticsearch/transport/ContextAndHeaderTransportIT.java b/core/src/test/java/org/elasticsearch/transport/ContextAndHeaderTransportIT.java index aae15dc778a68..e2139d7321854 100644 --- a/core/src/test/java/org/elasticsearch/transport/ContextAndHeaderTransportIT.java +++ b/core/src/test/java/org/elasticsearch/transport/ContextAndHeaderTransportIT.java @@ -221,15 +221,16 @@ public void testThatRelevantHttpHeadersBecomeRequestHeaders() throws Exception { } try (RestClient client = restClient()) { - ElasticsearchResponse response = client.performRequest("GET", "/" + queryIndex + "/_search", Collections.emptyMap(), null, - new BasicHeader(randomHeaderKey, randomHeaderValue), new BasicHeader(relevantHeaderName, randomHeaderValue)); - assertThat(response, hasStatus(OK)); - List searchRequests = getRequests(SearchRequest.class); - assertThat(searchRequests, hasSize(greaterThan(0))); - for (RequestAndHeaders requestAndHeaders : searchRequests) { - assertThat(requestAndHeaders.headers.containsKey(relevantHeaderName), is(true)); - // was not specified, thus is not included - 
assertThat(requestAndHeaders.headers.containsKey(randomHeaderKey), is(false)); + try (ElasticsearchResponse response = client.performRequest("GET", "/" + queryIndex + "/_search", Collections.emptyMap(), null, + new BasicHeader(randomHeaderKey, randomHeaderValue), new BasicHeader(relevantHeaderName, randomHeaderValue))) { + assertThat(response, hasStatus(OK)); + List searchRequests = getRequests(SearchRequest.class); + assertThat(searchRequests, hasSize(greaterThan(0))); + for (RequestAndHeaders requestAndHeaders : searchRequests) { + assertThat(requestAndHeaders.headers.containsKey(relevantHeaderName), is(true)); + // was not specified, thus is not included + assertThat(requestAndHeaders.headers.containsKey(randomHeaderKey), is(false)); + } } } } From f5825b86e631905f1108f72ae9466f2bc42119b9 Mon Sep 17 00:00:00 2001 From: javanna Date: Fri, 3 Jun 2016 16:56:29 +0200 Subject: [PATCH 053/103] [TEST] restore needed LogManager.reset to prevent logging to sysout --- .../org/elasticsearch/client/sniff/SnifferBuilderTests.java | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/client-sniffer/src/test/java/org/elasticsearch/client/sniff/SnifferBuilderTests.java b/client-sniffer/src/test/java/org/elasticsearch/client/sniff/SnifferBuilderTests.java index ff3cad34bfac1..868c57881200a 100644 --- a/client-sniffer/src/test/java/org/elasticsearch/client/sniff/SnifferBuilderTests.java +++ b/client-sniffer/src/test/java/org/elasticsearch/client/sniff/SnifferBuilderTests.java @@ -26,9 +26,14 @@ import org.elasticsearch.client.RestClient; import java.util.Arrays; +import java.util.logging.LogManager; public class SnifferBuilderTests extends LuceneTestCase { + static { + LogManager.getLogManager().reset(); + } + public void testBuild() throws Exception { try { From 23a94bb9745aeba8bc8c04c02c874deaf89ae621 Mon Sep 17 00:00:00 2001 From: javanna Date: Fri, 3 Jun 2016 18:00:54 +0200 Subject: [PATCH 054/103] [TEST] create standard RestClient at first request and reuse it A RestClient 
instance is now created whenever EsIntegTestCase#getRestClient is invoked for the first time. It is then kept until the cluster is cleared (depending on the cluster scope of the test). Renamed other two restClient methods to createRestClient, as that instance needs to be closed and managed in the tests. --- .../http/netty/NettyHttpCompressionIT.java | 8 +-- .../DetailedErrorsDisabledIT.java | 5 +- .../DetailedErrorsEnabledIT.java | 37 ++++++------ .../plugins/ResponseHeaderPluginIT.java | 27 ++++----- .../org/elasticsearch/rest/CorsNotSetIT.java | 23 +++----- .../org/elasticsearch/rest/CorsRegexIT.java | 59 ++++++++----------- .../rest/action/main/RestMainActionIT.java | 19 +++--- .../ContextAndHeaderTransportIT.java | 21 +++---- .../elasticsearch/test/ESIntegTestCase.java | 25 ++++++-- 9 files changed, 104 insertions(+), 120 deletions(-) diff --git a/core/src/test/java/org/elasticsearch/http/netty/NettyHttpCompressionIT.java b/core/src/test/java/org/elasticsearch/http/netty/NettyHttpCompressionIT.java index 32a331d61f7b9..fb616a87810ba 100644 --- a/core/src/test/java/org/elasticsearch/http/netty/NettyHttpCompressionIT.java +++ b/core/src/test/java/org/elasticsearch/http/netty/NettyHttpCompressionIT.java @@ -61,7 +61,7 @@ public void testCompressesResponseIfRequested() throws Exception { ensureGreen(); // we need to intercept early, otherwise internal logic in HttpClient will just remove the header and we cannot verify it ContentEncodingHeaderExtractor headerExtractor = new ContentEncodingHeaderExtractor(); - try (RestClient client = restClient(HttpClients.custom().addInterceptorFirst(headerExtractor).build())) { + try (RestClient client = createRestClient(HttpClients.custom().addInterceptorFirst(headerExtractor).build())) { try (ElasticsearchResponse response = client.performRequest("GET", "/", Collections.emptyMap(), null, new BasicHeader(HttpHeaders.ACCEPT_ENCODING, GZIP_ENCODING))) { assertEquals(200, response.getStatusLine().getStatusCode()); @@ -75,7 +75,7 @@ 
public void testUncompressedResponseByDefault() throws Exception { ensureGreen(); ContentEncodingHeaderExtractor headerExtractor = new ContentEncodingHeaderExtractor(); CloseableHttpClient httpClient = HttpClients.custom().disableContentCompression().addInterceptorFirst(headerExtractor).build(); - try (RestClient client = restClient(httpClient)) { + try (RestClient client = createRestClient(httpClient)) { try (ElasticsearchResponse response = client.performRequest("GET", "/", Collections.emptyMap(), null)) { assertEquals(200, response.getStatusLine().getStatusCode()); assertFalse(headerExtractor.hasContentEncodingHeader()); @@ -88,7 +88,7 @@ public void testCanInterpretUncompressedRequest() throws Exception { ContentEncodingHeaderExtractor headerExtractor = new ContentEncodingHeaderExtractor(); // this disable content compression in both directions (request and response) CloseableHttpClient httpClient = HttpClients.custom().disableContentCompression().addInterceptorFirst(headerExtractor).build(); - try (RestClient client = restClient(httpClient)) { + try (RestClient client = createRestClient(httpClient)) { try (ElasticsearchResponse response = client.performRequest("POST", "/company/employees/1", Collections.emptyMap(), SAMPLE_DOCUMENT)) { assertEquals(201, response.getStatusLine().getStatusCode()); @@ -101,7 +101,7 @@ public void testCanInterpretCompressedRequest() throws Exception { ensureGreen(); ContentEncodingHeaderExtractor headerExtractor = new ContentEncodingHeaderExtractor(); // we don't call #disableContentCompression() hence the client will send the content compressed - try (RestClient client = restClient(HttpClients.custom().addInterceptorFirst(headerExtractor).build())) { + try (RestClient client = createRestClient(HttpClients.custom().addInterceptorFirst(headerExtractor).build())) { try (ElasticsearchResponse response = client.performRequest("POST", "/company/employees/2", Collections.emptyMap(), SAMPLE_DOCUMENT)) { assertEquals(201, 
response.getStatusLine().getStatusCode()); diff --git a/core/src/test/java/org/elasticsearch/options/detailederrors/DetailedErrorsDisabledIT.java b/core/src/test/java/org/elasticsearch/options/detailederrors/DetailedErrorsDisabledIT.java index ba8840bbc2c21..c23b6fc1239b8 100644 --- a/core/src/test/java/org/elasticsearch/options/detailederrors/DetailedErrorsDisabledIT.java +++ b/core/src/test/java/org/elasticsearch/options/detailederrors/DetailedErrorsDisabledIT.java @@ -21,7 +21,6 @@ import org.elasticsearch.client.ElasticsearchResponse; import org.elasticsearch.client.ElasticsearchResponseException; -import org.elasticsearch.client.RestClient; import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.http.HttpTransportSettings; @@ -49,8 +48,8 @@ protected Settings nodeSettings(int nodeOrdinal) { } public void testThatErrorTraceParamReturns400() throws Exception { - try (RestClient restClient = restClient()) { - restClient.performRequest("DELETE", "/", Collections.singletonMap("error_trace", "true"), null); + try { + getRestClient().performRequest("DELETE", "/", Collections.singletonMap("error_trace", "true"), null); fail("request should have failed"); } catch(ElasticsearchResponseException e) { ElasticsearchResponse response = e.getElasticsearchResponse(); diff --git a/core/src/test/java/org/elasticsearch/options/detailederrors/DetailedErrorsEnabledIT.java b/core/src/test/java/org/elasticsearch/options/detailederrors/DetailedErrorsEnabledIT.java index a040c31299da9..df1cb5a6308f8 100644 --- a/core/src/test/java/org/elasticsearch/options/detailederrors/DetailedErrorsEnabledIT.java +++ b/core/src/test/java/org/elasticsearch/options/detailederrors/DetailedErrorsEnabledIT.java @@ -21,7 +21,6 @@ import org.elasticsearch.client.ElasticsearchResponse; import org.elasticsearch.client.ElasticsearchResponseException; -import org.elasticsearch.client.RestClient; import 
org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.test.ESIntegTestCase; @@ -47,26 +46,24 @@ protected Settings nodeSettings(int nodeOrdinal) { } public void testThatErrorTraceWorksByDefault() throws Exception { - try (RestClient restClient = restClient()) { - try { - restClient.performRequest("DELETE", "/", Collections.singletonMap("error_trace", "true"), null); - fail("request should have failed"); - } catch(ElasticsearchResponseException e) { - ElasticsearchResponse response = e.getElasticsearchResponse(); - assertThat(response.getFirstHeader("Content-Type"), containsString("application/json")); - assertThat(e.getResponseBody(), containsString("\"stack_trace\":\"[Validation Failed: 1: index / indices is missing;]; " + - "nested: ActionRequestValidationException[Validation Failed: 1:")); - } + try { + getRestClient().performRequest("DELETE", "/", Collections.singletonMap("error_trace", "true"), null); + fail("request should have failed"); + } catch(ElasticsearchResponseException e) { + ElasticsearchResponse response = e.getElasticsearchResponse(); + assertThat(response.getFirstHeader("Content-Type"), containsString("application/json")); + assertThat(e.getResponseBody(), containsString("\"stack_trace\":\"[Validation Failed: 1: index / indices is missing;]; " + + "nested: ActionRequestValidationException[Validation Failed: 1:")); + } - try { - restClient.performRequest("DELETE", "/", Collections.emptyMap(), null); - fail("request should have failed"); - } catch(ElasticsearchResponseException e) { - ElasticsearchResponse response = e.getElasticsearchResponse(); - assertThat(response.getFirstHeader("Content-Type"), containsString("application/json")); - assertThat(e.getResponseBody(), not(containsString("\"stack_trace\":\"[Validation Failed: 1: index / indices is missing;]; " - + "nested: ActionRequestValidationException[Validation Failed: 1:"))); - } + try { + 
getRestClient().performRequest("DELETE", "/", Collections.emptyMap(), null); + fail("request should have failed"); + } catch(ElasticsearchResponseException e) { + ElasticsearchResponse response = e.getElasticsearchResponse(); + assertThat(response.getFirstHeader("Content-Type"), containsString("application/json")); + assertThat(e.getResponseBody(), not(containsString("\"stack_trace\":\"[Validation Failed: 1: index / indices is missing;]; " + + "nested: ActionRequestValidationException[Validation Failed: 1:"))); } } } diff --git a/core/src/test/java/org/elasticsearch/plugins/ResponseHeaderPluginIT.java b/core/src/test/java/org/elasticsearch/plugins/ResponseHeaderPluginIT.java index 4b905d7b1a80c..0fd21651ac018 100644 --- a/core/src/test/java/org/elasticsearch/plugins/ResponseHeaderPluginIT.java +++ b/core/src/test/java/org/elasticsearch/plugins/ResponseHeaderPluginIT.java @@ -21,7 +21,6 @@ import org.apache.http.message.BasicHeader; import org.elasticsearch.client.ElasticsearchResponse; import org.elasticsearch.client.ElasticsearchResponseException; -import org.elasticsearch.client.RestClient; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.plugins.responseheader.TestResponseHeaderPlugin; import org.elasticsearch.test.ESIntegTestCase; @@ -56,21 +55,19 @@ protected Collection> nodePlugins() { public void testThatSettingHeadersWorks() throws Exception { ensureGreen(); - try (RestClient client = restClient()) { - try { - client.performRequest("GET", "/_protected", Collections.emptyMap(), null); - fail("request should have failed"); - } catch(ElasticsearchResponseException e) { - ElasticsearchResponse response = e.getElasticsearchResponse(); - assertThat(response, hasStatus(UNAUTHORIZED)); - assertThat(response.getFirstHeader("Secret"), equalTo("required")); - } + try { + getRestClient().performRequest("GET", "/_protected", Collections.emptyMap(), null); + fail("request should have failed"); + } catch(ElasticsearchResponseException e) { + 
ElasticsearchResponse response = e.getElasticsearchResponse(); + assertThat(response, hasStatus(UNAUTHORIZED)); + assertThat(response.getFirstHeader("Secret"), equalTo("required")); + } - try (ElasticsearchResponse authResponse = client.performRequest("GET", "/_protected", Collections.emptyMap(), null, - new BasicHeader("Secret", "password"))) { - assertThat(authResponse, hasStatus(OK)); - assertThat(authResponse.getFirstHeader("Secret"), equalTo("granted")); - } + try (ElasticsearchResponse authResponse = getRestClient().performRequest("GET", "/_protected", Collections.emptyMap(), null, + new BasicHeader("Secret", "password"))) { + assertThat(authResponse, hasStatus(OK)); + assertThat(authResponse.getFirstHeader("Secret"), equalTo("granted")); } } } diff --git a/core/src/test/java/org/elasticsearch/rest/CorsNotSetIT.java b/core/src/test/java/org/elasticsearch/rest/CorsNotSetIT.java index c6c23da102fee..0f4ccd1a6cf5b 100644 --- a/core/src/test/java/org/elasticsearch/rest/CorsNotSetIT.java +++ b/core/src/test/java/org/elasticsearch/rest/CorsNotSetIT.java @@ -21,7 +21,6 @@ import org.apache.http.message.BasicHeader; import org.elasticsearch.client.ElasticsearchResponse; -import org.elasticsearch.client.RestClient; import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.test.ESIntegTestCase; @@ -47,23 +46,19 @@ protected Settings nodeSettings(int nodeOrdinal) { public void testCorsSettingDefaultBehaviourDoesNotReturnAnything() throws Exception { String corsValue = "http://localhost:9200"; - try (RestClient restClient = restClient()) { - try (ElasticsearchResponse response = restClient.performRequest("GET", "/", Collections.emptyMap(), null, - new BasicHeader("User-Agent", "Mozilla Bar"), new BasicHeader("Origin", corsValue))) { - assertThat(response.getStatusLine().getStatusCode(), is(200)); - assertThat(response.getFirstHeader("Access-Control-Allow-Origin"), nullValue()); - 
assertThat(response.getFirstHeader("Access-Control-Allow-Credentials"), nullValue()); - } + try (ElasticsearchResponse response = getRestClient().performRequest("GET", "/", Collections.emptyMap(), null, + new BasicHeader("User-Agent", "Mozilla Bar"), new BasicHeader("Origin", corsValue))) { + assertThat(response.getStatusLine().getStatusCode(), is(200)); + assertThat(response.getFirstHeader("Access-Control-Allow-Origin"), nullValue()); + assertThat(response.getFirstHeader("Access-Control-Allow-Credentials"), nullValue()); } } public void testThatOmittingCorsHeaderDoesNotReturnAnything() throws Exception { - try (RestClient restClient = restClient()) { - try (ElasticsearchResponse response = restClient.performRequest("GET", "/", Collections.emptyMap(), null)) { - assertThat(response.getStatusLine().getStatusCode(), is(200)); - assertThat(response.getFirstHeader("Access-Control-Allow-Origin"), nullValue()); - assertThat(response.getFirstHeader("Access-Control-Allow-Credentials"), nullValue()); - } + try (ElasticsearchResponse response = getRestClient().performRequest("GET", "/", Collections.emptyMap(), null)) { + assertThat(response.getStatusLine().getStatusCode(), is(200)); + assertThat(response.getFirstHeader("Access-Control-Allow-Origin"), nullValue()); + assertThat(response.getFirstHeader("Access-Control-Allow-Credentials"), nullValue()); } } } diff --git a/core/src/test/java/org/elasticsearch/rest/CorsRegexIT.java b/core/src/test/java/org/elasticsearch/rest/CorsRegexIT.java index 49bfd82e6136a..f6012702ce87b 100644 --- a/core/src/test/java/org/elasticsearch/rest/CorsRegexIT.java +++ b/core/src/test/java/org/elasticsearch/rest/CorsRegexIT.java @@ -21,7 +21,6 @@ import org.apache.http.message.BasicHeader; import org.elasticsearch.client.ElasticsearchResponse; import org.elasticsearch.client.ElasticsearchResponseException; -import org.elasticsearch.client.RestClient; import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; 
import org.elasticsearch.common.network.NetworkModule; @@ -62,23 +61,21 @@ protected Settings nodeSettings(int nodeOrdinal) { public void testThatRegularExpressionWorksOnMatch() throws Exception { String corsValue = "http://localhost:9200"; - try (RestClient client = restClient()) { - try (ElasticsearchResponse response = client.performRequest("GET", "/", Collections.emptyMap(), null, - new BasicHeader("User-Agent", "Mozilla Bar"), new BasicHeader("Origin", corsValue))) { - assertResponseWithOriginheader(response, corsValue); - } - corsValue = "https://localhost:9200"; - try (ElasticsearchResponse response = client.performRequest("GET", "/", Collections.emptyMap(), null, - new BasicHeader("User-Agent", "Mozilla Bar"), new BasicHeader("Origin", corsValue));) { - assertResponseWithOriginheader(response, corsValue); - assertThat(response.getFirstHeader("Access-Control-Allow-Credentials"), is("true")); - } + try (ElasticsearchResponse response = getRestClient().performRequest("GET", "/", Collections.emptyMap(), null, + new BasicHeader("User-Agent", "Mozilla Bar"), new BasicHeader("Origin", corsValue))) { + assertResponseWithOriginheader(response, corsValue); + } + corsValue = "https://localhost:9200"; + try (ElasticsearchResponse response = getRestClient().performRequest("GET", "/", Collections.emptyMap(), null, + new BasicHeader("User-Agent", "Mozilla Bar"), new BasicHeader("Origin", corsValue));) { + assertResponseWithOriginheader(response, corsValue); + assertThat(response.getFirstHeader("Access-Control-Allow-Credentials"), is("true")); } } public void testThatRegularExpressionReturnsForbiddenOnNonMatch() throws Exception { - try (RestClient client = restClient()) { - client.performRequest("GET", "/", Collections.emptyMap(), null, new BasicHeader("User-Agent", "Mozilla Bar"), + try { + getRestClient().performRequest("GET", "/", Collections.emptyMap(), null, new BasicHeader("User-Agent", "Mozilla Bar"), new BasicHeader("Origin", "http://evil-host:9200")); 
fail("request should have failed"); } catch(ElasticsearchResponseException e) { @@ -90,39 +87,33 @@ public void testThatRegularExpressionReturnsForbiddenOnNonMatch() throws Excepti } public void testThatSendingNoOriginHeaderReturnsNoAccessControlHeader() throws Exception { - try (RestClient client = restClient()) { - try (ElasticsearchResponse response = client.performRequest("GET", "/", Collections.emptyMap(), null, - new BasicHeader("User-Agent", "Mozilla Bar"))) { - assertThat(response.getStatusLine().getStatusCode(), is(200)); - assertThat(response.getFirstHeader("Access-Control-Allow-Origin"), nullValue()); - } + try (ElasticsearchResponse response = getRestClient().performRequest("GET", "/", Collections.emptyMap(), null, + new BasicHeader("User-Agent", "Mozilla Bar"))) { + assertThat(response.getStatusLine().getStatusCode(), is(200)); + assertThat(response.getFirstHeader("Access-Control-Allow-Origin"), nullValue()); } } public void testThatRegularExpressionIsNotAppliedWithoutCorrectBrowserOnMatch() throws Exception { - try (RestClient client = restClient()) { - try (ElasticsearchResponse response = client.performRequest("GET", "/", Collections.emptyMap(), null)) { - assertThat(response.getStatusLine().getStatusCode(), is(200)); - assertThat(response.getFirstHeader("Access-Control-Allow-Origin"), nullValue()); - } + try (ElasticsearchResponse response = getRestClient().performRequest("GET", "/", Collections.emptyMap(), null)) { + assertThat(response.getStatusLine().getStatusCode(), is(200)); + assertThat(response.getFirstHeader("Access-Control-Allow-Origin"), nullValue()); } } public void testThatPreFlightRequestWorksOnMatch() throws Exception { String corsValue = "http://localhost:9200"; - try (RestClient client = restClient()) { - try (ElasticsearchResponse response = client.performRequest("OPTIONS", "/", Collections.emptyMap(), null, - new BasicHeader("User-Agent", "Mozilla Bar"), new BasicHeader("Origin", corsValue), - new 
BasicHeader(HttpHeaders.Names.ACCESS_CONTROL_REQUEST_METHOD, "GET"));) { - assertResponseWithOriginheader(response, corsValue); - assertNotNull(response.getFirstHeader("Access-Control-Allow-Methods")); - } + try (ElasticsearchResponse response = getRestClient().performRequest("OPTIONS", "/", Collections.emptyMap(), null, + new BasicHeader("User-Agent", "Mozilla Bar"), new BasicHeader("Origin", corsValue), + new BasicHeader(HttpHeaders.Names.ACCESS_CONTROL_REQUEST_METHOD, "GET"));) { + assertResponseWithOriginheader(response, corsValue); + assertNotNull(response.getFirstHeader("Access-Control-Allow-Methods")); } } public void testThatPreFlightRequestReturnsNullOnNonMatch() throws Exception { - try (RestClient client = restClient()) { - client.performRequest("OPTIONS", "/", Collections.emptyMap(), null, new BasicHeader("User-Agent", "Mozilla Bar"), + try { + getRestClient().performRequest("OPTIONS", "/", Collections.emptyMap(), null, new BasicHeader("User-Agent", "Mozilla Bar"), new BasicHeader("Origin", "http://evil-host:9200"), new BasicHeader(HttpHeaders.Names.ACCESS_CONTROL_REQUEST_METHOD, "GET")); fail("request should have failed"); diff --git a/core/src/test/java/org/elasticsearch/rest/action/main/RestMainActionIT.java b/core/src/test/java/org/elasticsearch/rest/action/main/RestMainActionIT.java index 0723a9573b433..edf9b5b2c0633 100644 --- a/core/src/test/java/org/elasticsearch/rest/action/main/RestMainActionIT.java +++ b/core/src/test/java/org/elasticsearch/rest/action/main/RestMainActionIT.java @@ -20,7 +20,6 @@ import org.apache.http.util.EntityUtils; import org.elasticsearch.client.ElasticsearchResponse; -import org.elasticsearch.client.RestClient; import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.test.ESIntegTestCase; @@ -41,21 +40,17 @@ protected Settings nodeSettings(int nodeOrdinal) { } public void testHeadRequest() throws IOException { - try (RestClient client = 
restClient()) { - try (ElasticsearchResponse response = client.performRequest("HEAD", "/", Collections.emptyMap(), null)) { - assertThat(response.getStatusLine().getStatusCode(), equalTo(200)); - assertNull(response.getEntity()); - } + try (ElasticsearchResponse response = getRestClient().performRequest("HEAD", "/", Collections.emptyMap(), null)) { + assertThat(response.getStatusLine().getStatusCode(), equalTo(200)); + assertNull(response.getEntity()); } } public void testGetRequest() throws IOException { - try (RestClient client = restClient()) { - try (ElasticsearchResponse response = client.performRequest("GET", "/", Collections.emptyMap(), null)) { - assertThat(response.getStatusLine().getStatusCode(), equalTo(200)); - assertNotNull(response.getEntity()); - assertThat(EntityUtils.toString(response.getEntity()), containsString("cluster_name")); - } + try (ElasticsearchResponse response = getRestClient().performRequest("GET", "/", Collections.emptyMap(), null)) { + assertThat(response.getStatusLine().getStatusCode(), equalTo(200)); + assertNotNull(response.getEntity()); + assertThat(EntityUtils.toString(response.getEntity()), containsString("cluster_name")); } } } diff --git a/core/src/test/java/org/elasticsearch/transport/ContextAndHeaderTransportIT.java b/core/src/test/java/org/elasticsearch/transport/ContextAndHeaderTransportIT.java index e2139d7321854..ecee90f313ba3 100644 --- a/core/src/test/java/org/elasticsearch/transport/ContextAndHeaderTransportIT.java +++ b/core/src/test/java/org/elasticsearch/transport/ContextAndHeaderTransportIT.java @@ -33,7 +33,6 @@ import org.elasticsearch.action.termvectors.MultiTermVectorsRequest; import org.elasticsearch.client.Client; import org.elasticsearch.client.ElasticsearchResponse; -import org.elasticsearch.client.RestClient; import org.elasticsearch.common.inject.AbstractModule; import org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.inject.Module; @@ -220,17 +219,15 @@ public void 
testThatRelevantHttpHeadersBecomeRequestHeaders() throws Exception { restController.registerRelevantHeaders(relevantHeaderName); } - try (RestClient client = restClient()) { - try (ElasticsearchResponse response = client.performRequest("GET", "/" + queryIndex + "/_search", Collections.emptyMap(), null, - new BasicHeader(randomHeaderKey, randomHeaderValue), new BasicHeader(relevantHeaderName, randomHeaderValue))) { - assertThat(response, hasStatus(OK)); - List searchRequests = getRequests(SearchRequest.class); - assertThat(searchRequests, hasSize(greaterThan(0))); - for (RequestAndHeaders requestAndHeaders : searchRequests) { - assertThat(requestAndHeaders.headers.containsKey(relevantHeaderName), is(true)); - // was not specified, thus is not included - assertThat(requestAndHeaders.headers.containsKey(randomHeaderKey), is(false)); - } + try (ElasticsearchResponse response = getRestClient().performRequest("GET", "/" + queryIndex + "/_search", Collections.emptyMap(), null, + new BasicHeader(randomHeaderKey, randomHeaderValue), new BasicHeader(relevantHeaderName, randomHeaderValue))) { + assertThat(response, hasStatus(OK)); + List searchRequests = getRequests(SearchRequest.class); + assertThat(searchRequests, hasSize(greaterThan(0))); + for (RequestAndHeaders requestAndHeaders : searchRequests) { + assertThat(requestAndHeaders.headers.containsKey(relevantHeaderName), is(true)); + // was not specified, thus is not included + assertThat(requestAndHeaders.headers.containsKey(randomHeaderKey), is(false)); } } } diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java index 67f2a65492867..ab25d25fac035 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java @@ -314,6 +314,7 @@ public abstract class ESIntegTestCase extends ESTestCase { * By default if no {@link 
ClusterScope} is configured this will hold a reference to the suite cluster. */ private static TestCluster currentCluster; + private static RestClient restClient = null; private static final double TRANSPORT_CLIENT_RATIO = transportClientRatio(); @@ -510,6 +511,10 @@ private static void clearClusters() throws IOException { IOUtils.close(clusters.values()); clusters.clear(); } + if (restClient != null) { + restClient.close(); + restClient = null; + } } protected final void afterInternal(boolean afterClass) throws Exception { @@ -2033,15 +2038,24 @@ protected Settings prepareBackwardsDataDir(Path backwardsIndex, Object... settin return builder.build(); } - protected static RestClient restClient() { - return restClient(null); + /** + * Returns an instance of {@link RestClient} pointing to the current test cluster. + * Creates a new client if the method is invoked for the first time in the context of the current test scope. + * The returned client gets automatically closed when needed, it shouldn't be closed as part of tests otherwise + * it cannot be reused by other tests anymore. 
+ */ + protected synchronized static RestClient getRestClient() { + if (restClient == null) { + restClient = createRestClient(null); + } + return restClient; } - protected static RestClient restClient(CloseableHttpClient httpClient) { - return restClient(httpClient, "http"); + protected static RestClient createRestClient(CloseableHttpClient httpClient) { + return createRestClient(httpClient, "http"); } - protected static RestClient restClient(CloseableHttpClient httpClient, String protocol) { + protected static RestClient createRestClient(CloseableHttpClient httpClient, String protocol) { final NodesInfoResponse nodeInfos = client().admin().cluster().prepareNodesInfo().get(); final List nodes = nodeInfos.getNodes(); assertFalse(nodeInfos.hasFailures()); @@ -2054,7 +2068,6 @@ protected static RestClient restClient(CloseableHttpClient httpClient, String pr hosts.add(new HttpHost(NetworkAddress.format(address.getAddress()), address.getPort(), protocol)); } } - RestClient.Builder builder = RestClient.builder().setHosts(hosts.toArray(new HttpHost[hosts.size()])); if (httpClient != null) { builder.setHttpClient(httpClient); From c48b7a7a97a629e3272583ff511d9f960d63e4a5 Mon Sep 17 00:00:00 2001 From: javanna Date: Fri, 3 Jun 2016 18:08:37 +0200 Subject: [PATCH 055/103] [TEST] create standard RestClient at first request and reuse it A RestClient instance is now created whenever EsIntegTestCase#getRestClient is invoked for the first time. It is then kept until the cluster is cleared (depending on the cluster scope of the test). Renamed other two restClient methods to createRestClient, as that instance needs to be closed and managed in the tests. 
--- .../elasticsearch/transport/ContextAndHeaderTransportIT.java | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/core/src/test/java/org/elasticsearch/transport/ContextAndHeaderTransportIT.java b/core/src/test/java/org/elasticsearch/transport/ContextAndHeaderTransportIT.java index ecee90f313ba3..6dd6114ce795a 100644 --- a/core/src/test/java/org/elasticsearch/transport/ContextAndHeaderTransportIT.java +++ b/core/src/test/java/org/elasticsearch/transport/ContextAndHeaderTransportIT.java @@ -219,7 +219,8 @@ public void testThatRelevantHttpHeadersBecomeRequestHeaders() throws Exception { restController.registerRelevantHeaders(relevantHeaderName); } - try (ElasticsearchResponse response = getRestClient().performRequest("GET", "/" + queryIndex + "/_search", Collections.emptyMap(), null, + try (ElasticsearchResponse response = getRestClient().performRequest( + "GET", "/" + queryIndex + "/_search", Collections.emptyMap(), null, new BasicHeader(randomHeaderKey, randomHeaderValue), new BasicHeader(relevantHeaderName, randomHeaderValue))) { assertThat(response, hasStatus(OK)); List searchRequests = getRequests(SearchRequest.class); From 29eee328fe8b7baab0f7c68c89a6c92c915dec41 Mon Sep 17 00:00:00 2001 From: javanna Date: Fri, 3 Jun 2016 18:23:52 +0200 Subject: [PATCH 056/103] [TEST] expand RequestLoggerTests to all the supported http methods --- .../elasticsearch/client/RequestLoggerTests.java | 14 +++++++++++++- 1 file changed, 13 insertions(+), 1 deletion(-) diff --git a/client/src/test/java/org/elasticsearch/client/RequestLoggerTests.java b/client/src/test/java/org/elasticsearch/client/RequestLoggerTests.java index 89fa30c7e0867..64bddf88af2ca 100644 --- a/client/src/test/java/org/elasticsearch/client/RequestLoggerTests.java +++ b/client/src/test/java/org/elasticsearch/client/RequestLoggerTests.java @@ -24,9 +24,12 @@ import org.apache.http.HttpHost; import org.apache.http.ProtocolVersion; import org.apache.http.client.methods.HttpHead; +import 
org.apache.http.client.methods.HttpOptions; +import org.apache.http.client.methods.HttpPatch; import org.apache.http.client.methods.HttpPost; import org.apache.http.client.methods.HttpPut; import org.apache.http.client.methods.HttpRequestBase; +import org.apache.http.client.methods.HttpTrace; import org.apache.http.entity.StringEntity; import org.apache.http.message.BasicHttpResponse; import org.apache.http.message.BasicStatusLine; @@ -46,7 +49,7 @@ public void testTraceRequest() throws IOException, URISyntaxException { URI uri = new URI("/index/type/_api"); HttpRequestBase request; - int requestType = RandomInts.randomIntBetween(random(), 0, 4); + int requestType = RandomInts.randomIntBetween(random(), 0, 7); switch(requestType) { case 0: request = new HttpGetWithEntity(uri); @@ -63,6 +66,15 @@ public void testTraceRequest() throws IOException, URISyntaxException { case 4: request = new HttpHead(uri); break; + case 5: + request = new HttpTrace(uri); + break; + case 6: + request = new HttpOptions(uri); + break; + case 7: + request = new HttpPatch(uri); + break; default: throw new UnsupportedOperationException(); } From f17f0f9247360276652a76098daa3de8de8ed1e5 Mon Sep 17 00:00:00 2001 From: javanna Date: Fri, 3 Jun 2016 18:28:31 +0200 Subject: [PATCH 057/103] rename ElasticsearchResponse#getFirstHeader to getHeader --- .../client/ElasticsearchResponse.java | 2 +- .../detailederrors/DetailedErrorsDisabledIT.java | 2 +- .../detailederrors/DetailedErrorsEnabledIT.java | 4 ++-- .../plugins/ResponseHeaderPluginIT.java | 4 ++-- .../org/elasticsearch/rest/CorsNotSetIT.java | 8 ++++---- .../java/org/elasticsearch/rest/CorsRegexIT.java | 16 ++++++++-------- .../test/rest/client/RestTestResponse.java | 2 +- 7 files changed, 19 insertions(+), 19 deletions(-) diff --git a/client/src/main/java/org/elasticsearch/client/ElasticsearchResponse.java b/client/src/main/java/org/elasticsearch/client/ElasticsearchResponse.java index 2d25851b9696b..037f7412f66e9 100644 --- 
a/client/src/main/java/org/elasticsearch/client/ElasticsearchResponse.java +++ b/client/src/main/java/org/elasticsearch/client/ElasticsearchResponse.java @@ -83,7 +83,7 @@ public Header[] getHeaders() { * If there is more than one matching header in the message the first element is returned. * If there is no matching header in the message null is returned. */ - public String getFirstHeader(String name) { + public String getHeader(String name) { Header header = response.getFirstHeader(name); if (header == null) { return null; diff --git a/core/src/test/java/org/elasticsearch/options/detailederrors/DetailedErrorsDisabledIT.java b/core/src/test/java/org/elasticsearch/options/detailederrors/DetailedErrorsDisabledIT.java index c23b6fc1239b8..fb38f774c3d1f 100644 --- a/core/src/test/java/org/elasticsearch/options/detailederrors/DetailedErrorsDisabledIT.java +++ b/core/src/test/java/org/elasticsearch/options/detailederrors/DetailedErrorsDisabledIT.java @@ -53,7 +53,7 @@ public void testThatErrorTraceParamReturns400() throws Exception { fail("request should have failed"); } catch(ElasticsearchResponseException e) { ElasticsearchResponse response = e.getElasticsearchResponse(); - assertThat(response.getFirstHeader("Content-Type"), is("application/json")); + assertThat(response.getHeader("Content-Type"), is("application/json")); assertThat(e.getResponseBody(), is("{\"error\":\"error traces in responses are disabled.\"}")); assertThat(response.getStatusLine().getStatusCode(), is(400)); } diff --git a/core/src/test/java/org/elasticsearch/options/detailederrors/DetailedErrorsEnabledIT.java b/core/src/test/java/org/elasticsearch/options/detailederrors/DetailedErrorsEnabledIT.java index df1cb5a6308f8..39aeb00d05d1a 100644 --- a/core/src/test/java/org/elasticsearch/options/detailederrors/DetailedErrorsEnabledIT.java +++ b/core/src/test/java/org/elasticsearch/options/detailederrors/DetailedErrorsEnabledIT.java @@ -51,7 +51,7 @@ public void testThatErrorTraceWorksByDefault() throws 
Exception { fail("request should have failed"); } catch(ElasticsearchResponseException e) { ElasticsearchResponse response = e.getElasticsearchResponse(); - assertThat(response.getFirstHeader("Content-Type"), containsString("application/json")); + assertThat(response.getHeader("Content-Type"), containsString("application/json")); assertThat(e.getResponseBody(), containsString("\"stack_trace\":\"[Validation Failed: 1: index / indices is missing;]; " + "nested: ActionRequestValidationException[Validation Failed: 1:")); } @@ -61,7 +61,7 @@ public void testThatErrorTraceWorksByDefault() throws Exception { fail("request should have failed"); } catch(ElasticsearchResponseException e) { ElasticsearchResponse response = e.getElasticsearchResponse(); - assertThat(response.getFirstHeader("Content-Type"), containsString("application/json")); + assertThat(response.getHeader("Content-Type"), containsString("application/json")); assertThat(e.getResponseBody(), not(containsString("\"stack_trace\":\"[Validation Failed: 1: index / indices is missing;]; " + "nested: ActionRequestValidationException[Validation Failed: 1:"))); } diff --git a/core/src/test/java/org/elasticsearch/plugins/ResponseHeaderPluginIT.java b/core/src/test/java/org/elasticsearch/plugins/ResponseHeaderPluginIT.java index 0fd21651ac018..2f95fe202327a 100644 --- a/core/src/test/java/org/elasticsearch/plugins/ResponseHeaderPluginIT.java +++ b/core/src/test/java/org/elasticsearch/plugins/ResponseHeaderPluginIT.java @@ -61,13 +61,13 @@ public void testThatSettingHeadersWorks() throws Exception { } catch(ElasticsearchResponseException e) { ElasticsearchResponse response = e.getElasticsearchResponse(); assertThat(response, hasStatus(UNAUTHORIZED)); - assertThat(response.getFirstHeader("Secret"), equalTo("required")); + assertThat(response.getHeader("Secret"), equalTo("required")); } try (ElasticsearchResponse authResponse = getRestClient().performRequest("GET", "/_protected", Collections.emptyMap(), null, new 
BasicHeader("Secret", "password"))) { assertThat(authResponse, hasStatus(OK)); - assertThat(authResponse.getFirstHeader("Secret"), equalTo("granted")); + assertThat(authResponse.getHeader("Secret"), equalTo("granted")); } } } diff --git a/core/src/test/java/org/elasticsearch/rest/CorsNotSetIT.java b/core/src/test/java/org/elasticsearch/rest/CorsNotSetIT.java index 0f4ccd1a6cf5b..2a38f3bcd3acb 100644 --- a/core/src/test/java/org/elasticsearch/rest/CorsNotSetIT.java +++ b/core/src/test/java/org/elasticsearch/rest/CorsNotSetIT.java @@ -49,16 +49,16 @@ public void testCorsSettingDefaultBehaviourDoesNotReturnAnything() throws Except try (ElasticsearchResponse response = getRestClient().performRequest("GET", "/", Collections.emptyMap(), null, new BasicHeader("User-Agent", "Mozilla Bar"), new BasicHeader("Origin", corsValue))) { assertThat(response.getStatusLine().getStatusCode(), is(200)); - assertThat(response.getFirstHeader("Access-Control-Allow-Origin"), nullValue()); - assertThat(response.getFirstHeader("Access-Control-Allow-Credentials"), nullValue()); + assertThat(response.getHeader("Access-Control-Allow-Origin"), nullValue()); + assertThat(response.getHeader("Access-Control-Allow-Credentials"), nullValue()); } } public void testThatOmittingCorsHeaderDoesNotReturnAnything() throws Exception { try (ElasticsearchResponse response = getRestClient().performRequest("GET", "/", Collections.emptyMap(), null)) { assertThat(response.getStatusLine().getStatusCode(), is(200)); - assertThat(response.getFirstHeader("Access-Control-Allow-Origin"), nullValue()); - assertThat(response.getFirstHeader("Access-Control-Allow-Credentials"), nullValue()); + assertThat(response.getHeader("Access-Control-Allow-Origin"), nullValue()); + assertThat(response.getHeader("Access-Control-Allow-Credentials"), nullValue()); } } } diff --git a/core/src/test/java/org/elasticsearch/rest/CorsRegexIT.java b/core/src/test/java/org/elasticsearch/rest/CorsRegexIT.java index f6012702ce87b..4da0b675d02d3 
100644 --- a/core/src/test/java/org/elasticsearch/rest/CorsRegexIT.java +++ b/core/src/test/java/org/elasticsearch/rest/CorsRegexIT.java @@ -69,7 +69,7 @@ public void testThatRegularExpressionWorksOnMatch() throws Exception { try (ElasticsearchResponse response = getRestClient().performRequest("GET", "/", Collections.emptyMap(), null, new BasicHeader("User-Agent", "Mozilla Bar"), new BasicHeader("Origin", corsValue));) { assertResponseWithOriginheader(response, corsValue); - assertThat(response.getFirstHeader("Access-Control-Allow-Credentials"), is("true")); + assertThat(response.getHeader("Access-Control-Allow-Credentials"), is("true")); } } @@ -82,7 +82,7 @@ public void testThatRegularExpressionReturnsForbiddenOnNonMatch() throws Excepti ElasticsearchResponse response = e.getElasticsearchResponse(); // a rejected origin gets a FORBIDDEN - 403 assertThat(response.getStatusLine().getStatusCode(), is(403)); - assertThat(response.getFirstHeader("Access-Control-Allow-Origin"), nullValue()); + assertThat(response.getHeader("Access-Control-Allow-Origin"), nullValue()); } } @@ -90,14 +90,14 @@ public void testThatSendingNoOriginHeaderReturnsNoAccessControlHeader() throws E try (ElasticsearchResponse response = getRestClient().performRequest("GET", "/", Collections.emptyMap(), null, new BasicHeader("User-Agent", "Mozilla Bar"))) { assertThat(response.getStatusLine().getStatusCode(), is(200)); - assertThat(response.getFirstHeader("Access-Control-Allow-Origin"), nullValue()); + assertThat(response.getHeader("Access-Control-Allow-Origin"), nullValue()); } } public void testThatRegularExpressionIsNotAppliedWithoutCorrectBrowserOnMatch() throws Exception { try (ElasticsearchResponse response = getRestClient().performRequest("GET", "/", Collections.emptyMap(), null)) { assertThat(response.getStatusLine().getStatusCode(), is(200)); - assertThat(response.getFirstHeader("Access-Control-Allow-Origin"), nullValue()); + assertThat(response.getHeader("Access-Control-Allow-Origin"), 
nullValue()); } } @@ -107,7 +107,7 @@ public void testThatPreFlightRequestWorksOnMatch() throws Exception { new BasicHeader("User-Agent", "Mozilla Bar"), new BasicHeader("Origin", corsValue), new BasicHeader(HttpHeaders.Names.ACCESS_CONTROL_REQUEST_METHOD, "GET"));) { assertResponseWithOriginheader(response, corsValue); - assertNotNull(response.getFirstHeader("Access-Control-Allow-Methods")); + assertNotNull(response.getHeader("Access-Control-Allow-Methods")); } } @@ -121,13 +121,13 @@ public void testThatPreFlightRequestReturnsNullOnNonMatch() throws Exception { ElasticsearchResponse response = e.getElasticsearchResponse(); // a rejected origin gets a FORBIDDEN - 403 assertThat(response.getStatusLine().getStatusCode(), is(403)); - assertThat(response.getFirstHeader("Access-Control-Allow-Origin"), nullValue()); - assertThat(response.getFirstHeader("Access-Control-Allow-Methods"), nullValue()); + assertThat(response.getHeader("Access-Control-Allow-Origin"), nullValue()); + assertThat(response.getHeader("Access-Control-Allow-Methods"), nullValue()); } } protected static void assertResponseWithOriginheader(ElasticsearchResponse response, String expectedCorsHeader) { assertThat(response.getStatusLine().getStatusCode(), is(200)); - assertThat(response.getFirstHeader("Access-Control-Allow-Origin"), is(expectedCorsHeader)); + assertThat(response.getHeader("Access-Control-Allow-Origin"), is(expectedCorsHeader)); } } diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/client/RestTestResponse.java b/test/framework/src/main/java/org/elasticsearch/test/rest/client/RestTestResponse.java index abcae6b26a2c3..5406fb7944344 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/client/RestTestResponse.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/client/RestTestResponse.java @@ -125,7 +125,7 @@ public Object evaluate(String path, Stash stash) throws IOException { } private boolean isJson() { - String contentType = 
response.getFirstHeader("Content-Type"); + String contentType = response.getHeader("Content-Type"); return contentType != null && contentType.contains("application/json"); } From b891c46657a9d2e031395172f00464f1ac91c90d Mon Sep 17 00:00:00 2001 From: javanna Date: Fri, 3 Jun 2016 23:25:17 +0200 Subject: [PATCH 058/103] [TEST] remove status matcher and hasStatus assertion All it does is checking the status code of a response, which can be done with a single line in each test --- .../plugins/ResponseHeaderPluginIT.java | 7 ++--- .../ContextAndHeaderTransportIT.java | 5 ++-- .../hamcrest/ElasticsearchAssertions.java | 5 ---- .../test/hamcrest/ElasticsearchMatchers.java | 26 ------------------- 4 files changed, 4 insertions(+), 39 deletions(-) diff --git a/core/src/test/java/org/elasticsearch/plugins/ResponseHeaderPluginIT.java b/core/src/test/java/org/elasticsearch/plugins/ResponseHeaderPluginIT.java index 2f95fe202327a..6b4fdee47c694 100644 --- a/core/src/test/java/org/elasticsearch/plugins/ResponseHeaderPluginIT.java +++ b/core/src/test/java/org/elasticsearch/plugins/ResponseHeaderPluginIT.java @@ -30,9 +30,6 @@ import java.util.Collection; import java.util.Collections; -import static org.elasticsearch.rest.RestStatus.OK; -import static org.elasticsearch.rest.RestStatus.UNAUTHORIZED; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.hasStatus; import static org.hamcrest.Matchers.equalTo; /** @@ -60,13 +57,13 @@ public void testThatSettingHeadersWorks() throws Exception { fail("request should have failed"); } catch(ElasticsearchResponseException e) { ElasticsearchResponse response = e.getElasticsearchResponse(); - assertThat(response, hasStatus(UNAUTHORIZED)); + assertThat(response.getStatusLine().getStatusCode(), equalTo(401)); assertThat(response.getHeader("Secret"), equalTo("required")); } try (ElasticsearchResponse authResponse = getRestClient().performRequest("GET", "/_protected", Collections.emptyMap(), null, new BasicHeader("Secret", 
"password"))) { - assertThat(authResponse, hasStatus(OK)); + assertThat(authResponse.getStatusLine().getStatusCode(), equalTo(200)); assertThat(authResponse.getHeader("Secret"), equalTo("granted")); } } diff --git a/core/src/test/java/org/elasticsearch/transport/ContextAndHeaderTransportIT.java b/core/src/test/java/org/elasticsearch/transport/ContextAndHeaderTransportIT.java index 6dd6114ce795a..9d25414f4d8e6 100644 --- a/core/src/test/java/org/elasticsearch/transport/ContextAndHeaderTransportIT.java +++ b/core/src/test/java/org/elasticsearch/transport/ContextAndHeaderTransportIT.java @@ -63,12 +63,11 @@ import static org.elasticsearch.cluster.metadata.IndexMetaData.SETTING_NUMBER_OF_SHARDS; import static org.elasticsearch.common.xcontent.XContentFactory.jsonBuilder; -import static org.elasticsearch.rest.RestStatus.OK; import static org.elasticsearch.test.ESIntegTestCase.Scope.SUITE; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertAcked; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertHitCount; import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.assertNoFailures; -import static org.elasticsearch.test.hamcrest.ElasticsearchAssertions.hasStatus; +import static org.hamcrest.Matchers.equalTo; import static org.hamcrest.Matchers.greaterThan; import static org.hamcrest.Matchers.hasSize; import static org.hamcrest.Matchers.is; @@ -222,7 +221,7 @@ public void testThatRelevantHttpHeadersBecomeRequestHeaders() throws Exception { try (ElasticsearchResponse response = getRestClient().performRequest( "GET", "/" + queryIndex + "/_search", Collections.emptyMap(), null, new BasicHeader(randomHeaderKey, randomHeaderValue), new BasicHeader(relevantHeaderName, randomHeaderValue))) { - assertThat(response, hasStatus(OK)); + assertThat(response.getStatusLine().getStatusCode(), equalTo(200)); List searchRequests = getRequests(SearchRequest.class); assertThat(searchRequests, hasSize(greaterThan(0))); for 
(RequestAndHeaders requestAndHeaders : searchRequests) { diff --git a/test/framework/src/main/java/org/elasticsearch/test/hamcrest/ElasticsearchAssertions.java b/test/framework/src/main/java/org/elasticsearch/test/hamcrest/ElasticsearchAssertions.java index 0fe65ccf7520e..51d15e019a407 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/hamcrest/ElasticsearchAssertions.java +++ b/test/framework/src/main/java/org/elasticsearch/test/hamcrest/ElasticsearchAssertions.java @@ -42,7 +42,6 @@ import org.elasticsearch.action.support.broadcast.BroadcastResponse; import org.elasticsearch.action.support.master.AcknowledgedRequestBuilder; import org.elasticsearch.action.support.master.AcknowledgedResponse; -import org.elasticsearch.client.ElasticsearchResponse; import org.elasticsearch.cluster.block.ClusterBlock; import org.elasticsearch.cluster.block.ClusterBlockException; import org.elasticsearch.cluster.metadata.IndexTemplateMetaData; @@ -486,10 +485,6 @@ public static Matcher hasScore(final float score) { return new ElasticsearchMatchers.SearchHitHasScoreMatcher(score); } - public static Matcher hasStatus(RestStatus restStatus) { - return new ElasticsearchMatchers.ElasticsearchResponseHasStatusMatcher(restStatus); - } - public static T assertBooleanSubQuery(Query query, Class subqueryType, int i) { assertThat(query, instanceOf(BooleanQuery.class)); BooleanQuery q = (BooleanQuery) query; diff --git a/test/framework/src/main/java/org/elasticsearch/test/hamcrest/ElasticsearchMatchers.java b/test/framework/src/main/java/org/elasticsearch/test/hamcrest/ElasticsearchMatchers.java index c40e434a5a773..f49cc3bd39ee7 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/hamcrest/ElasticsearchMatchers.java +++ b/test/framework/src/main/java/org/elasticsearch/test/hamcrest/ElasticsearchMatchers.java @@ -18,8 +18,6 @@ */ package org.elasticsearch.test.hamcrest; -import org.elasticsearch.client.ElasticsearchResponse; -import org.elasticsearch.rest.RestStatus; 
import org.elasticsearch.search.SearchHit; import org.hamcrest.Description; import org.hamcrest.TypeSafeMatcher; @@ -117,28 +115,4 @@ public void describeTo(final Description description) { description.appendText("searchHit score should be ").appendValue(score); } } - - public static class ElasticsearchResponseHasStatusMatcher extends TypeSafeMatcher { - - private RestStatus restStatus; - - public ElasticsearchResponseHasStatusMatcher(RestStatus restStatus) { - this.restStatus = restStatus; - } - - @Override - protected boolean matchesSafely(ElasticsearchResponse response) { - return response.getStatusLine().getStatusCode() == restStatus.getStatus(); - } - - @Override - public void describeMismatchSafely(final ElasticsearchResponse response, final Description mismatchDescription) { - mismatchDescription.appendText(" was ").appendValue(response.getStatusLine().getStatusCode()); - } - - @Override - public void describeTo(final Description description) { - description.appendText("Elasticsearch response status code should be ").appendValue(restStatus.getStatus()); - } - } } From b15279b5eff2515b479e7350a5583d87f31ce9bf Mon Sep 17 00:00:00 2001 From: javanna Date: Fri, 3 Jun 2016 23:59:26 +0200 Subject: [PATCH 059/103] Allow to pass socket facttry registry to createDefaultHttpClient method --- .../elasticsearch/client/sniff/Sniffer.java | 3 +- .../org/elasticsearch/client/RestClient.java | 17 +++++++--- .../test/rest/client/RestTestClient.java | 32 +++++++++---------- 3 files changed, 30 insertions(+), 22 deletions(-) diff --git a/client-sniffer/src/main/java/org/elasticsearch/client/sniff/Sniffer.java b/client-sniffer/src/main/java/org/elasticsearch/client/sniff/Sniffer.java index 855b15e1e4d95..fa44ae7d8100e 100644 --- a/client-sniffer/src/main/java/org/elasticsearch/client/sniff/Sniffer.java +++ b/client-sniffer/src/main/java/org/elasticsearch/client/sniff/Sniffer.java @@ -22,6 +22,7 @@ import org.apache.commons.logging.Log; import 
org.apache.commons.logging.LogFactory; import org.apache.http.HttpHost; +import org.apache.http.config.Registry; import org.apache.http.impl.client.CloseableHttpClient; import org.elasticsearch.client.RestClient; @@ -203,7 +204,7 @@ public Builder setSniffAfterFailureDelay(int sniffAfterFailureDelay) { /** * Sets the http client. Mandatory argument. Best practice is to use the same client used * within {@link org.elasticsearch.client.RestClient} which can be created manually or - * through {@link RestClient.Builder#createDefaultHttpClient()}. + * through {@link RestClient.Builder#createDefaultHttpClient(Registry)}. * @see CloseableHttpClient */ public Builder setRestClient(RestClient restClient) { diff --git a/client/src/main/java/org/elasticsearch/client/RestClient.java b/client/src/main/java/org/elasticsearch/client/RestClient.java index 856a50c1834f0..3f7e5a18aecb0 100644 --- a/client/src/main/java/org/elasticsearch/client/RestClient.java +++ b/client/src/main/java/org/elasticsearch/client/RestClient.java @@ -37,6 +37,8 @@ import org.apache.http.client.methods.HttpRequestBase; import org.apache.http.client.methods.HttpTrace; import org.apache.http.client.utils.URIBuilder; +import org.apache.http.config.Registry; +import org.apache.http.conn.socket.ConnectionSocketFactory; import org.apache.http.entity.ContentType; import org.apache.http.impl.client.CloseableHttpClient; import org.apache.http.impl.client.HttpClientBuilder; @@ -374,7 +376,7 @@ private Builder() { /** * Sets the http client. A new default one will be created if not - * specified, by calling {@link #createDefaultHttpClient()}. + * specified, by calling {@link #createDefaultHttpClient(Registry)})}. 
* * @see CloseableHttpClient */ @@ -427,7 +429,7 @@ public Builder setDefaultHeaders(Header[] defaultHeaders) { */ public RestClient build() { if (httpClient == null) { - httpClient = createDefaultHttpClient(); + httpClient = createDefaultHttpClient(null); } if (hosts == null || hosts.length == 0) { throw new IllegalArgumentException("no hosts provided"); @@ -436,12 +438,17 @@ public RestClient build() { } /** - * Creates an http client with default settings + * Creates a {@link CloseableHttpClient} with default settings. Used when the http client instance is not provided. * * @see CloseableHttpClient */ - public static CloseableHttpClient createDefaultHttpClient() { - PoolingHttpClientConnectionManager connectionManager = new PoolingHttpClientConnectionManager(); + public static CloseableHttpClient createDefaultHttpClient(Registry socketFactoryRegistry) { + PoolingHttpClientConnectionManager connectionManager; + if (socketFactoryRegistry == null) { + connectionManager = new PoolingHttpClientConnectionManager(); + } else { + connectionManager = new PoolingHttpClientConnectionManager(socketFactoryRegistry); + } //default settings may be too constraining connectionManager.setDefaultMaxPerRoute(10); connectionManager.setMaxTotal(30); diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/client/RestTestClient.java b/test/framework/src/main/java/org/elasticsearch/test/rest/client/RestTestClient.java index 5baedf8d0603a..94f85422edf5b 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/client/RestTestClient.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/client/RestTestClient.java @@ -29,8 +29,6 @@ import org.apache.http.conn.ssl.SSLConnectionSocketFactory; import org.apache.http.entity.StringEntity; import org.apache.http.impl.client.CloseableHttpClient; -import org.apache.http.impl.client.HttpClientBuilder; -import org.apache.http.impl.conn.PoolingHttpClientConnectionManager; import 
org.apache.http.message.BasicHeader; import org.apache.http.ssl.SSLContexts; import org.apache.lucene.util.IOUtils; @@ -71,7 +69,8 @@ import java.util.Set; /** - * REST client used to test the elasticsearch REST layer + * REST client used to test the elasticsearch REST layer. + * Wraps a {@link RestClient} instance used to send the REST requests. * Holds the {@link RestSpec} used to translate api calls into REST calls */ public class RestTestClient implements Closeable { @@ -136,7 +135,7 @@ public RestTestResponse callApi(String apiName, Map params, Stri if ("raw".equals(apiName)) { // Raw requests are bit simpler.... - HashMap queryStringParams = new HashMap<>(params); + Map queryStringParams = new HashMap<>(params); String method = Objects.requireNonNull(queryStringParams.remove("method"), "Method must be set to use raw request"); String path = "/"+ Objects.requireNonNull(queryStringParams.remove("path"), "Path must be set to use raw request"); HttpEntity entity = null; @@ -267,7 +266,7 @@ private RestApi restApi(String apiName) { return restApi; } - protected RestClient createRestClient(URL[] urls, Settings settings) throws IOException { + private static RestClient createRestClient(URL[] urls, Settings settings) throws IOException { SSLConnectionSocketFactory sslsf; String keystorePath = settings.get(TRUSTSTORE_PATH); if (keystorePath != null) { @@ -297,16 +296,7 @@ protected RestClient createRestClient(URL[] urls, Settings settings) throws IOEx .register("http", PlainConnectionSocketFactory.getSocketFactory()) .register("https", sslsf) .build(); - - List
headers = new ArrayList<>(); - try (ThreadContext threadContext = new ThreadContext(settings)) { - for (Map.Entry entry : threadContext.getHeaders().entrySet()) { - headers.add(new BasicHeader(entry.getKey(), entry.getValue())); - } - } - - CloseableHttpClient httpClient = HttpClientBuilder.create().setDefaultHeaders(headers) - .setConnectionManager(new PoolingHttpClientConnectionManager(socketFactoryRegistry)).build(); + CloseableHttpClient httpClient = RestClient.Builder.createDefaultHttpClient(socketFactoryRegistry); String protocol = settings.get(PROTOCOL, "http"); HttpHost[] hosts = new HttpHost[urls.length]; @@ -314,7 +304,17 @@ protected RestClient createRestClient(URL[] urls, Settings settings) throws IOEx URL url = urls[i]; hosts[i] = new HttpHost(url.getHost(), url.getPort(), protocol); } - return RestClient.builder().setHttpClient(httpClient).setHosts(hosts).build(); + + RestClient.Builder builder = RestClient.builder().setHttpClient(httpClient).setHosts(hosts); + try (ThreadContext threadContext = new ThreadContext(settings)) { + Header[] defaultHeaders = new Header[threadContext.getHeaders().size()]; + int i = 0; + for (Map.Entry entry : threadContext.getHeaders().entrySet()) { + defaultHeaders[i++] = new BasicHeader(entry.getKey(), entry.getValue()); + } + builder.setDefaultHeaders(defaultHeaders); + } + return builder.build(); } /** From f2318ed5ae1a18da9491c6057ac40179a32f6856 Mon Sep 17 00:00:00 2001 From: javanna Date: Sat, 4 Jun 2016 00:45:27 +0200 Subject: [PATCH 060/103] Build: add missing licenses, SHAs and enable dependency licenses check --- client-sniffer/build.gradle | 3 - .../licenses/client-5.0.0-SNAPSHOT.jar.sha1 | 1 + client-sniffer/licenses/client-LICENSE.txt | 202 +++++++ client-sniffer/licenses/client-NOTICE.txt | 5 + .../licenses/commons-codec-1.10.jar.sha1 | 1 + .../licenses/commons-codec-LICENSE.txt | 202 +++++++ .../licenses/commons-codec-NOTICE.txt | 17 + .../licenses/commons-logging-1.1.3.jar.sha1 | 1 + 
.../licenses/commons-logging-LICENSE.txt | 202 +++++++ .../licenses/commons-logging-NOTICE.txt | 6 + .../licenses/httpclient-4.5.2.jar.sha1 | 1 + .../licenses/httpclient-LICENSE.txt | 558 ++++++++++++++++++ client-sniffer/licenses/httpclient-NOTICE.txt | 6 + .../licenses/httpcore-4.4.4.jar.sha1 | 1 + client-sniffer/licenses/httpcore-LICENSE.txt | 558 ++++++++++++++++++ client-sniffer/licenses/httpcore-NOTICE.txt | 6 + .../licenses/jackson-core-2.7.1.jar.sha1 | 1 + client-sniffer/licenses/jackson-core-LICENSE | 8 + client-sniffer/licenses/jackson-core-NOTICE | 20 + client/build.gradle | 3 - client/licenses/commons-codec-1.10.jar.sha1 | 1 + client/licenses/commons-codec-LICENSE.txt | 202 +++++++ client/licenses/commons-codec-NOTICE.txt | 17 + .../licenses/commons-logging-1.1.3.jar.sha1 | 1 + client/licenses/commons-logging-LICENSE.txt | 202 +++++++ client/licenses/commons-logging-NOTICE.txt | 6 + client/licenses/httpclient-4.5.2.jar.sha1 | 1 + client/licenses/httpclient-LICENSE.txt | 558 ++++++++++++++++++ client/licenses/httpclient-NOTICE.txt | 6 + client/licenses/httpcore-4.4.4.jar.sha1 | 1 + client/licenses/httpcore-LICENSE.txt | 558 ++++++++++++++++++ client/licenses/httpcore-NOTICE.txt | 6 + 32 files changed, 3355 insertions(+), 6 deletions(-) create mode 100644 client-sniffer/licenses/client-5.0.0-SNAPSHOT.jar.sha1 create mode 100644 client-sniffer/licenses/client-LICENSE.txt create mode 100644 client-sniffer/licenses/client-NOTICE.txt create mode 100644 client-sniffer/licenses/commons-codec-1.10.jar.sha1 create mode 100644 client-sniffer/licenses/commons-codec-LICENSE.txt create mode 100644 client-sniffer/licenses/commons-codec-NOTICE.txt create mode 100644 client-sniffer/licenses/commons-logging-1.1.3.jar.sha1 create mode 100644 client-sniffer/licenses/commons-logging-LICENSE.txt create mode 100644 client-sniffer/licenses/commons-logging-NOTICE.txt create mode 100644 client-sniffer/licenses/httpclient-4.5.2.jar.sha1 create mode 100644 
client-sniffer/licenses/httpclient-LICENSE.txt create mode 100644 client-sniffer/licenses/httpclient-NOTICE.txt create mode 100644 client-sniffer/licenses/httpcore-4.4.4.jar.sha1 create mode 100644 client-sniffer/licenses/httpcore-LICENSE.txt create mode 100644 client-sniffer/licenses/httpcore-NOTICE.txt create mode 100644 client-sniffer/licenses/jackson-core-2.7.1.jar.sha1 create mode 100644 client-sniffer/licenses/jackson-core-LICENSE create mode 100644 client-sniffer/licenses/jackson-core-NOTICE create mode 100644 client/licenses/commons-codec-1.10.jar.sha1 create mode 100644 client/licenses/commons-codec-LICENSE.txt create mode 100644 client/licenses/commons-codec-NOTICE.txt create mode 100644 client/licenses/commons-logging-1.1.3.jar.sha1 create mode 100644 client/licenses/commons-logging-LICENSE.txt create mode 100644 client/licenses/commons-logging-NOTICE.txt create mode 100644 client/licenses/httpclient-4.5.2.jar.sha1 create mode 100644 client/licenses/httpclient-LICENSE.txt create mode 100644 client/licenses/httpclient-NOTICE.txt create mode 100644 client/licenses/httpcore-4.4.4.jar.sha1 create mode 100644 client/licenses/httpcore-LICENSE.txt create mode 100644 client/licenses/httpcore-NOTICE.txt diff --git a/client-sniffer/build.gradle b/client-sniffer/build.gradle index 6542453fa4f92..b0f8e1e22e061 100644 --- a/client-sniffer/build.gradle +++ b/client-sniffer/build.gradle @@ -60,9 +60,6 @@ forbiddenApisTest.enabled=false //signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt')] //} -//TODO add licenses for dependencies and take care of distribution -//dependency license are currently checked in distribution -dependencyLicenses.enabled=false //JarHell is part of es core, which we don't want to pull in jarHell.enabled=false //NamingConventionCheck is part of test-framework, which we don't want to pull in as it depends on es core diff --git a/client-sniffer/licenses/client-5.0.0-SNAPSHOT.jar.sha1 
b/client-sniffer/licenses/client-5.0.0-SNAPSHOT.jar.sha1 new file mode 100644 index 0000000000000..de30dc58fe6d0 --- /dev/null +++ b/client-sniffer/licenses/client-5.0.0-SNAPSHOT.jar.sha1 @@ -0,0 +1 @@ +4ba4746aa38f81ec7e8341da8c86784bf5384046 \ No newline at end of file diff --git a/client-sniffer/licenses/client-LICENSE.txt b/client-sniffer/licenses/client-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/client-sniffer/licenses/client-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/client-sniffer/licenses/client-NOTICE.txt b/client-sniffer/licenses/client-NOTICE.txt new file mode 100644 index 0000000000000..c99b958193198 --- /dev/null +++ b/client-sniffer/licenses/client-NOTICE.txt @@ -0,0 +1,5 @@ +Elasticsearch +Copyright 2009-2016 Elasticsearch + +This product includes software developed by The Apache Software +Foundation (http://www.apache.org/). diff --git a/client-sniffer/licenses/commons-codec-1.10.jar.sha1 b/client-sniffer/licenses/commons-codec-1.10.jar.sha1 new file mode 100644 index 0000000000000..3fe8682a1b0f9 --- /dev/null +++ b/client-sniffer/licenses/commons-codec-1.10.jar.sha1 @@ -0,0 +1 @@ +4b95f4897fa13f2cd904aee711aeafc0c5295cd8 \ No newline at end of file diff --git a/client-sniffer/licenses/commons-codec-LICENSE.txt b/client-sniffer/licenses/commons-codec-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/client-sniffer/licenses/commons-codec-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/client-sniffer/licenses/commons-codec-NOTICE.txt b/client-sniffer/licenses/commons-codec-NOTICE.txt new file mode 100644 index 0000000000000..1da9af50f6008 --- /dev/null +++ b/client-sniffer/licenses/commons-codec-NOTICE.txt @@ -0,0 +1,17 @@ +Apache Commons Codec +Copyright 2002-2014 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). + +src/test/org/apache/commons/codec/language/DoubleMetaphoneTest.java +contains test data from http://aspell.net/test/orig/batch0.tab. 
+Copyright (C) 2002 Kevin Atkinson (kevina@gnu.org) + +=============================================================================== + +The content of package org.apache.commons.codec.language.bm has been translated +from the original php source code available at http://stevemorse.org/phoneticinfo.htm +with permission from the original authors. +Original source copyright: +Copyright (c) 2008 Alexander Beider & Stephen P. Morse. diff --git a/client-sniffer/licenses/commons-logging-1.1.3.jar.sha1 b/client-sniffer/licenses/commons-logging-1.1.3.jar.sha1 new file mode 100644 index 0000000000000..5b8f029e58293 --- /dev/null +++ b/client-sniffer/licenses/commons-logging-1.1.3.jar.sha1 @@ -0,0 +1 @@ +f6f66e966c70a83ffbdb6f17a0919eaf7c8aca7f \ No newline at end of file diff --git a/client-sniffer/licenses/commons-logging-LICENSE.txt b/client-sniffer/licenses/commons-logging-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/client-sniffer/licenses/commons-logging-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. 
+ + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/client-sniffer/licenses/commons-logging-NOTICE.txt b/client-sniffer/licenses/commons-logging-NOTICE.txt new file mode 100644 index 0000000000000..556bd03951d4b --- /dev/null +++ b/client-sniffer/licenses/commons-logging-NOTICE.txt @@ -0,0 +1,6 @@ +Apache Commons Logging +Copyright 2003-2014 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). + diff --git a/client-sniffer/licenses/httpclient-4.5.2.jar.sha1 b/client-sniffer/licenses/httpclient-4.5.2.jar.sha1 new file mode 100644 index 0000000000000..6937112a09fb6 --- /dev/null +++ b/client-sniffer/licenses/httpclient-4.5.2.jar.sha1 @@ -0,0 +1 @@ +733db77aa8d9b2d68015189df76ab06304406e50 \ No newline at end of file diff --git a/client-sniffer/licenses/httpclient-LICENSE.txt b/client-sniffer/licenses/httpclient-LICENSE.txt new file mode 100644 index 0000000000000..32f01eda18fe9 --- /dev/null +++ b/client-sniffer/licenses/httpclient-LICENSE.txt @@ -0,0 +1,558 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. 
+ + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + +========================================================================= + +This project includes Public Suffix List copied from + +licensed under the terms of the Mozilla Public License, v. 2.0 + +Full license text: + +Mozilla Public License Version 2.0 +================================== + +1. Definitions +-------------- + +1.1. "Contributor" + means each individual or legal entity that creates, contributes to + the creation of, or owns Covered Software. + +1.2. "Contributor Version" + means the combination of the Contributions of others (if any) used + by a Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + means Source Code Form to which the initial Contributor has attached + the notice in Exhibit A, the Executable Form of such Source Code + Form, and Modifications of such Source Code Form, in each case + including portions thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + (a) that the initial Contributor has attached the notice described + in Exhibit B to the Covered Software; or + + (b) that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the + terms of a Secondary License. + +1.6. "Executable Form" + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + means a work that combines Covered Software with other material, in + a separate file or files, that is not Covered Software. + +1.8. "License" + means this document. + +1.9. "Licensable" + means having the right to grant, to the maximum extent possible, + whether at the time of the initial grant or subsequently, any and + all of the rights conveyed by this License. + +1.10. 
"Modifications" + means any of the following: + + (a) any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered + Software; or + + (b) any new file in Source Code Form that contains any Covered + Software. + +1.11. "Patent Claims" of a Contributor + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the + License, by the making, using, selling, offering for sale, having + made, import, or transfer of either its Contributions or its + Contributor Version. + +1.12. "Secondary License" + means either the GNU General Public License, Version 2.0, the GNU + Lesser General Public License, Version 2.1, the GNU Affero General + Public License, Version 3.0, or any later versions of those + licenses. + +1.13. "Source Code Form" + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that + controls, is controlled by, or is under common control with You. For + purposes of this definition, "control" means (a) the power, direct + or indirect, to cause the direction or management of such entity, + whether by contract or otherwise, or (b) ownership of more than + fifty percent (50%) of the outstanding shares or beneficial + ownership of such entity. + +2. License Grants and Conditions +-------------------------------- + +2.1. 
Grants + +Each Contributor hereby grants You a world-wide, royalty-free, +non-exclusive license: + +(a) under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + +(b) under Patent Claims of such Contributor to make, use, sell, offer + for sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + +The licenses granted in Section 2.1 with respect to any Contribution +become effective for each Contribution on the date the Contributor first +distributes such Contribution. + +2.3. Limitations on Grant Scope + +The licenses granted in this Section 2 are the only rights granted under +this License. No additional rights or licenses will be implied from the +distribution or licensing of Covered Software under this License. +Notwithstanding Section 2.1(b) above, no patent license is granted by a +Contributor: + +(a) for any code that a Contributor has removed from Covered Software; + or + +(b) for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + +(c) under Patent Claims infringed by Covered Software in the absence of + its Contributions. + +This License does not grant any rights in the trademarks, service marks, +or logos of any Contributor (except as may be necessary to comply with +the notice requirements in Section 3.4). + +2.4. 
Subsequent Licenses + +No Contributor makes additional grants as a result of Your choice to +distribute the Covered Software under a subsequent version of this +License (see Section 10.2) or under the terms of a Secondary License (if +permitted under the terms of Section 3.3). + +2.5. Representation + +Each Contributor represents that the Contributor believes its +Contributions are its original creation(s) or it has sufficient rights +to grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + +This License is not intended to limit any rights You have under +applicable copyright doctrines of fair use, fair dealing, or other +equivalents. + +2.7. Conditions + +Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted +in Section 2.1. + +3. Responsibilities +------------------- + +3.1. Distribution of Source Form + +All distribution of Covered Software in Source Code Form, including any +Modifications that You create or to which You contribute, must be under +the terms of this License. You must inform recipients that the Source +Code Form of the Covered Software is governed by the terms of this +License, and how they can obtain a copy of this License. You may not +attempt to alter or restrict the recipients' rights in the Source Code +Form. + +3.2. 
Distribution of Executable Form + +If You distribute Covered Software in Executable Form then: + +(a) such Covered Software must also be made available in Source Code + Form, as described in Section 3.1, and You must inform recipients of + the Executable Form how they can obtain a copy of such Source Code + Form by reasonable means in a timely manner, at a charge no more + than the cost of distribution to the recipient; and + +(b) You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter + the recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + +You may create and distribute a Larger Work under terms of Your choice, +provided that You also comply with the requirements of this License for +the Covered Software. If the Larger Work is a combination of Covered +Software with a work governed by one or more Secondary Licenses, and the +Covered Software is not Incompatible With Secondary Licenses, this +License permits You to additionally distribute such Covered Software +under the terms of such Secondary License(s), so that the recipient of +the Larger Work may, at their option, further distribute the Covered +Software under the terms of either this License or such Secondary +License(s). + +3.4. Notices + +You may not remove or alter the substance of any license notices +(including copyright notices, patent notices, disclaimers of warranty, +or limitations of liability) contained within the Source Code Form of +the Covered Software, except that You may alter any license notices to +the extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + +You may choose to offer, and to charge a fee for, warranty, support, +indemnity or liability obligations to one or more recipients of Covered +Software. 
However, You may do so only on Your own behalf, and not on +behalf of any Contributor. You must make it absolutely clear that any +such warranty, support, indemnity, or liability obligation is offered by +You alone, and You hereby agree to indemnify every Contributor for any +liability incurred by such Contributor as a result of warranty, support, +indemnity or liability terms You offer. You may include additional +disclaimers of warranty and limitations of liability specific to any +jurisdiction. + +4. Inability to Comply Due to Statute or Regulation +--------------------------------------------------- + +If it is impossible for You to comply with any of the terms of this +License with respect to some or all of the Covered Software due to +statute, judicial order, or regulation then You must: (a) comply with +the terms of this License to the maximum extent possible; and (b) +describe the limitations and the code they affect. Such description must +be placed in a text file included with all distributions of the Covered +Software under this License. Except to the extent prohibited by statute +or regulation, such description must be sufficiently detailed for a +recipient of ordinary skill to be able to understand it. + +5. Termination +-------------- + +5.1. The rights granted under this License will terminate automatically +if You fail to comply with any of its terms. However, if You become +compliant, then the rights granted under this License from a particular +Contributor are reinstated (a) provisionally, unless and until such +Contributor explicitly and finally terminates Your grants, and (b) on an +ongoing basis, if such Contributor fails to notify You of the +non-compliance by some reasonable means prior to 60 days after You have +come back into compliance. 
Moreover, Your grants from a particular +Contributor are reinstated on an ongoing basis if such Contributor +notifies You of the non-compliance by some reasonable means, this is the +first time You have received notice of non-compliance with this License +from such Contributor, and You become compliant prior to 30 days after +Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent +infringement claim (excluding declaratory judgment actions, +counter-claims, and cross-claims) alleging that a Contributor Version +directly or indirectly infringes any patent, then the rights granted to +You by any and all Contributors for the Covered Software under Section +2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all +end user license agreements (excluding distributors and resellers) which +have been validly granted by You or Your distributors under this License +prior to termination shall survive termination. + +************************************************************************ +* * +* 6. Disclaimer of Warranty * +* ------------------------- * +* * +* Covered Software is provided under this License on an "as is" * +* basis, without warranty of any kind, either expressed, implied, or * +* statutory, including, without limitation, warranties that the * +* Covered Software is free of defects, merchantable, fit for a * +* particular purpose or non-infringing. The entire risk as to the * +* quality and performance of the Covered Software is with You. * +* Should any Covered Software prove defective in any respect, You * +* (not any Contributor) assume the cost of any necessary servicing, * +* repair, or correction. This disclaimer of warranty constitutes an * +* essential part of this License. No use of any Covered Software is * +* authorized under this License except under this disclaimer. 
* +* * +************************************************************************ + +************************************************************************ +* * +* 7. Limitation of Liability * +* -------------------------- * +* * +* Under no circumstances and under no legal theory, whether tort * +* (including negligence), contract, or otherwise, shall any * +* Contributor, or anyone who distributes Covered Software as * +* permitted above, be liable to You for any direct, indirect, * +* special, incidental, or consequential damages of any character * +* including, without limitation, damages for lost profits, loss of * +* goodwill, work stoppage, computer failure or malfunction, or any * +* and all other commercial damages or losses, even if such party * +* shall have been informed of the possibility of such damages. This * +* limitation of liability shall not apply to liability for death or * +* personal injury resulting from such party's negligence to the * +* extent applicable law prohibits such limitation. Some * +* jurisdictions do not allow the exclusion or limitation of * +* incidental or consequential damages, so this exclusion and * +* limitation may not apply to You. * +* * +************************************************************************ + +8. Litigation +------------- + +Any litigation relating to this License may be brought only in the +courts of a jurisdiction where the defendant maintains its principal +place of business and such litigation shall be governed by laws of that +jurisdiction, without reference to its conflict-of-law provisions. +Nothing in this Section shall prevent a party's ability to bring +cross-claims or counter-claims. + +9. Miscellaneous +---------------- + +This License represents the complete agreement concerning the subject +matter hereof. If any provision of this License is held to be +unenforceable, such provision shall be reformed only to the extent +necessary to make it enforceable. 
Any law or regulation which provides +that the language of a contract shall be construed against the drafter +shall not be used to construe this License against a Contributor. + +10. Versions of the License +--------------------------- + +10.1. New Versions + +Mozilla Foundation is the license steward. Except as provided in Section +10.3, no one other than the license steward has the right to modify or +publish new versions of this License. Each version will be given a +distinguishing version number. + +10.2. Effect of New Versions + +You may distribute the Covered Software under the terms of the version +of the License under which You originally received the Covered Software, +or under the terms of any subsequent version published by the license +steward. + +10.3. Modified Versions + +If you create software not governed by this License, and you want to +create a new license for such software, you may create and use a +modified version of this License if you rename the license and remove +any references to the name of the license steward (except to note that +such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary +Licenses + +If You choose to distribute Source Code Form that is Incompatible With +Secondary Licenses under the terms of this version of the License, the +notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice +------------------------------------------- + + This Source Code Form is subject to the terms of the Mozilla Public + License, v. 2.0. If a copy of the MPL was not distributed with this + file, You can obtain one at http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular +file, then You may include the notice in a location (such as a LICENSE +file in a relevant directory) where a recipient would be likely to look +for such a notice. 
+ +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice +--------------------------------------------------------- + + This Source Code Form is "Incompatible With Secondary Licenses", as + defined by the Mozilla Public License, v. 2.0. diff --git a/client-sniffer/licenses/httpclient-NOTICE.txt b/client-sniffer/licenses/httpclient-NOTICE.txt new file mode 100644 index 0000000000000..91e5c40c4c6d3 --- /dev/null +++ b/client-sniffer/licenses/httpclient-NOTICE.txt @@ -0,0 +1,6 @@ +Apache HttpComponents Client +Copyright 1999-2016 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). + diff --git a/client-sniffer/licenses/httpcore-4.4.4.jar.sha1 b/client-sniffer/licenses/httpcore-4.4.4.jar.sha1 new file mode 100644 index 0000000000000..ef0c257e0128c --- /dev/null +++ b/client-sniffer/licenses/httpcore-4.4.4.jar.sha1 @@ -0,0 +1 @@ +b31526a230871fbe285fbcbe2813f9c0839ae9b0 \ No newline at end of file diff --git a/client-sniffer/licenses/httpcore-LICENSE.txt b/client-sniffer/licenses/httpcore-LICENSE.txt new file mode 100644 index 0000000000000..32f01eda18fe9 --- /dev/null +++ b/client-sniffer/licenses/httpcore-LICENSE.txt @@ -0,0 +1,558 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + +========================================================================= + +This project includes Public Suffix List copied from + +licensed under the terms of the Mozilla Public License, v. 2.0 + +Full license text: + +Mozilla Public License Version 2.0 +================================== + +1. Definitions +-------------- + +1.1. "Contributor" + means each individual or legal entity that creates, contributes to + the creation of, or owns Covered Software. + +1.2. 
"Contributor Version" + means the combination of the Contributions of others (if any) used + by a Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + means Source Code Form to which the initial Contributor has attached + the notice in Exhibit A, the Executable Form of such Source Code + Form, and Modifications of such Source Code Form, in each case + including portions thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + (a) that the initial Contributor has attached the notice described + in Exhibit B to the Covered Software; or + + (b) that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the + terms of a Secondary License. + +1.6. "Executable Form" + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + means a work that combines Covered Software with other material, in + a separate file or files, that is not Covered Software. + +1.8. "License" + means this document. + +1.9. "Licensable" + means having the right to grant, to the maximum extent possible, + whether at the time of the initial grant or subsequently, any and + all of the rights conveyed by this License. + +1.10. "Modifications" + means any of the following: + + (a) any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered + Software; or + + (b) any new file in Source Code Form that contains any Covered + Software. + +1.11. "Patent Claims" of a Contributor + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the + License, by the making, using, selling, offering for sale, having + made, import, or transfer of either its Contributions or its + Contributor Version. + +1.12. 
"Secondary License" + means either the GNU General Public License, Version 2.0, the GNU + Lesser General Public License, Version 2.1, the GNU Affero General + Public License, Version 3.0, or any later versions of those + licenses. + +1.13. "Source Code Form" + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that + controls, is controlled by, or is under common control with You. For + purposes of this definition, "control" means (a) the power, direct + or indirect, to cause the direction or management of such entity, + whether by contract or otherwise, or (b) ownership of more than + fifty percent (50%) of the outstanding shares or beneficial + ownership of such entity. + +2. License Grants and Conditions +-------------------------------- + +2.1. Grants + +Each Contributor hereby grants You a world-wide, royalty-free, +non-exclusive license: + +(a) under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + +(b) under Patent Claims of such Contributor to make, use, sell, offer + for sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + +The licenses granted in Section 2.1 with respect to any Contribution +become effective for each Contribution on the date the Contributor first +distributes such Contribution. + +2.3. Limitations on Grant Scope + +The licenses granted in this Section 2 are the only rights granted under +this License. No additional rights or licenses will be implied from the +distribution or licensing of Covered Software under this License. 
+Notwithstanding Section 2.1(b) above, no patent license is granted by a +Contributor: + +(a) for any code that a Contributor has removed from Covered Software; + or + +(b) for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + +(c) under Patent Claims infringed by Covered Software in the absence of + its Contributions. + +This License does not grant any rights in the trademarks, service marks, +or logos of any Contributor (except as may be necessary to comply with +the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + +No Contributor makes additional grants as a result of Your choice to +distribute the Covered Software under a subsequent version of this +License (see Section 10.2) or under the terms of a Secondary License (if +permitted under the terms of Section 3.3). + +2.5. Representation + +Each Contributor represents that the Contributor believes its +Contributions are its original creation(s) or it has sufficient rights +to grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + +This License is not intended to limit any rights You have under +applicable copyright doctrines of fair use, fair dealing, or other +equivalents. + +2.7. Conditions + +Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted +in Section 2.1. + +3. Responsibilities +------------------- + +3.1. Distribution of Source Form + +All distribution of Covered Software in Source Code Form, including any +Modifications that You create or to which You contribute, must be under +the terms of this License. You must inform recipients that the Source +Code Form of the Covered Software is governed by the terms of this +License, and how they can obtain a copy of this License. You may not +attempt to alter or restrict the recipients' rights in the Source Code +Form. + +3.2. 
Distribution of Executable Form + +If You distribute Covered Software in Executable Form then: + +(a) such Covered Software must also be made available in Source Code + Form, as described in Section 3.1, and You must inform recipients of + the Executable Form how they can obtain a copy of such Source Code + Form by reasonable means in a timely manner, at a charge no more + than the cost of distribution to the recipient; and + +(b) You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter + the recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + +You may create and distribute a Larger Work under terms of Your choice, +provided that You also comply with the requirements of this License for +the Covered Software. If the Larger Work is a combination of Covered +Software with a work governed by one or more Secondary Licenses, and the +Covered Software is not Incompatible With Secondary Licenses, this +License permits You to additionally distribute such Covered Software +under the terms of such Secondary License(s), so that the recipient of +the Larger Work may, at their option, further distribute the Covered +Software under the terms of either this License or such Secondary +License(s). + +3.4. Notices + +You may not remove or alter the substance of any license notices +(including copyright notices, patent notices, disclaimers of warranty, +or limitations of liability) contained within the Source Code Form of +the Covered Software, except that You may alter any license notices to +the extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + +You may choose to offer, and to charge a fee for, warranty, support, +indemnity or liability obligations to one or more recipients of Covered +Software. 
However, You may do so only on Your own behalf, and not on +behalf of any Contributor. You must make it absolutely clear that any +such warranty, support, indemnity, or liability obligation is offered by +You alone, and You hereby agree to indemnify every Contributor for any +liability incurred by such Contributor as a result of warranty, support, +indemnity or liability terms You offer. You may include additional +disclaimers of warranty and limitations of liability specific to any +jurisdiction. + +4. Inability to Comply Due to Statute or Regulation +--------------------------------------------------- + +If it is impossible for You to comply with any of the terms of this +License with respect to some or all of the Covered Software due to +statute, judicial order, or regulation then You must: (a) comply with +the terms of this License to the maximum extent possible; and (b) +describe the limitations and the code they affect. Such description must +be placed in a text file included with all distributions of the Covered +Software under this License. Except to the extent prohibited by statute +or regulation, such description must be sufficiently detailed for a +recipient of ordinary skill to be able to understand it. + +5. Termination +-------------- + +5.1. The rights granted under this License will terminate automatically +if You fail to comply with any of its terms. However, if You become +compliant, then the rights granted under this License from a particular +Contributor are reinstated (a) provisionally, unless and until such +Contributor explicitly and finally terminates Your grants, and (b) on an +ongoing basis, if such Contributor fails to notify You of the +non-compliance by some reasonable means prior to 60 days after You have +come back into compliance. 
Moreover, Your grants from a particular +Contributor are reinstated on an ongoing basis if such Contributor +notifies You of the non-compliance by some reasonable means, this is the +first time You have received notice of non-compliance with this License +from such Contributor, and You become compliant prior to 30 days after +Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent +infringement claim (excluding declaratory judgment actions, +counter-claims, and cross-claims) alleging that a Contributor Version +directly or indirectly infringes any patent, then the rights granted to +You by any and all Contributors for the Covered Software under Section +2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all +end user license agreements (excluding distributors and resellers) which +have been validly granted by You or Your distributors under this License +prior to termination shall survive termination. + +************************************************************************ +* * +* 6. Disclaimer of Warranty * +* ------------------------- * +* * +* Covered Software is provided under this License on an "as is" * +* basis, without warranty of any kind, either expressed, implied, or * +* statutory, including, without limitation, warranties that the * +* Covered Software is free of defects, merchantable, fit for a * +* particular purpose or non-infringing. The entire risk as to the * +* quality and performance of the Covered Software is with You. * +* Should any Covered Software prove defective in any respect, You * +* (not any Contributor) assume the cost of any necessary servicing, * +* repair, or correction. This disclaimer of warranty constitutes an * +* essential part of this License. No use of any Covered Software is * +* authorized under this License except under this disclaimer. 
* +* * +************************************************************************ + +************************************************************************ +* * +* 7. Limitation of Liability * +* -------------------------- * +* * +* Under no circumstances and under no legal theory, whether tort * +* (including negligence), contract, or otherwise, shall any * +* Contributor, or anyone who distributes Covered Software as * +* permitted above, be liable to You for any direct, indirect, * +* special, incidental, or consequential damages of any character * +* including, without limitation, damages for lost profits, loss of * +* goodwill, work stoppage, computer failure or malfunction, or any * +* and all other commercial damages or losses, even if such party * +* shall have been informed of the possibility of such damages. This * +* limitation of liability shall not apply to liability for death or * +* personal injury resulting from such party's negligence to the * +* extent applicable law prohibits such limitation. Some * +* jurisdictions do not allow the exclusion or limitation of * +* incidental or consequential damages, so this exclusion and * +* limitation may not apply to You. * +* * +************************************************************************ + +8. Litigation +------------- + +Any litigation relating to this License may be brought only in the +courts of a jurisdiction where the defendant maintains its principal +place of business and such litigation shall be governed by laws of that +jurisdiction, without reference to its conflict-of-law provisions. +Nothing in this Section shall prevent a party's ability to bring +cross-claims or counter-claims. + +9. Miscellaneous +---------------- + +This License represents the complete agreement concerning the subject +matter hereof. If any provision of this License is held to be +unenforceable, such provision shall be reformed only to the extent +necessary to make it enforceable. 
Any law or regulation which provides +that the language of a contract shall be construed against the drafter +shall not be used to construe this License against a Contributor. + +10. Versions of the License +--------------------------- + +10.1. New Versions + +Mozilla Foundation is the license steward. Except as provided in Section +10.3, no one other than the license steward has the right to modify or +publish new versions of this License. Each version will be given a +distinguishing version number. + +10.2. Effect of New Versions + +You may distribute the Covered Software under the terms of the version +of the License under which You originally received the Covered Software, +or under the terms of any subsequent version published by the license +steward. + +10.3. Modified Versions + +If you create software not governed by this License, and you want to +create a new license for such software, you may create and use a +modified version of this License if you rename the license and remove +any references to the name of the license steward (except to note that +such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary +Licenses + +If You choose to distribute Source Code Form that is Incompatible With +Secondary Licenses under the terms of this version of the License, the +notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice +------------------------------------------- + + This Source Code Form is subject to the terms of the Mozilla Public + License, v. 2.0. If a copy of the MPL was not distributed with this + file, You can obtain one at http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular +file, then You may include the notice in a location (such as a LICENSE +file in a relevant directory) where a recipient would be likely to look +for such a notice. 
+ +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice +--------------------------------------------------------- + + This Source Code Form is "Incompatible With Secondary Licenses", as + defined by the Mozilla Public License, v. 2.0. diff --git a/client-sniffer/licenses/httpcore-NOTICE.txt b/client-sniffer/licenses/httpcore-NOTICE.txt new file mode 100644 index 0000000000000..91e5c40c4c6d3 --- /dev/null +++ b/client-sniffer/licenses/httpcore-NOTICE.txt @@ -0,0 +1,6 @@ +Apache HttpComponents Client +Copyright 1999-2016 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). + diff --git a/client-sniffer/licenses/jackson-core-2.7.1.jar.sha1 b/client-sniffer/licenses/jackson-core-2.7.1.jar.sha1 new file mode 100644 index 0000000000000..73831ed2d5188 --- /dev/null +++ b/client-sniffer/licenses/jackson-core-2.7.1.jar.sha1 @@ -0,0 +1 @@ +4127b62db028f981e81caa248953c0899d720f98 \ No newline at end of file diff --git a/client-sniffer/licenses/jackson-core-LICENSE b/client-sniffer/licenses/jackson-core-LICENSE new file mode 100644 index 0000000000000..f5f45d26a49d6 --- /dev/null +++ b/client-sniffer/licenses/jackson-core-LICENSE @@ -0,0 +1,8 @@ +This copy of Jackson JSON processor streaming parser/generator is licensed under the +Apache (Software) License, version 2.0 ("the License"). +See the License for details about distribution rights, and the +specific rights regarding derivate works. + +You may obtain a copy of the License at: + +http://www.apache.org/licenses/LICENSE-2.0 diff --git a/client-sniffer/licenses/jackson-core-NOTICE b/client-sniffer/licenses/jackson-core-NOTICE new file mode 100644 index 0000000000000..4c976b7b4cc58 --- /dev/null +++ b/client-sniffer/licenses/jackson-core-NOTICE @@ -0,0 +1,20 @@ +# Jackson JSON processor + +Jackson is a high-performance, Free/Open Source JSON processing library. 
+It was originally written by Tatu Saloranta (tatu.saloranta@iki.fi), and has +been in development since 2007. +It is currently developed by a community of developers, as well as supported +commercially by FasterXML.com. + +## Licensing + +Jackson core and extension components may licensed under different licenses. +To find the details that apply to this artifact see the accompanying LICENSE file. +For more information, including possible other licensing options, contact +FasterXML.com (http://fasterxml.com). + +## Credits + +A list of contributors may be found from CREDITS file, which is included +in some artifacts (usually source distributions); but is always available +from the source code management (SCM) system project uses. diff --git a/client/build.gradle b/client/build.gradle index 6d2b92f0b0c96..4c4a56d7731b0 100644 --- a/client/build.gradle +++ b/client/build.gradle @@ -58,9 +58,6 @@ forbiddenApisTest.enabled=false //signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt')] //} -//TODO add licenses for dependencies and take care of distribution -//dependency license are currently checked in distribution -dependencyLicenses.enabled=false //JarHell is part of es core, which we don't want to pull in jarHell.enabled=false //NamingConventionCheck is part of test-framework, which we don't want to pull in as it depends on es core diff --git a/client/licenses/commons-codec-1.10.jar.sha1 b/client/licenses/commons-codec-1.10.jar.sha1 new file mode 100644 index 0000000000000..3fe8682a1b0f9 --- /dev/null +++ b/client/licenses/commons-codec-1.10.jar.sha1 @@ -0,0 +1 @@ +4b95f4897fa13f2cd904aee711aeafc0c5295cd8 \ No newline at end of file diff --git a/client/licenses/commons-codec-LICENSE.txt b/client/licenses/commons-codec-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/client/licenses/commons-codec-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ 
+ + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. 
For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/client/licenses/commons-codec-NOTICE.txt b/client/licenses/commons-codec-NOTICE.txt new file mode 100644 index 0000000000000..1da9af50f6008 --- /dev/null +++ b/client/licenses/commons-codec-NOTICE.txt @@ -0,0 +1,17 @@ +Apache Commons Codec +Copyright 2002-2014 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). + +src/test/org/apache/commons/codec/language/DoubleMetaphoneTest.java +contains test data from http://aspell.net/test/orig/batch0.tab. 
+Copyright (C) 2002 Kevin Atkinson (kevina@gnu.org) + +=============================================================================== + +The content of package org.apache.commons.codec.language.bm has been translated +from the original php source code available at http://stevemorse.org/phoneticinfo.htm +with permission from the original authors. +Original source copyright: +Copyright (c) 2008 Alexander Beider & Stephen P. Morse. diff --git a/client/licenses/commons-logging-1.1.3.jar.sha1 b/client/licenses/commons-logging-1.1.3.jar.sha1 new file mode 100644 index 0000000000000..5b8f029e58293 --- /dev/null +++ b/client/licenses/commons-logging-1.1.3.jar.sha1 @@ -0,0 +1 @@ +f6f66e966c70a83ffbdb6f17a0919eaf7c8aca7f \ No newline at end of file diff --git a/client/licenses/commons-logging-LICENSE.txt b/client/licenses/commons-logging-LICENSE.txt new file mode 100644 index 0000000000000..d645695673349 --- /dev/null +++ b/client/licenses/commons-logging-LICENSE.txt @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. 
+ + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/client/licenses/commons-logging-NOTICE.txt b/client/licenses/commons-logging-NOTICE.txt new file mode 100644 index 0000000000000..556bd03951d4b --- /dev/null +++ b/client/licenses/commons-logging-NOTICE.txt @@ -0,0 +1,6 @@ +Apache Commons Logging +Copyright 2003-2014 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). + diff --git a/client/licenses/httpclient-4.5.2.jar.sha1 b/client/licenses/httpclient-4.5.2.jar.sha1 new file mode 100644 index 0000000000000..6937112a09fb6 --- /dev/null +++ b/client/licenses/httpclient-4.5.2.jar.sha1 @@ -0,0 +1 @@ +733db77aa8d9b2d68015189df76ab06304406e50 \ No newline at end of file diff --git a/client/licenses/httpclient-LICENSE.txt b/client/licenses/httpclient-LICENSE.txt new file mode 100644 index 0000000000000..32f01eda18fe9 --- /dev/null +++ b/client/licenses/httpclient-LICENSE.txt @@ -0,0 +1,558 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. 
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + +========================================================================= + +This project includes Public Suffix List copied from + +licensed under the terms of the Mozilla Public License, v. 2.0 + +Full license text: + +Mozilla Public License Version 2.0 +================================== + +1. Definitions +-------------- + +1.1. "Contributor" + means each individual or legal entity that creates, contributes to + the creation of, or owns Covered Software. + +1.2. "Contributor Version" + means the combination of the Contributions of others (if any) used + by a Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + means Source Code Form to which the initial Contributor has attached + the notice in Exhibit A, the Executable Form of such Source Code + Form, and Modifications of such Source Code Form, in each case + including portions thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + (a) that the initial Contributor has attached the notice described + in Exhibit B to the Covered Software; or + + (b) that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the + terms of a Secondary License. + +1.6. "Executable Form" + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + means a work that combines Covered Software with other material, in + a separate file or files, that is not Covered Software. + +1.8. "License" + means this document. + +1.9. "Licensable" + means having the right to grant, to the maximum extent possible, + whether at the time of the initial grant or subsequently, any and + all of the rights conveyed by this License. + +1.10. 
"Modifications" + means any of the following: + + (a) any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered + Software; or + + (b) any new file in Source Code Form that contains any Covered + Software. + +1.11. "Patent Claims" of a Contributor + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the + License, by the making, using, selling, offering for sale, having + made, import, or transfer of either its Contributions or its + Contributor Version. + +1.12. "Secondary License" + means either the GNU General Public License, Version 2.0, the GNU + Lesser General Public License, Version 2.1, the GNU Affero General + Public License, Version 3.0, or any later versions of those + licenses. + +1.13. "Source Code Form" + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that + controls, is controlled by, or is under common control with You. For + purposes of this definition, "control" means (a) the power, direct + or indirect, to cause the direction or management of such entity, + whether by contract or otherwise, or (b) ownership of more than + fifty percent (50%) of the outstanding shares or beneficial + ownership of such entity. + +2. License Grants and Conditions +-------------------------------- + +2.1. 
Grants + +Each Contributor hereby grants You a world-wide, royalty-free, +non-exclusive license: + +(a) under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + +(b) under Patent Claims of such Contributor to make, use, sell, offer + for sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + +The licenses granted in Section 2.1 with respect to any Contribution +become effective for each Contribution on the date the Contributor first +distributes such Contribution. + +2.3. Limitations on Grant Scope + +The licenses granted in this Section 2 are the only rights granted under +this License. No additional rights or licenses will be implied from the +distribution or licensing of Covered Software under this License. +Notwithstanding Section 2.1(b) above, no patent license is granted by a +Contributor: + +(a) for any code that a Contributor has removed from Covered Software; + or + +(b) for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + +(c) under Patent Claims infringed by Covered Software in the absence of + its Contributions. + +This License does not grant any rights in the trademarks, service marks, +or logos of any Contributor (except as may be necessary to comply with +the notice requirements in Section 3.4). + +2.4. 
Subsequent Licenses + +No Contributor makes additional grants as a result of Your choice to +distribute the Covered Software under a subsequent version of this +License (see Section 10.2) or under the terms of a Secondary License (if +permitted under the terms of Section 3.3). + +2.5. Representation + +Each Contributor represents that the Contributor believes its +Contributions are its original creation(s) or it has sufficient rights +to grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + +This License is not intended to limit any rights You have under +applicable copyright doctrines of fair use, fair dealing, or other +equivalents. + +2.7. Conditions + +Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted +in Section 2.1. + +3. Responsibilities +------------------- + +3.1. Distribution of Source Form + +All distribution of Covered Software in Source Code Form, including any +Modifications that You create or to which You contribute, must be under +the terms of this License. You must inform recipients that the Source +Code Form of the Covered Software is governed by the terms of this +License, and how they can obtain a copy of this License. You may not +attempt to alter or restrict the recipients' rights in the Source Code +Form. + +3.2. 
Distribution of Executable Form + +If You distribute Covered Software in Executable Form then: + +(a) such Covered Software must also be made available in Source Code + Form, as described in Section 3.1, and You must inform recipients of + the Executable Form how they can obtain a copy of such Source Code + Form by reasonable means in a timely manner, at a charge no more + than the cost of distribution to the recipient; and + +(b) You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter + the recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + +You may create and distribute a Larger Work under terms of Your choice, +provided that You also comply with the requirements of this License for +the Covered Software. If the Larger Work is a combination of Covered +Software with a work governed by one or more Secondary Licenses, and the +Covered Software is not Incompatible With Secondary Licenses, this +License permits You to additionally distribute such Covered Software +under the terms of such Secondary License(s), so that the recipient of +the Larger Work may, at their option, further distribute the Covered +Software under the terms of either this License or such Secondary +License(s). + +3.4. Notices + +You may not remove or alter the substance of any license notices +(including copyright notices, patent notices, disclaimers of warranty, +or limitations of liability) contained within the Source Code Form of +the Covered Software, except that You may alter any license notices to +the extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + +You may choose to offer, and to charge a fee for, warranty, support, +indemnity or liability obligations to one or more recipients of Covered +Software. 
However, You may do so only on Your own behalf, and not on +behalf of any Contributor. You must make it absolutely clear that any +such warranty, support, indemnity, or liability obligation is offered by +You alone, and You hereby agree to indemnify every Contributor for any +liability incurred by such Contributor as a result of warranty, support, +indemnity or liability terms You offer. You may include additional +disclaimers of warranty and limitations of liability specific to any +jurisdiction. + +4. Inability to Comply Due to Statute or Regulation +--------------------------------------------------- + +If it is impossible for You to comply with any of the terms of this +License with respect to some or all of the Covered Software due to +statute, judicial order, or regulation then You must: (a) comply with +the terms of this License to the maximum extent possible; and (b) +describe the limitations and the code they affect. Such description must +be placed in a text file included with all distributions of the Covered +Software under this License. Except to the extent prohibited by statute +or regulation, such description must be sufficiently detailed for a +recipient of ordinary skill to be able to understand it. + +5. Termination +-------------- + +5.1. The rights granted under this License will terminate automatically +if You fail to comply with any of its terms. However, if You become +compliant, then the rights granted under this License from a particular +Contributor are reinstated (a) provisionally, unless and until such +Contributor explicitly and finally terminates Your grants, and (b) on an +ongoing basis, if such Contributor fails to notify You of the +non-compliance by some reasonable means prior to 60 days after You have +come back into compliance. 
Moreover, Your grants from a particular +Contributor are reinstated on an ongoing basis if such Contributor +notifies You of the non-compliance by some reasonable means, this is the +first time You have received notice of non-compliance with this License +from such Contributor, and You become compliant prior to 30 days after +Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent +infringement claim (excluding declaratory judgment actions, +counter-claims, and cross-claims) alleging that a Contributor Version +directly or indirectly infringes any patent, then the rights granted to +You by any and all Contributors for the Covered Software under Section +2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all +end user license agreements (excluding distributors and resellers) which +have been validly granted by You or Your distributors under this License +prior to termination shall survive termination. + +************************************************************************ +* * +* 6. Disclaimer of Warranty * +* ------------------------- * +* * +* Covered Software is provided under this License on an "as is" * +* basis, without warranty of any kind, either expressed, implied, or * +* statutory, including, without limitation, warranties that the * +* Covered Software is free of defects, merchantable, fit for a * +* particular purpose or non-infringing. The entire risk as to the * +* quality and performance of the Covered Software is with You. * +* Should any Covered Software prove defective in any respect, You * +* (not any Contributor) assume the cost of any necessary servicing, * +* repair, or correction. This disclaimer of warranty constitutes an * +* essential part of this License. No use of any Covered Software is * +* authorized under this License except under this disclaimer. 
* +* * +************************************************************************ + +************************************************************************ +* * +* 7. Limitation of Liability * +* -------------------------- * +* * +* Under no circumstances and under no legal theory, whether tort * +* (including negligence), contract, or otherwise, shall any * +* Contributor, or anyone who distributes Covered Software as * +* permitted above, be liable to You for any direct, indirect, * +* special, incidental, or consequential damages of any character * +* including, without limitation, damages for lost profits, loss of * +* goodwill, work stoppage, computer failure or malfunction, or any * +* and all other commercial damages or losses, even if such party * +* shall have been informed of the possibility of such damages. This * +* limitation of liability shall not apply to liability for death or * +* personal injury resulting from such party's negligence to the * +* extent applicable law prohibits such limitation. Some * +* jurisdictions do not allow the exclusion or limitation of * +* incidental or consequential damages, so this exclusion and * +* limitation may not apply to You. * +* * +************************************************************************ + +8. Litigation +------------- + +Any litigation relating to this License may be brought only in the +courts of a jurisdiction where the defendant maintains its principal +place of business and such litigation shall be governed by laws of that +jurisdiction, without reference to its conflict-of-law provisions. +Nothing in this Section shall prevent a party's ability to bring +cross-claims or counter-claims. + +9. Miscellaneous +---------------- + +This License represents the complete agreement concerning the subject +matter hereof. If any provision of this License is held to be +unenforceable, such provision shall be reformed only to the extent +necessary to make it enforceable. 
Any law or regulation which provides +that the language of a contract shall be construed against the drafter +shall not be used to construe this License against a Contributor. + +10. Versions of the License +--------------------------- + +10.1. New Versions + +Mozilla Foundation is the license steward. Except as provided in Section +10.3, no one other than the license steward has the right to modify or +publish new versions of this License. Each version will be given a +distinguishing version number. + +10.2. Effect of New Versions + +You may distribute the Covered Software under the terms of the version +of the License under which You originally received the Covered Software, +or under the terms of any subsequent version published by the license +steward. + +10.3. Modified Versions + +If you create software not governed by this License, and you want to +create a new license for such software, you may create and use a +modified version of this License if you rename the license and remove +any references to the name of the license steward (except to note that +such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary +Licenses + +If You choose to distribute Source Code Form that is Incompatible With +Secondary Licenses under the terms of this version of the License, the +notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice +------------------------------------------- + + This Source Code Form is subject to the terms of the Mozilla Public + License, v. 2.0. If a copy of the MPL was not distributed with this + file, You can obtain one at http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular +file, then You may include the notice in a location (such as a LICENSE +file in a relevant directory) where a recipient would be likely to look +for such a notice. 
+ +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice +--------------------------------------------------------- + + This Source Code Form is "Incompatible With Secondary Licenses", as + defined by the Mozilla Public License, v. 2.0. diff --git a/client/licenses/httpclient-NOTICE.txt b/client/licenses/httpclient-NOTICE.txt new file mode 100644 index 0000000000000..91e5c40c4c6d3 --- /dev/null +++ b/client/licenses/httpclient-NOTICE.txt @@ -0,0 +1,6 @@ +Apache HttpComponents Client +Copyright 1999-2016 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). + diff --git a/client/licenses/httpcore-4.4.4.jar.sha1 b/client/licenses/httpcore-4.4.4.jar.sha1 new file mode 100644 index 0000000000000..ef0c257e0128c --- /dev/null +++ b/client/licenses/httpcore-4.4.4.jar.sha1 @@ -0,0 +1 @@ +b31526a230871fbe285fbcbe2813f9c0839ae9b0 \ No newline at end of file diff --git a/client/licenses/httpcore-LICENSE.txt b/client/licenses/httpcore-LICENSE.txt new file mode 100644 index 0000000000000..32f01eda18fe9 --- /dev/null +++ b/client/licenses/httpcore-LICENSE.txt @@ -0,0 +1,558 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + +========================================================================= + +This project includes Public Suffix List copied from + +licensed under the terms of the Mozilla Public License, v. 2.0 + +Full license text: + +Mozilla Public License Version 2.0 +================================== + +1. Definitions +-------------- + +1.1. "Contributor" + means each individual or legal entity that creates, contributes to + the creation of, or owns Covered Software. + +1.2. 
"Contributor Version" + means the combination of the Contributions of others (if any) used + by a Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + means Source Code Form to which the initial Contributor has attached + the notice in Exhibit A, the Executable Form of such Source Code + Form, and Modifications of such Source Code Form, in each case + including portions thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + (a) that the initial Contributor has attached the notice described + in Exhibit B to the Covered Software; or + + (b) that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the + terms of a Secondary License. + +1.6. "Executable Form" + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + means a work that combines Covered Software with other material, in + a separate file or files, that is not Covered Software. + +1.8. "License" + means this document. + +1.9. "Licensable" + means having the right to grant, to the maximum extent possible, + whether at the time of the initial grant or subsequently, any and + all of the rights conveyed by this License. + +1.10. "Modifications" + means any of the following: + + (a) any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered + Software; or + + (b) any new file in Source Code Form that contains any Covered + Software. + +1.11. "Patent Claims" of a Contributor + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the + License, by the making, using, selling, offering for sale, having + made, import, or transfer of either its Contributions or its + Contributor Version. + +1.12. 
"Secondary License" + means either the GNU General Public License, Version 2.0, the GNU + Lesser General Public License, Version 2.1, the GNU Affero General + Public License, Version 3.0, or any later versions of those + licenses. + +1.13. "Source Code Form" + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that + controls, is controlled by, or is under common control with You. For + purposes of this definition, "control" means (a) the power, direct + or indirect, to cause the direction or management of such entity, + whether by contract or otherwise, or (b) ownership of more than + fifty percent (50%) of the outstanding shares or beneficial + ownership of such entity. + +2. License Grants and Conditions +-------------------------------- + +2.1. Grants + +Each Contributor hereby grants You a world-wide, royalty-free, +non-exclusive license: + +(a) under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + +(b) under Patent Claims of such Contributor to make, use, sell, offer + for sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + +The licenses granted in Section 2.1 with respect to any Contribution +become effective for each Contribution on the date the Contributor first +distributes such Contribution. + +2.3. Limitations on Grant Scope + +The licenses granted in this Section 2 are the only rights granted under +this License. No additional rights or licenses will be implied from the +distribution or licensing of Covered Software under this License. 
+Notwithstanding Section 2.1(b) above, no patent license is granted by a +Contributor: + +(a) for any code that a Contributor has removed from Covered Software; + or + +(b) for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + +(c) under Patent Claims infringed by Covered Software in the absence of + its Contributions. + +This License does not grant any rights in the trademarks, service marks, +or logos of any Contributor (except as may be necessary to comply with +the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + +No Contributor makes additional grants as a result of Your choice to +distribute the Covered Software under a subsequent version of this +License (see Section 10.2) or under the terms of a Secondary License (if +permitted under the terms of Section 3.3). + +2.5. Representation + +Each Contributor represents that the Contributor believes its +Contributions are its original creation(s) or it has sufficient rights +to grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + +This License is not intended to limit any rights You have under +applicable copyright doctrines of fair use, fair dealing, or other +equivalents. + +2.7. Conditions + +Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted +in Section 2.1. + +3. Responsibilities +------------------- + +3.1. Distribution of Source Form + +All distribution of Covered Software in Source Code Form, including any +Modifications that You create or to which You contribute, must be under +the terms of this License. You must inform recipients that the Source +Code Form of the Covered Software is governed by the terms of this +License, and how they can obtain a copy of this License. You may not +attempt to alter or restrict the recipients' rights in the Source Code +Form. + +3.2. 
Distribution of Executable Form + +If You distribute Covered Software in Executable Form then: + +(a) such Covered Software must also be made available in Source Code + Form, as described in Section 3.1, and You must inform recipients of + the Executable Form how they can obtain a copy of such Source Code + Form by reasonable means in a timely manner, at a charge no more + than the cost of distribution to the recipient; and + +(b) You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter + the recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + +You may create and distribute a Larger Work under terms of Your choice, +provided that You also comply with the requirements of this License for +the Covered Software. If the Larger Work is a combination of Covered +Software with a work governed by one or more Secondary Licenses, and the +Covered Software is not Incompatible With Secondary Licenses, this +License permits You to additionally distribute such Covered Software +under the terms of such Secondary License(s), so that the recipient of +the Larger Work may, at their option, further distribute the Covered +Software under the terms of either this License or such Secondary +License(s). + +3.4. Notices + +You may not remove or alter the substance of any license notices +(including copyright notices, patent notices, disclaimers of warranty, +or limitations of liability) contained within the Source Code Form of +the Covered Software, except that You may alter any license notices to +the extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + +You may choose to offer, and to charge a fee for, warranty, support, +indemnity or liability obligations to one or more recipients of Covered +Software. 
However, You may do so only on Your own behalf, and not on +behalf of any Contributor. You must make it absolutely clear that any +such warranty, support, indemnity, or liability obligation is offered by +You alone, and You hereby agree to indemnify every Contributor for any +liability incurred by such Contributor as a result of warranty, support, +indemnity or liability terms You offer. You may include additional +disclaimers of warranty and limitations of liability specific to any +jurisdiction. + +4. Inability to Comply Due to Statute or Regulation +--------------------------------------------------- + +If it is impossible for You to comply with any of the terms of this +License with respect to some or all of the Covered Software due to +statute, judicial order, or regulation then You must: (a) comply with +the terms of this License to the maximum extent possible; and (b) +describe the limitations and the code they affect. Such description must +be placed in a text file included with all distributions of the Covered +Software under this License. Except to the extent prohibited by statute +or regulation, such description must be sufficiently detailed for a +recipient of ordinary skill to be able to understand it. + +5. Termination +-------------- + +5.1. The rights granted under this License will terminate automatically +if You fail to comply with any of its terms. However, if You become +compliant, then the rights granted under this License from a particular +Contributor are reinstated (a) provisionally, unless and until such +Contributor explicitly and finally terminates Your grants, and (b) on an +ongoing basis, if such Contributor fails to notify You of the +non-compliance by some reasonable means prior to 60 days after You have +come back into compliance. 
Moreover, Your grants from a particular +Contributor are reinstated on an ongoing basis if such Contributor +notifies You of the non-compliance by some reasonable means, this is the +first time You have received notice of non-compliance with this License +from such Contributor, and You become compliant prior to 30 days after +Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent +infringement claim (excluding declaratory judgment actions, +counter-claims, and cross-claims) alleging that a Contributor Version +directly or indirectly infringes any patent, then the rights granted to +You by any and all Contributors for the Covered Software under Section +2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all +end user license agreements (excluding distributors and resellers) which +have been validly granted by You or Your distributors under this License +prior to termination shall survive termination. + +************************************************************************ +* * +* 6. Disclaimer of Warranty * +* ------------------------- * +* * +* Covered Software is provided under this License on an "as is" * +* basis, without warranty of any kind, either expressed, implied, or * +* statutory, including, without limitation, warranties that the * +* Covered Software is free of defects, merchantable, fit for a * +* particular purpose or non-infringing. The entire risk as to the * +* quality and performance of the Covered Software is with You. * +* Should any Covered Software prove defective in any respect, You * +* (not any Contributor) assume the cost of any necessary servicing, * +* repair, or correction. This disclaimer of warranty constitutes an * +* essential part of this License. No use of any Covered Software is * +* authorized under this License except under this disclaimer. 
* +* * +************************************************************************ + +************************************************************************ +* * +* 7. Limitation of Liability * +* -------------------------- * +* * +* Under no circumstances and under no legal theory, whether tort * +* (including negligence), contract, or otherwise, shall any * +* Contributor, or anyone who distributes Covered Software as * +* permitted above, be liable to You for any direct, indirect, * +* special, incidental, or consequential damages of any character * +* including, without limitation, damages for lost profits, loss of * +* goodwill, work stoppage, computer failure or malfunction, or any * +* and all other commercial damages or losses, even if such party * +* shall have been informed of the possibility of such damages. This * +* limitation of liability shall not apply to liability for death or * +* personal injury resulting from such party's negligence to the * +* extent applicable law prohibits such limitation. Some * +* jurisdictions do not allow the exclusion or limitation of * +* incidental or consequential damages, so this exclusion and * +* limitation may not apply to You. * +* * +************************************************************************ + +8. Litigation +------------- + +Any litigation relating to this License may be brought only in the +courts of a jurisdiction where the defendant maintains its principal +place of business and such litigation shall be governed by laws of that +jurisdiction, without reference to its conflict-of-law provisions. +Nothing in this Section shall prevent a party's ability to bring +cross-claims or counter-claims. + +9. Miscellaneous +---------------- + +This License represents the complete agreement concerning the subject +matter hereof. If any provision of this License is held to be +unenforceable, such provision shall be reformed only to the extent +necessary to make it enforceable. 
Any law or regulation which provides +that the language of a contract shall be construed against the drafter +shall not be used to construe this License against a Contributor. + +10. Versions of the License +--------------------------- + +10.1. New Versions + +Mozilla Foundation is the license steward. Except as provided in Section +10.3, no one other than the license steward has the right to modify or +publish new versions of this License. Each version will be given a +distinguishing version number. + +10.2. Effect of New Versions + +You may distribute the Covered Software under the terms of the version +of the License under which You originally received the Covered Software, +or under the terms of any subsequent version published by the license +steward. + +10.3. Modified Versions + +If you create software not governed by this License, and you want to +create a new license for such software, you may create and use a +modified version of this License if you rename the license and remove +any references to the name of the license steward (except to note that +such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary +Licenses + +If You choose to distribute Source Code Form that is Incompatible With +Secondary Licenses under the terms of this version of the License, the +notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice +------------------------------------------- + + This Source Code Form is subject to the terms of the Mozilla Public + License, v. 2.0. If a copy of the MPL was not distributed with this + file, You can obtain one at http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular +file, then You may include the notice in a location (such as a LICENSE +file in a relevant directory) where a recipient would be likely to look +for such a notice. 
+ +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice +--------------------------------------------------------- + + This Source Code Form is "Incompatible With Secondary Licenses", as + defined by the Mozilla Public License, v. 2.0. diff --git a/client/licenses/httpcore-NOTICE.txt b/client/licenses/httpcore-NOTICE.txt new file mode 100644 index 0000000000000..91e5c40c4c6d3 --- /dev/null +++ b/client/licenses/httpcore-NOTICE.txt @@ -0,0 +1,6 @@ +Apache HttpComponents Client +Copyright 1999-2016 The Apache Software Foundation + +This product includes software developed at +The Apache Software Foundation (http://www.apache.org/). + From 56e689e1b36183d9b2a5e214a07d44548fd2faba Mon Sep 17 00:00:00 2001 From: javanna Date: Sat, 4 Jun 2016 01:05:53 +0200 Subject: [PATCH 061/103] [TEST] remove unused method --- .../java/org/elasticsearch/test/rest/ESRestTestCase.java | 5 ----- 1 file changed, 5 deletions(-) diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java index 720408637047a..c27d3ed560474 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java @@ -340,11 +340,6 @@ protected Settings restAdminSettings() { return restClientSettings(); // default to the same client settings } - /** Returns the addresses the client uses to connect to the test cluster. 
*/ - protected URL[] getClusterUrls() { - return clusterUrls; - } - @Before public void reset() throws IOException { // admin context must be available for @After always, regardless of whether the test was blacklisted From 05e26a46d736ff5caf6e6d6394e674828283e3d7 Mon Sep 17 00:00:00 2001 From: javanna Date: Sat, 4 Jun 2016 01:06:18 +0200 Subject: [PATCH 062/103] raise default socket timeout to 10s and default connect timeout to 1s --- client/src/main/java/org/elasticsearch/client/RestClient.java | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/client/src/main/java/org/elasticsearch/client/RestClient.java b/client/src/main/java/org/elasticsearch/client/RestClient.java index 3f7e5a18aecb0..161abbd40a78b 100644 --- a/client/src/main/java/org/elasticsearch/client/RestClient.java +++ b/client/src/main/java/org/elasticsearch/client/RestClient.java @@ -358,8 +358,8 @@ public static Builder builder() { * Rest client builder. Helps creating a new {@link RestClient}. */ public static final class Builder { - public static final int DEFAULT_CONNECT_TIMEOUT = 500; - public static final int DEFAULT_SOCKET_TIMEOUT = 5000; + public static final int DEFAULT_CONNECT_TIMEOUT = 1000; + public static final int DEFAULT_SOCKET_TIMEOUT = 10000; public static final int DEFAULT_MAX_RETRY_TIMEOUT = DEFAULT_SOCKET_TIMEOUT; public static final int DEFAULT_CONNECTION_REQUEST_TIMEOUT = 500; From a461dd84d22186774beaa4fbe931ea3ebf49bd04 Mon Sep 17 00:00:00 2001 From: javanna Date: Mon, 6 Jun 2016 15:02:52 +0200 Subject: [PATCH 063/103] Build: add hamcrest and securemock to version.properties --- buildSrc/version.properties | 2 ++ client-sniffer/build.gradle | 4 ++-- client/build.gradle | 4 ++-- test/framework/build.gradle | 4 ++-- 4 files changed, 8 insertions(+), 6 deletions(-) diff --git a/buildSrc/version.properties b/buildSrc/version.properties index d9e3908df22c2..5aaaca745faa2 100644 --- a/buildSrc/version.properties +++ b/buildSrc/version.properties @@ -17,3 +17,5 @@ 
httpclient = 4.5.2 httpcore = 4.4.4 commonslogging = 1.1.3 commonscodec = 1.10 +hamcrest = 1.3 +securemock = 1.2 \ No newline at end of file diff --git a/client-sniffer/build.gradle b/client-sniffer/build.gradle index b0f8e1e22e061..d561e00718479 100644 --- a/client-sniffer/build.gradle +++ b/client-sniffer/build.gradle @@ -35,11 +35,11 @@ dependencies { testCompile "com.carrotsearch.randomizedtesting:randomizedtesting-runner:${versions.randomizedrunner}" testCompile "junit:junit:${versions.junit}" - testCompile "org.hamcrest:hamcrest-all:1.3" + testCompile "org.hamcrest:hamcrest-all:${versions.hamcrest}" testCompile "org.apache.lucene:lucene-test-framework:${versions.lucene}" testCompile "org.apache.lucene:lucene-core:${versions.lucene}" testCompile "org.apache.lucene:lucene-codecs:${versions.lucene}" - testCompile "org.elasticsearch:securemock:1.2" + testCompile "org.elasticsearch:securemock:${versions.securemock}" } //TODO compiling from 1.8 with target 1.7 and source 1.7 is best effort, not enough to ensure we are java 7 compatible diff --git a/client/build.gradle b/client/build.gradle index 4c4a56d7731b0..2c82fd74569e5 100644 --- a/client/build.gradle +++ b/client/build.gradle @@ -33,11 +33,11 @@ dependencies { testCompile "com.carrotsearch.randomizedtesting:randomizedtesting-runner:${versions.randomizedrunner}" testCompile "junit:junit:${versions.junit}" - testCompile "org.hamcrest:hamcrest-all:1.3" + testCompile "org.hamcrest:hamcrest-all:${versions.hamcrest}" testCompile "org.apache.lucene:lucene-test-framework:${versions.lucene}" testCompile "org.apache.lucene:lucene-core:${versions.lucene}" testCompile "org.apache.lucene:lucene-codecs:${versions.lucene}" - testCompile "org.elasticsearch:securemock:1.2" + testCompile "org.elasticsearch:securemock:${versions.securemock}" } //TODO compiling from 1.8 with target 1.7 and source 1.7 is best effort, not enough to ensure we are java 7 compatible diff --git a/test/framework/build.gradle 
b/test/framework/build.gradle index 078511611feca..6ddcb7598a14f 100644 --- a/test/framework/build.gradle +++ b/test/framework/build.gradle @@ -23,7 +23,7 @@ dependencies { compile "org.elasticsearch:elasticsearch:${version}" compile "com.carrotsearch.randomizedtesting:randomizedtesting-runner:${versions.randomizedrunner}" compile "junit:junit:${versions.junit}" - compile "org.hamcrest:hamcrest-all:1.3" + compile "org.hamcrest:hamcrest-all:${versions.hamcrest}" compile "org.apache.lucene:lucene-test-framework:${versions.lucene}" compile "org.apache.lucene:lucene-codecs:${versions.lucene}" compile "org.elasticsearch:client:${version}" @@ -31,7 +31,7 @@ dependencies { compile "org.apache.httpcomponents:httpcore:${versions.httpcore}" compile "commons-logging:commons-logging:${versions.commonslogging}" compile "commons-codec:commons-codec:${versions.commonscodec}" - compile "org.elasticsearch:securemock:1.2" + compile "org.elasticsearch:securemock:${versions.securemock}" } compileJava.options.compilerArgs << '-Xlint:-cast,-rawtypes,-try,-unchecked' From 467dfbda0ceab516091983fdb7b369c889040081 Mon Sep 17 00:00:00 2001 From: javanna Date: Mon, 6 Jun 2016 15:12:13 +0200 Subject: [PATCH 064/103] use TimeUnit and long to keep track of time --- .../client/sniff/HostsSniffer.java | 2 +- .../elasticsearch/client/sniff/Sniffer.java | 24 +++++++++---------- 2 files changed, 13 insertions(+), 13 deletions(-) diff --git a/client-sniffer/src/main/java/org/elasticsearch/client/sniff/HostsSniffer.java b/client-sniffer/src/main/java/org/elasticsearch/client/sniff/HostsSniffer.java index cf49cfbe68ab5..4af9d20558921 100644 --- a/client-sniffer/src/main/java/org/elasticsearch/client/sniff/HostsSniffer.java +++ b/client-sniffer/src/main/java/org/elasticsearch/client/sniff/HostsSniffer.java @@ -50,7 +50,7 @@ public class HostsSniffer { private final String scheme; private final JsonFactory jsonFactory; - public HostsSniffer(RestClient restClient, int sniffRequestTimeout, String scheme) { 
+ public HostsSniffer(RestClient restClient, long sniffRequestTimeout, String scheme) { this.restClient = restClient; this.sniffRequestParams = Collections.singletonMap("timeout", sniffRequestTimeout + "ms"); this.scheme = scheme; diff --git a/client-sniffer/src/main/java/org/elasticsearch/client/sniff/Sniffer.java b/client-sniffer/src/main/java/org/elasticsearch/client/sniff/Sniffer.java index fa44ae7d8100e..2cd146bd80d27 100644 --- a/client-sniffer/src/main/java/org/elasticsearch/client/sniff/Sniffer.java +++ b/client-sniffer/src/main/java/org/elasticsearch/client/sniff/Sniffer.java @@ -50,8 +50,8 @@ public final class Sniffer extends RestClient.FailureListener implements Closeab private final boolean sniffOnFailure; private final Task task; - private Sniffer(RestClient restClient, int sniffRequestTimeout, String scheme, int sniffInterval, - boolean sniffOnFailure, int sniffAfterFailureDelay) { + private Sniffer(RestClient restClient, long sniffRequestTimeout, String scheme, long sniffInterval, + boolean sniffOnFailure, long sniffAfterFailureDelay) { HostsSniffer hostsSniffer = new HostsSniffer(restClient, sniffRequestTimeout, scheme); this.task = new Task(hostsSniffer, restClient, sniffInterval, sniffAfterFailureDelay); this.sniffOnFailure = sniffOnFailure; @@ -75,14 +75,14 @@ private static class Task implements Runnable { private final HostsSniffer hostsSniffer; private final RestClient restClient; - private final int sniffInterval; - private final int sniffAfterFailureDelay; + private final long sniffInterval; + private final long sniffAfterFailureDelay; private final ScheduledExecutorService scheduledExecutorService; private final AtomicBoolean running = new AtomicBoolean(false); - private volatile int nextSniffDelay; + private volatile long nextSniffDelay; private volatile ScheduledFuture scheduledFuture; - private Task(HostsSniffer hostsSniffer, RestClient restClient, int sniffInterval, int sniffAfterFailureDelay) { + private Task(HostsSniffer 
hostsSniffer, RestClient restClient, long sniffInterval, long sniffAfterFailureDelay) { this.hostsSniffer = hostsSniffer; this.restClient = restClient; this.sniffInterval = sniffInterval; @@ -153,14 +153,14 @@ public static Builder builder() { * Sniffer builder. Helps creating a new {@link Sniffer}. */ public static final class Builder { - public static final int DEFAULT_SNIFF_INTERVAL = 60000 * 5; //5 minutes - public static final int DEFAULT_SNIFF_AFTER_FAILURE_DELAY = 60000; //1 minute - public static final int DEFAULT_SNIFF_REQUEST_TIMEOUT = 1000; //1 second + public static final long DEFAULT_SNIFF_INTERVAL = TimeUnit.MINUTES.toMillis(5); + public static final long DEFAULT_SNIFF_AFTER_FAILURE_DELAY = TimeUnit.MINUTES.toMillis(1); + public static final long DEFAULT_SNIFF_REQUEST_TIMEOUT = TimeUnit.SECONDS.toMillis(1); - private int sniffRequestTimeout = DEFAULT_SNIFF_REQUEST_TIMEOUT; - private int sniffInterval = DEFAULT_SNIFF_INTERVAL; + private long sniffRequestTimeout = DEFAULT_SNIFF_REQUEST_TIMEOUT; + private long sniffInterval = DEFAULT_SNIFF_INTERVAL; private boolean sniffOnFailure = true; - private int sniffAfterFailureDelay = DEFAULT_SNIFF_AFTER_FAILURE_DELAY; + private long sniffAfterFailureDelay = DEFAULT_SNIFF_AFTER_FAILURE_DELAY; private String scheme = "http"; private RestClient restClient; From 1f7f6e2709fc988d2b1ae93223fc2a1861999dd6 Mon Sep 17 00:00:00 2001 From: javanna Date: Mon, 6 Jun 2016 15:13:42 +0200 Subject: [PATCH 065/103] wrap log statement with logger.isDebugEnabled --- .../main/java/org/elasticsearch/client/RequestLogger.java | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/client/src/main/java/org/elasticsearch/client/RequestLogger.java b/client/src/main/java/org/elasticsearch/client/RequestLogger.java index c15f6abf04b2e..cdee2d9944dba 100644 --- a/client/src/main/java/org/elasticsearch/client/RequestLogger.java +++ b/client/src/main/java/org/elasticsearch/client/RequestLogger.java @@ -52,8 +52,10 @@ private 
RequestLogger() { * Logs a request that yielded a response */ static void log(Log logger, String message, HttpUriRequest request, HttpHost host, HttpResponse httpResponse) { - logger.debug(message + " [" + request.getMethod() + " " + host + request.getRequestLine().getUri() + - "] [" + httpResponse.getStatusLine() + "]"); + if (logger.isDebugEnabled()) { + logger.debug(message + " [" + request.getMethod() + " " + host + request.getRequestLine().getUri() + + "] [" + httpResponse.getStatusLine() + "]"); + } if (tracer.isTraceEnabled()) { String requestLine; From 2cf04c08772ab7ca3ea5fd65d7c5878e3852d9e8 Mon Sep 17 00:00:00 2001 From: javanna Date: Mon, 6 Jun 2016 15:41:11 +0200 Subject: [PATCH 066/103] wrap entity only when not repeatable and improve RequestLoggerTests --- .../elasticsearch/client/RequestLogger.java | 13 ++++-- .../client/RequestLoggerTests.java | 44 +++++++++++++++---- 2 files changed, 44 insertions(+), 13 deletions(-) diff --git a/client/src/main/java/org/elasticsearch/client/RequestLogger.java b/client/src/main/java/org/elasticsearch/client/RequestLogger.java index cdee2d9944dba..f1b406b5cf32b 100644 --- a/client/src/main/java/org/elasticsearch/client/RequestLogger.java +++ b/client/src/main/java/org/elasticsearch/client/RequestLogger.java @@ -102,9 +102,12 @@ static String buildTraceRequest(HttpUriRequest request, HttpHost host) throws IO HttpEntityEnclosingRequest enclosingRequest = (HttpEntityEnclosingRequest) request; if (enclosingRequest.getEntity() != null) { requestLine += " -d '"; - HttpEntity entity = new BufferedHttpEntity(enclosingRequest.getEntity()); - enclosingRequest.setEntity(entity); - requestLine += EntityUtils.toString(entity) + "'"; + HttpEntity entity = enclosingRequest.getEntity(); + if (entity.isRepeatable() == false) { + entity = new BufferedHttpEntity(enclosingRequest.getEntity()); + enclosingRequest.setEntity(entity); + } + requestLine += EntityUtils.toString(entity, StandardCharsets.UTF_8) + "'"; } } return requestLine; 
@@ -121,7 +124,9 @@ static String buildTraceResponse(HttpResponse httpResponse) throws IOException { responseLine += "\n#"; HttpEntity entity = httpResponse.getEntity(); if (entity != null) { - entity = new BufferedHttpEntity(entity); + if (entity.isRepeatable() == false) { + entity = new BufferedHttpEntity(entity); + } httpResponse.setEntity(entity); ContentType contentType = ContentType.get(entity); Charset charset = StandardCharsets.UTF_8; diff --git a/client/src/test/java/org/elasticsearch/client/RequestLoggerTests.java b/client/src/test/java/org/elasticsearch/client/RequestLoggerTests.java index 64bddf88af2ca..c16d9e7876e0a 100644 --- a/client/src/test/java/org/elasticsearch/client/RequestLoggerTests.java +++ b/client/src/test/java/org/elasticsearch/client/RequestLoggerTests.java @@ -20,6 +20,7 @@ package org.elasticsearch.client; import com.carrotsearch.randomizedtesting.generators.RandomInts; +import org.apache.http.HttpEntity; import org.apache.http.HttpEntityEnclosingRequest; import org.apache.http.HttpHost; import org.apache.http.ProtocolVersion; @@ -30,11 +31,14 @@ import org.apache.http.client.methods.HttpPut; import org.apache.http.client.methods.HttpRequestBase; import org.apache.http.client.methods.HttpTrace; +import org.apache.http.entity.InputStreamEntity; import org.apache.http.entity.StringEntity; import org.apache.http.message.BasicHttpResponse; import org.apache.http.message.BasicStatusLine; +import org.apache.http.util.EntityUtils; import org.apache.lucene.util.LuceneTestCase; +import java.io.ByteArrayInputStream; import java.io.IOException; import java.net.URI; import java.net.URISyntaxException; @@ -80,16 +84,27 @@ public void testTraceRequest() throws IOException, URISyntaxException { } String expected = "curl -iX " + request.getMethod() + " '" + host + uri + "'"; - - if (request instanceof HttpEntityEnclosingRequest && random().nextBoolean()) { - HttpEntityEnclosingRequest enclosingRequest = (HttpEntityEnclosingRequest) request; - String 
requestBody = "{ \"field\": \"value\" }"; - enclosingRequest.setEntity(new StringEntity(requestBody, StandardCharsets.UTF_8)); + boolean hasBody = request instanceof HttpEntityEnclosingRequest && random().nextBoolean(); + String requestBody = "{ \"field\": \"value\" }"; + if (hasBody) { expected += " -d '" + requestBody + "'"; + HttpEntityEnclosingRequest enclosingRequest = (HttpEntityEnclosingRequest) request; + HttpEntity entity; + if (random().nextBoolean()) { + entity = new StringEntity(requestBody, StandardCharsets.UTF_8); + } else { + entity = new InputStreamEntity(new ByteArrayInputStream(requestBody.getBytes(StandardCharsets.UTF_8))); + } + enclosingRequest.setEntity(entity); } String traceRequest = RequestLogger.buildTraceRequest(request, host); assertThat(traceRequest, equalTo(expected)); + if (hasBody) { + //check that the body is still readable as most entities are not repeatable + String body = EntityUtils.toString(((HttpEntityEnclosingRequest) request).getEntity(), StandardCharsets.UTF_8); + assertThat(body, equalTo(requestBody)); + } } public void testTraceResponse() throws IOException { @@ -105,15 +120,26 @@ public void testTraceResponse() throws IOException { expected += "\n# header" + i + ": value"; } expected += "\n#"; - if (random().nextBoolean()) { - String responseBody = "{\n \"field\": \"value\"\n}"; - httpResponse.setEntity(new StringEntity(responseBody, StandardCharsets.UTF_8)); + boolean hasBody = random().nextBoolean(); + String responseBody = "{\n \"field\": \"value\"\n}"; + if (hasBody) { expected += "\n# {"; expected += "\n# \"field\": \"value\""; expected += "\n# }"; + HttpEntity entity; + if (random().nextBoolean()) { + entity = new StringEntity(responseBody, StandardCharsets.UTF_8); + } else { + entity = new InputStreamEntity(new ByteArrayInputStream(responseBody.getBytes(StandardCharsets.UTF_8))); + } + httpResponse.setEntity(entity); } - String traceResponse = RequestLogger.buildTraceResponse(httpResponse); 
assertThat(traceResponse, equalTo(expected)); + if (hasBody) { + //check that the body is still readable as most entities are not repeatable + String body = EntityUtils.toString(httpResponse.getEntity(), StandardCharsets.UTF_8); + assertThat(body, equalTo(responseBody)); + } } } From 791db1fb48e339da05e20d4d8da30323af0ecf1b Mon Sep 17 00:00:00 2001 From: javanna Date: Wed, 8 Jun 2016 11:43:02 +0200 Subject: [PATCH 067/103] remove TODO around using /_cat/nodes rather than /_nodes, test compatibility with 2.x _cat/nodes returns the http address only in 5.x. Would be nice to use as we could drop the jackson dependency, but we care more about being compatible with 2.x. Not compatible with previous versions as the format of returned http addresses have changed since 2.0. Also fixed test bug that caused sporadic failures. --- .../client/sniff/HostsSniffer.java | 4 +-- .../client/sniff/HostsSnifferTests.java | 31 ++++++++++--------- 2 files changed, 18 insertions(+), 17 deletions(-) diff --git a/client-sniffer/src/main/java/org/elasticsearch/client/sniff/HostsSniffer.java b/client-sniffer/src/main/java/org/elasticsearch/client/sniff/HostsSniffer.java index 4af9d20558921..cc3abdfec7e49 100644 --- a/client-sniffer/src/main/java/org/elasticsearch/client/sniff/HostsSniffer.java +++ b/client-sniffer/src/main/java/org/elasticsearch/client/sniff/HostsSniffer.java @@ -38,9 +38,9 @@ import java.util.Map; /** - * Class responsible for sniffing the http hosts from elasticsearch through the nodes info api and returning them back + * Class responsible for sniffing the http hosts from elasticsearch through the nodes info api and returning them back. + * Compatible with elasticsearch 5.x and 2.x. 
*/ -//TODO This could potentially be using _cat/nodes which wouldn't require jackson as a dependency, but we'd have bw comp problems with 2.x public class HostsSniffer { private static final Log logger = LogFactory.getLog(HostsSniffer.class); diff --git a/client-sniffer/src/test/java/org/elasticsearch/client/sniff/HostsSnifferTests.java b/client-sniffer/src/test/java/org/elasticsearch/client/sniff/HostsSnifferTests.java index 88f0480f92a9a..57705c1b832e9 100644 --- a/client-sniffer/src/test/java/org/elasticsearch/client/sniff/HostsSnifferTests.java +++ b/client-sniffer/src/test/java/org/elasticsearch/client/sniff/HostsSnifferTests.java @@ -97,8 +97,7 @@ public void testSniffNodes() throws IOException, URISyntaxException { } catch(ElasticsearchResponseException e) { ElasticsearchResponse response = e.getElasticsearchResponse(); if (sniffResponse.isFailure) { - assertThat(e.getMessage(), containsString("GET http://localhost:" + httpServer.getAddress().getPort() + - "/_nodes/http?timeout=" + sniffRequestTimeout)); + assertThat(e.getMessage(), containsString("GET " + httpHost + "/_nodes/http?timeout=" + sniffRequestTimeout + "ms")); assertThat(e.getMessage(), containsString(Integer.toString(sniffResponse.nodesInfoResponseCode))); assertThat(response.getHost(), equalTo(httpHost)); assertThat(response.getStatusLine().getStatusCode(), equalTo(sniffResponse.nodesInfoResponseCode)); @@ -185,20 +184,22 @@ private static SniffResponse buildSniffResponse(String scheme) throws IOExceptio } generator.writeEndObject(); } - String[] roles = {"master", "data", "ingest"}; - int numRoles = RandomInts.randomIntBetween(random(), 0, 3); - Set nodeRoles = new HashSet<>(numRoles); - for (int j = 0; j < numRoles; j++) { - String role; - do { - role = RandomPicks.randomFrom(random(), roles); - } while(nodeRoles.add(role) == false); - } - generator.writeArrayFieldStart("roles"); - for (String nodeRole : nodeRoles) { - generator.writeString(nodeRole); + if (random().nextBoolean()) { + 
String[] roles = {"master", "data", "ingest"}; + int numRoles = RandomInts.randomIntBetween(random(), 0, 3); + Set nodeRoles = new HashSet<>(numRoles); + for (int j = 0; j < numRoles; j++) { + String role; + do { + role = RandomPicks.randomFrom(random(), roles); + } while(nodeRoles.add(role) == false); + } + generator.writeArrayFieldStart("roles"); + for (String nodeRole : nodeRoles) { + generator.writeString(nodeRole); + } + generator.writeEndArray(); } - generator.writeEndArray(); int numAttributes = RandomInts.randomIntBetween(random(), 0, 3); Map attributes = new HashMap<>(numAttributes); for (int j = 0; j < numAttributes; j++) { From a78fe1305dd8148db1c4f9a93f0284e21443042f Mon Sep 17 00:00:00 2001 From: javanna Date: Wed, 8 Jun 2016 12:19:52 +0200 Subject: [PATCH 068/103] require RestClient when creating Sniffer.Builder --- .../elasticsearch/client/sniff/Sniffer.java | 30 +++---- .../client/sniff/SnifferBuilderTests.java | 89 +++++++++---------- 2 files changed, 53 insertions(+), 66 deletions(-) diff --git a/client-sniffer/src/main/java/org/elasticsearch/client/sniff/Sniffer.java b/client-sniffer/src/main/java/org/elasticsearch/client/sniff/Sniffer.java index 2cd146bd80d27..7ae6b0502e51e 100644 --- a/client-sniffer/src/main/java/org/elasticsearch/client/sniff/Sniffer.java +++ b/client-sniffer/src/main/java/org/elasticsearch/client/sniff/Sniffer.java @@ -22,8 +22,6 @@ import org.apache.commons.logging.Log; import org.apache.commons.logging.LogFactory; import org.apache.http.HttpHost; -import org.apache.http.config.Registry; -import org.apache.http.impl.client.CloseableHttpClient; import org.elasticsearch.client.RestClient; import java.io.Closeable; @@ -145,8 +143,8 @@ void shutdown() { /** * Returns a new {@link Builder} to help with {@link Sniffer} creation. 
*/ - public static Builder builder() { - return new Builder(); + public static Builder builder(RestClient restClient) { + return new Builder(restClient); } /** @@ -157,15 +155,19 @@ public static final class Builder { public static final long DEFAULT_SNIFF_AFTER_FAILURE_DELAY = TimeUnit.MINUTES.toMillis(1); public static final long DEFAULT_SNIFF_REQUEST_TIMEOUT = TimeUnit.SECONDS.toMillis(1); + private final RestClient restClient; private long sniffRequestTimeout = DEFAULT_SNIFF_REQUEST_TIMEOUT; private long sniffInterval = DEFAULT_SNIFF_INTERVAL; private boolean sniffOnFailure = true; private long sniffAfterFailureDelay = DEFAULT_SNIFF_AFTER_FAILURE_DELAY; private String scheme = "http"; - private RestClient restClient; - - private Builder() { + /** + * Creates a new builder instance and sets the {@link RestClient} that will be used to communicate with elasticsearch. + */ + private Builder(RestClient restClient) { + Objects.requireNonNull(restClient, "restClient cannot be null"); + this.restClient = restClient; } /** @@ -201,24 +203,13 @@ public Builder setSniffAfterFailureDelay(int sniffAfterFailureDelay) { return this; } - /** - * Sets the http client. Mandatory argument. Best practice is to use the same client used - * within {@link org.elasticsearch.client.RestClient} which can be created manually or - * through {@link RestClient.Builder#createDefaultHttpClient(Registry)}. - * @see CloseableHttpClient - */ - public Builder setRestClient(RestClient restClient) { - this.restClient = restClient; - return this; - } - /** * Sets the sniff request timeout to be passed in as a query string parameter to elasticsearch. * Allows to halt the request without any failure, as only the nodes that have responded * within this timeout will be returned. 
*/ public Builder setSniffRequestTimeout(int sniffRequestTimeout) { - if (sniffRequestTimeout <=0) { + if (sniffRequestTimeout <= 0) { throw new IllegalArgumentException("sniffRequestTimeout must be greater than 0"); } this.sniffRequestTimeout = sniffRequestTimeout; @@ -242,7 +233,6 @@ public Builder setScheme(String scheme) { * Creates the {@link Sniffer} based on the provided configuration. */ public Sniffer build() { - Objects.requireNonNull(restClient, "restClient cannot be null"); return new Sniffer(restClient, sniffRequestTimeout, scheme, sniffInterval, sniffOnFailure, sniffAfterFailureDelay); } } diff --git a/client-sniffer/src/test/java/org/elasticsearch/client/sniff/SnifferBuilderTests.java b/client-sniffer/src/test/java/org/elasticsearch/client/sniff/SnifferBuilderTests.java index 868c57881200a..9bcd567ed2aac 100644 --- a/client-sniffer/src/test/java/org/elasticsearch/client/sniff/SnifferBuilderTests.java +++ b/client-sniffer/src/test/java/org/elasticsearch/client/sniff/SnifferBuilderTests.java @@ -35,63 +35,60 @@ public class SnifferBuilderTests extends LuceneTestCase { } public void testBuild() throws Exception { - - try { - Sniffer.builder().setScheme(null); - fail("should have failed"); - } catch(NullPointerException e) { - assertEquals(e.getMessage(), "scheme cannot be null"); + int numNodes = RandomInts.randomIntBetween(random(), 1, 5); + HttpHost[] hosts = new HttpHost[numNodes]; + for (int i = 0; i < numNodes; i++) { + hosts[i] = new HttpHost("localhost", 9200 + i); } + try (RestClient client = RestClient.builder().setHosts(hosts).build()) { - try { - Sniffer.builder().setScheme("whatever"); - fail("should have failed"); - } catch(IllegalArgumentException e) { - assertEquals(e.getMessage(), "scheme must be either http or https"); - } + try { + Sniffer.builder(client).setScheme(null); + fail("should have failed"); + } catch(NullPointerException e) { + assertEquals(e.getMessage(), "scheme cannot be null"); + } - try { - 
Sniffer.builder().setSniffInterval(RandomInts.randomIntBetween(random(), Integer.MIN_VALUE, 0)); - fail("should have failed"); - } catch(IllegalArgumentException e) { - assertEquals(e.getMessage(), "sniffInterval must be greater than 0"); - } + try { + Sniffer.builder(client).setScheme("whatever"); + fail("should have failed"); + } catch(IllegalArgumentException e) { + assertEquals(e.getMessage(), "scheme must be either http or https"); + } - try { - Sniffer.builder().setSniffRequestTimeout(RandomInts.randomIntBetween(random(), Integer.MIN_VALUE, 0)); - fail("should have failed"); - } catch(IllegalArgumentException e) { - assertEquals(e.getMessage(), "sniffRequestTimeout must be greater than 0"); - } + try { + Sniffer.builder(client).setSniffInterval(RandomInts.randomIntBetween(random(), Integer.MIN_VALUE, 0)); + fail("should have failed"); + } catch(IllegalArgumentException e) { + assertEquals(e.getMessage(), "sniffInterval must be greater than 0"); + } - try { - Sniffer.builder().setSniffAfterFailureDelay(RandomInts.randomIntBetween(random(), Integer.MIN_VALUE, 0)); - fail("should have failed"); - } catch(IllegalArgumentException e) { - assertEquals(e.getMessage(), "sniffAfterFailureDelay must be greater than 0"); - } + try { + Sniffer.builder(client).setSniffRequestTimeout(RandomInts.randomIntBetween(random(), Integer.MIN_VALUE, 0)); + fail("should have failed"); + } catch(IllegalArgumentException e) { + assertEquals(e.getMessage(), "sniffRequestTimeout must be greater than 0"); + } - try { - Sniffer.builder().build(); - fail("should have failed"); - } catch(NullPointerException e) { - assertEquals(e.getMessage(), "restClient cannot be null"); - } + try { + Sniffer.builder(client).setSniffAfterFailureDelay(RandomInts.randomIntBetween(random(), Integer.MIN_VALUE, 0)); + fail("should have failed"); + } catch(IllegalArgumentException e) { + assertEquals(e.getMessage(), "sniffAfterFailureDelay must be greater than 0"); + } - int numNodes = 
RandomInts.randomIntBetween(random(), 1, 5); - HttpHost[] hosts = new HttpHost[numNodes]; - for (int i = 0; i < numNodes; i++) { - hosts[i] = new HttpHost("localhost", 9200 + i); - } + try { + Sniffer.builder(null).build(); + fail("should have failed"); + } catch(NullPointerException e) { + assertEquals(e.getMessage(), "restClient cannot be null"); + } - try (RestClient client = RestClient.builder().setHosts(hosts).build()) { - try (Sniffer sniffer = Sniffer.builder().setRestClient(client).build()) { + try (Sniffer sniffer = Sniffer.builder(client).build()) { assertNotNull(sniffer); } - } - try (RestClient client = RestClient.builder().setHosts(hosts).build()) { - Sniffer.Builder builder = Sniffer.builder().setRestClient(client); + Sniffer.Builder builder = Sniffer.builder(client); if (random().nextBoolean()) { builder.setScheme(RandomPicks.randomFrom(random(), Arrays.asList("http", "https"))); } From 04d620da742ef57d4bba08af54934b2f2d305de0 Mon Sep 17 00:00:00 2001 From: javanna Date: Wed, 8 Jun 2016 12:29:16 +0200 Subject: [PATCH 069/103] require hosts when creating RestClient.Builder Also fix order of arguments when using assertEquals --- .../client/sniff/HostsSnifferTests.java | 2 +- .../client/sniff/SnifferBuilderTests.java | 14 ++++---- .../org/elasticsearch/client/RestClient.java | 30 ++++++---------- .../client/RestClientBuilderTests.java | 36 ++++++++----------- .../client/RestClientIntegTests.java | 4 +-- .../client/RestClientMultipleHostsTests.java | 2 +- .../client/RestClientSingleHostTests.java | 2 +- .../elasticsearch/test/ESIntegTestCase.java | 2 +- .../test/rest/client/RestTestClient.java | 2 +- 9 files changed, 39 insertions(+), 55 deletions(-) diff --git a/client-sniffer/src/test/java/org/elasticsearch/client/sniff/HostsSnifferTests.java b/client-sniffer/src/test/java/org/elasticsearch/client/sniff/HostsSnifferTests.java index 57705c1b832e9..826d7fb35f895 100644 --- 
a/client-sniffer/src/test/java/org/elasticsearch/client/sniff/HostsSnifferTests.java +++ b/client-sniffer/src/test/java/org/elasticsearch/client/sniff/HostsSnifferTests.java @@ -82,7 +82,7 @@ public void stopHttpServer() throws IOException { public void testSniffNodes() throws IOException, URISyntaxException { HttpHost httpHost = new HttpHost(httpServer.getAddress().getHostName(), httpServer.getAddress().getPort()); - try (RestClient restClient = RestClient.builder().setHosts(httpHost).build()) { + try (RestClient restClient = RestClient.builder(httpHost).build()) { HostsSniffer sniffer = new HostsSniffer(restClient, sniffRequestTimeout, scheme); try { List sniffedHosts = sniffer.sniffHosts(); diff --git a/client-sniffer/src/test/java/org/elasticsearch/client/sniff/SnifferBuilderTests.java b/client-sniffer/src/test/java/org/elasticsearch/client/sniff/SnifferBuilderTests.java index 9bcd567ed2aac..c0bb9def96457 100644 --- a/client-sniffer/src/test/java/org/elasticsearch/client/sniff/SnifferBuilderTests.java +++ b/client-sniffer/src/test/java/org/elasticsearch/client/sniff/SnifferBuilderTests.java @@ -40,48 +40,48 @@ public void testBuild() throws Exception { for (int i = 0; i < numNodes; i++) { hosts[i] = new HttpHost("localhost", 9200 + i); } - try (RestClient client = RestClient.builder().setHosts(hosts).build()) { + try (RestClient client = RestClient.builder(hosts).build()) { try { Sniffer.builder(client).setScheme(null); fail("should have failed"); } catch(NullPointerException e) { - assertEquals(e.getMessage(), "scheme cannot be null"); + assertEquals("scheme cannot be null", e.getMessage()); } try { Sniffer.builder(client).setScheme("whatever"); fail("should have failed"); } catch(IllegalArgumentException e) { - assertEquals(e.getMessage(), "scheme must be either http or https"); + assertEquals("scheme must be either http or https", e.getMessage()); } try { Sniffer.builder(client).setSniffInterval(RandomInts.randomIntBetween(random(), Integer.MIN_VALUE, 0)); 
fail("should have failed"); } catch(IllegalArgumentException e) { - assertEquals(e.getMessage(), "sniffInterval must be greater than 0"); + assertEquals("sniffInterval must be greater than 0", e.getMessage()); } try { Sniffer.builder(client).setSniffRequestTimeout(RandomInts.randomIntBetween(random(), Integer.MIN_VALUE, 0)); fail("should have failed"); } catch(IllegalArgumentException e) { - assertEquals(e.getMessage(), "sniffRequestTimeout must be greater than 0"); + assertEquals("sniffRequestTimeout must be greater than 0", e.getMessage()); } try { Sniffer.builder(client).setSniffAfterFailureDelay(RandomInts.randomIntBetween(random(), Integer.MIN_VALUE, 0)); fail("should have failed"); } catch(IllegalArgumentException e) { - assertEquals(e.getMessage(), "sniffAfterFailureDelay must be greater than 0"); + assertEquals("sniffAfterFailureDelay must be greater than 0", e.getMessage()); } try { Sniffer.builder(null).build(); fail("should have failed"); } catch(NullPointerException e) { - assertEquals(e.getMessage(), "restClient cannot be null"); + assertEquals("restClient cannot be null", e.getMessage()); } try (Sniffer sniffer = Sniffer.builder(client).build()) { diff --git a/client/src/main/java/org/elasticsearch/client/RestClient.java b/client/src/main/java/org/elasticsearch/client/RestClient.java index 161abbd40a78b..a01ad694b9623 100644 --- a/client/src/main/java/org/elasticsearch/client/RestClient.java +++ b/client/src/main/java/org/elasticsearch/client/RestClient.java @@ -350,8 +350,8 @@ private static URI buildUri(String path, Map params) { /** * Returns a new {@link Builder} to help with {@link RestClient} creation. */ - public static Builder builder() { - return new Builder(); + public static Builder builder(HttpHost... 
hosts) { + return new Builder(hosts); } /** @@ -365,13 +365,19 @@ public static final class Builder { private static final Header[] EMPTY_HEADERS = new Header[0]; + private final HttpHost[] hosts; private CloseableHttpClient httpClient; private int maxRetryTimeout = DEFAULT_MAX_RETRY_TIMEOUT; - private HttpHost[] hosts; private Header[] defaultHeaders = EMPTY_HEADERS; - private Builder() { - + /** + * Creates a new builder instance and sets the hosts that the client will send requests to. + */ + private Builder(HttpHost... hosts) { + if (hosts == null || hosts.length == 0) { + throw new IllegalArgumentException("no hosts provided"); + } + this.hosts = hosts; } /** @@ -399,17 +405,6 @@ public Builder setMaxRetryTimeout(int maxRetryTimeout) { return this; } - /** - * Sets the hosts that the client will send requests to. - */ - public Builder setHosts(HttpHost... hosts) { - if (hosts == null || hosts.length == 0) { - throw new IllegalArgumentException("no hosts provided"); - } - this.hosts = hosts; - return this; - } - /** * Sets the default request headers, to be used when creating the default http client instance. 
* In case the http client is set through {@link #setHttpClient(CloseableHttpClient)}, the default headers need to be @@ -431,9 +426,6 @@ public RestClient build() { if (httpClient == null) { httpClient = createDefaultHttpClient(null); } - if (hosts == null || hosts.length == 0) { - throw new IllegalArgumentException("no hosts provided"); - } return new RestClient(httpClient, maxRetryTimeout, defaultHeaders, hosts); } diff --git a/client/src/test/java/org/elasticsearch/client/RestClientBuilderTests.java b/client/src/test/java/org/elasticsearch/client/RestClientBuilderTests.java index 3ea0fe14ed28d..75587ed8db19c 100644 --- a/client/src/test/java/org/elasticsearch/client/RestClientBuilderTests.java +++ b/client/src/test/java/org/elasticsearch/client/RestClientBuilderTests.java @@ -32,61 +32,53 @@ public class RestClientBuilderTests extends LuceneTestCase { public void testBuild() throws IOException { try { - RestClient.builder().setMaxRetryTimeout(RandomInts.randomIntBetween(random(), Integer.MIN_VALUE, 0)); + RestClient.builder((HttpHost[])null); fail("should have failed"); } catch(IllegalArgumentException e) { - assertEquals(e.getMessage(), "maxRetryTimeout must be greater than 0"); + assertEquals("no hosts provided", e.getMessage()); } try { - RestClient.builder().setHosts((HttpHost[])null); + RestClient.builder(); fail("should have failed"); } catch(IllegalArgumentException e) { - assertEquals(e.getMessage(), "no hosts provided"); + assertEquals("no hosts provided", e.getMessage()); } try { - RestClient.builder().setHosts(); + RestClient.builder(new HttpHost[]{new HttpHost("localhost", 9200), null}).build(); fail("should have failed"); - } catch(IllegalArgumentException e) { - assertEquals(e.getMessage(), "no hosts provided"); + } catch(NullPointerException e) { + assertEquals("host cannot be null", e.getMessage()); } try { - RestClient.builder().build(); + RestClient.builder(new HttpHost("localhost", 
9200)).setMaxRetryTimeout(RandomInts.randomIntBetween(random(), Integer.MIN_VALUE, 0)); fail("should have failed"); } catch(IllegalArgumentException e) { - assertEquals(e.getMessage(), "no hosts provided"); - } - - try { - RestClient.builder().setHosts(new HttpHost[]{new HttpHost("localhost", 9200), null}).build(); - fail("should have failed"); - } catch(NullPointerException e) { - assertEquals(e.getMessage(), "host cannot be null"); + assertEquals("maxRetryTimeout must be greater than 0", e.getMessage()); } try { - RestClient.builder().setDefaultHeaders(null); + RestClient.builder(new HttpHost("localhost", 9200)).setDefaultHeaders(null); fail("should have failed"); } catch(NullPointerException e) { - assertEquals(e.getMessage(), "default headers must not be null"); + assertEquals("default headers must not be null", e.getMessage()); } try { - RestClient.builder().setDefaultHeaders(new Header[]{null}); + RestClient.builder(new HttpHost("localhost", 9200)).setDefaultHeaders(new Header[]{null}); fail("should have failed"); } catch(NullPointerException e) { - assertEquals(e.getMessage(), "default header must not be null"); + assertEquals("default header must not be null", e.getMessage()); } - RestClient.Builder builder = RestClient.builder(); int numNodes = RandomInts.randomIntBetween(random(), 1, 5); HttpHost[] hosts = new HttpHost[numNodes]; for (int i = 0; i < numNodes; i++) { hosts[i] = new HttpHost("localhost", 9200 + i); } - builder.setHosts(hosts); + RestClient.Builder builder = RestClient.builder(hosts); if (random().nextBoolean()) { builder.setHttpClient(HttpClientBuilder.create().build()); } diff --git a/client/src/test/java/org/elasticsearch/client/RestClientIntegTests.java b/client/src/test/java/org/elasticsearch/client/RestClientIntegTests.java index 420ff6ed7f7d5..c5067dca63f52 100644 --- a/client/src/test/java/org/elasticsearch/client/RestClientIntegTests.java +++ b/client/src/test/java/org/elasticsearch/client/RestClientIntegTests.java @@ -77,8 +77,8 @@ 
public static void startHttpServer() throws Exception { String headerValue = RandomStrings.randomAsciiOfLengthBetween(random(), 3, 10); defaultHeaders[i] = new BasicHeader(headerName, headerValue); } - restClient = RestClient.builder().setDefaultHeaders(defaultHeaders) - .setHosts(new HttpHost(httpServer.getAddress().getHostName(), httpServer.getAddress().getPort())).build(); + restClient = RestClient.builder(new HttpHost(httpServer.getAddress().getHostName(), httpServer.getAddress().getPort())) + .setDefaultHeaders(defaultHeaders).build(); } private static void createStatusCodeContext(HttpServer httpServer, final int statusCode) { diff --git a/client/src/test/java/org/elasticsearch/client/RestClientMultipleHostsTests.java b/client/src/test/java/org/elasticsearch/client/RestClientMultipleHostsTests.java index d78a03509d085..7ea24cef8e363 100644 --- a/client/src/test/java/org/elasticsearch/client/RestClientMultipleHostsTests.java +++ b/client/src/test/java/org/elasticsearch/client/RestClientMultipleHostsTests.java @@ -87,7 +87,7 @@ public CloseableHttpResponse answer(InvocationOnMock invocationOnMock) throws Th for (int i = 0; i < numHosts; i++) { httpHosts[i] = new HttpHost("localhost", 9200 + i); } - restClient = RestClient.builder().setHosts(httpHosts).setHttpClient(httpClient).build(); + restClient = RestClient.builder(httpHosts).setHttpClient(httpClient).build(); failureListener = new TrackingFailureListener(); restClient.setFailureListener(failureListener); } diff --git a/client/src/test/java/org/elasticsearch/client/RestClientSingleHostTests.java b/client/src/test/java/org/elasticsearch/client/RestClientSingleHostTests.java index c3c08bee62886..1112f38bbee00 100644 --- a/client/src/test/java/org/elasticsearch/client/RestClientSingleHostTests.java +++ b/client/src/test/java/org/elasticsearch/client/RestClientSingleHostTests.java @@ -122,7 +122,7 @@ public CloseableHttpResponse answer(InvocationOnMock invocationOnMock) throws Th defaultHeaders[i] = new 
BasicHeader(headerName, headerValue); } httpHost = new HttpHost("localhost", 9200); - restClient = RestClient.builder().setHosts(httpHost).setHttpClient(httpClient).setDefaultHeaders(defaultHeaders).build(); + restClient = RestClient.builder(httpHost).setHttpClient(httpClient).setDefaultHeaders(defaultHeaders).build(); failureListener = new TrackingFailureListener(); restClient.setFailureListener(failureListener); } diff --git a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java index ab25d25fac035..054da7664b5f9 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/ESIntegTestCase.java @@ -2068,7 +2068,7 @@ protected static RestClient createRestClient(CloseableHttpClient httpClient, Str hosts.add(new HttpHost(NetworkAddress.format(address.getAddress()), address.getPort(), protocol)); } } - RestClient.Builder builder = RestClient.builder().setHosts(hosts.toArray(new HttpHost[hosts.size()])); + RestClient.Builder builder = RestClient.builder(hosts.toArray(new HttpHost[hosts.size()])); if (httpClient != null) { builder.setHttpClient(httpClient); } diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/client/RestTestClient.java b/test/framework/src/main/java/org/elasticsearch/test/rest/client/RestTestClient.java index 94f85422edf5b..d21e302168bfb 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/client/RestTestClient.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/client/RestTestClient.java @@ -305,7 +305,7 @@ private static RestClient createRestClient(URL[] urls, Settings settings) throws hosts[i] = new HttpHost(url.getHost(), url.getPort(), protocol); } - RestClient.Builder builder = RestClient.builder().setHttpClient(httpClient).setHosts(hosts); + RestClient.Builder builder = 
RestClient.builder(hosts).setHttpClient(httpClient); try (ThreadContext threadContext = new ThreadContext(settings)) { Header[] defaultHeaders = new Header[threadContext.getHeaders().size()]; int i = 0; From be5e2e145b0390153b90041348c9350b83667f16 Mon Sep 17 00:00:00 2001 From: javanna Date: Thu, 9 Jun 2016 14:27:30 +0200 Subject: [PATCH 070/103] Decouple HostsSniffer from Sniffer Sniffer now requires a HostsSniffer instance as a constructor argument, HostsSniffer has its won Builder helper. Also synchronized accesses to scheduledExecutorService in SnifferTask. --- .../client/sniff/HostsSniffer.java | 78 +++++++++++++++- .../elasticsearch/client/sniff/Sniffer.java | 91 +++++++------------ .../sniff/HostsSnifferBuilderTests.java | 69 ++++++++++++++ .../client/sniff/HostsSnifferTests.java | 9 +- .../client/sniff/SnifferBuilderTests.java | 63 ++++++------- 5 files changed, 206 insertions(+), 104 deletions(-) create mode 100644 client-sniffer/src/test/java/org/elasticsearch/client/sniff/HostsSnifferBuilderTests.java diff --git a/client-sniffer/src/main/java/org/elasticsearch/client/sniff/HostsSniffer.java b/client-sniffer/src/main/java/org/elasticsearch/client/sniff/HostsSniffer.java index cc3abdfec7e49..340ff89459da7 100644 --- a/client-sniffer/src/main/java/org/elasticsearch/client/sniff/HostsSniffer.java +++ b/client-sniffer/src/main/java/org/elasticsearch/client/sniff/HostsSniffer.java @@ -36,6 +36,8 @@ import java.util.Collections; import java.util.List; import java.util.Map; +import java.util.Objects; +import java.util.concurrent.TimeUnit; /** * Class responsible for sniffing the http hosts from elasticsearch through the nodes info api and returning them back. 
@@ -47,14 +49,13 @@ public class HostsSniffer { private final RestClient restClient; private final Map sniffRequestParams; - private final String scheme; - private final JsonFactory jsonFactory; + private final Scheme scheme; + private final JsonFactory jsonFactory = new JsonFactory(); - public HostsSniffer(RestClient restClient, long sniffRequestTimeout, String scheme) { + protected HostsSniffer(RestClient restClient, long sniffRequestTimeout, Scheme scheme) { this.restClient = restClient; this.sniffRequestParams = Collections.singletonMap("timeout", sniffRequestTimeout + "ms"); this.scheme = scheme; - this.jsonFactory = new JsonFactory(); } /** @@ -95,7 +96,7 @@ private List readHosts(HttpEntity entity) throws IOException { } } - private static HttpHost readHost(String nodeId, JsonParser parser, String scheme) throws IOException { + private static HttpHost readHost(String nodeId, JsonParser parser, Scheme scheme) throws IOException { HttpHost httpHost = null; String fieldName = null; while (parser.nextToken() != JsonToken.END_OBJECT) { @@ -124,4 +125,71 @@ private static HttpHost readHost(String nodeId, JsonParser parser, String scheme } return httpHost; } + + /** + * Returns a new {@link Builder} to help with {@link HostsSniffer} creation. + */ + public static Builder builder(RestClient restClient) { + return new Builder(restClient); + } + + public enum Scheme { + HTTP("http"), HTTPS("https"); + + private final String name; + + Scheme(String name) { + this.name = name; + } + + @Override + public String toString() { + return name; + } + } + + /** + * HostsSniffer builder. Helps creating a new {@link HostsSniffer}. 
+ */ + public static class Builder { + public static final long DEFAULT_SNIFF_REQUEST_TIMEOUT = TimeUnit.SECONDS.toMillis(1); + + private final RestClient restClient; + private long sniffRequestTimeout = DEFAULT_SNIFF_REQUEST_TIMEOUT; + private Scheme scheme; + + private Builder(RestClient restClient) { + Objects.requireNonNull(restClient, "restClient cannot be null"); + this.restClient = restClient; + } + + /** + * Sets the sniff request timeout to be passed in as a query string parameter to elasticsearch. + * Allows to halt the request without any failure, as only the nodes that have responded + * within this timeout will be returned. + */ + public Builder setSniffRequestTimeout(int sniffRequestTimeout) { + if (sniffRequestTimeout <= 0) { + throw new IllegalArgumentException("sniffRequestTimeout must be greater than 0"); + } + this.sniffRequestTimeout = sniffRequestTimeout; + return this; + } + + /** + * Sets the scheme to associate sniffed nodes with (as it is not returned by elasticsearch) + */ + public Builder setScheme(Scheme scheme) { + Objects.requireNonNull(scheme, "scheme cannot be null"); + this.scheme = scheme; + return this; + } + + /** + * Creates a new {@link HostsSniffer} instance given the provided configuration + */ + public HostsSniffer build() { + return new HostsSniffer(restClient, sniffRequestTimeout, scheme); + } + } } diff --git a/client-sniffer/src/main/java/org/elasticsearch/client/sniff/Sniffer.java b/client-sniffer/src/main/java/org/elasticsearch/client/sniff/Sniffer.java index 7ae6b0502e51e..cd5d1d93148c9 100644 --- a/client-sniffer/src/main/java/org/elasticsearch/client/sniff/Sniffer.java +++ b/client-sniffer/src/main/java/org/elasticsearch/client/sniff/Sniffer.java @@ -48,9 +48,8 @@ public final class Sniffer extends RestClient.FailureListener implements Closeab private final boolean sniffOnFailure; private final Task task; - private Sniffer(RestClient restClient, long sniffRequestTimeout, String scheme, long sniffInterval, - boolean 
sniffOnFailure, long sniffAfterFailureDelay) { - HostsSniffer hostsSniffer = new HostsSniffer(restClient, sniffRequestTimeout, scheme); + private Sniffer(RestClient restClient, HostsSniffer hostsSniffer, long sniffInterval, + boolean sniffOnFailure, long sniffAfterFailureDelay) { this.task = new Task(hostsSniffer, restClient, sniffInterval, sniffAfterFailureDelay); this.sniffOnFailure = sniffOnFailure; restClient.setFailureListener(this); @@ -77,8 +76,7 @@ private static class Task implements Runnable { private final long sniffAfterFailureDelay; private final ScheduledExecutorService scheduledExecutorService; private final AtomicBoolean running = new AtomicBoolean(false); - private volatile long nextSniffDelay; - private volatile ScheduledFuture scheduledFuture; + private ScheduledFuture scheduledFuture; private Task(HostsSniffer hostsSniffer, RestClient restClient, long sniffInterval, long sniffAfterFailureDelay) { this.hostsSniffer = hostsSniffer; @@ -86,21 +84,34 @@ private Task(HostsSniffer hostsSniffer, RestClient restClient, long sniffInterva this.sniffInterval = sniffInterval; this.sniffAfterFailureDelay = sniffAfterFailureDelay; this.scheduledExecutorService = Executors.newScheduledThreadPool(1); - this.scheduledFuture = this.scheduledExecutorService.schedule(this, 0, TimeUnit.MILLISECONDS); - this.nextSniffDelay = sniffInterval; + scheduleNextRun(0); + } + + synchronized void scheduleNextRun(long delayMillis) { + if (scheduledExecutorService.isShutdown() == false) { + try { + if (scheduledFuture != null) { + //regardless of when the next sniff is scheduled, cancel it and schedule a new one with updated delay + this.scheduledFuture.cancel(false); + } + logger.debug("scheduling next sniff in " + delayMillis + " ms"); + this.scheduledFuture = this.scheduledExecutorService.schedule(this, delayMillis, TimeUnit.MILLISECONDS); + } catch(Throwable t) { + logger.error("error while scheduling next sniffer task", t); + } + } } @Override public void run() { - 
sniff(null); + sniff(null, sniffInterval); } void sniffOnFailure(HttpHost failedHost) { - this.nextSniffDelay = sniffAfterFailureDelay; - sniff(failedHost); + sniff(failedHost, sniffAfterFailureDelay); } - void sniff(HttpHost excludeHost) { + void sniff(HttpHost excludeHost, long nextSniffDelayMillis) { if (running.compareAndSet(false, true)) { try { List sniffedNodes = hostsSniffer.sniffHosts(); @@ -112,22 +123,13 @@ void sniff(HttpHost excludeHost) { } catch (Throwable t) { logger.error("error while sniffing nodes", t); } finally { - try { - //regardless of whether and when the next sniff is scheduled, cancel it and schedule a new one with updated delay - this.scheduledFuture.cancel(false); - logger.debug("scheduling next sniff in " + nextSniffDelay + " ms"); - this.scheduledFuture = this.scheduledExecutorService.schedule(this, nextSniffDelay, TimeUnit.MILLISECONDS); - } catch (Throwable t) { - logger.error("error while scheduling next sniffer task", t); - } finally { - this.nextSniffDelay = sniffInterval; - running.set(false); - } + scheduleNextRun(nextSniffDelayMillis); + running.set(false); } } } - void shutdown() { + synchronized void shutdown() { scheduledExecutorService.shutdown(); try { if (scheduledExecutorService.awaitTermination(1000, TimeUnit.MILLISECONDS)) { @@ -143,8 +145,8 @@ void shutdown() { /** * Returns a new {@link Builder} to help with {@link Sniffer} creation. 
*/ - public static Builder builder(RestClient restClient) { - return new Builder(restClient); + public static Builder builder(RestClient restClient, HostsSniffer hostsSniffer) { + return new Builder(restClient, hostsSniffer); } /** @@ -153,21 +155,22 @@ public static Builder builder(RestClient restClient) { public static final class Builder { public static final long DEFAULT_SNIFF_INTERVAL = TimeUnit.MINUTES.toMillis(5); public static final long DEFAULT_SNIFF_AFTER_FAILURE_DELAY = TimeUnit.MINUTES.toMillis(1); - public static final long DEFAULT_SNIFF_REQUEST_TIMEOUT = TimeUnit.SECONDS.toMillis(1); private final RestClient restClient; - private long sniffRequestTimeout = DEFAULT_SNIFF_REQUEST_TIMEOUT; + private final HostsSniffer hostsSniffer; private long sniffInterval = DEFAULT_SNIFF_INTERVAL; private boolean sniffOnFailure = true; private long sniffAfterFailureDelay = DEFAULT_SNIFF_AFTER_FAILURE_DELAY; - private String scheme = "http"; /** - * Creates a new builder instance and sets the {@link RestClient} that will be used to communicate with elasticsearch. + * Creates a new builder instance by providing the {@link RestClient} that will be used to communicate with elasticsearch, + * and the */ - private Builder(RestClient restClient) { + private Builder(RestClient restClient, HostsSniffer hostsSniffer) { Objects.requireNonNull(restClient, "restClient cannot be null"); this.restClient = restClient; + Objects.requireNonNull(hostsSniffer, "hostsSniffer cannot be null"); + this.hostsSniffer = hostsSniffer; } /** @@ -203,37 +206,11 @@ public Builder setSniffAfterFailureDelay(int sniffAfterFailureDelay) { return this; } - /** - * Sets the sniff request timeout to be passed in as a query string parameter to elasticsearch. - * Allows to halt the request without any failure, as only the nodes that have responded - * within this timeout will be returned. 
- */ - public Builder setSniffRequestTimeout(int sniffRequestTimeout) { - if (sniffRequestTimeout <= 0) { - throw new IllegalArgumentException("sniffRequestTimeout must be greater than 0"); - } - this.sniffRequestTimeout = sniffRequestTimeout; - return this; - } - - /** - * Sets the scheme to be used for sniffed nodes. This information is not returned by elasticsearch, - * default is http but should be customized if https is needed/enabled. - */ - public Builder setScheme(String scheme) { - Objects.requireNonNull(scheme, "scheme cannot be null"); - if (scheme.equals("http") == false && scheme.equals("https") == false) { - throw new IllegalArgumentException("scheme must be either http or https"); - } - this.scheme = scheme; - return this; - } - /** * Creates the {@link Sniffer} based on the provided configuration. */ public Sniffer build() { - return new Sniffer(restClient, sniffRequestTimeout, scheme, sniffInterval, sniffOnFailure, sniffAfterFailureDelay); + return new Sniffer(restClient, hostsSniffer, sniffInterval, sniffOnFailure, sniffAfterFailureDelay); } } } diff --git a/client-sniffer/src/test/java/org/elasticsearch/client/sniff/HostsSnifferBuilderTests.java b/client-sniffer/src/test/java/org/elasticsearch/client/sniff/HostsSnifferBuilderTests.java new file mode 100644 index 0000000000000..8f355ecc47ae5 --- /dev/null +++ b/client-sniffer/src/test/java/org/elasticsearch/client/sniff/HostsSnifferBuilderTests.java @@ -0,0 +1,69 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.sniff; + +import com.carrotsearch.randomizedtesting.generators.RandomInts; +import com.carrotsearch.randomizedtesting.generators.RandomPicks; +import org.apache.http.HttpHost; +import org.apache.lucene.util.LuceneTestCase; +import org.elasticsearch.client.RestClient; + +public class HostsSnifferBuilderTests extends LuceneTestCase { + + public void testBuild() throws Exception { + try { + HostsSniffer.builder(null); + fail("should have failed"); + } catch(NullPointerException e) { + assertEquals(e.getMessage(), "restClient cannot be null"); + } + + int numNodes = RandomInts.randomIntBetween(random(), 1, 5); + HttpHost[] hosts = new HttpHost[numNodes]; + for (int i = 0; i < numNodes; i++) { + hosts[i] = new HttpHost("localhost", 9200 + i); + } + + try (RestClient client = RestClient.builder(hosts).build()) { + try { + HostsSniffer.builder(client).setScheme(null); + fail("should have failed"); + } catch(NullPointerException e) { + assertEquals(e.getMessage(), "scheme cannot be null"); + } + + try { + HostsSniffer.builder(client).setSniffRequestTimeout(RandomInts.randomIntBetween(random(), Integer.MIN_VALUE, 0)); + fail("should have failed"); + } catch(IllegalArgumentException e) { + assertEquals(e.getMessage(), "sniffRequestTimeout must be greater than 0"); + } + + HostsSniffer.Builder builder = HostsSniffer.builder(client); + if (random().nextBoolean()) { + builder.setScheme(RandomPicks.randomFrom(random(), HostsSniffer.Scheme.values())); + } + if (random().nextBoolean()) { + 
builder.setSniffRequestTimeout(RandomInts.randomIntBetween(random(), 1, Integer.MAX_VALUE)); + } + assertNotNull(builder.build()); + } + } +} diff --git a/client-sniffer/src/test/java/org/elasticsearch/client/sniff/HostsSnifferTests.java b/client-sniffer/src/test/java/org/elasticsearch/client/sniff/HostsSnifferTests.java index 826d7fb35f895..86602be81fe57 100644 --- a/client-sniffer/src/test/java/org/elasticsearch/client/sniff/HostsSnifferTests.java +++ b/client-sniffer/src/test/java/org/elasticsearch/client/sniff/HostsSnifferTests.java @@ -43,7 +43,6 @@ import java.net.InetSocketAddress; import java.net.URISyntaxException; import java.util.ArrayList; -import java.util.Arrays; import java.util.Collections; import java.util.HashMap; import java.util.HashSet; @@ -58,14 +57,14 @@ public class HostsSnifferTests extends LuceneTestCase { private int sniffRequestTimeout; - private String scheme; + private HostsSniffer.Scheme scheme; private SniffResponse sniffResponse; private HttpServer httpServer; @Before public void startHttpServer() throws IOException { this.sniffRequestTimeout = RandomInts.randomIntBetween(random(), 1000, 10000); - this.scheme = RandomPicks.randomFrom(random(), Arrays.asList("http", "https")); + this.scheme = RandomPicks.randomFrom(random(), HostsSniffer.Scheme.values()); if (rarely()) { this.sniffResponse = SniffResponse.buildFailure(); } else { @@ -132,7 +131,7 @@ public void handle(HttpExchange httpExchange) throws IOException { return httpServer; } - private static SniffResponse buildSniffResponse(String scheme) throws IOException { + private static SniffResponse buildSniffResponse(HostsSniffer.Scheme scheme) throws IOException { int numNodes = RandomInts.randomIntBetween(random(), 1, 5); List hosts = new ArrayList<>(numNodes); JsonFactory jsonFactory = new JsonFactory(); @@ -164,7 +163,7 @@ private static SniffResponse buildSniffResponse(String scheme) throws IOExceptio if (isHttpEnabled) { String host = "host" + i; int port = 
RandomInts.randomIntBetween(random(), 9200, 9299); - HttpHost httpHost = new HttpHost(host, port, scheme); + HttpHost httpHost = new HttpHost(host, port, scheme.toString()); hosts.add(httpHost); generator.writeObjectFieldStart("http"); if (random().nextBoolean()) { diff --git a/client-sniffer/src/test/java/org/elasticsearch/client/sniff/SnifferBuilderTests.java b/client-sniffer/src/test/java/org/elasticsearch/client/sniff/SnifferBuilderTests.java index c0bb9def96457..863830370b6a5 100644 --- a/client-sniffer/src/test/java/org/elasticsearch/client/sniff/SnifferBuilderTests.java +++ b/client-sniffer/src/test/java/org/elasticsearch/client/sniff/SnifferBuilderTests.java @@ -20,87 +20,65 @@ package org.elasticsearch.client.sniff; import com.carrotsearch.randomizedtesting.generators.RandomInts; -import com.carrotsearch.randomizedtesting.generators.RandomPicks; import org.apache.http.HttpHost; import org.apache.lucene.util.LuceneTestCase; import org.elasticsearch.client.RestClient; -import java.util.Arrays; -import java.util.logging.LogManager; +import java.io.IOException; +import java.util.Collections; +import java.util.List; public class SnifferBuilderTests extends LuceneTestCase { - static { - LogManager.getLogManager().reset(); - } - public void testBuild() throws Exception { int numNodes = RandomInts.randomIntBetween(random(), 1, 5); HttpHost[] hosts = new HttpHost[numNodes]; for (int i = 0; i < numNodes; i++) { hosts[i] = new HttpHost("localhost", 9200 + i); } - try (RestClient client = RestClient.builder(hosts).build()) { + HostsSniffer hostsSniffer = new MockHostsSniffer(); + + try (RestClient client = RestClient.builder(hosts).build()) { try { - Sniffer.builder(client).setScheme(null); + Sniffer.builder(null, hostsSniffer).build(); fail("should have failed"); } catch(NullPointerException e) { - assertEquals("scheme cannot be null", e.getMessage()); + assertEquals("restClient cannot be null", e.getMessage()); } try { - 
Sniffer.builder(client).setScheme("whatever"); + Sniffer.builder(client, null).build(); fail("should have failed"); - } catch(IllegalArgumentException e) { - assertEquals("scheme must be either http or https", e.getMessage()); + } catch(NullPointerException e) { + assertEquals("hostsSniffer cannot be null", e.getMessage()); } try { - Sniffer.builder(client).setSniffInterval(RandomInts.randomIntBetween(random(), Integer.MIN_VALUE, 0)); + Sniffer.builder(client, hostsSniffer).setSniffInterval(RandomInts.randomIntBetween(random(), Integer.MIN_VALUE, 0)); fail("should have failed"); } catch(IllegalArgumentException e) { assertEquals("sniffInterval must be greater than 0", e.getMessage()); } try { - Sniffer.builder(client).setSniffRequestTimeout(RandomInts.randomIntBetween(random(), Integer.MIN_VALUE, 0)); - fail("should have failed"); - } catch(IllegalArgumentException e) { - assertEquals("sniffRequestTimeout must be greater than 0", e.getMessage()); - } - - try { - Sniffer.builder(client).setSniffAfterFailureDelay(RandomInts.randomIntBetween(random(), Integer.MIN_VALUE, 0)); + Sniffer.builder(client, hostsSniffer).setSniffAfterFailureDelay(RandomInts.randomIntBetween(random(), Integer.MIN_VALUE, 0)); fail("should have failed"); } catch(IllegalArgumentException e) { assertEquals("sniffAfterFailureDelay must be greater than 0", e.getMessage()); } - try { - Sniffer.builder(null).build(); - fail("should have failed"); - } catch(NullPointerException e) { - assertEquals("restClient cannot be null", e.getMessage()); - } - - try (Sniffer sniffer = Sniffer.builder(client).build()) { + try (Sniffer sniffer = Sniffer.builder(client, hostsSniffer).build()) { assertNotNull(sniffer); } - Sniffer.Builder builder = Sniffer.builder(client); - if (random().nextBoolean()) { - builder.setScheme(RandomPicks.randomFrom(random(), Arrays.asList("http", "https"))); - } + Sniffer.Builder builder = Sniffer.builder(client, hostsSniffer); if (random().nextBoolean()) { 
builder.setSniffInterval(RandomInts.randomIntBetween(random(), 1, Integer.MAX_VALUE)); } if (random().nextBoolean()) { builder.setSniffAfterFailureDelay(RandomInts.randomIntBetween(random(), 1, Integer.MAX_VALUE)); } - if (random().nextBoolean()) { - builder.setSniffRequestTimeout(RandomInts.randomIntBetween(random(), 1, Integer.MAX_VALUE)); - } if (random().nextBoolean()) { builder.setSniffOnFailure(random().nextBoolean()); } @@ -109,4 +87,15 @@ public void testBuild() throws Exception { } } } + + private static class MockHostsSniffer extends HostsSniffer { + MockHostsSniffer() { + super(null, -1, null); + } + + @Override + public List sniffHosts() throws IOException { + return Collections.singletonList(new HttpHost("localhost", 9200)); + } + } } From 437c4f210bc3efe640938e463b5d3c3336ff1438 Mon Sep 17 00:00:00 2001 From: javanna Date: Thu, 9 Jun 2016 14:38:32 +0200 Subject: [PATCH 071/103] rename ElasticsearchResponse to Response and ElasticsearchResponseException to ResponseException --- .../client/sniff/HostsSniffer.java | 4 +-- .../client/sniff/HostsSnifferTests.java | 8 ++--- ...asticsearchResponse.java => Response.java} | 6 ++-- ...eException.java => ResponseException.java} | 22 ++++++------- .../org/elasticsearch/client/RestClient.java | 12 +++---- .../client/RestClientIntegTests.java | 16 +++++----- .../client/RestClientMultipleHostsTests.java | 32 +++++++++---------- .../client/RestClientSingleHostTests.java | 24 +++++++------- .../http/netty/NettyHttpCompressionIT.java | 10 +++--- .../DetailedErrorsDisabledIT.java | 8 ++--- .../DetailedErrorsEnabledIT.java | 12 +++---- .../plugins/ResponseHeaderPluginIT.java | 10 +++--- .../org/elasticsearch/rest/CorsNotSetIT.java | 6 ++-- .../org/elasticsearch/rest/CorsRegexIT.java | 24 +++++++------- .../rest/action/main/RestMainActionIT.java | 6 ++-- .../ContextAndHeaderTransportIT.java | 4 +-- .../test/rest/ESRestTestCase.java | 6 ++-- .../test/rest/RestTestExecutionContext.java | 4 +-- 
.../test/rest/client/RestTestClient.java | 16 +++++----- .../test/rest/client/RestTestResponse.java | 12 +++---- .../test/rest/section/DoSection.java | 6 ++-- 21 files changed, 124 insertions(+), 124 deletions(-) rename client/src/main/java/org/elasticsearch/client/{ElasticsearchResponse.java => Response.java} (94%) rename client/src/main/java/org/elasticsearch/client/{ElasticsearchResponseException.java => ResponseException.java} (70%) diff --git a/client-sniffer/src/main/java/org/elasticsearch/client/sniff/HostsSniffer.java b/client-sniffer/src/main/java/org/elasticsearch/client/sniff/HostsSniffer.java index 340ff89459da7..3da6e2b95cd99 100644 --- a/client-sniffer/src/main/java/org/elasticsearch/client/sniff/HostsSniffer.java +++ b/client-sniffer/src/main/java/org/elasticsearch/client/sniff/HostsSniffer.java @@ -26,7 +26,7 @@ import org.apache.commons.logging.LogFactory; import org.apache.http.HttpEntity; import org.apache.http.HttpHost; -import org.elasticsearch.client.ElasticsearchResponse; +import org.elasticsearch.client.Response; import org.elasticsearch.client.RestClient; import java.io.IOException; @@ -62,7 +62,7 @@ protected HostsSniffer(RestClient restClient, long sniffRequestTimeout, Scheme s * Calls the elasticsearch nodes info api, parses the response and returns all the found http hosts */ public List sniffHosts() throws IOException { - try (ElasticsearchResponse response = restClient.performRequest("get", "/_nodes/http", sniffRequestParams, null)) { + try (Response response = restClient.performRequest("get", "/_nodes/http", sniffRequestParams, null)) { return readHosts(response.getEntity()); } } diff --git a/client-sniffer/src/test/java/org/elasticsearch/client/sniff/HostsSnifferTests.java b/client-sniffer/src/test/java/org/elasticsearch/client/sniff/HostsSnifferTests.java index 86602be81fe57..d714e89eeb304 100644 --- a/client-sniffer/src/test/java/org/elasticsearch/client/sniff/HostsSnifferTests.java +++ 
b/client-sniffer/src/test/java/org/elasticsearch/client/sniff/HostsSnifferTests.java @@ -31,8 +31,8 @@ import org.apache.http.HttpHost; import org.apache.http.client.methods.HttpGet; import org.apache.lucene.util.LuceneTestCase; -import org.elasticsearch.client.ElasticsearchResponse; -import org.elasticsearch.client.ElasticsearchResponseException; +import org.elasticsearch.client.Response; +import org.elasticsearch.client.ResponseException; import org.elasticsearch.client.RestClient; import org.junit.After; import org.junit.Before; @@ -93,8 +93,8 @@ public void testSniffNodes() throws IOException, URISyntaxException { for (HttpHost sniffedHost : sniffedHosts) { assertEquals(sniffedHost, responseHostsIterator.next()); } - } catch(ElasticsearchResponseException e) { - ElasticsearchResponse response = e.getElasticsearchResponse(); + } catch(ResponseException e) { + Response response = e.getResponse(); if (sniffResponse.isFailure) { assertThat(e.getMessage(), containsString("GET " + httpHost + "/_nodes/http?timeout=" + sniffRequestTimeout + "ms")); assertThat(e.getMessage(), containsString(Integer.toString(sniffResponse.nodesInfoResponseCode))); diff --git a/client/src/main/java/org/elasticsearch/client/ElasticsearchResponse.java b/client/src/main/java/org/elasticsearch/client/Response.java similarity index 94% rename from client/src/main/java/org/elasticsearch/client/ElasticsearchResponse.java rename to client/src/main/java/org/elasticsearch/client/Response.java index 037f7412f66e9..f7685b27bb94c 100644 --- a/client/src/main/java/org/elasticsearch/client/ElasticsearchResponse.java +++ b/client/src/main/java/org/elasticsearch/client/Response.java @@ -35,13 +35,13 @@ * its corresponding {@link RequestLine} and {@link HttpHost}. * It must be closed to free any resource held by it, as well as the corresponding connection in the connection pool. 
*/ -public class ElasticsearchResponse implements Closeable { +public class Response implements Closeable { private final RequestLine requestLine; private final HttpHost host; private final CloseableHttpResponse response; - ElasticsearchResponse(RequestLine requestLine, HttpHost host, CloseableHttpResponse response) { + Response(RequestLine requestLine, HttpHost host, CloseableHttpResponse response) { Objects.requireNonNull(requestLine, "requestLine cannot be null"); Objects.requireNonNull(host, "node cannot be null"); Objects.requireNonNull(response, "response cannot be null"); @@ -101,7 +101,7 @@ public HttpEntity getEntity() { @Override public String toString() { - return "ElasticsearchResponse{" + + return "Response{" + "requestLine=" + requestLine + ", host=" + host + ", response=" + response.getStatusLine() + diff --git a/client/src/main/java/org/elasticsearch/client/ElasticsearchResponseException.java b/client/src/main/java/org/elasticsearch/client/ResponseException.java similarity index 70% rename from client/src/main/java/org/elasticsearch/client/ElasticsearchResponseException.java rename to client/src/main/java/org/elasticsearch/client/ResponseException.java index 2e9d42797de3c..44f59cce7db94 100644 --- a/client/src/main/java/org/elasticsearch/client/ElasticsearchResponseException.java +++ b/client/src/main/java/org/elasticsearch/client/ResponseException.java @@ -23,21 +23,21 @@ /** * Exception thrown when an elasticsearch node responds to a request with a status code that indicates an error. - * Note that the response body gets passed in as a string and read eagerly, which means that the ElasticsearchResponse object + * Note that the response body gets passed in as a string and read eagerly, which means that the Response object * is expected to be closed and available only to read metadata like status line, request line, response headers. 
*/ -public class ElasticsearchResponseException extends IOException { +public class ResponseException extends IOException { - private ElasticsearchResponse elasticsearchResponse; + private Response response; private final String responseBody; - ElasticsearchResponseException(ElasticsearchResponse elasticsearchResponse, String responseBody) throws IOException { - super(buildMessage(elasticsearchResponse,responseBody)); - this.elasticsearchResponse = elasticsearchResponse; + ResponseException(Response response, String responseBody) throws IOException { + super(buildMessage(response,responseBody)); + this.response = response; this.responseBody = responseBody; } - private static String buildMessage(ElasticsearchResponse response, String responseBody) { + private static String buildMessage(Response response, String responseBody) { String message = response.getRequestLine().getMethod() + " " + response.getHost() + response.getRequestLine().getUri() + ": " + response.getStatusLine().toString(); if (responseBody != null) { @@ -47,17 +47,17 @@ private static String buildMessage(ElasticsearchResponse response, String respon } /** - * Returns the {@link ElasticsearchResponse} that caused this exception to be thrown. + * Returns the {@link Response} that caused this exception to be thrown. * Expected to be used only to read metadata like status line, request line, response headers. The response body should * be retrieved using {@link #getResponseBody()} */ - public ElasticsearchResponse getElasticsearchResponse() { - return elasticsearchResponse; + public Response getResponse() { + return response; } /** * Returns the response body as a string or null if there wasn't any. 
- * The body is eagerly consumed when an ElasticsearchResponseException gets created, and its corresponding ElasticsearchResponse + * The body is eagerly consumed when an ResponseException gets created, and its corresponding Response * gets closed straightaway so this method is the only way to get back the response body that was returned. */ public String getResponseBody() { diff --git a/client/src/main/java/org/elasticsearch/client/RestClient.java b/client/src/main/java/org/elasticsearch/client/RestClient.java index a01ad694b9623..f0e4d702f5748 100644 --- a/client/src/main/java/org/elasticsearch/client/RestClient.java +++ b/client/src/main/java/org/elasticsearch/client/RestClient.java @@ -131,10 +131,10 @@ public synchronized void setHosts(HttpHost... hosts) { * @return the response returned by elasticsearch * @throws IOException in case of a problem or the connection was aborted * @throws ClientProtocolException in case of an http protocol error - * @throws ElasticsearchResponseException in case elasticsearch responded with a status code that indicated an error + * @throws ResponseException in case elasticsearch responded with a status code that indicated an error */ - public ElasticsearchResponse performRequest(String method, String endpoint, Map params, - HttpEntity entity, Header... headers) throws IOException { + public Response performRequest(String method, String endpoint, Map params, + HttpEntity entity, Header... 
headers) throws IOException { URI uri = buildUri(endpoint, params); HttpRequestBase request = createHttpRequest(method, uri, entity); setHeaders(request, headers); @@ -167,7 +167,7 @@ public ElasticsearchResponse performRequest(String method, String endpoint, Map< lastSeenException = addSuppressedException(lastSeenException, e); continue; } - ElasticsearchResponse elasticsearchResponse = new ElasticsearchResponse(request.getRequestLine(), host, response); + Response elasticsearchResponse = new Response(request.getRequestLine(), host, response); int statusCode = response.getStatusLine().getStatusCode(); if (statusCode < 300 || (request.getMethod().equals(HttpHead.METHOD_NAME) && statusCode == 404) ) { RequestLogger.log(logger, "request succeeded", request, host, response); @@ -185,9 +185,9 @@ public ElasticsearchResponse performRequest(String method, String endpoint, Map< } finally { elasticsearchResponse.close(); } - ElasticsearchResponseException elasticsearchResponseException = new ElasticsearchResponseException( + ResponseException responseException = new ResponseException( elasticsearchResponse, responseBody); - lastSeenException = addSuppressedException(lastSeenException, elasticsearchResponseException); + lastSeenException = addSuppressedException(lastSeenException, responseException); //clients don't retry on 500 because elasticsearch still misuses it instead of 400 in some places if (statusCode == 502 || statusCode == 503 || statusCode == 504) { onFailure(host); diff --git a/client/src/test/java/org/elasticsearch/client/RestClientIntegTests.java b/client/src/test/java/org/elasticsearch/client/RestClientIntegTests.java index c5067dca63f52..4dadfc1a918dd 100644 --- a/client/src/test/java/org/elasticsearch/client/RestClientIntegTests.java +++ b/client/src/test/java/org/elasticsearch/client/RestClientIntegTests.java @@ -144,12 +144,12 @@ public void testHeaders() throws Exception { } int statusCode = randomStatusCode(random()); - ElasticsearchResponse 
esResponse; - try (ElasticsearchResponse response = restClient.performRequest(method, "/" + statusCode, + Response esResponse; + try (Response response = restClient.performRequest(method, "/" + statusCode, Collections.emptyMap(), null, headers)) { esResponse = response; - } catch(ElasticsearchResponseException e) { - esResponse = e.getElasticsearchResponse(); + } catch(ResponseException e) { + esResponse = e.getResponse(); } assertThat(esResponse.getStatusLine().getStatusCode(), equalTo(statusCode)); for (Header responseHeader : esResponse.getHeaders()) { @@ -187,16 +187,16 @@ public void testGetWithBody() throws Exception { private void testBody(String method) throws Exception { String requestBody = "{ \"field\": \"value\" }"; StringEntity entity = new StringEntity(requestBody); - ElasticsearchResponse esResponse; + Response esResponse; String responseBody; int statusCode = randomStatusCode(random()); - try (ElasticsearchResponse response = restClient.performRequest(method, "/" + statusCode, + try (Response response = restClient.performRequest(method, "/" + statusCode, Collections.emptyMap(), entity)) { responseBody = EntityUtils.toString(response.getEntity()); esResponse = response; - } catch(ElasticsearchResponseException e) { + } catch(ResponseException e) { responseBody = e.getResponseBody(); - esResponse = e.getElasticsearchResponse(); + esResponse = e.getResponse(); } assertEquals(statusCode, esResponse.getStatusLine().getStatusCode()); assertEquals(requestBody, responseBody); diff --git a/client/src/test/java/org/elasticsearch/client/RestClientMultipleHostsTests.java b/client/src/test/java/org/elasticsearch/client/RestClientMultipleHostsTests.java index 7ea24cef8e363..64792c2bb1b86 100644 --- a/client/src/test/java/org/elasticsearch/client/RestClientMultipleHostsTests.java +++ b/client/src/test/java/org/elasticsearch/client/RestClientMultipleHostsTests.java @@ -102,7 +102,7 @@ public void testRoundRobinOkStatusCodes() throws Exception { 
Collections.addAll(hostsSet, httpHosts); for (int j = 0; j < httpHosts.length; j++) { int statusCode = randomOkStatusCode(random()); - try (ElasticsearchResponse response = restClient.performRequest(randomHttpMethod(random()), "/" + statusCode, + try (Response response = restClient.performRequest(randomHttpMethod(random()), "/" + statusCode, Collections.emptyMap(), null)) { assertThat(response.getStatusLine().getStatusCode(), equalTo(statusCode)); assertTrue("host not found: " + response.getHost(), hostsSet.remove(response.getHost())); @@ -121,7 +121,7 @@ public void testRoundRobinNoRetryErrors() throws Exception { for (int j = 0; j < httpHosts.length; j++) { String method = randomHttpMethod(random()); int statusCode = randomErrorNoRetryStatusCode(random()); - try (ElasticsearchResponse response = restClient.performRequest(method, "/" + statusCode, + try (Response response = restClient.performRequest(method, "/" + statusCode, Collections.emptyMap(), null)) { if (method.equals("HEAD") && statusCode == 404) { //no exception gets thrown although we got a 404 @@ -131,11 +131,11 @@ public void testRoundRobinNoRetryErrors() throws Exception { } else { fail("request should have failed"); } - } catch(ElasticsearchResponseException e) { + } catch(ResponseException e) { if (method.equals("HEAD") && statusCode == 404) { throw e; } - ElasticsearchResponse response = e.getElasticsearchResponse(); + Response response = e.getResponse(); assertThat(response.getStatusLine().getStatusCode(), equalTo(statusCode)); assertTrue("host not found: " + response.getHost(), hostsSet.remove(response.getHost())); assertEquals(0, e.getSuppressed().length); @@ -151,21 +151,21 @@ public void testRoundRobinRetryErrors() throws Exception { try { restClient.performRequest(randomHttpMethod(random()), retryEndpoint, Collections.emptyMap(), null); fail("request should have failed"); - } catch(ElasticsearchResponseException e) { + } catch(ResponseException e) { Set hostsSet = new HashSet<>(); 
Collections.addAll(hostsSet, httpHosts); //first request causes all the hosts to be blacklisted, the returned exception holds one suppressed exception each failureListener.assertCalled(httpHosts); do { - ElasticsearchResponse response = e.getElasticsearchResponse(); + Response response = e.getResponse(); assertThat(response.getStatusLine().getStatusCode(), equalTo(Integer.parseInt(retryEndpoint.substring(1)))); assertTrue("host [" + response.getHost() + "] not found, most likely used multiple times", hostsSet.remove(response.getHost())); if (e.getSuppressed().length > 0) { assertEquals(1, e.getSuppressed().length); Throwable suppressed = e.getSuppressed()[0]; - assertThat(suppressed, instanceOf(ElasticsearchResponseException.class)); - e = (ElasticsearchResponseException)suppressed; + assertThat(suppressed, instanceOf(ResponseException.class)); + e = (ResponseException)suppressed; } else { e = null; } @@ -201,8 +201,8 @@ public void testRoundRobinRetryErrors() throws Exception { try { restClient.performRequest(randomHttpMethod(random()), retryEndpoint, Collections.emptyMap(), null); fail("request should have failed"); - } catch(ElasticsearchResponseException e) { - ElasticsearchResponse response = e.getElasticsearchResponse(); + } catch(ResponseException e) { + Response response = e.getResponse(); assertThat(response.getStatusLine().getStatusCode(), equalTo(Integer.parseInt(retryEndpoint.substring(1)))); assertTrue("host [" + response.getHost() + "] not found, most likely used multiple times", hostsSet.remove(response.getHost())); @@ -224,13 +224,13 @@ public void testRoundRobinRetryErrors() throws Exception { int iters = RandomInts.randomIntBetween(random(), 2, 10); for (int y = 0; y < iters; y++) { int statusCode = randomErrorNoRetryStatusCode(random()); - ElasticsearchResponse response; - try (ElasticsearchResponse esResponse = restClient.performRequest(randomHttpMethod(random()), "/" + statusCode, + Response response; + try (Response esResponse = 
restClient.performRequest(randomHttpMethod(random()), "/" + statusCode, Collections.emptyMap(), null)) { response = esResponse; } - catch(ElasticsearchResponseException e) { - response = e.getElasticsearchResponse(); + catch(ResponseException e) { + response = e.getResponse(); } assertThat(response.getStatusLine().getStatusCode(), equalTo(statusCode)); if (selectedHost == null) { @@ -248,8 +248,8 @@ public void testRoundRobinRetryErrors() throws Exception { restClient.performRequest(randomHttpMethod(random()), retryEndpoint, Collections.emptyMap(), null); fail("request should have failed"); - } catch(ElasticsearchResponseException e) { - ElasticsearchResponse response = e.getElasticsearchResponse(); + } catch(ResponseException e) { + Response response = e.getResponse(); assertThat(response.getStatusLine().getStatusCode(), equalTo(Integer.parseInt(retryEndpoint.substring(1)))); assertThat(response.getHost(), equalTo(selectedHost)); failureListener.assertCalled(selectedHost); diff --git a/client/src/test/java/org/elasticsearch/client/RestClientSingleHostTests.java b/client/src/test/java/org/elasticsearch/client/RestClientSingleHostTests.java index 1112f38bbee00..e265772270134 100644 --- a/client/src/test/java/org/elasticsearch/client/RestClientSingleHostTests.java +++ b/client/src/test/java/org/elasticsearch/client/RestClientSingleHostTests.java @@ -183,7 +183,7 @@ public void testSetNodes() throws IOException { public void testOkStatusCodes() throws Exception { for (String method : getHttpMethods()) { for (int okStatusCode : getOkStatusCodes()) { - ElasticsearchResponse response = restClient.performRequest(method, "/" + okStatusCode, + Response response = restClient.performRequest(method, "/" + okStatusCode, Collections.emptyMap(), null); assertThat(response.getStatusLine().getStatusCode(), equalTo(okStatusCode)); } @@ -198,7 +198,7 @@ public void testErrorStatusCodes() throws Exception { for (String method : getHttpMethods()) { //error status codes should cause an 
exception to be thrown for (int errorStatusCode : getAllErrorStatusCodes()) { - try (ElasticsearchResponse response = restClient.performRequest(method, "/" + errorStatusCode, + try (Response response = restClient.performRequest(method, "/" + errorStatusCode, Collections.emptyMap(), null)) { if (method.equals("HEAD") && errorStatusCode == 404) { //no exception gets thrown although we got a 404 @@ -206,11 +206,11 @@ public void testErrorStatusCodes() throws Exception { } else { fail("request should have failed"); } - } catch(ElasticsearchResponseException e) { + } catch(ResponseException e) { if (method.equals("HEAD") && errorStatusCode == 404) { throw e; } - assertThat(e.getElasticsearchResponse().getStatusLine().getStatusCode(), equalTo(errorStatusCode)); + assertThat(e.getResponse().getStatusLine().getStatusCode(), equalTo(errorStatusCode)); } if (errorStatusCode <= 500) { failureListener.assertNotCalled(); @@ -250,7 +250,7 @@ public void testBody() throws Exception { StringEntity entity = new StringEntity(body); for (String method : Arrays.asList("DELETE", "GET", "PATCH", "POST", "PUT")) { for (int okStatusCode : getOkStatusCodes()) { - try (ElasticsearchResponse response = restClient.performRequest(method, "/" + okStatusCode, + try (Response response = restClient.performRequest(method, "/" + okStatusCode, Collections.emptyMap(), entity)) { assertThat(response.getStatusLine().getStatusCode(), equalTo(okStatusCode)); assertThat(EntityUtils.toString(response.getEntity()), equalTo(body)); @@ -260,8 +260,8 @@ public void testBody() throws Exception { try { restClient.performRequest(method, "/" + errorStatusCode, Collections.emptyMap(), entity); fail("request should have failed"); - } catch(ElasticsearchResponseException e) { - ElasticsearchResponse response = e.getElasticsearchResponse(); + } catch(ResponseException e) { + Response response = e.getResponse(); assertThat(response.getStatusLine().getStatusCode(), equalTo(errorStatusCode)); 
assertThat(EntityUtils.toString(response.getEntity()), equalTo(body)); } @@ -326,12 +326,12 @@ public void testHeaders() throws Exception { } int statusCode = randomStatusCode(random()); - ElasticsearchResponse esResponse; - try (ElasticsearchResponse response = restClient.performRequest(method, "/" + statusCode, + Response esResponse; + try (Response response = restClient.performRequest(method, "/" + statusCode, Collections.emptyMap(), null, headers)) { esResponse = response; - } catch(ElasticsearchResponseException e) { - esResponse = e.getElasticsearchResponse(); + } catch(ResponseException e) { + esResponse = e.getResponse(); } assertThat(esResponse.getStatusLine().getStatusCode(), equalTo(statusCode)); for (Header responseHeader : esResponse.getHeaders()) { @@ -413,7 +413,7 @@ private HttpUriRequest performRandomRequest(String method) throws IOException, U try { restClient.performRequest(method, uriAsString, params, entity, headers); - } catch(ElasticsearchResponseException e) { + } catch(ResponseException e) { //all good } return request; diff --git a/core/src/test/java/org/elasticsearch/http/netty/NettyHttpCompressionIT.java b/core/src/test/java/org/elasticsearch/http/netty/NettyHttpCompressionIT.java index fb616a87810ba..4e69624376d00 100644 --- a/core/src/test/java/org/elasticsearch/http/netty/NettyHttpCompressionIT.java +++ b/core/src/test/java/org/elasticsearch/http/netty/NettyHttpCompressionIT.java @@ -27,7 +27,7 @@ import org.apache.http.impl.client.HttpClients; import org.apache.http.message.BasicHeader; import org.apache.http.protocol.HttpContext; -import org.elasticsearch.client.ElasticsearchResponse; +import org.elasticsearch.client.Response; import org.elasticsearch.client.RestClient; import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.common.settings.Settings; @@ -62,7 +62,7 @@ public void testCompressesResponseIfRequested() throws Exception { // we need to intercept early, otherwise internal logic in HttpClient will 
just remove the header and we cannot verify it ContentEncodingHeaderExtractor headerExtractor = new ContentEncodingHeaderExtractor(); try (RestClient client = createRestClient(HttpClients.custom().addInterceptorFirst(headerExtractor).build())) { - try (ElasticsearchResponse response = client.performRequest("GET", "/", Collections.emptyMap(), null, + try (Response response = client.performRequest("GET", "/", Collections.emptyMap(), null, new BasicHeader(HttpHeaders.ACCEPT_ENCODING, GZIP_ENCODING))) { assertEquals(200, response.getStatusLine().getStatusCode()); assertTrue(headerExtractor.hasContentEncodingHeader()); @@ -76,7 +76,7 @@ public void testUncompressedResponseByDefault() throws Exception { ContentEncodingHeaderExtractor headerExtractor = new ContentEncodingHeaderExtractor(); CloseableHttpClient httpClient = HttpClients.custom().disableContentCompression().addInterceptorFirst(headerExtractor).build(); try (RestClient client = createRestClient(httpClient)) { - try (ElasticsearchResponse response = client.performRequest("GET", "/", Collections.emptyMap(), null)) { + try (Response response = client.performRequest("GET", "/", Collections.emptyMap(), null)) { assertEquals(200, response.getStatusLine().getStatusCode()); assertFalse(headerExtractor.hasContentEncodingHeader()); } @@ -89,7 +89,7 @@ public void testCanInterpretUncompressedRequest() throws Exception { // this disable content compression in both directions (request and response) CloseableHttpClient httpClient = HttpClients.custom().disableContentCompression().addInterceptorFirst(headerExtractor).build(); try (RestClient client = createRestClient(httpClient)) { - try (ElasticsearchResponse response = client.performRequest("POST", "/company/employees/1", + try (Response response = client.performRequest("POST", "/company/employees/1", Collections.emptyMap(), SAMPLE_DOCUMENT)) { assertEquals(201, response.getStatusLine().getStatusCode()); assertFalse(headerExtractor.hasContentEncodingHeader()); @@ -102,7 
+102,7 @@ public void testCanInterpretCompressedRequest() throws Exception { ContentEncodingHeaderExtractor headerExtractor = new ContentEncodingHeaderExtractor(); // we don't call #disableContentCompression() hence the client will send the content compressed try (RestClient client = createRestClient(HttpClients.custom().addInterceptorFirst(headerExtractor).build())) { - try (ElasticsearchResponse response = client.performRequest("POST", "/company/employees/2", + try (Response response = client.performRequest("POST", "/company/employees/2", Collections.emptyMap(), SAMPLE_DOCUMENT)) { assertEquals(201, response.getStatusLine().getStatusCode()); assertEquals(GZIP_ENCODING, headerExtractor.getContentEncodingHeader().getValue()); diff --git a/core/src/test/java/org/elasticsearch/options/detailederrors/DetailedErrorsDisabledIT.java b/core/src/test/java/org/elasticsearch/options/detailederrors/DetailedErrorsDisabledIT.java index fb38f774c3d1f..21b8706abfba8 100644 --- a/core/src/test/java/org/elasticsearch/options/detailederrors/DetailedErrorsDisabledIT.java +++ b/core/src/test/java/org/elasticsearch/options/detailederrors/DetailedErrorsDisabledIT.java @@ -19,8 +19,8 @@ package org.elasticsearch.options.detailederrors; -import org.elasticsearch.client.ElasticsearchResponse; -import org.elasticsearch.client.ElasticsearchResponseException; +import org.elasticsearch.client.Response; +import org.elasticsearch.client.ResponseException; import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.http.HttpTransportSettings; @@ -51,8 +51,8 @@ public void testThatErrorTraceParamReturns400() throws Exception { try { getRestClient().performRequest("DELETE", "/", Collections.singletonMap("error_trace", "true"), null); fail("request should have failed"); - } catch(ElasticsearchResponseException e) { - ElasticsearchResponse response = e.getElasticsearchResponse(); + } catch(ResponseException e) { + Response 
response = e.getResponse(); assertThat(response.getHeader("Content-Type"), is("application/json")); assertThat(e.getResponseBody(), is("{\"error\":\"error traces in responses are disabled.\"}")); assertThat(response.getStatusLine().getStatusCode(), is(400)); diff --git a/core/src/test/java/org/elasticsearch/options/detailederrors/DetailedErrorsEnabledIT.java b/core/src/test/java/org/elasticsearch/options/detailederrors/DetailedErrorsEnabledIT.java index 39aeb00d05d1a..269dc72b25356 100644 --- a/core/src/test/java/org/elasticsearch/options/detailederrors/DetailedErrorsEnabledIT.java +++ b/core/src/test/java/org/elasticsearch/options/detailederrors/DetailedErrorsEnabledIT.java @@ -19,8 +19,8 @@ package org.elasticsearch.options.detailederrors; -import org.elasticsearch.client.ElasticsearchResponse; -import org.elasticsearch.client.ElasticsearchResponseException; +import org.elasticsearch.client.Response; +import org.elasticsearch.client.ResponseException; import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.test.ESIntegTestCase; @@ -49,8 +49,8 @@ public void testThatErrorTraceWorksByDefault() throws Exception { try { getRestClient().performRequest("DELETE", "/", Collections.singletonMap("error_trace", "true"), null); fail("request should have failed"); - } catch(ElasticsearchResponseException e) { - ElasticsearchResponse response = e.getElasticsearchResponse(); + } catch(ResponseException e) { + Response response = e.getResponse(); assertThat(response.getHeader("Content-Type"), containsString("application/json")); assertThat(e.getResponseBody(), containsString("\"stack_trace\":\"[Validation Failed: 1: index / indices is missing;]; " + "nested: ActionRequestValidationException[Validation Failed: 1:")); @@ -59,8 +59,8 @@ public void testThatErrorTraceWorksByDefault() throws Exception { try { getRestClient().performRequest("DELETE", "/", Collections.emptyMap(), null); fail("request should have 
failed"); - } catch(ElasticsearchResponseException e) { - ElasticsearchResponse response = e.getElasticsearchResponse(); + } catch(ResponseException e) { + Response response = e.getResponse(); assertThat(response.getHeader("Content-Type"), containsString("application/json")); assertThat(e.getResponseBody(), not(containsString("\"stack_trace\":\"[Validation Failed: 1: index / indices is missing;]; " + "nested: ActionRequestValidationException[Validation Failed: 1:"))); diff --git a/core/src/test/java/org/elasticsearch/plugins/ResponseHeaderPluginIT.java b/core/src/test/java/org/elasticsearch/plugins/ResponseHeaderPluginIT.java index 6b4fdee47c694..6a3513416f805 100644 --- a/core/src/test/java/org/elasticsearch/plugins/ResponseHeaderPluginIT.java +++ b/core/src/test/java/org/elasticsearch/plugins/ResponseHeaderPluginIT.java @@ -19,8 +19,8 @@ package org.elasticsearch.plugins; import org.apache.http.message.BasicHeader; -import org.elasticsearch.client.ElasticsearchResponse; -import org.elasticsearch.client.ElasticsearchResponseException; +import org.elasticsearch.client.Response; +import org.elasticsearch.client.ResponseException; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.plugins.responseheader.TestResponseHeaderPlugin; import org.elasticsearch.test.ESIntegTestCase; @@ -55,13 +55,13 @@ public void testThatSettingHeadersWorks() throws Exception { try { getRestClient().performRequest("GET", "/_protected", Collections.emptyMap(), null); fail("request should have failed"); - } catch(ElasticsearchResponseException e) { - ElasticsearchResponse response = e.getElasticsearchResponse(); + } catch(ResponseException e) { + Response response = e.getResponse(); assertThat(response.getStatusLine().getStatusCode(), equalTo(401)); assertThat(response.getHeader("Secret"), equalTo("required")); } - try (ElasticsearchResponse authResponse = getRestClient().performRequest("GET", "/_protected", Collections.emptyMap(), null, + try (Response authResponse = 
getRestClient().performRequest("GET", "/_protected", Collections.emptyMap(), null, new BasicHeader("Secret", "password"))) { assertThat(authResponse.getStatusLine().getStatusCode(), equalTo(200)); assertThat(authResponse.getHeader("Secret"), equalTo("granted")); diff --git a/core/src/test/java/org/elasticsearch/rest/CorsNotSetIT.java b/core/src/test/java/org/elasticsearch/rest/CorsNotSetIT.java index 2a38f3bcd3acb..5f272d3e7e223 100644 --- a/core/src/test/java/org/elasticsearch/rest/CorsNotSetIT.java +++ b/core/src/test/java/org/elasticsearch/rest/CorsNotSetIT.java @@ -20,7 +20,7 @@ package org.elasticsearch.rest; import org.apache.http.message.BasicHeader; -import org.elasticsearch.client.ElasticsearchResponse; +import org.elasticsearch.client.Response; import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.test.ESIntegTestCase; @@ -46,7 +46,7 @@ protected Settings nodeSettings(int nodeOrdinal) { public void testCorsSettingDefaultBehaviourDoesNotReturnAnything() throws Exception { String corsValue = "http://localhost:9200"; - try (ElasticsearchResponse response = getRestClient().performRequest("GET", "/", Collections.emptyMap(), null, + try (Response response = getRestClient().performRequest("GET", "/", Collections.emptyMap(), null, new BasicHeader("User-Agent", "Mozilla Bar"), new BasicHeader("Origin", corsValue))) { assertThat(response.getStatusLine().getStatusCode(), is(200)); assertThat(response.getHeader("Access-Control-Allow-Origin"), nullValue()); @@ -55,7 +55,7 @@ public void testCorsSettingDefaultBehaviourDoesNotReturnAnything() throws Except } public void testThatOmittingCorsHeaderDoesNotReturnAnything() throws Exception { - try (ElasticsearchResponse response = getRestClient().performRequest("GET", "/", Collections.emptyMap(), null)) { + try (Response response = getRestClient().performRequest("GET", "/", Collections.emptyMap(), null)) { 
assertThat(response.getStatusLine().getStatusCode(), is(200)); assertThat(response.getHeader("Access-Control-Allow-Origin"), nullValue()); assertThat(response.getHeader("Access-Control-Allow-Credentials"), nullValue()); diff --git a/core/src/test/java/org/elasticsearch/rest/CorsRegexIT.java b/core/src/test/java/org/elasticsearch/rest/CorsRegexIT.java index 4da0b675d02d3..e6c85553231dc 100644 --- a/core/src/test/java/org/elasticsearch/rest/CorsRegexIT.java +++ b/core/src/test/java/org/elasticsearch/rest/CorsRegexIT.java @@ -19,8 +19,8 @@ package org.elasticsearch.rest; import org.apache.http.message.BasicHeader; -import org.elasticsearch.client.ElasticsearchResponse; -import org.elasticsearch.client.ElasticsearchResponseException; +import org.elasticsearch.client.Response; +import org.elasticsearch.client.ResponseException; import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.network.NetworkModule; @@ -61,12 +61,12 @@ protected Settings nodeSettings(int nodeOrdinal) { public void testThatRegularExpressionWorksOnMatch() throws Exception { String corsValue = "http://localhost:9200"; - try (ElasticsearchResponse response = getRestClient().performRequest("GET", "/", Collections.emptyMap(), null, + try (Response response = getRestClient().performRequest("GET", "/", Collections.emptyMap(), null, new BasicHeader("User-Agent", "Mozilla Bar"), new BasicHeader("Origin", corsValue))) { assertResponseWithOriginheader(response, corsValue); } corsValue = "https://localhost:9200"; - try (ElasticsearchResponse response = getRestClient().performRequest("GET", "/", Collections.emptyMap(), null, + try (Response response = getRestClient().performRequest("GET", "/", Collections.emptyMap(), null, new BasicHeader("User-Agent", "Mozilla Bar"), new BasicHeader("Origin", corsValue));) { assertResponseWithOriginheader(response, corsValue); assertThat(response.getHeader("Access-Control-Allow-Credentials"), 
is("true")); @@ -78,8 +78,8 @@ public void testThatRegularExpressionReturnsForbiddenOnNonMatch() throws Excepti getRestClient().performRequest("GET", "/", Collections.emptyMap(), null, new BasicHeader("User-Agent", "Mozilla Bar"), new BasicHeader("Origin", "http://evil-host:9200")); fail("request should have failed"); - } catch(ElasticsearchResponseException e) { - ElasticsearchResponse response = e.getElasticsearchResponse(); + } catch(ResponseException e) { + Response response = e.getResponse(); // a rejected origin gets a FORBIDDEN - 403 assertThat(response.getStatusLine().getStatusCode(), is(403)); assertThat(response.getHeader("Access-Control-Allow-Origin"), nullValue()); @@ -87,7 +87,7 @@ public void testThatRegularExpressionReturnsForbiddenOnNonMatch() throws Excepti } public void testThatSendingNoOriginHeaderReturnsNoAccessControlHeader() throws Exception { - try (ElasticsearchResponse response = getRestClient().performRequest("GET", "/", Collections.emptyMap(), null, + try (Response response = getRestClient().performRequest("GET", "/", Collections.emptyMap(), null, new BasicHeader("User-Agent", "Mozilla Bar"))) { assertThat(response.getStatusLine().getStatusCode(), is(200)); assertThat(response.getHeader("Access-Control-Allow-Origin"), nullValue()); @@ -95,7 +95,7 @@ public void testThatSendingNoOriginHeaderReturnsNoAccessControlHeader() throws E } public void testThatRegularExpressionIsNotAppliedWithoutCorrectBrowserOnMatch() throws Exception { - try (ElasticsearchResponse response = getRestClient().performRequest("GET", "/", Collections.emptyMap(), null)) { + try (Response response = getRestClient().performRequest("GET", "/", Collections.emptyMap(), null)) { assertThat(response.getStatusLine().getStatusCode(), is(200)); assertThat(response.getHeader("Access-Control-Allow-Origin"), nullValue()); } @@ -103,7 +103,7 @@ public void testThatRegularExpressionIsNotAppliedWithoutCorrectBrowserOnMatch() public void testThatPreFlightRequestWorksOnMatch() throws 
Exception { String corsValue = "http://localhost:9200"; - try (ElasticsearchResponse response = getRestClient().performRequest("OPTIONS", "/", Collections.emptyMap(), null, + try (Response response = getRestClient().performRequest("OPTIONS", "/", Collections.emptyMap(), null, new BasicHeader("User-Agent", "Mozilla Bar"), new BasicHeader("Origin", corsValue), new BasicHeader(HttpHeaders.Names.ACCESS_CONTROL_REQUEST_METHOD, "GET"));) { assertResponseWithOriginheader(response, corsValue); @@ -117,8 +117,8 @@ public void testThatPreFlightRequestReturnsNullOnNonMatch() throws Exception { new BasicHeader("Origin", "http://evil-host:9200"), new BasicHeader(HttpHeaders.Names.ACCESS_CONTROL_REQUEST_METHOD, "GET")); fail("request should have failed"); - } catch(ElasticsearchResponseException e) { - ElasticsearchResponse response = e.getElasticsearchResponse(); + } catch(ResponseException e) { + Response response = e.getResponse(); // a rejected origin gets a FORBIDDEN - 403 assertThat(response.getStatusLine().getStatusCode(), is(403)); assertThat(response.getHeader("Access-Control-Allow-Origin"), nullValue()); @@ -126,7 +126,7 @@ public void testThatPreFlightRequestReturnsNullOnNonMatch() throws Exception { } } - protected static void assertResponseWithOriginheader(ElasticsearchResponse response, String expectedCorsHeader) { + protected static void assertResponseWithOriginheader(Response response, String expectedCorsHeader) { assertThat(response.getStatusLine().getStatusCode(), is(200)); assertThat(response.getHeader("Access-Control-Allow-Origin"), is(expectedCorsHeader)); } diff --git a/core/src/test/java/org/elasticsearch/rest/action/main/RestMainActionIT.java b/core/src/test/java/org/elasticsearch/rest/action/main/RestMainActionIT.java index edf9b5b2c0633..72f8733292c22 100644 --- a/core/src/test/java/org/elasticsearch/rest/action/main/RestMainActionIT.java +++ b/core/src/test/java/org/elasticsearch/rest/action/main/RestMainActionIT.java @@ -19,7 +19,7 @@ package 
org.elasticsearch.rest.action.main; import org.apache.http.util.EntityUtils; -import org.elasticsearch.client.ElasticsearchResponse; +import org.elasticsearch.client.Response; import org.elasticsearch.common.network.NetworkModule; import org.elasticsearch.common.settings.Settings; import org.elasticsearch.test.ESIntegTestCase; @@ -40,14 +40,14 @@ protected Settings nodeSettings(int nodeOrdinal) { } public void testHeadRequest() throws IOException { - try (ElasticsearchResponse response = getRestClient().performRequest("HEAD", "/", Collections.emptyMap(), null)) { + try (Response response = getRestClient().performRequest("HEAD", "/", Collections.emptyMap(), null)) { assertThat(response.getStatusLine().getStatusCode(), equalTo(200)); assertNull(response.getEntity()); } } public void testGetRequest() throws IOException { - try (ElasticsearchResponse response = getRestClient().performRequest("GET", "/", Collections.emptyMap(), null)) { + try (Response response = getRestClient().performRequest("GET", "/", Collections.emptyMap(), null)) { assertThat(response.getStatusLine().getStatusCode(), equalTo(200)); assertNotNull(response.getEntity()); assertThat(EntityUtils.toString(response.getEntity()), containsString("cluster_name")); diff --git a/core/src/test/java/org/elasticsearch/transport/ContextAndHeaderTransportIT.java b/core/src/test/java/org/elasticsearch/transport/ContextAndHeaderTransportIT.java index 9d25414f4d8e6..4af8b4dd701ca 100644 --- a/core/src/test/java/org/elasticsearch/transport/ContextAndHeaderTransportIT.java +++ b/core/src/test/java/org/elasticsearch/transport/ContextAndHeaderTransportIT.java @@ -32,7 +32,7 @@ import org.elasticsearch.action.support.ActionFilter; import org.elasticsearch.action.termvectors.MultiTermVectorsRequest; import org.elasticsearch.client.Client; -import org.elasticsearch.client.ElasticsearchResponse; +import org.elasticsearch.client.Response; import org.elasticsearch.common.inject.AbstractModule; import 
org.elasticsearch.common.inject.Inject; import org.elasticsearch.common.inject.Module; @@ -218,7 +218,7 @@ public void testThatRelevantHttpHeadersBecomeRequestHeaders() throws Exception { restController.registerRelevantHeaders(relevantHeaderName); } - try (ElasticsearchResponse response = getRestClient().performRequest( + try (Response response = getRestClient().performRequest( "GET", "/" + queryIndex + "/_search", Collections.emptyMap(), null, new BasicHeader(randomHeaderKey, randomHeaderValue), new BasicHeader(relevantHeaderName, randomHeaderValue))) { assertThat(response.getStatusLine().getStatusCode(), equalTo(200)); diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java b/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java index c27d3ed560474..e7fb35cabea59 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/ESRestTestCase.java @@ -22,7 +22,7 @@ import com.carrotsearch.randomizedtesting.RandomizedTest; import org.apache.lucene.util.IOUtils; import org.elasticsearch.action.admin.cluster.node.tasks.list.ListTasksAction; -import org.elasticsearch.client.ElasticsearchResponseException; +import org.elasticsearch.client.ResponseException; import org.elasticsearch.common.Strings; import org.elasticsearch.common.SuppressForbidden; import org.elasticsearch.common.settings.Settings; @@ -276,9 +276,9 @@ public void wipeCluster() throws Exception { deleteIndicesArgs.put("index", "*"); try { adminExecutionContext.callApi("indices.delete", deleteIndicesArgs, Collections.emptyList(), Collections.emptyMap()); - } catch (ElasticsearchResponseException e) { + } catch (ResponseException e) { // 404 here just means we had no indexes - if (e.getElasticsearchResponse().getStatusLine().getStatusCode() != 404) { + if (e.getResponse().getStatusLine().getStatusCode() != 404) { throw e; } } diff --git 
a/test/framework/src/main/java/org/elasticsearch/test/rest/RestTestExecutionContext.java b/test/framework/src/main/java/org/elasticsearch/test/rest/RestTestExecutionContext.java index 1fa3dba5d83ef..34397f03d9419 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/RestTestExecutionContext.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/RestTestExecutionContext.java @@ -19,7 +19,7 @@ package org.elasticsearch.test.rest; import org.elasticsearch.Version; -import org.elasticsearch.client.ElasticsearchResponseException; +import org.elasticsearch.client.ResponseException; import org.elasticsearch.common.logging.ESLogger; import org.elasticsearch.common.logging.Loggers; import org.elasticsearch.common.settings.Settings; @@ -78,7 +78,7 @@ public RestTestResponse callApi(String apiName, Map params, List //we always stash the last response body stash.stashResponse(response); return response; - } catch(ElasticsearchResponseException e) { + } catch(ResponseException e) { response = new RestTestResponse(e); throw e; } diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/client/RestTestClient.java b/test/framework/src/main/java/org/elasticsearch/test/rest/client/RestTestClient.java index d21e302168bfb..6a9dd8885049c 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/client/RestTestClient.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/client/RestTestClient.java @@ -33,8 +33,8 @@ import org.apache.http.ssl.SSLContexts; import org.apache.lucene.util.IOUtils; import org.elasticsearch.Version; -import org.elasticsearch.client.ElasticsearchResponse; -import org.elasticsearch.client.ElasticsearchResponseException; +import org.elasticsearch.client.Response; +import org.elasticsearch.client.ResponseException; import org.elasticsearch.client.RestClient; import org.elasticsearch.common.Strings; import org.elasticsearch.common.io.PathUtils; @@ -106,8 +106,8 @@ private Version 
readAndCheckVersion(URL[] urls) throws IOException { //we don't really use the urls here, we rely on the client doing round-robin to touch all the nodes in the cluster String method = restApi.getMethods().get(0); String endpoint = restApi.getPaths().get(0); - ElasticsearchResponse elasticsearchResponse = restClient.performRequest(method, endpoint, Collections.emptyMap(), null); - RestTestResponse restTestResponse = new RestTestResponse(elasticsearchResponse); + Response response = restClient.performRequest(method, endpoint, Collections.emptyMap(), null); + RestTestResponse restTestResponse = new RestTestResponse(response); Object latestVersion = restTestResponse.evaluate("version.number"); if (latestVersion == null) { throw new RuntimeException("elasticsearch version not found in the response"); @@ -143,7 +143,7 @@ public RestTestResponse callApi(String apiName, Map params, Stri entity = new StringEntity(body, RestClient.JSON_CONTENT_TYPE); } // And everything else is a url parameter! - ElasticsearchResponse response = restClient.performRequest(method, path, queryStringParams, entity); + Response response = restClient.performRequest(method, path, queryStringParams, entity); return new RestTestResponse(response); } @@ -247,11 +247,11 @@ public RestTestResponse callApi(String apiName, Map params, Stri logger.debug("calling api [{}]", apiName); try { - ElasticsearchResponse response = restClient.performRequest(requestMethod, requestPath, + Response response = restClient.performRequest(requestMethod, requestPath, queryStringParams, requestBody, requestHeaders); return new RestTestResponse(response); - } catch(ElasticsearchResponseException e) { - if (ignores.contains(e.getElasticsearchResponse().getStatusLine().getStatusCode())) { + } catch(ResponseException e) { + if (ignores.contains(e.getResponse().getStatusLine().getStatusCode())) { return new RestTestResponse(e); } throw e; diff --git 
a/test/framework/src/main/java/org/elasticsearch/test/rest/client/RestTestResponse.java b/test/framework/src/main/java/org/elasticsearch/test/rest/client/RestTestResponse.java index 5406fb7944344..5b5773d6fdc5f 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/client/RestTestResponse.java +++ b/test/framework/src/main/java/org/elasticsearch/test/rest/client/RestTestResponse.java @@ -21,8 +21,8 @@ import org.apache.http.client.methods.HttpHead; import org.apache.http.util.EntityUtils; import org.apache.lucene.util.IOUtils; -import org.elasticsearch.client.ElasticsearchResponse; -import org.elasticsearch.client.ElasticsearchResponseException; +import org.elasticsearch.client.Response; +import org.elasticsearch.client.ResponseException; import org.elasticsearch.test.rest.Stash; import org.elasticsearch.test.rest.json.JsonPath; @@ -35,11 +35,11 @@ */ public class RestTestResponse { - private final ElasticsearchResponse response; + private final Response response; private final String body; private JsonPath parsedResponse; - public RestTestResponse(ElasticsearchResponse response) { + public RestTestResponse(Response response) { this.response = response; if (response.getEntity() != null) { try { @@ -55,8 +55,8 @@ public RestTestResponse(ElasticsearchResponse response) { } } - public RestTestResponse(ElasticsearchResponseException responseException) { - this.response = responseException.getElasticsearchResponse(); + public RestTestResponse(ResponseException responseException) { + this.response = responseException.getResponse(); this.body = responseException.getResponseBody(); } diff --git a/test/framework/src/main/java/org/elasticsearch/test/rest/section/DoSection.java b/test/framework/src/main/java/org/elasticsearch/test/rest/section/DoSection.java index 76593d4af9daf..2547d6beceac6 100644 --- a/test/framework/src/main/java/org/elasticsearch/test/rest/section/DoSection.java +++ 
b/test/framework/src/main/java/org/elasticsearch/test/rest/section/DoSection.java @@ -18,7 +18,7 @@ */ package org.elasticsearch.test.rest.section; -import org.elasticsearch.client.ElasticsearchResponseException; +import org.elasticsearch.client.ResponseException; import org.elasticsearch.common.Strings; import org.elasticsearch.common.collect.Tuple; import org.elasticsearch.common.logging.ESLogger; @@ -102,7 +102,7 @@ public void execute(RestTestExecutionContext executionContext) throws IOExceptio } fail(formatStatusCodeMessage(restTestResponse, catchStatusCode)); } - } catch(ElasticsearchResponseException e) { + } catch(ResponseException e) { RestTestResponse restTestResponse = new RestTestResponse(e); if (!Strings.hasLength(catchParam)) { fail(formatStatusCodeMessage(restTestResponse, "2xx")); @@ -111,7 +111,7 @@ public void execute(RestTestExecutionContext executionContext) throws IOExceptio } else if (catchParam.length() > 2 && catchParam.startsWith("/") && catchParam.endsWith("/")) { //the text of the error message matches regular expression assertThat(formatStatusCodeMessage(restTestResponse, "4xx|5xx"), - e.getElasticsearchResponse().getStatusLine().getStatusCode(), greaterThanOrEqualTo(400)); + e.getResponse().getStatusLine().getStatusCode(), greaterThanOrEqualTo(400)); Object error = executionContext.response("error"); assertThat("error was expected in the response", error, notNullValue()); //remove delimiters from regex From 85606e8a4b60f86e9fe0e95bb8e7c426c5013358 Mon Sep 17 00:00:00 2001 From: javanna Date: Thu, 9 Jun 2016 16:27:52 +0200 Subject: [PATCH 072/103] renamed all time intervals arguments and members to include the time unit in their name --- .../client/sniff/HostsSniffer.java | 21 ++++----- .../elasticsearch/client/sniff/Sniffer.java | 46 +++++++++---------- .../sniff/HostsSnifferBuilderTests.java | 4 +- .../client/sniff/SnifferBuilderTests.java | 8 ++-- .../org/elasticsearch/client/RestClient.java | 38 +++++++-------- 
.../client/RestClientBuilderTests.java | 4 +- 6 files changed, 60 insertions(+), 61 deletions(-) diff --git a/client-sniffer/src/main/java/org/elasticsearch/client/sniff/HostsSniffer.java b/client-sniffer/src/main/java/org/elasticsearch/client/sniff/HostsSniffer.java index 3da6e2b95cd99..7cdb51066cb58 100644 --- a/client-sniffer/src/main/java/org/elasticsearch/client/sniff/HostsSniffer.java +++ b/client-sniffer/src/main/java/org/elasticsearch/client/sniff/HostsSniffer.java @@ -52,9 +52,9 @@ public class HostsSniffer { private final Scheme scheme; private final JsonFactory jsonFactory = new JsonFactory(); - protected HostsSniffer(RestClient restClient, long sniffRequestTimeout, Scheme scheme) { + protected HostsSniffer(RestClient restClient, long sniffRequestTimeoutMillis, Scheme scheme) { this.restClient = restClient; - this.sniffRequestParams = Collections.singletonMap("timeout", sniffRequestTimeout + "ms"); + this.sniffRequestParams = Collections.singletonMap("timeout", sniffRequestTimeoutMillis + "ms"); this.scheme = scheme; } @@ -155,7 +155,7 @@ public static class Builder { public static final long DEFAULT_SNIFF_REQUEST_TIMEOUT = TimeUnit.SECONDS.toMillis(1); private final RestClient restClient; - private long sniffRequestTimeout = DEFAULT_SNIFF_REQUEST_TIMEOUT; + private long sniffRequestTimeoutMillis = DEFAULT_SNIFF_REQUEST_TIMEOUT; private Scheme scheme; private Builder(RestClient restClient) { @@ -164,15 +164,14 @@ private Builder(RestClient restClient) { } /** - * Sets the sniff request timeout to be passed in as a query string parameter to elasticsearch. - * Allows to halt the request without any failure, as only the nodes that have responded - * within this timeout will be returned. + * Sets the sniff request timeout (in milliseconds) to be passed in as a query string parameter to elasticsearch. + * Allows to halt the request without any failure, as only the nodes that have responded within this timeout will be returned. 
*/ - public Builder setSniffRequestTimeout(int sniffRequestTimeout) { - if (sniffRequestTimeout <= 0) { - throw new IllegalArgumentException("sniffRequestTimeout must be greater than 0"); + public Builder setSniffRequestTimeoutMillis(int sniffRequestTimeoutMillis) { + if (sniffRequestTimeoutMillis <= 0) { + throw new IllegalArgumentException("sniffRequestTimeoutMillis must be greater than 0"); } - this.sniffRequestTimeout = sniffRequestTimeout; + this.sniffRequestTimeoutMillis = sniffRequestTimeoutMillis; return this; } @@ -189,7 +188,7 @@ public Builder setScheme(Scheme scheme) { * Creates a new {@link HostsSniffer} instance given the provided configuration */ public HostsSniffer build() { - return new HostsSniffer(restClient, sniffRequestTimeout, scheme); + return new HostsSniffer(restClient, sniffRequestTimeoutMillis, scheme); } } } diff --git a/client-sniffer/src/main/java/org/elasticsearch/client/sniff/Sniffer.java b/client-sniffer/src/main/java/org/elasticsearch/client/sniff/Sniffer.java index cd5d1d93148c9..37240741b3d85 100644 --- a/client-sniffer/src/main/java/org/elasticsearch/client/sniff/Sniffer.java +++ b/client-sniffer/src/main/java/org/elasticsearch/client/sniff/Sniffer.java @@ -72,17 +72,17 @@ private static class Task implements Runnable { private final HostsSniffer hostsSniffer; private final RestClient restClient; - private final long sniffInterval; - private final long sniffAfterFailureDelay; + private final long sniffIntervalMillis; + private final long sniffAfterFailureDelayMillis; private final ScheduledExecutorService scheduledExecutorService; private final AtomicBoolean running = new AtomicBoolean(false); private ScheduledFuture scheduledFuture; - private Task(HostsSniffer hostsSniffer, RestClient restClient, long sniffInterval, long sniffAfterFailureDelay) { + private Task(HostsSniffer hostsSniffer, RestClient restClient, long sniffIntervalMillis, long sniffAfterFailureDelayMillis) { this.hostsSniffer = hostsSniffer; this.restClient = 
restClient; - this.sniffInterval = sniffInterval; - this.sniffAfterFailureDelay = sniffAfterFailureDelay; + this.sniffIntervalMillis = sniffIntervalMillis; + this.sniffAfterFailureDelayMillis = sniffAfterFailureDelayMillis; this.scheduledExecutorService = Executors.newScheduledThreadPool(1); scheduleNextRun(0); } @@ -104,11 +104,11 @@ synchronized void scheduleNextRun(long delayMillis) { @Override public void run() { - sniff(null, sniffInterval); + sniff(null, sniffIntervalMillis); } void sniffOnFailure(HttpHost failedHost) { - sniff(failedHost, sniffAfterFailureDelay); + sniff(failedHost, sniffAfterFailureDelayMillis); } void sniff(HttpHost excludeHost, long nextSniffDelayMillis) { @@ -158,9 +158,9 @@ public static final class Builder { private final RestClient restClient; private final HostsSniffer hostsSniffer; - private long sniffInterval = DEFAULT_SNIFF_INTERVAL; + private long sniffIntervalMillis = DEFAULT_SNIFF_INTERVAL; private boolean sniffOnFailure = true; - private long sniffAfterFailureDelay = DEFAULT_SNIFF_AFTER_FAILURE_DELAY; + private long sniffAfterFailureDelayMillis = DEFAULT_SNIFF_AFTER_FAILURE_DELAY; /** * Creates a new builder instance by providing the {@link RestClient} that will be used to communicate with elasticsearch, @@ -174,21 +174,21 @@ private Builder(RestClient restClient, HostsSniffer hostsSniffer) { } /** - * Sets the interval between consecutive ordinary sniff executions. Will be honoured when sniffOnFailure is disabled or - * when there are no failures between consecutive sniff executions. - * @throws IllegalArgumentException if sniffInterval is not greater than 0 + * Sets the interval between consecutive ordinary sniff executions in milliseconds. Will be honoured when + * sniffOnFailure is disabled or when there are no failures between consecutive sniff executions. 
+ * @throws IllegalArgumentException if sniffIntervalMillis is not greater than 0 */ - public Builder setSniffInterval(int sniffInterval) { - if (sniffInterval <= 0) { - throw new IllegalArgumentException("sniffInterval must be greater than 0"); + public Builder setSniffIntervalMillis(int sniffIntervalMillis) { + if (sniffIntervalMillis <= 0) { + throw new IllegalArgumentException("sniffIntervalMillis must be greater than 0"); } - this.sniffInterval = sniffInterval; + this.sniffIntervalMillis = sniffIntervalMillis; return this; } /** * Enables/disables sniffing on failure. If enabled, at each failure nodes will be reloaded, and a new sniff execution will - * be scheduled after a shorter time than usual (sniffAfterFailureDelay). + * be scheduled after a shorter time than usual (sniffAfterFailureDelayMillis). */ public Builder setSniffOnFailure(boolean sniffOnFailure) { this.sniffOnFailure = sniffOnFailure; @@ -196,13 +196,13 @@ public Builder setSniffOnFailure(boolean sniffOnFailure) { } /** - * Sets the delay of a sniff execution scheduled after a failure. + * Sets the delay of a sniff execution scheduled after a failure (in milliseconds) */ - public Builder setSniffAfterFailureDelay(int sniffAfterFailureDelay) { - if (sniffAfterFailureDelay <= 0) { - throw new IllegalArgumentException("sniffAfterFailureDelay must be greater than 0"); + public Builder setSniffAfterFailureDelayMillis(int sniffAfterFailureDelayMillis) { + if (sniffAfterFailureDelayMillis <= 0) { + throw new IllegalArgumentException("sniffAfterFailureDelayMillis must be greater than 0"); } - this.sniffAfterFailureDelay = sniffAfterFailureDelay; + this.sniffAfterFailureDelayMillis = sniffAfterFailureDelayMillis; return this; } @@ -210,7 +210,7 @@ public Builder setSniffAfterFailureDelay(int sniffAfterFailureDelay) { * Creates the {@link Sniffer} based on the provided configuration. 
*/ public Sniffer build() { - return new Sniffer(restClient, hostsSniffer, sniffInterval, sniffOnFailure, sniffAfterFailureDelay); + return new Sniffer(restClient, hostsSniffer, sniffIntervalMillis, sniffOnFailure, sniffAfterFailureDelayMillis); } } } diff --git a/client-sniffer/src/test/java/org/elasticsearch/client/sniff/HostsSnifferBuilderTests.java b/client-sniffer/src/test/java/org/elasticsearch/client/sniff/HostsSnifferBuilderTests.java index 8f355ecc47ae5..38baccf931ead 100644 --- a/client-sniffer/src/test/java/org/elasticsearch/client/sniff/HostsSnifferBuilderTests.java +++ b/client-sniffer/src/test/java/org/elasticsearch/client/sniff/HostsSnifferBuilderTests.java @@ -50,7 +50,7 @@ public void testBuild() throws Exception { } try { - HostsSniffer.builder(client).setSniffRequestTimeout(RandomInts.randomIntBetween(random(), Integer.MIN_VALUE, 0)); + HostsSniffer.builder(client).setSniffRequestTimeoutMillis(RandomInts.randomIntBetween(random(), Integer.MIN_VALUE, 0)); fail("should have failed"); } catch(IllegalArgumentException e) { assertEquals(e.getMessage(), "sniffRequestTimeout must be greater than 0"); @@ -61,7 +61,7 @@ public void testBuild() throws Exception { builder.setScheme(RandomPicks.randomFrom(random(), HostsSniffer.Scheme.values())); } if (random().nextBoolean()) { - builder.setSniffRequestTimeout(RandomInts.randomIntBetween(random(), 1, Integer.MAX_VALUE)); + builder.setSniffRequestTimeoutMillis(RandomInts.randomIntBetween(random(), 1, Integer.MAX_VALUE)); } assertNotNull(builder.build()); } diff --git a/client-sniffer/src/test/java/org/elasticsearch/client/sniff/SnifferBuilderTests.java b/client-sniffer/src/test/java/org/elasticsearch/client/sniff/SnifferBuilderTests.java index 863830370b6a5..3fb152906b4d3 100644 --- a/client-sniffer/src/test/java/org/elasticsearch/client/sniff/SnifferBuilderTests.java +++ b/client-sniffer/src/test/java/org/elasticsearch/client/sniff/SnifferBuilderTests.java @@ -55,14 +55,14 @@ public void testBuild() throws 
Exception { } try { - Sniffer.builder(client, hostsSniffer).setSniffInterval(RandomInts.randomIntBetween(random(), Integer.MIN_VALUE, 0)); + Sniffer.builder(client, hostsSniffer).setSniffIntervalMillis(RandomInts.randomIntBetween(random(), Integer.MIN_VALUE, 0)); fail("should have failed"); } catch(IllegalArgumentException e) { assertEquals("sniffInterval must be greater than 0", e.getMessage()); } try { - Sniffer.builder(client, hostsSniffer).setSniffAfterFailureDelay(RandomInts.randomIntBetween(random(), Integer.MIN_VALUE, 0)); + Sniffer.builder(client, hostsSniffer).setSniffAfterFailureDelayMillis(RandomInts.randomIntBetween(random(), Integer.MIN_VALUE, 0)); fail("should have failed"); } catch(IllegalArgumentException e) { assertEquals("sniffAfterFailureDelay must be greater than 0", e.getMessage()); @@ -74,10 +74,10 @@ public void testBuild() throws Exception { Sniffer.Builder builder = Sniffer.builder(client, hostsSniffer); if (random().nextBoolean()) { - builder.setSniffInterval(RandomInts.randomIntBetween(random(), 1, Integer.MAX_VALUE)); + builder.setSniffIntervalMillis(RandomInts.randomIntBetween(random(), 1, Integer.MAX_VALUE)); } if (random().nextBoolean()) { - builder.setSniffAfterFailureDelay(RandomInts.randomIntBetween(random(), 1, Integer.MAX_VALUE)); + builder.setSniffAfterFailureDelayMillis(RandomInts.randomIntBetween(random(), 1, Integer.MAX_VALUE)); } if (random().nextBoolean()) { builder.setSniffOnFailure(random().nextBoolean()); diff --git a/client/src/main/java/org/elasticsearch/client/RestClient.java b/client/src/main/java/org/elasticsearch/client/RestClient.java index f0e4d702f5748..81354ae7a8513 100644 --- a/client/src/main/java/org/elasticsearch/client/RestClient.java +++ b/client/src/main/java/org/elasticsearch/client/RestClient.java @@ -86,15 +86,15 @@ public final class RestClient implements Closeable { //we don't rely on default headers supported by HttpClient as those cannot be replaced, plus it would get hairy //when we create the 
HttpClient instance on our own as there would be two different ways to set the default headers. private final Header[] defaultHeaders; - private final long maxRetryTimeout; + private final long maxRetryTimeoutMillis; private final AtomicInteger lastHostIndex = new AtomicInteger(0); private volatile Set hosts; private final ConcurrentMap blacklist = new ConcurrentHashMap<>(); private volatile FailureListener failureListener = new FailureListener(); - private RestClient(CloseableHttpClient client, long maxRetryTimeout, Header[] defaultHeaders, HttpHost[] hosts) { + private RestClient(CloseableHttpClient client, long maxRetryTimeoutMillis, Header[] defaultHeaders, HttpHost[] hosts) { this.client = client; - this.maxRetryTimeout = maxRetryTimeout; + this.maxRetryTimeoutMillis = maxRetryTimeoutMillis; this.defaultHeaders = defaultHeaders; setHosts(hosts); } @@ -139,7 +139,7 @@ public Response performRequest(String method, String endpoint, Map hostIterator = nextHost(); @@ -358,16 +358,16 @@ public static Builder builder(HttpHost... hosts) { * Rest client builder. Helps creating a new {@link RestClient}. 
*/ public static final class Builder { - public static final int DEFAULT_CONNECT_TIMEOUT = 1000; - public static final int DEFAULT_SOCKET_TIMEOUT = 10000; - public static final int DEFAULT_MAX_RETRY_TIMEOUT = DEFAULT_SOCKET_TIMEOUT; - public static final int DEFAULT_CONNECTION_REQUEST_TIMEOUT = 500; + public static final int DEFAULT_CONNECT_TIMEOUT_MILLIS = 1000; + public static final int DEFAULT_SOCKET_TIMEOUT_MILLIS = 10000; + public static final int DEFAULT_MAX_RETRY_TIMEOUT_MILLIS = DEFAULT_SOCKET_TIMEOUT_MILLIS; + public static final int DEFAULT_CONNECTION_REQUEST_TIMEOUT_MILLIS = 500; private static final Header[] EMPTY_HEADERS = new Header[0]; private final HttpHost[] hosts; private CloseableHttpClient httpClient; - private int maxRetryTimeout = DEFAULT_MAX_RETRY_TIMEOUT; + private int maxRetryTimeout = DEFAULT_MAX_RETRY_TIMEOUT_MILLIS; private Header[] defaultHeaders = EMPTY_HEADERS; /** @@ -392,16 +392,16 @@ public Builder setHttpClient(CloseableHttpClient httpClient) { } /** - * Sets the maximum timeout to honour in case of multiple retries of the same request. - * {@link #DEFAULT_MAX_RETRY_TIMEOUT} if not specified. + * Sets the maximum timeout (in milliseconds) to honour in case of multiple retries of the same request. + * {@link #DEFAULT_MAX_RETRY_TIMEOUT_MILLIS} if not specified. 
* - * @throws IllegalArgumentException if maxRetryTimeout is not greater than 0 + * @throws IllegalArgumentException if maxRetryTimeoutMillis is not greater than 0 */ - public Builder setMaxRetryTimeout(int maxRetryTimeout) { - if (maxRetryTimeout <= 0) { - throw new IllegalArgumentException("maxRetryTimeout must be greater than 0"); + public Builder setMaxRetryTimeoutMillis(int maxRetryTimeoutMillis) { + if (maxRetryTimeoutMillis <= 0) { + throw new IllegalArgumentException("maxRetryTimeoutMillis must be greater than 0"); } - this.maxRetryTimeout = maxRetryTimeout; + this.maxRetryTimeout = maxRetryTimeoutMillis; return this; } @@ -446,9 +446,9 @@ public static CloseableHttpClient createDefaultHttpClient(Registry Date: Thu, 9 Jun 2016 16:31:09 +0200 Subject: [PATCH 073/103] rename deadUntil to deadUntilNanos --- .../java/org/elasticsearch/client/DeadHostState.java | 12 ++++++------ .../java/org/elasticsearch/client/RestClient.java | 4 ++-- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/client/src/main/java/org/elasticsearch/client/DeadHostState.java b/client/src/main/java/org/elasticsearch/client/DeadHostState.java index 52ef6e2bc6bf2..89a1720d46e18 100644 --- a/client/src/main/java/org/elasticsearch/client/DeadHostState.java +++ b/client/src/main/java/org/elasticsearch/client/DeadHostState.java @@ -34,17 +34,17 @@ class DeadHostState { static final DeadHostState INITIAL_DEAD_STATE = new DeadHostState(); private final int failedAttempts; - private final long deadUntil; + private final long deadUntilNanos; private DeadHostState() { this.failedAttempts = 1; - this.deadUntil = System.nanoTime() + MIN_CONNECTION_TIMEOUT_NANOS; + this.deadUntilNanos = System.nanoTime() + MIN_CONNECTION_TIMEOUT_NANOS; } DeadHostState(DeadHostState previousDeadHostState) { long timeoutNanos = (long)Math.min(MIN_CONNECTION_TIMEOUT_NANOS * 2 * Math.pow(2, previousDeadHostState.failedAttempts * 0.5 - 1), MAX_CONNECTION_TIMEOUT_NANOS); - this.deadUntil = System.nanoTime() + 
timeoutNanos; + this.deadUntilNanos = System.nanoTime() + timeoutNanos; this.failedAttempts = previousDeadHostState.failedAttempts + 1; } @@ -52,15 +52,15 @@ private DeadHostState() { * Returns the timestamp (nanos) till the host is supposed to stay dead without being retried. * After that the host should be retried. */ - long getDeadUntil() { - return deadUntil; + long getDeadUntilNanos() { + return deadUntilNanos; } @Override public String toString() { return "DeadHostState{" + "failedAttempts=" + failedAttempts + - ", deadUntil=" + deadUntil + + ", deadUntilNanos=" + deadUntilNanos + '}'; } } diff --git a/client/src/main/java/org/elasticsearch/client/RestClient.java b/client/src/main/java/org/elasticsearch/client/RestClient.java index 81354ae7a8513..c14ca8e965c69 100644 --- a/client/src/main/java/org/elasticsearch/client/RestClient.java +++ b/client/src/main/java/org/elasticsearch/client/RestClient.java @@ -224,7 +224,7 @@ private void setHeaders(HttpRequest httpRequest, Header[] requestHeaders) { private Iterator nextHost() { Set filteredHosts = new HashSet<>(hosts); for (Map.Entry entry : blacklist.entrySet()) { - if (System.nanoTime() - entry.getValue().getDeadUntil() < 0) { + if (System.nanoTime() - entry.getValue().getDeadUntilNanos() < 0) { filteredHosts.remove(entry.getKey()); } } @@ -235,7 +235,7 @@ private Iterator nextHost() { Collections.sort(sortedHosts, new Comparator>() { @Override public int compare(Map.Entry o1, Map.Entry o2) { - return Long.compare(o1.getValue().getDeadUntil(), o2.getValue().getDeadUntil()); + return Long.compare(o1.getValue().getDeadUntilNanos(), o2.getValue().getDeadUntilNanos()); } }); HttpHost deadHost = sortedHosts.get(0).getKey(); From 742f9c6eaa54f9959787e68b6a23c968d875cba2 Mon Sep 17 00:00:00 2001 From: javanna Date: Thu, 9 Jun 2016 16:34:01 +0200 Subject: [PATCH 074/103] nextHost to return Iterable rather than Iterator --- .../java/org/elasticsearch/client/RestClient.java | 12 ++++-------- 1 file changed, 4 
insertions(+), 8 deletions(-) diff --git a/client/src/main/java/org/elasticsearch/client/RestClient.java b/client/src/main/java/org/elasticsearch/client/RestClient.java index c14ca8e965c69..88adf83893cd0 100644 --- a/client/src/main/java/org/elasticsearch/client/RestClient.java +++ b/client/src/main/java/org/elasticsearch/client/RestClient.java @@ -53,7 +53,6 @@ import java.util.Collections; import java.util.Comparator; import java.util.HashSet; -import java.util.Iterator; import java.util.List; import java.util.Locale; import java.util.Map; @@ -142,10 +141,7 @@ public Response performRequest(String method, String endpoint, Map hostIterator = nextHost(); - while (hostIterator.hasNext()) { - HttpHost host = hostIterator.next(); - + for (HttpHost host : nextHost()) { if (lastSeenException != null) { long timeElapsed = TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - startTime); long timeout = retryTimeout - timeElapsed; @@ -221,7 +217,7 @@ private void setHeaders(HttpRequest httpRequest, Header[] requestHeaders) { * The iterator returned will never be empty, rather an {@link IllegalStateException} in case there are no hosts. * In case there are no healthy hosts available, or dead ones to be be retried, one dead host gets returned. */ - private Iterator nextHost() { + private Iterable nextHost() { Set filteredHosts = new HashSet<>(hosts); for (Map.Entry entry : blacklist.entrySet()) { if (System.nanoTime() - entry.getValue().getDeadUntilNanos() < 0) { @@ -240,13 +236,13 @@ public int compare(Map.Entry o1, Map.Entry rotatedHosts = new ArrayList<>(filteredHosts); //TODO is it possible to make this O(1)? 
(rotate is O(n)) Collections.rotate(rotatedHosts, rotatedHosts.size() - lastHostIndex.getAndIncrement()); - return rotatedHosts.iterator(); + return rotatedHosts; } /** From 853ea9385bde2f20dee2723adc1e9805f59c06b1 Mon Sep 17 00:00:00 2001 From: javanna Date: Thu, 9 Jun 2016 16:38:43 +0200 Subject: [PATCH 075/103] add comments on retries --- .../main/java/org/elasticsearch/client/RestClient.java | 10 ++++++---- 1 file changed, 6 insertions(+), 4 deletions(-) diff --git a/client/src/main/java/org/elasticsearch/client/RestClient.java b/client/src/main/java/org/elasticsearch/client/RestClient.java index 88adf83893cd0..3eb36b3787808 100644 --- a/client/src/main/java/org/elasticsearch/client/RestClient.java +++ b/client/src/main/java/org/elasticsearch/client/RestClient.java @@ -138,19 +138,21 @@ public Response performRequest(String method, String endpoint, Map Date: Thu, 9 Jun 2016 16:39:59 +0200 Subject: [PATCH 076/103] s/elasticsearchResponse/response --- .../org/elasticsearch/client/RestClient.java | 20 +++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/client/src/main/java/org/elasticsearch/client/RestClient.java b/client/src/main/java/org/elasticsearch/client/RestClient.java index 3eb36b3787808..3539c84293aa0 100644 --- a/client/src/main/java/org/elasticsearch/client/RestClient.java +++ b/client/src/main/java/org/elasticsearch/client/RestClient.java @@ -156,35 +156,35 @@ public Response performRequest(String method, String endpoint, Map Date: Thu, 9 Jun 2016 16:51:08 +0200 Subject: [PATCH 077/103] use switch for status codes and add comments --- .../org/elasticsearch/client/RestClient.java | 42 ++++++++++--------- 1 file changed, 22 insertions(+), 20 deletions(-) diff --git a/client/src/main/java/org/elasticsearch/client/RestClient.java b/client/src/main/java/org/elasticsearch/client/RestClient.java index 3539c84293aa0..350b750415d99 100644 --- a/client/src/main/java/org/elasticsearch/client/RestClient.java +++ 
b/client/src/main/java/org/elasticsearch/client/RestClient.java @@ -171,31 +171,33 @@ public Response performRequest(String method, String endpoint, Map Date: Thu, 9 Jun 2016 16:57:57 +0200 Subject: [PATCH 078/103] fix line length --- .../java/org/elasticsearch/client/RestClientBuilderTests.java | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/client/src/test/java/org/elasticsearch/client/RestClientBuilderTests.java b/client/src/test/java/org/elasticsearch/client/RestClientBuilderTests.java index e4e024457a42f..0269e6ada6042 100644 --- a/client/src/test/java/org/elasticsearch/client/RestClientBuilderTests.java +++ b/client/src/test/java/org/elasticsearch/client/RestClientBuilderTests.java @@ -53,7 +53,8 @@ public void testBuild() throws IOException { } try { - RestClient.builder(new HttpHost("localhost", 9200)).setMaxRetryTimeoutMillis(RandomInts.randomIntBetween(random(), Integer.MIN_VALUE, 0)); + RestClient.builder(new HttpHost("localhost", 9200)) + .setMaxRetryTimeoutMillis(RandomInts.randomIntBetween(random(), Integer.MIN_VALUE, 0)); fail("should have failed"); } catch(IllegalArgumentException e) { assertEquals("maxRetryTimeout must be greater than 0", e.getMessage()); From d8c0fad08f93580c94966a5151e17d03fff99b2a Mon Sep 17 00:00:00 2001 From: javanna Date: Thu, 9 Jun 2016 17:06:58 +0200 Subject: [PATCH 079/103] fix failing tests --- .../elasticsearch/client/sniff/HostsSnifferBuilderTests.java | 2 +- .../org/elasticsearch/client/sniff/SnifferBuilderTests.java | 4 ++-- .../java/org/elasticsearch/client/RestClientBuilderTests.java | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/client-sniffer/src/test/java/org/elasticsearch/client/sniff/HostsSnifferBuilderTests.java b/client-sniffer/src/test/java/org/elasticsearch/client/sniff/HostsSnifferBuilderTests.java index 38baccf931ead..ff9c150d954e0 100644 --- a/client-sniffer/src/test/java/org/elasticsearch/client/sniff/HostsSnifferBuilderTests.java +++ 
b/client-sniffer/src/test/java/org/elasticsearch/client/sniff/HostsSnifferBuilderTests.java @@ -53,7 +53,7 @@ public void testBuild() throws Exception { HostsSniffer.builder(client).setSniffRequestTimeoutMillis(RandomInts.randomIntBetween(random(), Integer.MIN_VALUE, 0)); fail("should have failed"); } catch(IllegalArgumentException e) { - assertEquals(e.getMessage(), "sniffRequestTimeout must be greater than 0"); + assertEquals(e.getMessage(), "sniffRequestTimeoutMillis must be greater than 0"); } HostsSniffer.Builder builder = HostsSniffer.builder(client); diff --git a/client-sniffer/src/test/java/org/elasticsearch/client/sniff/SnifferBuilderTests.java b/client-sniffer/src/test/java/org/elasticsearch/client/sniff/SnifferBuilderTests.java index 3fb152906b4d3..23d74e9f4f5a8 100644 --- a/client-sniffer/src/test/java/org/elasticsearch/client/sniff/SnifferBuilderTests.java +++ b/client-sniffer/src/test/java/org/elasticsearch/client/sniff/SnifferBuilderTests.java @@ -58,14 +58,14 @@ public void testBuild() throws Exception { Sniffer.builder(client, hostsSniffer).setSniffIntervalMillis(RandomInts.randomIntBetween(random(), Integer.MIN_VALUE, 0)); fail("should have failed"); } catch(IllegalArgumentException e) { - assertEquals("sniffInterval must be greater than 0", e.getMessage()); + assertEquals("sniffIntervalMillis must be greater than 0", e.getMessage()); } try { Sniffer.builder(client, hostsSniffer).setSniffAfterFailureDelayMillis(RandomInts.randomIntBetween(random(), Integer.MIN_VALUE, 0)); fail("should have failed"); } catch(IllegalArgumentException e) { - assertEquals("sniffAfterFailureDelay must be greater than 0", e.getMessage()); + assertEquals("sniffAfterFailureDelayMillis must be greater than 0", e.getMessage()); } try (Sniffer sniffer = Sniffer.builder(client, hostsSniffer).build()) { diff --git a/client/src/test/java/org/elasticsearch/client/RestClientBuilderTests.java b/client/src/test/java/org/elasticsearch/client/RestClientBuilderTests.java index 
0269e6ada6042..88b1406d92595 100644 --- a/client/src/test/java/org/elasticsearch/client/RestClientBuilderTests.java +++ b/client/src/test/java/org/elasticsearch/client/RestClientBuilderTests.java @@ -57,7 +57,7 @@ public void testBuild() throws IOException { .setMaxRetryTimeoutMillis(RandomInts.randomIntBetween(random(), Integer.MIN_VALUE, 0)); fail("should have failed"); } catch(IllegalArgumentException e) { - assertEquals("maxRetryTimeout must be greater than 0", e.getMessage()); + assertEquals("maxRetryTimeoutMillis must be greater than 0", e.getMessage()); } try { From 3474a145b0a259d0194a70203e7db7785998b3bb Mon Sep 17 00:00:00 2001 From: javanna Date: Thu, 9 Jun 2016 17:09:15 +0200 Subject: [PATCH 080/103] fix line length --- .../org/elasticsearch/client/sniff/SnifferBuilderTests.java | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/client-sniffer/src/test/java/org/elasticsearch/client/sniff/SnifferBuilderTests.java b/client-sniffer/src/test/java/org/elasticsearch/client/sniff/SnifferBuilderTests.java index 23d74e9f4f5a8..f09ef77f5cfd2 100644 --- a/client-sniffer/src/test/java/org/elasticsearch/client/sniff/SnifferBuilderTests.java +++ b/client-sniffer/src/test/java/org/elasticsearch/client/sniff/SnifferBuilderTests.java @@ -62,7 +62,8 @@ public void testBuild() throws Exception { } try { - Sniffer.builder(client, hostsSniffer).setSniffAfterFailureDelayMillis(RandomInts.randomIntBetween(random(), Integer.MIN_VALUE, 0)); + Sniffer.builder(client, hostsSniffer) + .setSniffAfterFailureDelayMillis(RandomInts.randomIntBetween(random(), Integer.MIN_VALUE, 0)); fail("should have failed"); } catch(IllegalArgumentException e) { assertEquals("sniffAfterFailureDelayMillis must be greater than 0", e.getMessage()); From 33bdab1a5acbb92fc0b08bad52259675bbee7a9c Mon Sep 17 00:00:00 2001 From: javanna Date: Thu, 9 Jun 2016 17:32:51 +0200 Subject: [PATCH 081/103] Build: temporarily disable dependency licenses check (till our own get filtered out) --- 
client-sniffer/build.gradle | 3 + .../licenses/client-5.0.0-SNAPSHOT.jar.sha1 | 1 - client-sniffer/licenses/client-LICENSE.txt | 202 ------------------ client-sniffer/licenses/client-NOTICE.txt | 5 - 4 files changed, 3 insertions(+), 208 deletions(-) delete mode 100644 client-sniffer/licenses/client-5.0.0-SNAPSHOT.jar.sha1 delete mode 100644 client-sniffer/licenses/client-LICENSE.txt delete mode 100644 client-sniffer/licenses/client-NOTICE.txt diff --git a/client-sniffer/build.gradle b/client-sniffer/build.gradle index d561e00718479..7c6527abb4c67 100644 --- a/client-sniffer/build.gradle +++ b/client-sniffer/build.gradle @@ -65,6 +65,9 @@ jarHell.enabled=false //NamingConventionCheck is part of test-framework, which we don't want to pull in as it depends on es core namingConventions.enabled=false +//TODO re-enable once our own jars are properly filtered out +dependencyLicenses.enabled=false + thirdPartyAudit.excludes = [ //commons-logging optional dependencies 'org.apache.avalon.framework.logger.Logger', diff --git a/client-sniffer/licenses/client-5.0.0-SNAPSHOT.jar.sha1 b/client-sniffer/licenses/client-5.0.0-SNAPSHOT.jar.sha1 deleted file mode 100644 index de30dc58fe6d0..0000000000000 --- a/client-sniffer/licenses/client-5.0.0-SNAPSHOT.jar.sha1 +++ /dev/null @@ -1 +0,0 @@ -4ba4746aa38f81ec7e8341da8c86784bf5384046 \ No newline at end of file diff --git a/client-sniffer/licenses/client-LICENSE.txt b/client-sniffer/licenses/client-LICENSE.txt deleted file mode 100644 index d645695673349..0000000000000 --- a/client-sniffer/licenses/client-LICENSE.txt +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. 
- - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. 
Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of 
the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. 
Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. 
- - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. - - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/client-sniffer/licenses/client-NOTICE.txt b/client-sniffer/licenses/client-NOTICE.txt deleted file mode 100644 index c99b958193198..0000000000000 --- a/client-sniffer/licenses/client-NOTICE.txt +++ /dev/null @@ -1,5 +0,0 @@ -Elasticsearch -Copyright 2009-2016 Elasticsearch - -This product includes software developed by The Apache Software -Foundation (http://www.apache.org/). 
From bd773359d5b603bcb5db8a41bbbda6da5b6c8dd1 Mon Sep 17 00:00:00 2001 From: javanna Date: Thu, 9 Jun 2016 17:39:52 +0200 Subject: [PATCH 082/103] [TEST] add SuppresForbidden annotation for client project --- client/build.gradle | 7 ++-- .../client/RestClientIntegTests.java | 1 + .../client/SuppressForbidden.java | 34 +++++++++++++++++++ 3 files changed, 39 insertions(+), 3 deletions(-) create mode 100644 client/src/test/java/org/elasticsearch/client/SuppressForbidden.java diff --git a/client/build.gradle b/client/build.gradle index 2c82fd74569e5..c947f94ff2691 100644 --- a/client/build.gradle +++ b/client/build.gradle @@ -53,10 +53,11 @@ forbiddenApisMain { //excludes don't seem to work though and we don't want to have our own @SuppressForbidden forbiddenApisTest.enabled=false -//forbiddenApisTest { +forbiddenApisTest { //client does not depend on core, so only jdk signatures should be checked - //signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt')] -//} + signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt')] + suppressAnnotations = ['**.SuppressForbidden'] +} //JarHell is part of es core, which we don't want to pull in jarHell.enabled=false diff --git a/client/src/test/java/org/elasticsearch/client/RestClientIntegTests.java b/client/src/test/java/org/elasticsearch/client/RestClientIntegTests.java index 4dadfc1a918dd..1505ccd55d283 100644 --- a/client/src/test/java/org/elasticsearch/client/RestClientIntegTests.java +++ b/client/src/test/java/org/elasticsearch/client/RestClientIntegTests.java @@ -56,6 +56,7 @@ * Integration test to check interaction between {@link RestClient} and {@link org.apache.http.client.HttpClient}. * Works against a real http server, one single host. 
*/ +@SuppressForbidden(reason = "uses sun HttpServer") public class RestClientIntegTests extends LuceneTestCase { private static HttpServer httpServer; diff --git a/client/src/test/java/org/elasticsearch/client/SuppressForbidden.java b/client/src/test/java/org/elasticsearch/client/SuppressForbidden.java new file mode 100644 index 0000000000000..309cc666bc4e4 --- /dev/null +++ b/client/src/test/java/org/elasticsearch/client/SuppressForbidden.java @@ -0,0 +1,34 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client; + +import java.lang.annotation.ElementType; +import java.lang.annotation.Retention; +import java.lang.annotation.RetentionPolicy; +import java.lang.annotation.Target; + +/** + * Annotation to suppress forbidden-apis errors inside a whole class, a method, or a field. 
+ */ +@Retention(RetentionPolicy.CLASS) +@Target({ ElementType.CONSTRUCTOR, ElementType.FIELD, ElementType.METHOD, ElementType.TYPE }) +public @interface SuppressForbidden { + String reason(); +} \ No newline at end of file From f38ce72004b9041b9f3b54af5b011b43b48275e3 Mon Sep 17 00:00:00 2001 From: javanna Date: Thu, 9 Jun 2016 23:44:34 +0200 Subject: [PATCH 083/103] make forbiddenApisTest work for client and client-sniffer --- client-sniffer/build.gradle | 10 +--- .../client/sniff/HostsSnifferTests.java | 45 +++++++++------ client/build.gradle | 5 -- .../client/RestClientIntegTests.java | 56 +++++++++++-------- .../client/SuppressForbidden.java | 34 ----------- 5 files changed, 65 insertions(+), 85 deletions(-) delete mode 100644 client/src/test/java/org/elasticsearch/client/SuppressForbidden.java diff --git a/client-sniffer/build.gradle b/client-sniffer/build.gradle index 7c6527abb4c67..3f1b8c37765e1 100644 --- a/client-sniffer/build.gradle +++ b/client-sniffer/build.gradle @@ -51,14 +51,10 @@ forbiddenApisMain { signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt')] } -//TODO would be nice to just exclude the classes where we use com.sun.net.httpserver.* classes -//excludes don't seem to work though and we don't want to have our own @SuppressForbidden -forbiddenApisTest.enabled=false - -//forbiddenApisTest { +forbiddenApisTest { //client does not depend on core, so only jdk signatures should be checked - //signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt')] -//} + signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt')] +} //JarHell is part of es core, which we don't want to pull in jarHell.enabled=false diff --git a/client-sniffer/src/test/java/org/elasticsearch/client/sniff/HostsSnifferTests.java b/client-sniffer/src/test/java/org/elasticsearch/client/sniff/HostsSnifferTests.java index d714e89eeb304..4ff11a24ea556 100644 --- 
a/client-sniffer/src/test/java/org/elasticsearch/client/sniff/HostsSnifferTests.java +++ b/client-sniffer/src/test/java/org/elasticsearch/client/sniff/HostsSnifferTests.java @@ -31,6 +31,7 @@ import org.apache.http.HttpHost; import org.apache.http.client.methods.HttpGet; import org.apache.lucene.util.LuceneTestCase; +import org.apache.lucene.util.SuppressForbidden; import org.elasticsearch.client.Response; import org.elasticsearch.client.ResponseException; import org.elasticsearch.client.RestClient; @@ -54,6 +55,7 @@ import static org.hamcrest.CoreMatchers.containsString; import static org.hamcrest.CoreMatchers.equalTo; +@SuppressForbidden(reason = "uses sun HttpServer") public class HostsSnifferTests extends LuceneTestCase { private int sniffRequestTimeout; @@ -109,26 +111,37 @@ public void testSniffNodes() throws IOException, URISyntaxException { } } - private static HttpServer createHttpServer(final SniffResponse sniffResponse, final int sniffTimeout) throws IOException { + private static HttpServer createHttpServer(final SniffResponse sniffResponse, final int sniffTimeoutMillis) throws IOException { HttpServer httpServer = HttpServer.create(new InetSocketAddress(0), 0); - httpServer.createContext("/_nodes/http", new HttpHandler() { - @Override - public void handle(HttpExchange httpExchange) throws IOException { - if (httpExchange.getRequestMethod().equals(HttpGet.METHOD_NAME)) { - if (httpExchange.getRequestURI().getRawQuery().equals("timeout=" + sniffTimeout + "ms")) { - String nodesInfoBody = sniffResponse.nodesInfoBody; - httpExchange.sendResponseHeaders(sniffResponse.nodesInfoResponseCode, nodesInfoBody.length()); - try (OutputStream out = httpExchange.getResponseBody()) { - out.write(nodesInfoBody.getBytes(Consts.UTF_8)); - return; - } + httpServer.createContext("/_nodes/http", new ResponseHandler(sniffTimeoutMillis, sniffResponse)); + return httpServer; + } + + @SuppressForbidden(reason = "uses sun HttpServer") + private static class ResponseHandler 
implements HttpHandler { + private final int sniffTimeoutMillis; + private final SniffResponse sniffResponse; + + ResponseHandler(int sniffTimeoutMillis, SniffResponse sniffResponse) { + this.sniffTimeoutMillis = sniffTimeoutMillis; + this.sniffResponse = sniffResponse; + } + + @Override + public void handle(HttpExchange httpExchange) throws IOException { + if (httpExchange.getRequestMethod().equals(HttpGet.METHOD_NAME)) { + if (httpExchange.getRequestURI().getRawQuery().equals("timeout=" + sniffTimeoutMillis + "ms")) { + String nodesInfoBody = sniffResponse.nodesInfoBody; + httpExchange.sendResponseHeaders(sniffResponse.nodesInfoResponseCode, nodesInfoBody.length()); + try (OutputStream out = httpExchange.getResponseBody()) { + out.write(nodesInfoBody.getBytes(Consts.UTF_8)); + return; } } - httpExchange.sendResponseHeaders(404, 0); - httpExchange.close(); } - }); - return httpServer; + httpExchange.sendResponseHeaders(404, 0); + httpExchange.close(); + } } private static SniffResponse buildSniffResponse(HostsSniffer.Scheme scheme) throws IOException { diff --git a/client/build.gradle b/client/build.gradle index c947f94ff2691..6cdc0d818ac76 100644 --- a/client/build.gradle +++ b/client/build.gradle @@ -49,14 +49,9 @@ forbiddenApisMain { signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt')] } -//TODO would be nice to just exclude the classes where we use com.sun.net.httpserver.* classes -//excludes don't seem to work though and we don't want to have our own @SuppressForbidden -forbiddenApisTest.enabled=false - forbiddenApisTest { //client does not depend on core, so only jdk signatures should be checked signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt')] - suppressAnnotations = ['**.SuppressForbidden'] } //JarHell is part of es core, which we don't want to pull in diff --git a/client/src/test/java/org/elasticsearch/client/RestClientIntegTests.java 
b/client/src/test/java/org/elasticsearch/client/RestClientIntegTests.java index 1505ccd55d283..36a7e45b72854 100644 --- a/client/src/test/java/org/elasticsearch/client/RestClientIntegTests.java +++ b/client/src/test/java/org/elasticsearch/client/RestClientIntegTests.java @@ -32,6 +32,7 @@ import org.apache.http.message.BasicHeader; import org.apache.http.util.EntityUtils; import org.apache.lucene.util.LuceneTestCase; +import org.apache.lucene.util.SuppressForbidden; import org.junit.AfterClass; import org.junit.BeforeClass; @@ -83,32 +84,41 @@ public static void startHttpServer() throws Exception { } private static void createStatusCodeContext(HttpServer httpServer, final int statusCode) { - httpServer.createContext("/" + statusCode, new HttpHandler() { - @Override - public void handle(HttpExchange httpExchange) throws IOException { - StringBuilder body = new StringBuilder(); - try (InputStreamReader reader = new InputStreamReader(httpExchange.getRequestBody(), Consts.UTF_8)) { - char[] buffer = new char[256]; - int read; - while ((read = reader.read(buffer)) != -1) { - body.append(buffer, 0, read); - } - } - Headers requestHeaders = httpExchange.getRequestHeaders(); - Headers responseHeaders = httpExchange.getResponseHeaders(); - for (Map.Entry> header : requestHeaders.entrySet()) { - responseHeaders.put(header.getKey(), header.getValue()); + httpServer.createContext("/" + statusCode, new ResponseHandler(statusCode)); + } + + @SuppressForbidden(reason = "uses sun HttpServer") + private static class ResponseHandler implements HttpHandler { + private final int statusCode; + + ResponseHandler(int statusCode) { + this.statusCode = statusCode; + } + + @Override + public void handle(HttpExchange httpExchange) throws IOException { + StringBuilder body = new StringBuilder(); + try (InputStreamReader reader = new InputStreamReader(httpExchange.getRequestBody(), Consts.UTF_8)) { + char[] buffer = new char[256]; + int read; + while ((read = reader.read(buffer)) != -1) { + 
body.append(buffer, 0, read); } - httpExchange.getRequestBody().close(); - httpExchange.sendResponseHeaders(statusCode, body.length() == 0 ? -1 : body.length()); - if (body.length() > 0) { - try (OutputStream out = httpExchange.getResponseBody()) { - out.write(body.toString().getBytes(Consts.UTF_8)); - } + } + Headers requestHeaders = httpExchange.getRequestHeaders(); + Headers responseHeaders = httpExchange.getResponseHeaders(); + for (Map.Entry> header : requestHeaders.entrySet()) { + responseHeaders.put(header.getKey(), header.getValue()); + } + httpExchange.getRequestBody().close(); + httpExchange.sendResponseHeaders(statusCode, body.length() == 0 ? -1 : body.length()); + if (body.length() > 0) { + try (OutputStream out = httpExchange.getResponseBody()) { + out.write(body.toString().getBytes(Consts.UTF_8)); } - httpExchange.close(); } - }); + httpExchange.close(); + } } @AfterClass diff --git a/client/src/test/java/org/elasticsearch/client/SuppressForbidden.java b/client/src/test/java/org/elasticsearch/client/SuppressForbidden.java deleted file mode 100644 index 309cc666bc4e4..0000000000000 --- a/client/src/test/java/org/elasticsearch/client/SuppressForbidden.java +++ /dev/null @@ -1,34 +0,0 @@ -/* - * Licensed to Elasticsearch under one or more contributor - * license agreements. See the NOTICE file distributed with - * this work for additional information regarding copyright - * ownership. Elasticsearch licenses this file to you under - * the Apache License, Version 2.0 (the "License"); you may - * not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, - * software distributed under the License is distributed on an - * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY - * KIND, either express or implied. 
See the License for the - * specific language governing permissions and limitations - * under the License. - */ - -package org.elasticsearch.client; - -import java.lang.annotation.ElementType; -import java.lang.annotation.Retention; -import java.lang.annotation.RetentionPolicy; -import java.lang.annotation.Target; - -/** - * Annotation to suppress forbidden-apis errors inside a whole class, a method, or a field. - */ -@Retention(RetentionPolicy.CLASS) -@Target({ ElementType.CONSTRUCTOR, ElementType.FIELD, ElementType.METHOD, ElementType.TYPE }) -public @interface SuppressForbidden { - String reason(); -} \ No newline at end of file From 2856ab07dee401e79719f0985fd0a1dfc6d32fb0 Mon Sep 17 00:00:00 2001 From: javanna Date: Thu, 9 Jun 2016 23:48:49 +0200 Subject: [PATCH 084/103] Build: enable dependency licenses check for client-sniffer, exclude org.elasticsearch jars from checks --- client-sniffer/build.gradle | 7 +++++-- 1 file changed, 5 insertions(+), 2 deletions(-) diff --git a/client-sniffer/build.gradle b/client-sniffer/build.gradle index 3f1b8c37765e1..0677007a635f3 100644 --- a/client-sniffer/build.gradle +++ b/client-sniffer/build.gradle @@ -61,8 +61,11 @@ jarHell.enabled=false //NamingConventionCheck is part of test-framework, which we don't want to pull in as it depends on es core namingConventions.enabled=false -//TODO re-enable once our own jars are properly filtered out -dependencyLicenses.enabled=false +dependencyLicenses { + dependencies = project.configurations.runtime.fileCollection { + it.group.startsWith('org.elasticsearch') == false + } +} thirdPartyAudit.excludes = [ //commons-logging optional dependencies From 0af9d2c7677eebbbe9d23ca75a65aa585759dbc8 Mon Sep 17 00:00:00 2001 From: javanna Date: Fri, 10 Jun 2016 09:51:47 +0200 Subject: [PATCH 085/103] Build: add animalsniffer to detect usage of java8 apis in client and client-sniffer --- buildSrc/build.gradle | 1 + client-sniffer/build.gradle | 8 +++++++- client/build.gradle | 8 +++++++- 3 files 
changed, 15 insertions(+), 2 deletions(-) diff --git a/buildSrc/build.gradle b/buildSrc/build.gradle index 623fdab3e3e57..ce29769715cc4 100644 --- a/buildSrc/build.gradle +++ b/buildSrc/build.gradle @@ -87,6 +87,7 @@ dependencies { compile 'de.thetaphi:forbiddenapis:2.1' compile 'com.bmuschko:gradle-nexus-plugin:2.3.1' compile 'org.apache.rat:apache-rat:0.11' + compile 'ru.vyarus:gradle-animalsniffer-plugin:1.0.1' } diff --git a/client-sniffer/build.gradle b/client-sniffer/build.gradle index 0677007a635f3..c772cfd084bee 100644 --- a/client-sniffer/build.gradle +++ b/client-sniffer/build.gradle @@ -21,6 +21,7 @@ import org.elasticsearch.gradle.precommit.PrecommitTasks import org.gradle.api.JavaVersion apply plugin: 'elasticsearch.build' +apply plugin: 'ru.vyarus.animalsniffer' targetCompatibility = JavaVersion.VERSION_1_7 sourceCompatibility = JavaVersion.VERSION_1_7 @@ -40,9 +41,9 @@ dependencies { testCompile "org.apache.lucene:lucene-core:${versions.lucene}" testCompile "org.apache.lucene:lucene-codecs:${versions.lucene}" testCompile "org.elasticsearch:securemock:${versions.securemock}" + signature "org.codehaus.mojo.signature:java17:1.0@signature" } -//TODO compiling from 1.8 with target 1.7 and source 1.7 is best effort, not enough to ensure we are java 7 compatible compileJava.options.compilerArgs << '-target' << '1.7' << '-source' << '1.7' << '-Xlint:all,-path,-serial,-options' compileTestJava.options.compilerArgs << '-target' << '1.7' << '-source' << '1.7' @@ -56,6 +57,11 @@ forbiddenApisTest { signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt')] } +animalsnifferTest { + //animalsniffer doesn't like sun classes usages (e.g. 
com.sun.net.httpserver.HttpServer) + exclude('org/elasticsearch/client/sniff/HostsSnifferTests.class') +} + //JarHell is part of es core, which we don't want to pull in jarHell.enabled=false //NamingConventionCheck is part of test-framework, which we don't want to pull in as it depends on es core diff --git a/client/build.gradle b/client/build.gradle index 6cdc0d818ac76..609e10e4abd92 100644 --- a/client/build.gradle +++ b/client/build.gradle @@ -21,6 +21,7 @@ import org.elasticsearch.gradle.precommit.PrecommitTasks import org.gradle.api.JavaVersion apply plugin: 'elasticsearch.build' +apply plugin: 'ru.vyarus.animalsniffer' targetCompatibility = JavaVersion.VERSION_1_7 sourceCompatibility = JavaVersion.VERSION_1_7 @@ -38,9 +39,9 @@ dependencies { testCompile "org.apache.lucene:lucene-core:${versions.lucene}" testCompile "org.apache.lucene:lucene-codecs:${versions.lucene}" testCompile "org.elasticsearch:securemock:${versions.securemock}" + signature "org.codehaus.mojo.signature:java17:1.0@signature" } -//TODO compiling from 1.8 with target 1.7 and source 1.7 is best effort, not enough to ensure we are java 7 compatible compileJava.options.compilerArgs << '-target' << '1.7' << '-source' << '1.7' << '-Xlint:all,-path,-serial,-options' compileTestJava.options.compilerArgs << '-target' << '1.7' << '-source' << '1.7' @@ -54,6 +55,11 @@ forbiddenApisTest { signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt')] } +animalsnifferTest { + //animalsniffer doesn't like sun classes usages (e.g. 
com.sun.net.httpserver.HttpServer) + exclude('org/elasticsearch/client/RestClientIntegTests.class') +} + //JarHell is part of es core, which we don't want to pull in jarHell.enabled=false //NamingConventionCheck is part of test-framework, which we don't want to pull in as it depends on es core From a5e329e5634e2b6d438c8d9b21e21153c0aabd49 Mon Sep 17 00:00:00 2001 From: javanna Date: Fri, 10 Jun 2016 10:11:21 +0200 Subject: [PATCH 086/103] [TEST] use animalsniffer annotation @IgnoreJRERequirement (similar to @SuppressForbidden) rather than exclusion pattern --- client-sniffer/build.gradle | 6 +----- .../org/elasticsearch/client/sniff/HostsSnifferTests.java | 3 +++ client/build.gradle | 6 +----- .../java/org/elasticsearch/client/RestClientIntegTests.java | 3 +++ 4 files changed, 8 insertions(+), 10 deletions(-) diff --git a/client-sniffer/build.gradle b/client-sniffer/build.gradle index c772cfd084bee..af04d85ac6132 100644 --- a/client-sniffer/build.gradle +++ b/client-sniffer/build.gradle @@ -41,6 +41,7 @@ dependencies { testCompile "org.apache.lucene:lucene-core:${versions.lucene}" testCompile "org.apache.lucene:lucene-codecs:${versions.lucene}" testCompile "org.elasticsearch:securemock:${versions.securemock}" + testCompile "org.codehaus.mojo:animal-sniffer-annotations:1.15" signature "org.codehaus.mojo.signature:java17:1.0@signature" } @@ -57,11 +58,6 @@ forbiddenApisTest { signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt')] } -animalsnifferTest { - //animalsniffer doesn't like sun classes usages (e.g. 
com.sun.net.httpserver.HttpServer) - exclude('org/elasticsearch/client/sniff/HostsSnifferTests.class') -} - //JarHell is part of es core, which we don't want to pull in jarHell.enabled=false //NamingConventionCheck is part of test-framework, which we don't want to pull in as it depends on es core diff --git a/client-sniffer/src/test/java/org/elasticsearch/client/sniff/HostsSnifferTests.java b/client-sniffer/src/test/java/org/elasticsearch/client/sniff/HostsSnifferTests.java index 4ff11a24ea556..af1b878cfd4b0 100644 --- a/client-sniffer/src/test/java/org/elasticsearch/client/sniff/HostsSnifferTests.java +++ b/client-sniffer/src/test/java/org/elasticsearch/client/sniff/HostsSnifferTests.java @@ -32,6 +32,7 @@ import org.apache.http.client.methods.HttpGet; import org.apache.lucene.util.LuceneTestCase; import org.apache.lucene.util.SuppressForbidden; +import org.codehaus.mojo.animal_sniffer.IgnoreJRERequirement; import org.elasticsearch.client.Response; import org.elasticsearch.client.ResponseException; import org.elasticsearch.client.RestClient; @@ -55,6 +56,7 @@ import static org.hamcrest.CoreMatchers.containsString; import static org.hamcrest.CoreMatchers.equalTo; +@IgnoreJRERequirement @SuppressForbidden(reason = "uses sun HttpServer") public class HostsSnifferTests extends LuceneTestCase { @@ -117,6 +119,7 @@ private static HttpServer createHttpServer(final SniffResponse sniffResponse, fi return httpServer; } + @IgnoreJRERequirement @SuppressForbidden(reason = "uses sun HttpServer") private static class ResponseHandler implements HttpHandler { private final int sniffTimeoutMillis; diff --git a/client/build.gradle b/client/build.gradle index 609e10e4abd92..f905114fd0fb7 100644 --- a/client/build.gradle +++ b/client/build.gradle @@ -39,6 +39,7 @@ dependencies { testCompile "org.apache.lucene:lucene-core:${versions.lucene}" testCompile "org.apache.lucene:lucene-codecs:${versions.lucene}" testCompile "org.elasticsearch:securemock:${versions.securemock}" + testCompile 
"org.codehaus.mojo:animal-sniffer-annotations:1.15" signature "org.codehaus.mojo.signature:java17:1.0@signature" } @@ -55,11 +56,6 @@ forbiddenApisTest { signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt')] } -animalsnifferTest { - //animalsniffer doesn't like sun classes usages (e.g. com.sun.net.httpserver.HttpServer) - exclude('org/elasticsearch/client/RestClientIntegTests.class') -} - //JarHell is part of es core, which we don't want to pull in jarHell.enabled=false //NamingConventionCheck is part of test-framework, which we don't want to pull in as it depends on es core diff --git a/client/src/test/java/org/elasticsearch/client/RestClientIntegTests.java b/client/src/test/java/org/elasticsearch/client/RestClientIntegTests.java index 36a7e45b72854..6ee2dc028446d 100644 --- a/client/src/test/java/org/elasticsearch/client/RestClientIntegTests.java +++ b/client/src/test/java/org/elasticsearch/client/RestClientIntegTests.java @@ -33,6 +33,7 @@ import org.apache.http.util.EntityUtils; import org.apache.lucene.util.LuceneTestCase; import org.apache.lucene.util.SuppressForbidden; +import org.codehaus.mojo.animal_sniffer.IgnoreJRERequirement; import org.junit.AfterClass; import org.junit.BeforeClass; @@ -57,6 +58,7 @@ * Integration test to check interaction between {@link RestClient} and {@link org.apache.http.client.HttpClient}. * Works against a real http server, one single host. 
*/ +@IgnoreJRERequirement @SuppressForbidden(reason = "uses sun HttpServer") public class RestClientIntegTests extends LuceneTestCase { @@ -87,6 +89,7 @@ private static void createStatusCodeContext(HttpServer httpServer, final int sta httpServer.createContext("/" + statusCode, new ResponseHandler(statusCode)); } + @IgnoreJRERequirement @SuppressForbidden(reason = "uses sun HttpServer") private static class ResponseHandler implements HttpHandler { private final int statusCode; From 432efc75fbd46a86955d0d42fd4d38bfd799dbbd Mon Sep 17 00:00:00 2001 From: javanna Date: Fri, 10 Jun 2016 11:39:08 +0200 Subject: [PATCH 087/103] catch Exception rather than Throwable --- .../main/java/org/elasticsearch/client/sniff/Sniffer.java | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/client-sniffer/src/main/java/org/elasticsearch/client/sniff/Sniffer.java b/client-sniffer/src/main/java/org/elasticsearch/client/sniff/Sniffer.java index 37240741b3d85..483b1498b5799 100644 --- a/client-sniffer/src/main/java/org/elasticsearch/client/sniff/Sniffer.java +++ b/client-sniffer/src/main/java/org/elasticsearch/client/sniff/Sniffer.java @@ -96,8 +96,8 @@ synchronized void scheduleNextRun(long delayMillis) { } logger.debug("scheduling next sniff in " + delayMillis + " ms"); this.scheduledFuture = this.scheduledExecutorService.schedule(this, delayMillis, TimeUnit.MILLISECONDS); - } catch(Throwable t) { - logger.error("error while scheduling next sniffer task", t); + } catch(Exception e) { + logger.error("error while scheduling next sniffer task", e); } } } @@ -120,8 +120,8 @@ void sniff(HttpHost excludeHost, long nextSniffDelayMillis) { } logger.debug("sniffed nodes: " + sniffedNodes); this.restClient.setHosts(sniffedNodes.toArray(new HttpHost[sniffedNodes.size()])); - } catch (Throwable t) { - logger.error("error while sniffing nodes", t); + } catch (Exception e) { + logger.error("error while sniffing nodes", e); } finally { scheduleNextRun(nextSniffDelayMillis); 
running.set(false); From 6db90da25e9c5c918116102662915b87cc464aec Mon Sep 17 00:00:00 2001 From: javanna Date: Fri, 10 Jun 2016 11:40:13 +0200 Subject: [PATCH 088/103] [TEST] Remove usage of @SuppressForbidden due to sun HttpServer usage, resolve some violations that were hidden by it --- client-sniffer/build.gradle | 3 +++ .../org/elasticsearch/client/sniff/HostsSnifferTests.java | 8 +++----- client/build.gradle | 3 +++ .../org/elasticsearch/client/RestClientIntegTests.java | 8 +++----- 4 files changed, 12 insertions(+), 10 deletions(-) diff --git a/client-sniffer/build.gradle b/client-sniffer/build.gradle index af04d85ac6132..a8e4b684bf6d1 100644 --- a/client-sniffer/build.gradle +++ b/client-sniffer/build.gradle @@ -54,6 +54,9 @@ forbiddenApisMain { } forbiddenApisTest { + //we are excluding jdk-non-portable to allow for com.sun.net.httpserver.* usage + //TODO remove this line once https://github.com/policeman-tools/forbidden-apis/issues/103 gets solved + bundledSignatures = ['jdk-unsafe', 'jdk-deprecated', 'jdk-system-out'] //client does not depend on core, so only jdk signatures should be checked signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt')] } diff --git a/client-sniffer/src/test/java/org/elasticsearch/client/sniff/HostsSnifferTests.java b/client-sniffer/src/test/java/org/elasticsearch/client/sniff/HostsSnifferTests.java index af1b878cfd4b0..06f8fd6940e13 100644 --- a/client-sniffer/src/test/java/org/elasticsearch/client/sniff/HostsSnifferTests.java +++ b/client-sniffer/src/test/java/org/elasticsearch/client/sniff/HostsSnifferTests.java @@ -31,7 +31,6 @@ import org.apache.http.HttpHost; import org.apache.http.client.methods.HttpGet; import org.apache.lucene.util.LuceneTestCase; -import org.apache.lucene.util.SuppressForbidden; import org.codehaus.mojo.animal_sniffer.IgnoreJRERequirement; import org.elasticsearch.client.Response; import org.elasticsearch.client.ResponseException; @@ -42,6 +41,7 @@ import java.io.IOException; 
import java.io.OutputStream; import java.io.StringWriter; +import java.net.InetAddress; import java.net.InetSocketAddress; import java.net.URISyntaxException; import java.util.ArrayList; @@ -57,7 +57,6 @@ import static org.hamcrest.CoreMatchers.equalTo; @IgnoreJRERequirement -@SuppressForbidden(reason = "uses sun HttpServer") public class HostsSnifferTests extends LuceneTestCase { private int sniffRequestTimeout; @@ -84,7 +83,7 @@ public void stopHttpServer() throws IOException { } public void testSniffNodes() throws IOException, URISyntaxException { - HttpHost httpHost = new HttpHost(httpServer.getAddress().getHostName(), httpServer.getAddress().getPort()); + HttpHost httpHost = new HttpHost(httpServer.getAddress().getHostString(), httpServer.getAddress().getPort()); try (RestClient restClient = RestClient.builder(httpHost).build()) { HostsSniffer sniffer = new HostsSniffer(restClient, sniffRequestTimeout, scheme); try { @@ -114,13 +113,12 @@ public void testSniffNodes() throws IOException, URISyntaxException { } private static HttpServer createHttpServer(final SniffResponse sniffResponse, final int sniffTimeoutMillis) throws IOException { - HttpServer httpServer = HttpServer.create(new InetSocketAddress(0), 0); + HttpServer httpServer = HttpServer.create(new InetSocketAddress(InetAddress.getLoopbackAddress(), 0), 0); httpServer.createContext("/_nodes/http", new ResponseHandler(sniffTimeoutMillis, sniffResponse)); return httpServer; } @IgnoreJRERequirement - @SuppressForbidden(reason = "uses sun HttpServer") private static class ResponseHandler implements HttpHandler { private final int sniffTimeoutMillis; private final SniffResponse sniffResponse; diff --git a/client/build.gradle b/client/build.gradle index f905114fd0fb7..c2479b31d1eee 100644 --- a/client/build.gradle +++ b/client/build.gradle @@ -52,6 +52,9 @@ forbiddenApisMain { } forbiddenApisTest { + //we are excluding jdk-non-portable to allow for com.sun.net.httpserver.* usage + //TODO remove this line once 
https://github.com/policeman-tools/forbidden-apis/issues/103 gets solved + bundledSignatures = ['jdk-unsafe', 'jdk-deprecated', 'jdk-system-out'] //client does not depend on core, so only jdk signatures should be checked signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt')] } diff --git a/client/src/test/java/org/elasticsearch/client/RestClientIntegTests.java b/client/src/test/java/org/elasticsearch/client/RestClientIntegTests.java index 6ee2dc028446d..7c4be56040b33 100644 --- a/client/src/test/java/org/elasticsearch/client/RestClientIntegTests.java +++ b/client/src/test/java/org/elasticsearch/client/RestClientIntegTests.java @@ -32,7 +32,6 @@ import org.apache.http.message.BasicHeader; import org.apache.http.util.EntityUtils; import org.apache.lucene.util.LuceneTestCase; -import org.apache.lucene.util.SuppressForbidden; import org.codehaus.mojo.animal_sniffer.IgnoreJRERequirement; import org.junit.AfterClass; import org.junit.BeforeClass; @@ -40,6 +39,7 @@ import java.io.IOException; import java.io.InputStreamReader; import java.io.OutputStream; +import java.net.InetAddress; import java.net.InetSocketAddress; import java.util.Arrays; import java.util.Collections; @@ -59,7 +59,6 @@ * Works against a real http server, one single host. 
*/ @IgnoreJRERequirement -@SuppressForbidden(reason = "uses sun HttpServer") public class RestClientIntegTests extends LuceneTestCase { private static HttpServer httpServer; @@ -68,7 +67,7 @@ public class RestClientIntegTests extends LuceneTestCase { @BeforeClass public static void startHttpServer() throws Exception { - httpServer = HttpServer.create(new InetSocketAddress(0), 0); + httpServer = HttpServer.create(new InetSocketAddress(InetAddress.getLoopbackAddress(), 0), 0); httpServer.start(); //returns a different status code depending on the path for (int statusCode : getAllStatusCodes()) { @@ -81,7 +80,7 @@ public static void startHttpServer() throws Exception { String headerValue = RandomStrings.randomAsciiOfLengthBetween(random(), 3, 10); defaultHeaders[i] = new BasicHeader(headerName, headerValue); } - restClient = RestClient.builder(new HttpHost(httpServer.getAddress().getHostName(), httpServer.getAddress().getPort())) + restClient = RestClient.builder(new HttpHost(httpServer.getAddress().getHostString(), httpServer.getAddress().getPort())) .setDefaultHeaders(defaultHeaders).build(); } @@ -90,7 +89,6 @@ private static void createStatusCodeContext(HttpServer httpServer, final int sta } @IgnoreJRERequirement - @SuppressForbidden(reason = "uses sun HttpServer") private static class ResponseHandler implements HttpHandler { private final int statusCode; From 3d7186c81fe260d54dacc12bb3573a6c28ce2155 Mon Sep 17 00:00:00 2001 From: javanna Date: Fri, 10 Jun 2016 11:40:35 +0200 Subject: [PATCH 089/103] make DeadHostState final --- .../src/main/java/org/elasticsearch/client/DeadHostState.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/client/src/main/java/org/elasticsearch/client/DeadHostState.java b/client/src/main/java/org/elasticsearch/client/DeadHostState.java index 89a1720d46e18..44b7532d5ded9 100644 --- a/client/src/main/java/org/elasticsearch/client/DeadHostState.java +++ b/client/src/main/java/org/elasticsearch/client/DeadHostState.java 
@@ -26,7 +26,7 @@ * when the host should be retried (based on number of previous failed attempts). * Class is immutable, a new copy of it should be created each time the state has to be changed. */ -class DeadHostState { +final class DeadHostState { private static final long MIN_CONNECTION_TIMEOUT_NANOS = TimeUnit.MINUTES.toNanos(1); private static final long MAX_CONNECTION_TIMEOUT_NANOS = TimeUnit.MINUTES.toNanos(30); From 656422cff62600d166dfbf4962dd9400ae50e7c9 Mon Sep 17 00:00:00 2001 From: javanna Date: Fri, 10 Jun 2016 11:42:25 +0200 Subject: [PATCH 090/103] move shutdownNow within try block --- .../src/main/java/org/elasticsearch/client/sniff/Sniffer.java | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/client-sniffer/src/main/java/org/elasticsearch/client/sniff/Sniffer.java b/client-sniffer/src/main/java/org/elasticsearch/client/sniff/Sniffer.java index 483b1498b5799..c1aeb3efe3666 100644 --- a/client-sniffer/src/main/java/org/elasticsearch/client/sniff/Sniffer.java +++ b/client-sniffer/src/main/java/org/elasticsearch/client/sniff/Sniffer.java @@ -135,10 +135,10 @@ synchronized void shutdown() { if (scheduledExecutorService.awaitTermination(1000, TimeUnit.MILLISECONDS)) { return; } + scheduledExecutorService.shutdownNow(); } catch (InterruptedException e) { Thread.currentThread().interrupt(); } - scheduledExecutorService.shutdownNow(); } } From 777d438f4861dbbf50cc191d58c4688ecd72324b Mon Sep 17 00:00:00 2001 From: javanna Date: Fri, 10 Jun 2016 12:41:49 +0200 Subject: [PATCH 091/103] [TEST] use jdk-internal bundled signature (rather than the previously removed jdk-non-portable) --- client-sniffer/build.gradle | 6 +++--- client/build.gradle | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/client-sniffer/build.gradle b/client-sniffer/build.gradle index a8e4b684bf6d1..a29f07f1c422a 100644 --- a/client-sniffer/build.gradle +++ b/client-sniffer/build.gradle @@ -54,9 +54,9 @@ forbiddenApisMain { } forbiddenApisTest { - 
//we are excluding jdk-non-portable to allow for com.sun.net.httpserver.* usage - //TODO remove this line once https://github.com/policeman-tools/forbidden-apis/issues/103 gets solved - bundledSignatures = ['jdk-unsafe', 'jdk-deprecated', 'jdk-system-out'] + //we are using jdk-internal instead of jdk-non-portable to allow for com.sun.net.httpserver.* usage + bundledSignatures -= 'jdk-non-portable' + bundledSignatures += 'jdk-internal' //client does not depend on core, so only jdk signatures should be checked signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt')] } diff --git a/client/build.gradle b/client/build.gradle index c2479b31d1eee..7a685fabfaf08 100644 --- a/client/build.gradle +++ b/client/build.gradle @@ -52,9 +52,9 @@ forbiddenApisMain { } forbiddenApisTest { - //we are excluding jdk-non-portable to allow for com.sun.net.httpserver.* usage - //TODO remove this line once https://github.com/policeman-tools/forbidden-apis/issues/103 gets solved - bundledSignatures = ['jdk-unsafe', 'jdk-deprecated', 'jdk-system-out'] + //we are using jdk-internal instead of jdk-non-portable to allow for com.sun.net.httpserver.* usage + bundledSignatures -= 'jdk-non-portable' + bundledSignatures += 'jdk-internal' //client does not depend on core, so only jdk signatures should be checked signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt')] } From 50b6f4c02fe3f995c152c02cdb47fa65111460f4 Mon Sep 17 00:00:00 2001 From: javanna Date: Mon, 13 Jun 2016 09:20:39 +0200 Subject: [PATCH 092/103] Build: changed forbidden-apis targetCompatibility to 1.7 for client and client-sniffer --- client-sniffer/build.gradle | 2 ++ client/build.gradle | 2 ++ 2 files changed, 4 insertions(+) diff --git a/client-sniffer/build.gradle b/client-sniffer/build.gradle index a29f07f1c422a..af46a5a452c8f 100644 --- a/client-sniffer/build.gradle +++ b/client-sniffer/build.gradle @@ -51,6 +51,7 @@ compileTestJava.options.compilerArgs << '-target' << '1.7' << 
'-source' << '1.7' forbiddenApisMain { //client does not depend on core, so only jdk signatures should be checked signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt')] + targetCompatibility = 1.7 } forbiddenApisTest { @@ -59,6 +60,7 @@ forbiddenApisTest { bundledSignatures += 'jdk-internal' //client does not depend on core, so only jdk signatures should be checked signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt')] + targetCompatibility = 1.7 } //JarHell is part of es core, which we don't want to pull in diff --git a/client/build.gradle b/client/build.gradle index 7a685fabfaf08..c9c619f80ea13 100644 --- a/client/build.gradle +++ b/client/build.gradle @@ -49,6 +49,7 @@ compileTestJava.options.compilerArgs << '-target' << '1.7' << '-source' << '1.7' forbiddenApisMain { //client does not depend on core, so only jdk signatures should be checked signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt')] + targetCompatibility = 1.7 } forbiddenApisTest { @@ -57,6 +58,7 @@ forbiddenApisTest { bundledSignatures += 'jdk-internal' //client does not depend on core, so only jdk signatures should be checked signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt')] + targetCompatibility = 1.7 } //JarHell is part of es core, which we don't want to pull in From 3cd201e67e49e1fd6cd36ab77777eb49737c8e1b Mon Sep 17 00:00:00 2001 From: javanna Date: Mon, 13 Jun 2016 12:02:09 +0200 Subject: [PATCH 093/103] [TEST] add comment on using animal-sniffer suppress annotation --- .../java/org/elasticsearch/client/sniff/HostsSnifferTests.java | 2 ++ .../java/org/elasticsearch/client/RestClientIntegTests.java | 2 ++ 2 files changed, 4 insertions(+) diff --git a/client-sniffer/src/test/java/org/elasticsearch/client/sniff/HostsSnifferTests.java b/client-sniffer/src/test/java/org/elasticsearch/client/sniff/HostsSnifferTests.java index 06f8fd6940e13..c6daf9c6f40c3 100644 --- 
a/client-sniffer/src/test/java/org/elasticsearch/client/sniff/HostsSnifferTests.java +++ b/client-sniffer/src/test/java/org/elasticsearch/client/sniff/HostsSnifferTests.java @@ -56,6 +56,7 @@ import static org.hamcrest.CoreMatchers.containsString; import static org.hamcrest.CoreMatchers.equalTo; +//animal-sniffer doesn't like our usage of com.sun.net.httpserver.* classes @IgnoreJRERequirement public class HostsSnifferTests extends LuceneTestCase { @@ -118,6 +119,7 @@ private static HttpServer createHttpServer(final SniffResponse sniffResponse, fi return httpServer; } + //animal-sniffer doesn't like our usage of com.sun.net.httpserver.* classes @IgnoreJRERequirement private static class ResponseHandler implements HttpHandler { private final int sniffTimeoutMillis; diff --git a/client/src/test/java/org/elasticsearch/client/RestClientIntegTests.java b/client/src/test/java/org/elasticsearch/client/RestClientIntegTests.java index 7c4be56040b33..7bc6dc76c91b6 100644 --- a/client/src/test/java/org/elasticsearch/client/RestClientIntegTests.java +++ b/client/src/test/java/org/elasticsearch/client/RestClientIntegTests.java @@ -58,6 +58,7 @@ * Integration test to check interaction between {@link RestClient} and {@link org.apache.http.client.HttpClient}. * Works against a real http server, one single host. 
*/ +//animal-sniffer doesn't like our usage of com.sun.net.httpserver.* classes @IgnoreJRERequirement public class RestClientIntegTests extends LuceneTestCase { @@ -88,6 +89,7 @@ private static void createStatusCodeContext(HttpServer httpServer, final int sta httpServer.createContext("/" + statusCode, new ResponseHandler(statusCode)); } + //animal-sniffer doesn't like our usage of com.sun.net.httpserver.* classes @IgnoreJRERequirement private static class ResponseHandler implements HttpHandler { private final int statusCode; From 8f7b7fb81312fbf43d4e6767bd5639e5fecd59b7 Mon Sep 17 00:00:00 2001 From: javanna Date: Mon, 13 Jun 2016 12:59:30 +0200 Subject: [PATCH 094/103] added comments to clarify RequestLogger and DeadHostState --- .../main/java/org/elasticsearch/client/DeadHostState.java | 5 +++++ .../main/java/org/elasticsearch/client/RequestLogger.java | 2 ++ 2 files changed, 7 insertions(+) diff --git a/client/src/main/java/org/elasticsearch/client/DeadHostState.java b/client/src/main/java/org/elasticsearch/client/DeadHostState.java index 44b7532d5ded9..a7b222da70e1d 100644 --- a/client/src/main/java/org/elasticsearch/client/DeadHostState.java +++ b/client/src/main/java/org/elasticsearch/client/DeadHostState.java @@ -41,6 +41,11 @@ private DeadHostState() { this.deadUntilNanos = System.nanoTime() + MIN_CONNECTION_TIMEOUT_NANOS; } + /** + * We keep track of how many times a certain node fails consecutively. The higher that number is the longer we will wait + * to retry that same node again. Minimum is 1 minute (for a node the only failed once), maximum is 30 minutes (for a node + * that failed many consecutive times). 
+ */ DeadHostState(DeadHostState previousDeadHostState) { long timeoutNanos = (long)Math.min(MIN_CONNECTION_TIMEOUT_NANOS * 2 * Math.pow(2, previousDeadHostState.failedAttempts * 0.5 - 1), MAX_CONNECTION_TIMEOUT_NANOS); diff --git a/client/src/main/java/org/elasticsearch/client/RequestLogger.java b/client/src/main/java/org/elasticsearch/client/RequestLogger.java index f1b406b5cf32b..c08e0c6994194 100644 --- a/client/src/main/java/org/elasticsearch/client/RequestLogger.java +++ b/client/src/main/java/org/elasticsearch/client/RequestLogger.java @@ -40,6 +40,8 @@ /** * Helper class that exposes static methods to unify the way requests are logged. * Includes trace logging to log complete requests and responses in curl format. + * Useful for debugging, manually sending logged requests via curl and checking their responses. + * Trace logging is a feature that all the language clients provide. */ final class RequestLogger { From 116805b28b8eaf386e39ca1c132704b7ea94fab7 Mon Sep 17 00:00:00 2001 From: javanna Date: Mon, 13 Jun 2016 14:02:01 +0200 Subject: [PATCH 095/103] remove TODO around copying hosts when rotating the collection, it's not a problem for now --- client/src/main/java/org/elasticsearch/client/RestClient.java | 1 - 1 file changed, 1 deletion(-) diff --git a/client/src/main/java/org/elasticsearch/client/RestClient.java b/client/src/main/java/org/elasticsearch/client/RestClient.java index 350b750415d99..b871e2e134649 100644 --- a/client/src/main/java/org/elasticsearch/client/RestClient.java +++ b/client/src/main/java/org/elasticsearch/client/RestClient.java @@ -244,7 +244,6 @@ public int compare(Map.Entry o1, Map.Entry rotatedHosts = new ArrayList<>(filteredHosts); - //TODO is it possible to make this O(1)? 
(rotate is O(n)) Collections.rotate(rotatedHosts, rotatedHosts.size() - lastHostIndex.getAndIncrement()); return rotatedHosts; } From caa6c962596af6c1eb54ed636440aab320ce227f Mon Sep 17 00:00:00 2001 From: javanna Date: Tue, 14 Jun 2016 09:43:31 +0200 Subject: [PATCH 096/103] Build: make client and client-sniffer depend on lucene-test version 5, last 1.7 compatible version --- client-sniffer/build.gradle | 7 ++++--- client/build.gradle | 7 ++++--- 2 files changed, 8 insertions(+), 6 deletions(-) diff --git a/client-sniffer/build.gradle b/client-sniffer/build.gradle index af46a5a452c8f..5b224168492d5 100644 --- a/client-sniffer/build.gradle +++ b/client-sniffer/build.gradle @@ -37,9 +37,10 @@ dependencies { testCompile "com.carrotsearch.randomizedtesting:randomizedtesting-runner:${versions.randomizedrunner}" testCompile "junit:junit:${versions.junit}" testCompile "org.hamcrest:hamcrest-all:${versions.hamcrest}" - testCompile "org.apache.lucene:lucene-test-framework:${versions.lucene}" - testCompile "org.apache.lucene:lucene-core:${versions.lucene}" - testCompile "org.apache.lucene:lucene-codecs:${versions.lucene}" + //we use the last lucene-test version that is compatible with java 1.7 + testCompile "org.apache.lucene:lucene-test-framework:5.5.1" + testCompile "org.apache.lucene:lucene-core:5.5.1" + testCompile "org.apache.lucene:lucene-codecs:5.5.1" testCompile "org.elasticsearch:securemock:${versions.securemock}" testCompile "org.codehaus.mojo:animal-sniffer-annotations:1.15" signature "org.codehaus.mojo.signature:java17:1.0@signature" diff --git a/client/build.gradle b/client/build.gradle index c9c619f80ea13..83c96d07d3436 100644 --- a/client/build.gradle +++ b/client/build.gradle @@ -35,9 +35,10 @@ dependencies { testCompile "com.carrotsearch.randomizedtesting:randomizedtesting-runner:${versions.randomizedrunner}" testCompile "junit:junit:${versions.junit}" testCompile "org.hamcrest:hamcrest-all:${versions.hamcrest}" - testCompile 
"org.apache.lucene:lucene-test-framework:${versions.lucene}" - testCompile "org.apache.lucene:lucene-core:${versions.lucene}" - testCompile "org.apache.lucene:lucene-codecs:${versions.lucene}" + //we use the last lucene-test version that is compatible with java 1.7 + testCompile "org.apache.lucene:lucene-test-framework:5.5.1" + testCompile "org.apache.lucene:lucene-core:5.5.1" + testCompile "org.apache.lucene:lucene-codecs:5.5.1" testCompile "org.elasticsearch:securemock:${versions.securemock}" testCompile "org.codehaus.mojo:animal-sniffer-annotations:1.15" signature "org.codehaus.mojo.signature:java17:1.0@signature" From 1932f6bc7c4c5d77f79ad27320e3dcdc3162ccfc Mon Sep 17 00:00:00 2001 From: javanna Date: Tue, 14 Jun 2016 13:45:38 +0200 Subject: [PATCH 097/103] Rename RequestLogger#log methods to distinguish between the two One method is to log a request that yielded a response, the other one for a failed request --- .../main/java/org/elasticsearch/client/RequestLogger.java | 4 ++-- .../src/main/java/org/elasticsearch/client/RestClient.java | 6 +++--- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/client/src/main/java/org/elasticsearch/client/RequestLogger.java b/client/src/main/java/org/elasticsearch/client/RequestLogger.java index c08e0c6994194..7268a44998149 100644 --- a/client/src/main/java/org/elasticsearch/client/RequestLogger.java +++ b/client/src/main/java/org/elasticsearch/client/RequestLogger.java @@ -53,7 +53,7 @@ private RequestLogger() { /** * Logs a request that yielded a response */ - static void log(Log logger, String message, HttpUriRequest request, HttpHost host, HttpResponse httpResponse) { + static void logResponse(Log logger, String message, HttpUriRequest request, HttpHost host, HttpResponse httpResponse) { if (logger.isDebugEnabled()) { logger.debug(message + " [" + request.getMethod() + " " + host + request.getRequestLine().getUri() + "] [" + httpResponse.getStatusLine() + "]"); @@ -81,7 +81,7 @@ static void log(Log logger, 
String message, HttpUriRequest request, HttpHost hos /** * Logs a request that failed */ - static void log(Log logger, String message, HttpUriRequest request, HttpHost host, IOException e) { + static void logFailedRequest(Log logger, String message, HttpUriRequest request, HttpHost host, IOException e) { logger.debug(message + " [" + request.getMethod() + " " + host + request.getRequestLine().getUri() + "]", e); if (logger.isTraceEnabled()) { String traceRequest; diff --git a/client/src/main/java/org/elasticsearch/client/RestClient.java b/client/src/main/java/org/elasticsearch/client/RestClient.java index b871e2e134649..b2bd603068117 100644 --- a/client/src/main/java/org/elasticsearch/client/RestClient.java +++ b/client/src/main/java/org/elasticsearch/client/RestClient.java @@ -160,7 +160,7 @@ public Response performRequest(String method, String endpoint, Map Date: Tue, 14 Jun 2016 14:14:03 +0200 Subject: [PATCH 098/103] remove message parameter from RequestLogger methods This prevents useless string allocation. 
--- .../java/org/elasticsearch/client/RequestLogger.java | 11 +++++------ .../java/org/elasticsearch/client/RestClient.java | 6 +++--- 2 files changed, 8 insertions(+), 9 deletions(-) diff --git a/client/src/main/java/org/elasticsearch/client/RequestLogger.java b/client/src/main/java/org/elasticsearch/client/RequestLogger.java index 7268a44998149..ad8204be2bb47 100644 --- a/client/src/main/java/org/elasticsearch/client/RequestLogger.java +++ b/client/src/main/java/org/elasticsearch/client/RequestLogger.java @@ -53,12 +53,11 @@ private RequestLogger() { /** * Logs a request that yielded a response */ - static void logResponse(Log logger, String message, HttpUriRequest request, HttpHost host, HttpResponse httpResponse) { + static void logResponse(Log logger, HttpUriRequest request, HttpHost host, HttpResponse httpResponse) { if (logger.isDebugEnabled()) { - logger.debug(message + " [" + request.getMethod() + " " + host + request.getRequestLine().getUri() + - "] [" + httpResponse.getStatusLine() + "]"); + logger.debug("request [" + request.getMethod() + " " + host + request.getRequestLine().getUri() + + "] returned [" + httpResponse.getStatusLine() + "]"); } - if (tracer.isTraceEnabled()) { String requestLine; try { @@ -81,8 +80,8 @@ static void logResponse(Log logger, String message, HttpUriRequest request, Http /** * Logs a request that failed */ - static void logFailedRequest(Log logger, String message, HttpUriRequest request, HttpHost host, IOException e) { - logger.debug(message + " [" + request.getMethod() + " " + host + request.getRequestLine().getUri() + "]", e); + static void logFailedRequest(Log logger, HttpUriRequest request, HttpHost host, IOException e) { + logger.debug("request [" + request.getMethod() + " " + host + request.getRequestLine().getUri() + "] failed", e); if (logger.isTraceEnabled()) { String traceRequest; try { diff --git a/client/src/main/java/org/elasticsearch/client/RestClient.java 
b/client/src/main/java/org/elasticsearch/client/RestClient.java index b2bd603068117..2d1c50af3cb8d 100644 --- a/client/src/main/java/org/elasticsearch/client/RestClient.java +++ b/client/src/main/java/org/elasticsearch/client/RestClient.java @@ -160,7 +160,7 @@ public Response performRequest(String method, String endpoint, Map Date: Tue, 14 Jun 2016 23:16:38 +0200 Subject: [PATCH 099/103] Build: targetCompatibility and sourceCompatibility are enough to make sure we compile client and client-sniffer with target and source 1.7 --- client-sniffer/build.gradle | 3 --- client/build.gradle | 3 --- 2 files changed, 6 deletions(-) diff --git a/client-sniffer/build.gradle b/client-sniffer/build.gradle index 5b224168492d5..c01f730e09494 100644 --- a/client-sniffer/build.gradle +++ b/client-sniffer/build.gradle @@ -46,9 +46,6 @@ dependencies { signature "org.codehaus.mojo.signature:java17:1.0@signature" } -compileJava.options.compilerArgs << '-target' << '1.7' << '-source' << '1.7' << '-Xlint:all,-path,-serial,-options' -compileTestJava.options.compilerArgs << '-target' << '1.7' << '-source' << '1.7' - forbiddenApisMain { //client does not depend on core, so only jdk signatures should be checked signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt')] diff --git a/client/build.gradle b/client/build.gradle index 83c96d07d3436..30225382e41bb 100644 --- a/client/build.gradle +++ b/client/build.gradle @@ -44,9 +44,6 @@ dependencies { signature "org.codehaus.mojo.signature:java17:1.0@signature" } -compileJava.options.compilerArgs << '-target' << '1.7' << '-source' << '1.7' << '-Xlint:all,-path,-serial,-options' -compileTestJava.options.compilerArgs << '-target' << '1.7' << '-source' << '1.7' - forbiddenApisMain { //client does not depend on core, so only jdk signatures should be checked signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt')] From 7c8013f9fdfe7a7e6754d1ebe4f968a7d6c84506 Mon Sep 17 00:00:00 2001 From: javanna Date: Wed, 
15 Jun 2016 11:43:48 +0200 Subject: [PATCH 100/103] Build: remove explicit targetCompatibility from forbiddenapis config targetCompatibility set to the project is enough. --- client-sniffer/build.gradle | 2 -- client/build.gradle | 2 -- 2 files changed, 4 deletions(-) diff --git a/client-sniffer/build.gradle b/client-sniffer/build.gradle index c01f730e09494..368cdaa073f9c 100644 --- a/client-sniffer/build.gradle +++ b/client-sniffer/build.gradle @@ -49,7 +49,6 @@ dependencies { forbiddenApisMain { //client does not depend on core, so only jdk signatures should be checked signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt')] - targetCompatibility = 1.7 } forbiddenApisTest { @@ -58,7 +57,6 @@ forbiddenApisTest { bundledSignatures += 'jdk-internal' //client does not depend on core, so only jdk signatures should be checked signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt')] - targetCompatibility = 1.7 } //JarHell is part of es core, which we don't want to pull in diff --git a/client/build.gradle b/client/build.gradle index 30225382e41bb..afc2e6e1e7b8b 100644 --- a/client/build.gradle +++ b/client/build.gradle @@ -47,7 +47,6 @@ dependencies { forbiddenApisMain { //client does not depend on core, so only jdk signatures should be checked signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt')] - targetCompatibility = 1.7 } forbiddenApisTest { @@ -56,7 +55,6 @@ forbiddenApisTest { bundledSignatures += 'jdk-internal' //client does not depend on core, so only jdk signatures should be checked signaturesURLs = [PrecommitTasks.getResource('/forbidden/jdk-signatures.txt')] - targetCompatibility = 1.7 } //JarHell is part of es core, which we don't want to pull in From 8c6037428490f266b3a73133e37acd44949c192a Mon Sep 17 00:00:00 2001 From: javanna Date: Fri, 17 Jun 2016 11:37:01 +0200 Subject: [PATCH 101/103] Build: do not load integ test class if --skip-integ-tests-in-disguise is specified in 
NamingConventionsCheck Projects that don't depend on elasticsearch-test fail otherwise because org.elasticsearch.test.EsIntegTestCase (default integ test class) is not in the classpath. They should provide their own integ test base class, but having integration tests should not be mandatory. One can simply set skipIntegTestsInDisguise to true to prevent loading of integ test class. --- .../gradle/plugin/PluginBuildPlugin.groovy | 4 +- .../precommit/NamingConventionsTask.groovy | 5 +- .../test/NamingConventionsCheck.java | 57 ++++++++++--------- client-sniffer/build.gradle | 7 ++- client/build.gradle | 7 ++- 5 files changed, 45 insertions(+), 35 deletions(-) diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginBuildPlugin.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginBuildPlugin.groovy index 7ff83f4eae3f1..ba013da31e9db 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginBuildPlugin.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/plugin/PluginBuildPlugin.groovy @@ -19,7 +19,6 @@ package org.elasticsearch.gradle.plugin import nebula.plugin.publishing.maven.MavenBasePublishPlugin -import nebula.plugin.publishing.maven.MavenManifestPlugin import nebula.plugin.publishing.maven.MavenScmPlugin import org.elasticsearch.gradle.BuildPlugin import org.elasticsearch.gradle.test.RestIntegTestTask @@ -27,7 +26,6 @@ import org.elasticsearch.gradle.test.RunTask import org.gradle.api.Project import org.gradle.api.tasks.SourceSet import org.gradle.api.tasks.bundling.Zip - /** * Encapsulates build configuration for an Elasticsearch plugin. */ @@ -56,7 +54,7 @@ public class PluginBuildPlugin extends BuildPlugin { } project.namingConventions { - // Plugins decalare extensions of ESIntegTestCase as "Tests" instead of IT. + // Plugins declare integration tests as "Tests" instead of IT. 
skipIntegTestInDisguise = true } } diff --git a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/NamingConventionsTask.groovy b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/NamingConventionsTask.groovy index fe7f13f29e659..52de7dac2d5a3 100644 --- a/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/NamingConventionsTask.groovy +++ b/buildSrc/src/main/groovy/org/elasticsearch/gradle/precommit/NamingConventionsTask.groovy @@ -26,7 +26,6 @@ import org.gradle.api.file.FileCollection import org.gradle.api.tasks.Input import org.gradle.api.tasks.InputFiles import org.gradle.api.tasks.OutputFile - /** * Runs NamingConventionsCheck on a classpath/directory combo to verify that * tests are named according to our conventions so they'll be picked up by @@ -90,9 +89,11 @@ public class NamingConventionsTask extends LoggedExec { doFirst { args('-Djna.nosys=true') args('-cp', (classpath + extraClasspath).asPath, 'org.elasticsearch.test.NamingConventionsCheck') - args(testClass, integTestClass) + args('--test-class', testClass) if (skipIntegTestInDisguise) { args('--skip-integ-tests-in-disguise') + } else { + args('--integ-test-class', integTestClass) } /* * The test framework has classes that fail the checks to validate that the checks fail properly. 
diff --git a/buildSrc/src/main/java/org/elasticsearch/test/NamingConventionsCheck.java b/buildSrc/src/main/java/org/elasticsearch/test/NamingConventionsCheck.java index ed25d52739c85..cbfa31d1aaf5b 100644 --- a/buildSrc/src/main/java/org/elasticsearch/test/NamingConventionsCheck.java +++ b/buildSrc/src/main/java/org/elasticsearch/test/NamingConventionsCheck.java @@ -44,31 +44,36 @@ */ public class NamingConventionsCheck { public static void main(String[] args) throws IOException { - int i = 0; - NamingConventionsCheck check = new NamingConventionsCheck( - loadClassWithoutInitializing(args[i++]), - loadClassWithoutInitializing(args[i++])); + Class testClass = null; + Class integTestClass = null; + Path rootPath = null; boolean skipIntegTestsInDisguise = false; boolean selfTest = false; - while (true) { - switch (args[i]) { - case "--skip-integ-tests-in-disguise": - skipIntegTestsInDisguise = true; - i++; - continue; - case "--self-test": - selfTest = true; - i++; - continue; - case "--": - i++; - break; - default: - fail("Expected -- before a path."); + for (int i = 0; i < args.length; i++) { + String arg = args[i]; + switch (arg) { + case "--test-class": + testClass = loadClassWithoutInitializing(args[++i]); + break; + case "--integ-test-class": + integTestClass = loadClassWithoutInitializing(args[++i]); + break; + case "--skip-integ-tests-in-disguise": + skipIntegTestsInDisguise = true; + break; + case "--self-test": + selfTest = true; + break; + case "--": + rootPath = Paths.get(args[++i]); + break; + default: + fail("unsupported argument '" + arg + "'"); } - break; } - check.check(Paths.get(args[i])); + + NamingConventionsCheck check = new NamingConventionsCheck(testClass, integTestClass); + check.check(rootPath, skipIntegTestsInDisguise); if (selfTest) { assertViolation("WrongName", check.missingSuffix); @@ -87,9 +92,9 @@ public static void main(String[] args) throws IOException { assertNoViolations("Found inner classes that are tests, which are excluded from 
the test runner", check.innerClasses); assertNoViolations("Pure Unit-Test found must subclass [" + check.testClass.getSimpleName() + "]", check.pureUnitTest); assertNoViolations("Classes ending with [Tests] must subclass [" + check.testClass.getSimpleName() + "]", check.notImplementing); - if (!skipIntegTestsInDisguise) { - assertNoViolations("Subclasses of ESIntegTestCase should end with IT as they are integration tests", - check.integTestsInDisguise); + if (skipIntegTestsInDisguise == false) { + assertNoViolations("Subclasses of " + check.integTestClass.getSimpleName() + + " should end with IT as they are integration tests", check.integTestsInDisguise); } } @@ -108,7 +113,7 @@ public NamingConventionsCheck(Class testClass, Class integTestClass) { this.integTestClass = integTestClass; } - public void check(Path rootPath) throws IOException { + public void check(Path rootPath, boolean skipTestsInDisguised) throws IOException { Files.walkFileTree(rootPath, new FileVisitor() { /** * The package name of the directory we are currently visiting. 
Kept as a string rather than something fancy because we load @@ -143,7 +148,7 @@ public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IO String className = filename.substring(0, filename.length() - ".class".length()); Class clazz = loadClassWithoutInitializing(packageName + className); if (clazz.getName().endsWith("Tests")) { - if (integTestClass.isAssignableFrom(clazz)) { + if (skipTestsInDisguised == false && integTestClass.isAssignableFrom(clazz)) { integTestsInDisguise.add(clazz); } if (Modifier.isAbstract(clazz.getModifiers()) || Modifier.isInterface(clazz.getModifiers())) { diff --git a/client-sniffer/build.gradle b/client-sniffer/build.gradle index 368cdaa073f9c..402bc2f61b4fc 100644 --- a/client-sniffer/build.gradle +++ b/client-sniffer/build.gradle @@ -61,8 +61,11 @@ forbiddenApisTest { //JarHell is part of es core, which we don't want to pull in jarHell.enabled=false -//NamingConventionCheck is part of test-framework, which we don't want to pull in as it depends on es core -namingConventions.enabled=false + +namingConventions { + //we don't have integration tests + skipIntegTestInDisguise = true +} dependencyLicenses { dependencies = project.configurations.runtime.fileCollection { diff --git a/client/build.gradle b/client/build.gradle index afc2e6e1e7b8b..a91c224c745a6 100644 --- a/client/build.gradle +++ b/client/build.gradle @@ -59,8 +59,11 @@ forbiddenApisTest { //JarHell is part of es core, which we don't want to pull in jarHell.enabled=false -//NamingConventionCheck is part of test-framework, which we don't want to pull in as it depends on es core -namingConventions.enabled=false + +namingConventions { + //we don't have integration tests + skipIntegTestInDisguise = true +} thirdPartyAudit.excludes = [ //commons-logging optional dependencies From cb4bfcb864576df8b8735a107783c97f346b9f53 Mon Sep 17 00:00:00 2001 From: javanna Date: Tue, 21 Jun 2016 15:29:59 +0200 Subject: [PATCH 102/103] Take SniffOnFailureListener out of 
Sniffer and make FailureListener final on RestClient --- .../client/sniff/SniffOnFailureListener.java | 65 +++++++++++++++++++ .../elasticsearch/client/sniff/Sniffer.java | 50 ++++++-------- .../client/sniff/MockHostsSniffer.java | 39 +++++++++++ .../sniff/SniffOnFailureListenerTests.java | 57 ++++++++++++++++ .../client/sniff/SnifferBuilderTests.java | 18 ----- .../org/elasticsearch/client/RestClient.java | 28 +++++--- .../client/RestClientBuilderTests.java | 7 ++ .../client/RestClientMultipleHostsTests.java | 6 +- .../client/RestClientSingleHostTests.java | 4 +- 9 files changed, 209 insertions(+), 65 deletions(-) create mode 100644 client-sniffer/src/main/java/org/elasticsearch/client/sniff/SniffOnFailureListener.java create mode 100644 client-sniffer/src/test/java/org/elasticsearch/client/sniff/MockHostsSniffer.java create mode 100644 client-sniffer/src/test/java/org/elasticsearch/client/sniff/SniffOnFailureListenerTests.java diff --git a/client-sniffer/src/main/java/org/elasticsearch/client/sniff/SniffOnFailureListener.java b/client-sniffer/src/main/java/org/elasticsearch/client/sniff/SniffOnFailureListener.java new file mode 100644 index 0000000000000..76350057141c3 --- /dev/null +++ b/client-sniffer/src/main/java/org/elasticsearch/client/sniff/SniffOnFailureListener.java @@ -0,0 +1,65 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. 
+ * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + */ + +package org.elasticsearch.client.sniff; + +import org.apache.http.HttpHost; +import org.elasticsearch.client.RestClient; + +import java.io.IOException; +import java.util.Objects; +import java.util.concurrent.atomic.AtomicBoolean; + +/** + * {@link org.elasticsearch.client.RestClient.FailureListener} implementation that allows to perform + * sniffing on failure. Gets notified whenever a failure happens and uses a {@link Sniffer} instance + * to manually reload hosts and sets them back to the {@link RestClient}. The {@link Sniffer} instance + * needs to be lazily set through {@link #setSniffer(Sniffer)}. 
+ */ +public class SniffOnFailureListener extends RestClient.FailureListener { + + private volatile Sniffer sniffer; + private final AtomicBoolean set; + + public SniffOnFailureListener() { + this.set = new AtomicBoolean(false); + } + + /** + * Sets the {@link Sniffer} instance used to perform sniffing + * @throws IllegalStateException if the sniffer was already set, as it can only be set once + */ + public void setSniffer(Sniffer sniffer) { + Objects.requireNonNull(sniffer, "sniffer must not be null"); + if (set.compareAndSet(false, true)) { + this.sniffer = sniffer; + } else { + throw new IllegalStateException("sniffer can only be set once"); + } + } + + @Override + public void onFailure(HttpHost host) throws IOException { + if (sniffer == null) { + throw new IllegalStateException("sniffer was not set, unable to sniff on failure"); + } + //re-sniff immediately but take out the node that failed + sniffer.sniffOnFailure(host); + } +} diff --git a/client-sniffer/src/main/java/org/elasticsearch/client/sniff/Sniffer.java b/client-sniffer/src/main/java/org/elasticsearch/client/sniff/Sniffer.java index c1aeb3efe3666..74a28cdd2229d 100644 --- a/client-sniffer/src/main/java/org/elasticsearch/client/sniff/Sniffer.java +++ b/client-sniffer/src/main/java/org/elasticsearch/client/sniff/Sniffer.java @@ -37,30 +37,26 @@ /** * Class responsible for sniffing nodes from an elasticsearch cluster and setting them to a provided instance of {@link RestClient}. * Must be created via {@link Builder}, which allows to set all of the different options or rely on defaults. - * A background task fetches the nodes from elasticsearch and updates them periodically. - * Supports sniffing on failure, meaning that the client will notify the sniffer at each host failure, so that nodes can be updated - * straightaway. + * A background task fetches the nodes through the {@link HostsSniffer} and sets them to the {@link RestClient} instance. 
+ * It is possible to perform sniffing on failure by creating a {@link SniffOnFailureListener} and providing it as an argument to + * {@link org.elasticsearch.client.RestClient.Builder#setFailureListener(RestClient.FailureListener)}. The Sniffer implementation + * needs to be lazily set to the previously created SniffOnFailureListener through {@link SniffOnFailureListener#setSniffer(Sniffer)}. */ -public final class Sniffer extends RestClient.FailureListener implements Closeable { +public final class Sniffer implements Closeable { private static final Log logger = LogFactory.getLog(Sniffer.class); - private final boolean sniffOnFailure; private final Task task; - private Sniffer(RestClient restClient, HostsSniffer hostsSniffer, long sniffInterval, - boolean sniffOnFailure, long sniffAfterFailureDelay) { + private Sniffer(RestClient restClient, HostsSniffer hostsSniffer, long sniffInterval, long sniffAfterFailureDelay) { this.task = new Task(hostsSniffer, restClient, sniffInterval, sniffAfterFailureDelay); - this.sniffOnFailure = sniffOnFailure; - restClient.setFailureListener(this); } - @Override - public void onFailure(HttpHost host) throws IOException { - if (sniffOnFailure) { - //re-sniff immediately but take out the node that failed - task.sniffOnFailure(host); - } + /** + * Triggers a new sniffing round and explicitly takes out the failed host provided as argument + */ + public void sniffOnFailure(HttpHost failedHost) { + this.task.sniffOnFailure(failedHost); } @Override @@ -114,12 +110,16 @@ void sniffOnFailure(HttpHost failedHost) { void sniff(HttpHost excludeHost, long nextSniffDelayMillis) { if (running.compareAndSet(false, true)) { try { - List sniffedNodes = hostsSniffer.sniffHosts(); + List sniffedHosts = hostsSniffer.sniffHosts(); + logger.debug("sniffed hosts: " + sniffedHosts); if (excludeHost != null) { - sniffedNodes.remove(excludeHost); + sniffedHosts.remove(excludeHost); + } + if (sniffedHosts.isEmpty()) { + logger.warn("no hosts to set, hosts 
will be updated at the next sniffing round"); + } else { + this.restClient.setHosts(sniffedHosts.toArray(new HttpHost[sniffedHosts.size()])); } - logger.debug("sniffed nodes: " + sniffedNodes); - this.restClient.setHosts(sniffedNodes.toArray(new HttpHost[sniffedNodes.size()])); } catch (Exception e) { logger.error("error while sniffing nodes", e); } finally { @@ -159,7 +159,6 @@ public static final class Builder { private final RestClient restClient; private final HostsSniffer hostsSniffer; private long sniffIntervalMillis = DEFAULT_SNIFF_INTERVAL; - private boolean sniffOnFailure = true; private long sniffAfterFailureDelayMillis = DEFAULT_SNIFF_AFTER_FAILURE_DELAY; /** @@ -186,15 +185,6 @@ public Builder setSniffIntervalMillis(int sniffIntervalMillis) { return this; } - /** - * Enables/disables sniffing on failure. If enabled, at each failure nodes will be reloaded, and a new sniff execution will - * be scheduled after a shorter time than usual (sniffAfterFailureDelayMillis). - */ - public Builder setSniffOnFailure(boolean sniffOnFailure) { - this.sniffOnFailure = sniffOnFailure; - return this; - } - /** * Sets the delay of a sniff execution scheduled after a failure (in milliseconds) */ @@ -210,7 +200,7 @@ public Builder setSniffAfterFailureDelayMillis(int sniffAfterFailureDelayMillis) * Creates the {@link Sniffer} based on the provided configuration. 
*/ public Sniffer build() { - return new Sniffer(restClient, hostsSniffer, sniffIntervalMillis, sniffOnFailure, sniffAfterFailureDelayMillis); + return new Sniffer(restClient, hostsSniffer, sniffIntervalMillis, sniffAfterFailureDelayMillis); } } } diff --git a/client-sniffer/src/test/java/org/elasticsearch/client/sniff/MockHostsSniffer.java b/client-sniffer/src/test/java/org/elasticsearch/client/sniff/MockHostsSniffer.java new file mode 100644 index 0000000000000..bdc052d07c8d5 --- /dev/null +++ b/client-sniffer/src/test/java/org/elasticsearch/client/sniff/MockHostsSniffer.java @@ -0,0 +1,39 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.client.sniff; + +import org.apache.http.HttpHost; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.List; + +class MockHostsSniffer extends HostsSniffer { + MockHostsSniffer() { + super(null, -1, null); + } + + @Override + public List sniffHosts() throws IOException { + List hosts = new ArrayList<>(); + hosts.add(new HttpHost("localhost", 9200)); + return hosts; + } +} diff --git a/client-sniffer/src/test/java/org/elasticsearch/client/sniff/SniffOnFailureListenerTests.java b/client-sniffer/src/test/java/org/elasticsearch/client/sniff/SniffOnFailureListenerTests.java new file mode 100644 index 0000000000000..fe2555763acf0 --- /dev/null +++ b/client-sniffer/src/test/java/org/elasticsearch/client/sniff/SniffOnFailureListenerTests.java @@ -0,0 +1,57 @@ +/* + * Licensed to Elasticsearch under one or more contributor + * license agreements. See the NOTICE file distributed with + * this work for additional information regarding copyright + * ownership. Elasticsearch licenses this file to you under + * the Apache License, Version 2.0 (the "License"); you may + * not use this file except in compliance with the License. + * You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ */ + +package org.elasticsearch.client.sniff; + +import org.apache.http.HttpHost; +import org.apache.lucene.util.LuceneTestCase; +import org.elasticsearch.client.RestClient; + +public class SniffOnFailureListenerTests extends LuceneTestCase { + + public void testSetSniffer() throws Exception { + SniffOnFailureListener listener = new SniffOnFailureListener(); + + try { + listener.onFailure(null); + fail("should have failed"); + } catch(IllegalStateException e) { + assertEquals("sniffer was not set, unable to sniff on failure", e.getMessage()); + } + + try { + listener.setSniffer(null); + fail("should have failed"); + } catch(NullPointerException e) { + assertEquals("sniffer must not be null", e.getMessage()); + } + + RestClient restClient = RestClient.builder(new HttpHost("localhost", 9200)).build(); + try (Sniffer sniffer = Sniffer.builder(restClient, new MockHostsSniffer()).build()) { + listener.setSniffer(sniffer); + try { + listener.setSniffer(sniffer); + fail("should have failed"); + } catch(IllegalStateException e) { + assertEquals("sniffer can only be set once", e.getMessage()); + } + listener.onFailure(new HttpHost("localhost", 9200)); + } + } +} diff --git a/client-sniffer/src/test/java/org/elasticsearch/client/sniff/SnifferBuilderTests.java b/client-sniffer/src/test/java/org/elasticsearch/client/sniff/SnifferBuilderTests.java index f09ef77f5cfd2..a3e8d8e80e7d0 100644 --- a/client-sniffer/src/test/java/org/elasticsearch/client/sniff/SnifferBuilderTests.java +++ b/client-sniffer/src/test/java/org/elasticsearch/client/sniff/SnifferBuilderTests.java @@ -24,10 +24,6 @@ import org.apache.lucene.util.LuceneTestCase; import org.elasticsearch.client.RestClient; -import java.io.IOException; -import java.util.Collections; -import java.util.List; - public class SnifferBuilderTests extends LuceneTestCase { public void testBuild() throws Exception { @@ -80,23 +76,9 @@ public void testBuild() throws Exception { if (random().nextBoolean()) { 
builder.setSniffAfterFailureDelayMillis(RandomInts.randomIntBetween(random(), 1, Integer.MAX_VALUE)); } - if (random().nextBoolean()) { - builder.setSniffOnFailure(random().nextBoolean()); - } try (Sniffer sniffer = builder.build()) { assertNotNull(sniffer); } } } - - private static class MockHostsSniffer extends HostsSniffer { - MockHostsSniffer() { - super(null, -1, null); - } - - @Override - public List sniffHosts() throws IOException { - return Collections.singletonList(new HttpHost("localhost", 9200)); - } - } } diff --git a/client/src/main/java/org/elasticsearch/client/RestClient.java b/client/src/main/java/org/elasticsearch/client/RestClient.java index 2d1c50af3cb8d..9baafb2b5b712 100644 --- a/client/src/main/java/org/elasticsearch/client/RestClient.java +++ b/client/src/main/java/org/elasticsearch/client/RestClient.java @@ -89,12 +89,14 @@ public final class RestClient implements Closeable { private final AtomicInteger lastHostIndex = new AtomicInteger(0); private volatile Set hosts; private final ConcurrentMap blacklist = new ConcurrentHashMap<>(); - private volatile FailureListener failureListener = new FailureListener(); + private final FailureListener failureListener; - private RestClient(CloseableHttpClient client, long maxRetryTimeoutMillis, Header[] defaultHeaders, HttpHost[] hosts) { + private RestClient(CloseableHttpClient client, long maxRetryTimeoutMillis, Header[] defaultHeaders, + HttpHost[] hosts, FailureListener failureListener) { this.client = client; this.maxRetryTimeoutMillis = maxRetryTimeoutMillis; this.defaultHeaders = defaultHeaders; + this.failureListener = failureListener; setHosts(hosts); } @@ -278,13 +280,6 @@ private void onFailure(HttpHost host) throws IOException { failureListener.onFailure(host); } - /** - * Sets a {@link FailureListener} to be notified each and every time a host fails - */ - public synchronized void setFailureListener(FailureListener failureListener) { - this.failureListener = failureListener; - } - @Override 
public void close() throws IOException { client.close(); @@ -368,6 +363,7 @@ public static final class Builder { private CloseableHttpClient httpClient; private int maxRetryTimeout = DEFAULT_MAX_RETRY_TIMEOUT_MILLIS; private Header[] defaultHeaders = EMPTY_HEADERS; + private FailureListener failureListener; /** * Creates a new builder instance and sets the hosts that the client will send requests to. @@ -418,6 +414,15 @@ public Builder setDefaultHeaders(Header[] defaultHeaders) { return this; } + /** + * Sets the {@link FailureListener} to be notified for each request failure + */ + public Builder setFailureListener(FailureListener failureListener) { + Objects.requireNonNull(failureListener, "failure listener must not be null"); + this.failureListener = failureListener; + return this; + } + /** * Creates a new {@link RestClient} based on the provided configuration. */ @@ -425,7 +430,10 @@ public RestClient build() { if (httpClient == null) { httpClient = createDefaultHttpClient(null); } - return new RestClient(httpClient, maxRetryTimeout, defaultHeaders, hosts); + if (failureListener == null) { + failureListener = new FailureListener(); + } + return new RestClient(httpClient, maxRetryTimeout, defaultHeaders, hosts, failureListener); } /** diff --git a/client/src/test/java/org/elasticsearch/client/RestClientBuilderTests.java b/client/src/test/java/org/elasticsearch/client/RestClientBuilderTests.java index 88b1406d92595..cb7de44cd5e19 100644 --- a/client/src/test/java/org/elasticsearch/client/RestClientBuilderTests.java +++ b/client/src/test/java/org/elasticsearch/client/RestClientBuilderTests.java @@ -74,6 +74,13 @@ public void testBuild() throws IOException { assertEquals("default header must not be null", e.getMessage()); } + try { + RestClient.builder(new HttpHost("localhost", 9200)).setFailureListener(null); + fail("should have failed"); + } catch(NullPointerException e) { + assertEquals("failure listener must not be null", e.getMessage()); + } + int numNodes = 
RandomInts.randomIntBetween(random(), 1, 5); HttpHost[] hosts = new HttpHost[numNodes]; for (int i = 0; i < numNodes; i++) { diff --git a/client/src/test/java/org/elasticsearch/client/RestClientMultipleHostsTests.java b/client/src/test/java/org/elasticsearch/client/RestClientMultipleHostsTests.java index 64792c2bb1b86..beae3cfc9f1e0 100644 --- a/client/src/test/java/org/elasticsearch/client/RestClientMultipleHostsTests.java +++ b/client/src/test/java/org/elasticsearch/client/RestClientMultipleHostsTests.java @@ -87,14 +87,10 @@ public CloseableHttpResponse answer(InvocationOnMock invocationOnMock) throws Th for (int i = 0; i < numHosts; i++) { httpHosts[i] = new HttpHost("localhost", 9200 + i); } - restClient = RestClient.builder(httpHosts).setHttpClient(httpClient).build(); failureListener = new TrackingFailureListener(); - restClient.setFailureListener(failureListener); + restClient = RestClient.builder(httpHosts).setHttpClient(httpClient).setFailureListener(failureListener).build(); } - /** - * Test that - */ public void testRoundRobinOkStatusCodes() throws Exception { int numIters = RandomInts.randomIntBetween(random(), 1, 5); for (int i = 0; i < numIters; i++) { diff --git a/client/src/test/java/org/elasticsearch/client/RestClientSingleHostTests.java b/client/src/test/java/org/elasticsearch/client/RestClientSingleHostTests.java index e265772270134..ef2a09fb570ca 100644 --- a/client/src/test/java/org/elasticsearch/client/RestClientSingleHostTests.java +++ b/client/src/test/java/org/elasticsearch/client/RestClientSingleHostTests.java @@ -122,9 +122,9 @@ public CloseableHttpResponse answer(InvocationOnMock invocationOnMock) throws Th defaultHeaders[i] = new BasicHeader(headerName, headerValue); } httpHost = new HttpHost("localhost", 9200); - restClient = RestClient.builder(httpHost).setHttpClient(httpClient).setDefaultHeaders(defaultHeaders).build(); failureListener = new TrackingFailureListener(); - restClient.setFailureListener(failureListener); + restClient = 
RestClient.builder(httpHost).setHttpClient(httpClient).setDefaultHeaders(defaultHeaders) + .setFailureListener(failureListener).build(); } /** From f0b6abe439928eb1ec89eb9167deb5d60f5f2cc3 Mon Sep 17 00:00:00 2001 From: javanna Date: Tue, 21 Jun 2016 17:10:18 +0200 Subject: [PATCH 103/103] rename onSuccess to onResponse That makes it a bit clearer that it's about the response and whether we decide if it was a good one or a failure (based on status code) --- .../src/main/java/org/elasticsearch/client/RestClient.java | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/client/src/main/java/org/elasticsearch/client/RestClient.java b/client/src/main/java/org/elasticsearch/client/RestClient.java index 9baafb2b5b712..a8f46ee6b0e22 100644 --- a/client/src/main/java/org/elasticsearch/client/RestClient.java +++ b/client/src/main/java/org/elasticsearch/client/RestClient.java @@ -171,7 +171,7 @@ public Response performRequest(String method, String endpoint, Map o1, Map.Entry