Rest: Add json in request body to scroll, clear scroll, and analyze API
 Add json support to scroll, clear scroll, and analyze

Closes #5866
johtani committed Mar 23, 2015
1 parent 56117e3 commit 16083d4
Showing 20 changed files with 831 additions and 209 deletions.
13 changes: 13 additions & 0 deletions docs/reference/indices/analyze.asciidoc
@@ -52,3 +52,16 @@ mapping for `obj1.field1` (and if not, the default index analyzer).

Also, the text can be provided as part of the request body, and not as a
parameter.

All of the parameters can also be provided as JSON in the request body:
[source,js]
--------------------------------------------------
curl -XGET 'localhost:9200/test/_analyze' -d '
{
  "text" : "this is a <b>test</b>",
  "tokenizer" : "keyword",
  "token_filters" : [ "lowercase" ],
  "char_filters" : [ "html_strip" ]
}
'
--------------------------------------------------
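
For comparison, here is a minimal Java-client sketch of the same request, using the builder setters this commit wires into `AnalyzeSourceBuilder` (an illustration, not part of the commit):

[source,java]
--------------------------------------------------
import org.elasticsearch.action.admin.indices.analyze.AnalyzeResponse;
import org.elasticsearch.client.Client;

public class AnalyzeJsonExample {
    public static void analyze(Client client) {
        // Mirrors the JSON body above: keyword tokenizer, lowercase token
        // filter, html_strip char filter.
        AnalyzeResponse response = client.admin().indices()
                .prepareAnalyze("test", "this is a <b>test</b>")
                .setTokenizer("keyword")
                .setTokenFilters("lowercase")
                .setCharFilters("html_strip")
                .execute().actionGet();
        for (AnalyzeResponse.AnalyzeToken token : response.getTokens()) {
            System.out.println(token.getTerm() + " @ " + token.getPosition());
        }
    }
}
--------------------------------------------------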
21 changes: 21 additions & 0 deletions docs/reference/search/request/scroll.asciidoc
@@ -69,6 +69,18 @@ curl -XGET <1> 'localhost:9200/_search/scroll?scroll=1m' <2> <3> \
Each call to the `scroll` API returns the next batch of results until there
are no more results left to return, ie the `hits` array is empty.

The scroll parameters can also be provided as JSON in the request body:

[source,js]
--------------------------------------------------
curl -XGET 'localhost:9200/_search/scroll' -d'
{
  "scroll_id" : "c2Nhbjs2OzM0NDg1ODpzRlBLc0FXNlNyNm5JWUc1",
  "scroll" : "1m"
}
'
--------------------------------------------------
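
A minimal sketch of the same continuation through the Java client (the scroll client API itself is not changed by this commit):

[source,java]
--------------------------------------------------
import org.elasticsearch.action.search.SearchResponse;
import org.elasticsearch.client.Client;

public class ScrollExample {
    // Fetch the next batch for an open scroll, keeping the search
    // context alive for another minute.
    public static SearchResponse nextBatch(Client client, String scrollId) {
        return client.prepareSearchScroll(scrollId)
                .setScroll("1m")
                .execute().actionGet();
    }
}
--------------------------------------------------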

IMPORTANT: The initial search request and each subsequent scroll request
returns a new `scroll_id` -- only the most recent `scroll_id` should be
used.
@@ -184,3 +196,12 @@ All search contexts can be cleared with the `_all` parameter:
curl -XDELETE localhost:9200/_search/scroll/_all
---------------------------------------

The scroll IDs to clear can also be provided as JSON in the request body:

[source,js]
---------------------------------------
curl -XDELETE localhost:9200/_search/scroll -d '
{
  "scroll_id" : ["c2Nhbjs2OzM0NDg1ODpzRlBLc0FXNlNyNm5JWUc1", "aGVuRmV0Y2g7NTsxOnkxaDZ"]
}
'
---------------------------------------
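
A corresponding sketch for clearing scroll contexts from the Java client (standard client API, assumed rather than taken from this diff):

[source,java]
---------------------------------------
import org.elasticsearch.action.search.ClearScrollRequestBuilder;
import org.elasticsearch.action.search.ClearScrollResponse;
import org.elasticsearch.client.Client;

public class ClearScrollExample {
    // Release the server-side search contexts behind one or more scroll IDs.
    public static boolean clear(Client client, String... scrollIds) {
        ClearScrollRequestBuilder builder = client.prepareClearScroll();
        for (String id : scrollIds) {
            builder.addScrollId(id);
        }
        ClearScrollResponse response = builder.execute().actionGet();
        return response.isSucceeded();
    }
}
---------------------------------------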
src/main/java/org/elasticsearch/action/admin/indices/analyze/AnalyzeRequest.java

@@ -18,11 +18,12 @@
  */
 package org.elasticsearch.action.admin.indices.analyze;
 
-import org.elasticsearch.Version;
 import org.elasticsearch.action.ActionRequestValidationException;
 import org.elasticsearch.action.support.single.custom.SingleCustomOperationRequest;
+import org.elasticsearch.client.Requests;
 import org.elasticsearch.common.Nullable;
-import org.elasticsearch.common.Strings;
+import org.elasticsearch.common.bytes.BytesArray;
+import org.elasticsearch.common.bytes.BytesReference;
 import org.elasticsearch.common.io.stream.StreamInput;
 import org.elasticsearch.common.io.stream.StreamOutput;
 
@@ -36,125 +37,70 @@
  */
 public class AnalyzeRequest extends SingleCustomOperationRequest<AnalyzeRequest> {
 
-    private String text;
-
-    private String analyzer;
-
-    private String tokenizer;
-
-    private String[] tokenFilters = Strings.EMPTY_ARRAY;
-
-    private String[] charFilters = Strings.EMPTY_ARRAY;
-
-    private String field;
+    private BytesReference source;
+    private boolean unsafe;
 
     AnalyzeRequest() {
 
     }
 
     /**
-     * Constructs a new analyzer request for the provided text.
-     *
-     * @param text The text to analyze
-     */
-    public AnalyzeRequest(String text) {
-        this.text = text;
-    }
-
-    /**
      * Constructs a new analyzer request for the provided index and text.
      *
      * @param index The index name
-     * @param text The text to analyze
      */
-    public AnalyzeRequest(@Nullable String index, String text) {
+    public AnalyzeRequest(@Nullable String index) {
         this.index(index);
-        this.text = text;
     }
 
-    public String text() {
-        return this.text;
+    public BytesReference source() {
+        return source;
     }
 
-    public AnalyzeRequest analyzer(String analyzer) {
-        this.analyzer = analyzer;
-        return this;
-    }
-
-    public String analyzer() {
-        return this.analyzer;
-    }
-
-    public AnalyzeRequest tokenizer(String tokenizer) {
-        this.tokenizer = tokenizer;
-        return this;
-    }
-
-    public String tokenizer() {
-        return this.tokenizer;
+    @Override
+    public void beforeLocalFork() {
+        if (unsafe) {
+            source = source.copyBytesArray();
+            unsafe = false;
+        }
     }
 
-    public AnalyzeRequest tokenFilters(String... tokenFilters) {
-        this.tokenFilters = tokenFilters;
+    public AnalyzeRequest source(String source) {
+        this.source = new BytesArray(source);
+        this.unsafe = false;
         return this;
     }
 
-    public String[] tokenFilters() {
-        return this.tokenFilters;
-    }
-
-    public AnalyzeRequest charFilters(String... charFilters) {
-        this.charFilters = charFilters;
+    public AnalyzeRequest source(BytesReference source, boolean unsafe) {
+        this.source = source;
+        this.unsafe = unsafe;
         return this;
    }
 
-    public String[] charFilters() {
-        return this.charFilters;
-    }
-
-    public AnalyzeRequest field(String field) {
-        this.field = field;
+    public AnalyzeRequest source(AnalyzeSourceBuilder sourceBuilder) {
+        this.source = sourceBuilder.buildAsBytes(Requests.CONTENT_TYPE);
+        this.unsafe = false;
         return this;
     }
 
-    public String field() {
-        return this.field;
-    }
-
     @Override
     public ActionRequestValidationException validate() {
         ActionRequestValidationException validationException = super.validate();
-        if (text == null) {
-            validationException = addValidationError("text is missing", validationException);
-        }
-        if (tokenFilters == null) {
-            validationException = addValidationError("token filters must not be null", validationException);
-        }
-        if (charFilters == null) {
-            validationException = addValidationError("char filters must not be null", validationException);
+        if (source == null) {
+            validationException = addValidationError("source is missing", validationException);
         }
         return validationException;
     }
 
     @Override
     public void readFrom(StreamInput in) throws IOException {
         super.readFrom(in);
-        text = in.readString();
-        analyzer = in.readOptionalString();
-        tokenizer = in.readOptionalString();
-        tokenFilters = in.readStringArray();
-        charFilters = in.readStringArray();
-        field = in.readOptionalString();
+        unsafe = false;
+        source = in.readBytesReference();
     }
 
     @Override
     public void writeTo(StreamOutput out) throws IOException {
         super.writeTo(out);
-        out.writeString(text);
-        out.writeOptionalString(analyzer);
-        out.writeOptionalString(tokenizer);
-        out.writeStringArray(tokenFilters);
-        out.writeStringArray(charFilters);
-        out.writeOptionalString(field);
+        out.writeBytesReference(source);
     }
 }
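
A short usage sketch of the reworked request, using only methods visible in this diff; the JSON body follows the field names in the updated docs above:

[source,java]
--------------------------------------------------
import org.elasticsearch.action.admin.indices.analyze.AnalyzeRequest;

public class AnalyzeRequestSourceExample {
    public static AnalyzeRequest build() {
        // source(String) wraps the body in a BytesArray and marks it safe
        // (unsafe = false), so beforeLocalFork() will not need to copy it.
        return new AnalyzeRequest("test")
                .source("{\"text\" : \"this is a <b>test</b>\", \"tokenizer\" : \"keyword\"}");
    }
}
--------------------------------------------------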
src/main/java/org/elasticsearch/action/admin/indices/analyze/AnalyzeRequestBuilder.java

@@ -21,18 +21,22 @@
 import org.elasticsearch.action.ActionListener;
 import org.elasticsearch.action.support.single.custom.SingleCustomOperationRequestBuilder;
 import org.elasticsearch.client.IndicesAdminClient;
+import org.elasticsearch.common.bytes.BytesReference;
 
 /**
  *
  */
 public class AnalyzeRequestBuilder extends SingleCustomOperationRequestBuilder<AnalyzeRequest, AnalyzeResponse, AnalyzeRequestBuilder> {
 
+    private AnalyzeSourceBuilder builder;
+
     public AnalyzeRequestBuilder(IndicesAdminClient indicesClient) {
         super(indicesClient, new AnalyzeRequest());
     }
 
     public AnalyzeRequestBuilder(IndicesAdminClient indicesClient, String index, String text) {
-        super(indicesClient, new AnalyzeRequest(index, text));
+        super(indicesClient, new AnalyzeRequest(index));
+        sourceBuilder().setText(text);
     }
 
     /**
@@ -50,7 +54,7 @@ public AnalyzeRequestBuilder setIndex(String index) {
      * @param analyzer The analyzer name.
      */
     public AnalyzeRequestBuilder setAnalyzer(String analyzer) {
-        request.analyzer(analyzer);
+        sourceBuilder().setAnalyzer(analyzer);
         return this;
     }
 
@@ -59,7 +63,7 @@ public AnalyzeRequestBuilder setAnalyzer(String analyzer) {
      * to be set.
      */
     public AnalyzeRequestBuilder setField(String field) {
-        request.field(field);
+        sourceBuilder().setField(field);
         return this;
     }
 
@@ -68,28 +72,58 @@ public AnalyzeRequestBuilder setField(String field) {
      * analyzer.
      */
     public AnalyzeRequestBuilder setTokenizer(String tokenizer) {
-        request.tokenizer(tokenizer);
+        sourceBuilder().setTokenizer(tokenizer);
         return this;
     }
 
     /**
      * Sets token filters that will be used on top of a tokenizer provided.
      */
     public AnalyzeRequestBuilder setTokenFilters(String... tokenFilters) {
-        request.tokenFilters(tokenFilters);
+        sourceBuilder().setTokenFilters(tokenFilters);
         return this;
     }
 
     /**
      * Sets char filters that will be used before the tokenizer.
      */
     public AnalyzeRequestBuilder setCharFilters(String... charFilters) {
-        request.charFilters(charFilters);
+        sourceBuilder().setCharFilters(charFilters);
         return this;
     }
 
+    public AnalyzeRequestBuilder setSource(String source) {
+        request.source(source);
+        return this;
+    }
+
+    public AnalyzeRequestBuilder setSource(BytesReference source, boolean unsafe) {
+        request.source(source, unsafe);
+        return this;
+    }
+
+    public AnalyzeRequestBuilder setSource(AnalyzeSourceBuilder sourceBuilder) {
+        request.source(sourceBuilder);
+        return this;
+    }
+
+    public AnalyzeRequestBuilder setText(String text) {
+        sourceBuilder().setText(text);
+        return this;
+    }
+
+    private AnalyzeSourceBuilder sourceBuilder() {
+        if (builder == null) {
+            builder = new AnalyzeSourceBuilder();
+        }
+        return builder;
+    }
+
     @Override
     protected void doExecute(ActionListener<AnalyzeResponse> listener) {
+        if (builder != null && request.source() == null) {
+            request.source(builder);
+        }
         client.analyze(request, listener);
     }
 }
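
Taken together, the setters accumulate into a lazily created `AnalyzeSourceBuilder`, and `doExecute()` applies it only when no explicit source has been set. A usage sketch, assuming `AnalyzeSourceBuilder` (not shown in this excerpt) exposes the setters invoked above:

[source,java]
--------------------------------------------------
import org.elasticsearch.action.admin.indices.analyze.AnalyzeRequestBuilder;
import org.elasticsearch.action.admin.indices.analyze.AnalyzeResponse;
import org.elasticsearch.action.admin.indices.analyze.AnalyzeSourceBuilder;
import org.elasticsearch.client.Client;

public class AnalyzeBuilderExample {
    public static AnalyzeResponse run(Client client) {
        // Build the request body explicitly; an explicit source takes
        // precedence over the builder's internal AnalyzeSourceBuilder
        // (see the request.source() == null check in doExecute()).
        AnalyzeSourceBuilder source = new AnalyzeSourceBuilder();
        source.setText("this is a <b>test</b>");
        source.setTokenizer("keyword");
        source.setTokenFilters("lowercase");
        source.setCharFilters("html_strip");

        return new AnalyzeRequestBuilder(client.admin().indices())
                .setIndex("test")
                .setSource(source)
                .execute().actionGet();
    }
}
--------------------------------------------------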