Skip to content

Commit

Permalink
Fix Chunked APIs sending incorrect responses to HEAD requests (elasti…
Browse files Browse the repository at this point in the history
…c#92042)

Response bodies must always be empty for HEAD requests.
Since the response encoder does not know that it is dealing with a response
to a HEAD request, we have to indicate this fact to it.
The test HTTP client also needed to be adjusted to use the HTTP client codec, so that
it can correlate which responses belong to HEAD requests and read those
responses correctly.
Without this change, the added test reproduces the extra body bytes and fails an
assertion because more than one response is received.

closes elastic#92032
  • Loading branch information
original-brownbear committed Dec 1, 2022
1 parent aa7aa64 commit 2c93715
Show file tree
Hide file tree
Showing 4 changed files with 86 additions and 10 deletions.
6 changes: 6 additions & 0 deletions docs/changelog/92042.yaml
@@ -0,0 +1,6 @@
pr: 92042
summary: Fix Chunked APIs sending incorrect responses to HEAD requests
area: Network
type: bug
issues:
- 92032
Expand Up @@ -24,7 +24,9 @@
import io.netty.handler.codec.http.HttpContentDecompressor;
import io.netty.handler.codec.http.HttpObjectAggregator;
import io.netty.handler.codec.http.HttpRequestDecoder;
import io.netty.handler.codec.http.HttpResponse;
import io.netty.handler.codec.http.HttpResponseEncoder;
import io.netty.handler.codec.http.HttpUtil;
import io.netty.handler.timeout.ReadTimeoutException;
import io.netty.handler.timeout.ReadTimeoutHandler;
import io.netty.util.AttributeKey;
Expand Down Expand Up @@ -324,7 +326,18 @@ protected void initChannel(Channel ch) throws Exception {
ch.pipeline()
.addLast("decoder", decoder)
.addLast("decoder_compress", new HttpContentDecompressor())
.addLast("encoder", new HttpResponseEncoder())
.addLast("encoder", new HttpResponseEncoder() {
@Override
protected boolean isContentAlwaysEmpty(HttpResponse msg) {
// non-chunked responses (Netty4HttpResponse extends Netty's DefaultFullHttpResponse) with chunked transfer
// encoding are only sent by us in response to HEAD requests and must always have an empty body
if (msg instanceof Netty4HttpResponse netty4HttpResponse && HttpUtil.isTransferEncodingChunked(msg)) {
assert netty4HttpResponse.content().isReadable() == false;
return true;
}
return super.isContentAlwaysEmpty(msg);
}
})
.addLast("aggregator", aggregator);
if (handlingSettings.compression()) {
ch.pipeline().addLast("encoder_compress", new HttpContentCompressor(handlingSettings.compressionLevel()));
Expand Down
Expand Up @@ -21,15 +21,14 @@
import io.netty.handler.codec.http.DefaultFullHttpRequest;
import io.netty.handler.codec.http.FullHttpRequest;
import io.netty.handler.codec.http.FullHttpResponse;
import io.netty.handler.codec.http.HttpClientCodec;
import io.netty.handler.codec.http.HttpContentDecompressor;
import io.netty.handler.codec.http.HttpHeaderNames;
import io.netty.handler.codec.http.HttpMethod;
import io.netty.handler.codec.http.HttpObject;
import io.netty.handler.codec.http.HttpObjectAggregator;
import io.netty.handler.codec.http.HttpRequest;
import io.netty.handler.codec.http.HttpRequestEncoder;
import io.netty.handler.codec.http.HttpResponse;
import io.netty.handler.codec.http.HttpResponseDecoder;
import io.netty.handler.codec.http.HttpVersion;

import org.elasticsearch.common.unit.ByteSizeUnit;
Expand Down Expand Up @@ -175,8 +174,7 @@ private static class CountDownLatchHandler extends ChannelInitializer<SocketChan
@Override
protected void initChannel(SocketChannel ch) {
final int maxContentLength = new ByteSizeValue(100, ByteSizeUnit.MB).bytesAsInt();
ch.pipeline().addLast(new HttpResponseDecoder());
ch.pipeline().addLast(new HttpRequestEncoder());
ch.pipeline().addLast(new HttpClientCodec());
ch.pipeline().addLast(new HttpContentDecompressor());
ch.pipeline().addLast(new HttpObjectAggregator(maxContentLength));
ch.pipeline().addLast(new SimpleChannelInboundHandler<HttpObject>() {
Expand Down
Expand Up @@ -40,15 +40,14 @@

import org.elasticsearch.ElasticsearchException;
import org.elasticsearch.common.bytes.BytesArray;
import org.elasticsearch.common.collect.Iterators;
import org.elasticsearch.common.network.NetworkAddress;
import org.elasticsearch.common.network.NetworkService;
import org.elasticsearch.common.settings.ClusterSettings;
import org.elasticsearch.common.settings.Setting;
import org.elasticsearch.common.settings.Settings;
import org.elasticsearch.common.transport.TransportAddress;
import org.elasticsearch.common.unit.ByteSizeValue;
import org.elasticsearch.common.util.MockPageCacheRecycler;
import org.elasticsearch.common.util.PageCacheRecycler;
import org.elasticsearch.common.util.concurrent.ThreadContext;
import org.elasticsearch.core.TimeValue;
import org.elasticsearch.http.AbstractHttpServerTransportTestCase;
Expand All @@ -57,6 +56,7 @@
import org.elasticsearch.http.HttpServerTransport;
import org.elasticsearch.http.HttpTransportSettings;
import org.elasticsearch.http.NullDispatcher;
import org.elasticsearch.rest.ChunkedRestResponseBody;
import org.elasticsearch.rest.RestChannel;
import org.elasticsearch.rest.RestRequest;
import org.elasticsearch.rest.RestResponse;
Expand All @@ -66,6 +66,7 @@
import org.elasticsearch.tracing.Tracer;
import org.elasticsearch.transport.netty4.NettyAllocator;
import org.elasticsearch.transport.netty4.SharedGroupFactory;
import org.elasticsearch.xcontent.ToXContent;
import org.junit.After;
import org.junit.Before;

Expand Down Expand Up @@ -94,14 +95,12 @@ public class Netty4HttpServerTransportTests extends AbstractHttpServerTransportT

private NetworkService networkService;
private ThreadPool threadPool;
private PageCacheRecycler recycler;
private ClusterSettings clusterSettings;

@Before
public void setup() throws Exception {
networkService = new NetworkService(Collections.emptyList());
threadPool = new TestThreadPool("test");
recycler = new MockPageCacheRecycler(Settings.EMPTY);
clusterSettings = randomClusterSettings();
}

Expand All @@ -112,7 +111,6 @@ public void shutdown() throws Exception {
}
threadPool = null;
networkService = null;
recycler = null;
clusterSettings = null;
}

Expand Down Expand Up @@ -560,6 +558,67 @@ protected void initChannel(SocketChannel ch) {
}
}

/**
 * Verifies that a HEAD request to an endpoint that serves a chunked (streamed) response
 * comes back with an empty body and that the chunk generator is never actually invoked.
 */
public void testHeadRequestToChunkedApi() throws InterruptedException {
    // Dispatcher that answers every request with a chunked response body whose content
    // supplier throws if it is ever consumed — for a HEAD request the body must stay empty.
    final HttpServerTransport.Dispatcher headDispatcher = new HttpServerTransport.Dispatcher() {

        @Override
        public void dispatchRequest(final RestRequest request, final RestChannel channel, final ThreadContext threadContext) {
            try {
                final ChunkedRestResponseBody chunkedBody = ChunkedRestResponseBody.fromXContent(
                    ignored -> Iterators.single(
                        (builder, params) -> { throw new AssertionError("should not be called for HEAD REQUEST"); }
                    ),
                    ToXContent.EMPTY_PARAMS,
                    channel
                );
                channel.sendResponse(new RestResponse(OK, chunkedBody));
            } catch (IOException e) {
                throw new AssertionError(e);
            }
        }

        @Override
        public void dispatchBadRequest(final RestChannel channel, final ThreadContext threadContext, final Throwable cause) {
            throw new AssertionError();
        }

    };

    final Settings settings = createSettings();
    try (
        Netty4HttpServerTransport transport = new Netty4HttpServerTransport(
            settings,
            networkService,
            threadPool,
            xContentRegistry(),
            headDispatcher,
            clusterSettings,
            new SharedGroupFactory(settings),
            Tracer.NOOP
        )
    ) {
        transport.start();
        final TransportAddress boundAddress = randomFrom(transport.boundAddress().boundAddresses());

        try (Netty4HttpClient client = new Netty4HttpClient()) {
            final FullHttpRequest headRequest = new DefaultFullHttpRequest(
                HttpVersion.HTTP_1_1,
                HttpMethod.HEAD,
                "/some-head-endpoint"
            );
            final FullHttpResponse response = client.send(boundAddress.address(), headRequest);
            try {
                assertThat(response.status(), equalTo(HttpResponseStatus.OK));
                // a response to HEAD must never carry any body bytes
                assertFalse(response.content().isReadable());
            } finally {
                response.release();
            }
        }
    }
}

/**
 * Builds the transport {@link Settings} used by the tests in this class by delegating
 * to the shared {@code createBuilderWithPort()} helper.
 */
private Settings createSettings() {
    final Settings.Builder settingsBuilder = createBuilderWithPort();
    return settingsBuilder.build();
}
Expand Down

0 comments on commit 2c93715

Please sign in to comment.