diff --git a/modules/transport-netty4/src/internalClusterTest/java/org/elasticsearch/http/netty4/Netty4IncrementalRequestHandlingIT.java b/modules/transport-netty4/src/internalClusterTest/java/org/elasticsearch/http/netty4/Netty4IncrementalRequestHandlingIT.java index d825ec0a83f53..0158384b47aa4 100644 --- a/modules/transport-netty4/src/internalClusterTest/java/org/elasticsearch/http/netty4/Netty4IncrementalRequestHandlingIT.java +++ b/modules/transport-netty4/src/internalClusterTest/java/org/elasticsearch/http/netty4/Netty4IncrementalRequestHandlingIT.java @@ -54,7 +54,6 @@ import org.elasticsearch.common.util.CollectionUtils; import org.elasticsearch.features.NodeFeature; import org.elasticsearch.http.HttpBodyTracer; -import org.elasticsearch.http.HttpHandlingSettings; import org.elasticsearch.http.HttpServerTransport; import org.elasticsearch.http.HttpTransportSettings; import org.elasticsearch.plugins.ActionPlugin; @@ -93,10 +92,15 @@ @ESIntegTestCase.ClusterScope(numDataNodes = 1) public class Netty4IncrementalRequestHandlingIT extends ESNetty4IntegTestCase { + private static final int MAX_CONTENT_LENGTH = ByteSizeUnit.MB.toIntBytes(50); + @Override protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) { Settings.Builder builder = Settings.builder().put(super.nodeSettings(nodeOrdinal, otherSettings)); - builder.put(HttpTransportSettings.SETTING_HTTP_MAX_CONTENT_LENGTH.getKey(), ByteSizeValue.of(50, ByteSizeUnit.MB)); + builder.put( + HttpTransportSettings.SETTING_HTTP_MAX_CONTENT_LENGTH.getKey(), + ByteSizeValue.of(MAX_CONTENT_LENGTH, ByteSizeUnit.BYTES) + ); return builder.build(); } @@ -135,7 +139,7 @@ public void testReceiveAllChunks() throws Exception { var opaqueId = opaqueId(reqNo); // this dataset will be compared with one on server side - var dataSize = randomIntBetween(1024, maxContentLength()); + var dataSize = randomIntBetween(1024, MAX_CONTENT_LENGTH); var sendData = Unpooled.wrappedBuffer(randomByteArrayOfLength(dataSize)); 
sendData.retain(); ctx.clientChannel.writeAndFlush(fullHttpRequest(opaqueId, sendData)); @@ -243,7 +247,7 @@ public void testServerExceptionMidStream() throws Exception { public void testClientBackpressure() throws Exception { try (var ctx = setupClientCtx()) { var opaqueId = opaqueId(0); - var payloadSize = maxContentLength(); + var payloadSize = MAX_CONTENT_LENGTH; var totalParts = 10; var partSize = payloadSize / totalParts; ctx.clientChannel.writeAndFlush(httpRequest(opaqueId, payloadSize)); @@ -285,7 +289,7 @@ public void test100Continue() throws Exception { try (var ctx = setupClientCtx()) { for (int reqNo = 0; reqNo < randomIntBetween(2, 10); reqNo++) { var id = opaqueId(reqNo); - var acceptableContentLength = randomIntBetween(0, maxContentLength()); + var acceptableContentLength = randomIntBetween(0, MAX_CONTENT_LENGTH); // send request header and await 100-continue var req = httpRequest(id, acceptableContentLength); @@ -317,7 +321,7 @@ public void test413TooLargeOnExpect100Continue() throws Exception { try (var ctx = setupClientCtx()) { for (int reqNo = 0; reqNo < randomIntBetween(2, 10); reqNo++) { var id = opaqueId(reqNo); - var oversized = maxContentLength() + 1; + var oversized = MAX_CONTENT_LENGTH + 1; // send request header and await 413 too large var req = httpRequest(id, oversized); @@ -333,32 +337,28 @@ public void test413TooLargeOnExpect100Continue() throws Exception { } } - // ensures that oversized chunked encoded request has no limits at http layer - // rest handler is responsible for oversized requests - public void testOversizedChunkedEncodingNoLimits() throws Exception { + // ensures that oversized chunked encoded request has maxContentLength limit and returns 413 + public void testOversizedChunkedEncoding() throws Exception { try (var ctx = setupClientCtx()) { - for (var reqNo = 0; reqNo < randomIntBetween(2, 10); reqNo++) { - var id = opaqueId(reqNo); - var contentSize = maxContentLength() + 1; - var content = 
randomByteArrayOfLength(contentSize); - var is = new ByteBufInputStream(Unpooled.wrappedBuffer(content)); - var chunkedIs = new ChunkedStream(is); - var httpChunkedIs = new HttpChunkedInput(chunkedIs, LastHttpContent.EMPTY_LAST_CONTENT); - var req = httpRequest(id, 0); - HttpUtil.setTransferEncodingChunked(req, true); - - ctx.clientChannel.pipeline().addLast(new ChunkedWriteHandler()); - ctx.clientChannel.writeAndFlush(req); - ctx.clientChannel.writeAndFlush(httpChunkedIs); - var handler = ctx.awaitRestChannelAccepted(id); - var consumed = handler.readAllBytes(); - assertEquals(contentSize, consumed); - handler.sendResponse(new RestResponse(RestStatus.OK, "")); - - var resp = (FullHttpResponse) safePoll(ctx.clientRespQueue); - assertEquals(HttpResponseStatus.OK, resp.status()); - resp.release(); - } + var id = opaqueId(0); + var contentSize = MAX_CONTENT_LENGTH + 1; + var content = randomByteArrayOfLength(contentSize); + var is = new ByteBufInputStream(Unpooled.wrappedBuffer(content)); + var chunkedIs = new ChunkedStream(is); + var httpChunkedIs = new HttpChunkedInput(chunkedIs, LastHttpContent.EMPTY_LAST_CONTENT); + var req = httpRequest(id, 0); + HttpUtil.setTransferEncodingChunked(req, true); + + ctx.clientChannel.pipeline().addLast(new ChunkedWriteHandler()); + ctx.clientChannel.writeAndFlush(req); + ctx.clientChannel.writeAndFlush(httpChunkedIs); + var handler = ctx.awaitRestChannelAccepted(id); + var consumed = handler.readAllBytes(); + assertTrue(consumed <= MAX_CONTENT_LENGTH); + + var resp = (FullHttpResponse) safePoll(ctx.clientRespQueue); + assertEquals(HttpResponseStatus.REQUEST_ENTITY_TOO_LARGE, resp.status()); + resp.release(); } } @@ -369,7 +369,7 @@ public void testBadRequestReleaseQueuedChunks() throws Exception { try (var ctx = setupClientCtx()) { for (var reqNo = 0; reqNo < randomIntBetween(2, 10); reqNo++) { var id = opaqueId(reqNo); - var contentSize = randomIntBetween(0, maxContentLength()); + var contentSize = randomIntBetween(0, 
MAX_CONTENT_LENGTH); var req = httpRequest(id, contentSize); var content = randomContent(contentSize, true); @@ -405,7 +405,7 @@ public void testHttpClientStats() throws Exception { for (var reqNo = 0; reqNo < randomIntBetween(2, 10); reqNo++) { var id = opaqueId(reqNo); - var contentSize = randomIntBetween(0, maxContentLength()); + var contentSize = randomIntBetween(0, MAX_CONTENT_LENGTH); totalBytesSent += contentSize; ctx.clientChannel.writeAndFlush(httpRequest(id, contentSize)); ctx.clientChannel.writeAndFlush(randomContent(contentSize, true)); @@ -485,10 +485,6 @@ private void assertHttpBodyLogging(Function test) throws Exceptio } } - private int maxContentLength() { - return HttpHandlingSettings.fromSettings(internalCluster().getInstance(Settings.class)).maxContentLength(); - } - private String opaqueId(int reqNo) { return getTestName() + "-" + reqNo; } @@ -658,14 +654,22 @@ void sendResponse(RestResponse response) { int readBytes(int bytes) { var consumed = 0; if (recvLast == false) { - while (consumed < bytes) { - stream.next(); - var recvChunk = safePoll(recvChunks); - consumed += recvChunk.chunk.length(); - recvChunk.chunk.close(); - if (recvChunk.isLast) { - recvLast = true; - break; + stream.next(); + while (consumed < bytes && streamClosed == false) { + try { + var recvChunk = recvChunks.poll(10, TimeUnit.MILLISECONDS); + if (recvChunk != null) { + consumed += recvChunk.chunk.length(); + recvChunk.chunk.close(); + if (recvChunk.isLast) { + recvLast = true; + break; + } + stream.next(); + } + } catch (InterruptedException e) { + Thread.currentThread().interrupt(); + throw new AssertionError(e); } } } diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpAggregator.java b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpAggregator.java index 021ce09e0ed8e..0294b4626496c 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpAggregator.java +++ 
b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpAggregator.java @@ -11,13 +11,10 @@ import io.netty.channel.ChannelHandlerContext; import io.netty.handler.codec.http.FullHttpRequest; -import io.netty.handler.codec.http.FullHttpResponse; -import io.netty.handler.codec.http.HttpContent; import io.netty.handler.codec.http.HttpObject; import io.netty.handler.codec.http.HttpObjectAggregator; import io.netty.handler.codec.http.HttpRequest; -import io.netty.handler.codec.http.HttpResponseStatus; -import io.netty.handler.codec.http.HttpUtil; +import io.netty.handler.codec.http.HttpRequestDecoder; import org.elasticsearch.http.HttpPreRequest; import org.elasticsearch.http.netty4.internal.HttpHeadersAuthenticatorUtils; @@ -27,18 +24,19 @@ /** * A wrapper around {@link HttpObjectAggregator}. Provides optional content aggregation based on * predicate. {@link HttpObjectAggregator} also handles Expect: 100-continue and oversized content. - * Unfortunately, Netty does not provide handlers for oversized messages beyond HttpObjectAggregator. + * Provides content size handling for non-aggregated requests too. 
*/ public class Netty4HttpAggregator extends HttpObjectAggregator { private static final Predicate IGNORE_TEST = (req) -> req.uri().startsWith("/_test/request-stream") == false; private final Predicate decider; + private final Netty4HttpContentSizeHandler streamContentSizeHandler; private boolean aggregating = true; - private boolean ignoreContentAfterContinueResponse = false; - public Netty4HttpAggregator(int maxContentLength, Predicate decider) { + public Netty4HttpAggregator(int maxContentLength, Predicate decider, HttpRequestDecoder decoder) { super(maxContentLength); this.decider = decider; + this.streamContentSizeHandler = new Netty4HttpContentSizeHandler(decoder, maxContentLength); } @Override @@ -51,34 +49,7 @@ public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception if (aggregating || msg instanceof FullHttpRequest) { super.channelRead(ctx, msg); } else { - handle(ctx, (HttpObject) msg); - } - } - - private void handle(ChannelHandlerContext ctx, HttpObject msg) { - if (msg instanceof HttpRequest request) { - var continueResponse = newContinueResponse(request, maxContentLength(), ctx.pipeline()); - if (continueResponse != null) { - // there are 3 responses expected: 100, 413, 417 - // on 100 we pass request further and reply to client to continue - // on 413/417 we ignore following content - ctx.writeAndFlush(continueResponse); - var resp = (FullHttpResponse) continueResponse; - if (resp.status() != HttpResponseStatus.CONTINUE) { - ignoreContentAfterContinueResponse = true; - return; - } - HttpUtil.set100ContinueExpected(request, false); - } - ignoreContentAfterContinueResponse = false; - ctx.fireChannelRead(msg); - } else { - var httpContent = (HttpContent) msg; - if (ignoreContentAfterContinueResponse) { - httpContent.release(); - } else { - ctx.fireChannelRead(msg); - } + streamContentSizeHandler.channelRead(ctx, msg); } } } diff --git 
a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpContentSizeHandler.java b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpContentSizeHandler.java new file mode 100644 index 0000000000000..2b322fefa1262 --- /dev/null +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpContentSizeHandler.java @@ -0,0 +1,171 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package org.elasticsearch.http.netty4; + +import io.netty.buffer.Unpooled; +import io.netty.channel.ChannelFutureListener; +import io.netty.channel.ChannelHandlerContext; +import io.netty.channel.ChannelInboundHandlerAdapter; +import io.netty.handler.codec.http.DefaultFullHttpResponse; +import io.netty.handler.codec.http.DefaultHttpHeaders; +import io.netty.handler.codec.http.EmptyHttpHeaders; +import io.netty.handler.codec.http.FullHttpResponse; +import io.netty.handler.codec.http.HttpContent; +import io.netty.handler.codec.http.HttpHeaderNames; +import io.netty.handler.codec.http.HttpHeaderValues; +import io.netty.handler.codec.http.HttpObject; +import io.netty.handler.codec.http.HttpRequest; +import io.netty.handler.codec.http.HttpRequestDecoder; +import io.netty.handler.codec.http.HttpResponseStatus; +import io.netty.handler.codec.http.HttpUtil; +import io.netty.handler.codec.http.HttpVersion; +import io.netty.handler.codec.http.LastHttpContent; + +import org.elasticsearch.core.SuppressForbidden; + +import static io.netty.handler.codec.http.HttpHeaderNames.CONNECTION; +import static 
io.netty.handler.codec.http.HttpHeaderNames.CONTENT_LENGTH; + +/** + * Provides handling for 'Expect' header and content size. Implements HTTP1.1 spec. + * Allows {@code Expect: 100-continue} header only. Other 'Expect' headers will be rejected with + * {@code 417 Expectation Failed} reason. + *
+ * Replies {@code 100 Continue} to requests with allowed maxContentLength. + *
+ * Replies {@code 413 Request Entity Too Large} when content size exceeds maxContentLength. + * + * Channel can be reused for requests with "Expect:100-Continue" header that exceed allowed content length, + * as long as request does not include content. If oversized request already contains content then + * we cannot safely proceed and connection will be closed. + *

+ * TODO: move to RestController to allow content limits per RestHandler. + * Ideally we should be able to handle Continue and oversized request in the RestController. + *
+ * <ul>
+ *     <li>
+ * 100 Continue is an interim response, means RestChannel will send 2 responses for a single request. See
+ * <a href="https://www.rfc-editor.org/rfc/rfc9110.html#status.100">rfc9110.html#status.100</a>
+ *     </li>
+ *     <li>
+ * RestChannel should be able to close underlying HTTP channel connection.
+ *     </li>
+ * </ul>
+ */ +@SuppressForbidden(reason = "use of default ChannelFutureListener's CLOSE and CLOSE_ON_FAILURE") +public class Netty4HttpContentSizeHandler extends ChannelInboundHandlerAdapter { + + // copied from netty's HttpObjectAggregator + static final FullHttpResponse CONTINUE = new DefaultFullHttpResponse( + HttpVersion.HTTP_1_1, + HttpResponseStatus.CONTINUE, + Unpooled.EMPTY_BUFFER + ); + static final FullHttpResponse EXPECTATION_FAILED_CLOSE = new DefaultFullHttpResponse( + HttpVersion.HTTP_1_1, + HttpResponseStatus.EXPECTATION_FAILED, + Unpooled.EMPTY_BUFFER, + new DefaultHttpHeaders().add(CONTENT_LENGTH, 0).add(CONNECTION, HttpHeaderValues.CLOSE), + EmptyHttpHeaders.INSTANCE + ); + static final FullHttpResponse TOO_LARGE_CLOSE = new DefaultFullHttpResponse( + HttpVersion.HTTP_1_1, + HttpResponseStatus.REQUEST_ENTITY_TOO_LARGE, + Unpooled.EMPTY_BUFFER, + new DefaultHttpHeaders().add(CONTENT_LENGTH, 0).add(CONNECTION, HttpHeaderValues.CLOSE), + EmptyHttpHeaders.INSTANCE + ); + static final FullHttpResponse TOO_LARGE = new DefaultFullHttpResponse( + HttpVersion.HTTP_1_1, + HttpResponseStatus.REQUEST_ENTITY_TOO_LARGE, + Unpooled.EMPTY_BUFFER, + new DefaultHttpHeaders().add(CONTENT_LENGTH, 0), + EmptyHttpHeaders.INSTANCE + ); + + private final int maxContentLength; + private final HttpRequestDecoder decoder; // need to reset decoder after sending 413 + private int currentContentLength; // chunked encoding does not provide content length, need to track actual length + private boolean contentExpected; + + public Netty4HttpContentSizeHandler(HttpRequestDecoder decoder, int maxContentLength) { + this.maxContentLength = maxContentLength; + this.decoder = decoder; + } + + @Override + public void channelRead(ChannelHandlerContext ctx, Object msg) { + assert msg instanceof HttpObject; + if (msg instanceof HttpRequest request) { + handleRequest(ctx, request); + } else { + handleContent(ctx, (HttpContent) msg); + } + } + + private void handleRequest(ChannelHandlerContext ctx, 
HttpRequest request) { + contentExpected = false; + if (request.decoderResult().isFailure()) { + ctx.fireChannelRead(request); + return; + } + + final var expectValue = request.headers().get(HttpHeaderNames.EXPECT); + boolean isContinueExpected = false; + // Only "Expect: 100-Continue" header is supported + if (expectValue != null) { + if (HttpHeaderValues.CONTINUE.toString().equalsIgnoreCase(expectValue)) { + isContinueExpected = true; + } else { + ctx.writeAndFlush(EXPECTATION_FAILED_CLOSE.retainedDuplicate()).addListener(ChannelFutureListener.CLOSE); + return; + } + } + + boolean isOversized = HttpUtil.getContentLength(request, -1) > maxContentLength; + if (isOversized) { + if (isContinueExpected) { + // Client is allowed to send content without waiting for Continue. + // See https://www.rfc-editor.org/rfc/rfc9110.html#section-10.1.1-11.3 + // this content will result in HttpRequestDecoder failure and send downstream + decoder.reset(); + ctx.writeAndFlush(TOO_LARGE.retainedDuplicate()).addListener(ChannelFutureListener.CLOSE_ON_FAILURE); + } else { + // Client is sending oversized content, we cannot safely take it. Closing channel. 
+ ctx.writeAndFlush(TOO_LARGE_CLOSE.retainedDuplicate()).addListener(ChannelFutureListener.CLOSE); + } + } else { + contentExpected = true; + currentContentLength = 0; + if (isContinueExpected) { + ctx.writeAndFlush(CONTINUE.retainedDuplicate()); + HttpUtil.set100ContinueExpected(request, false); + } + ctx.fireChannelRead(request); + } + } + + private void handleContent(ChannelHandlerContext ctx, HttpContent msg) { + if (contentExpected) { + currentContentLength += msg.content().readableBytes(); + if (currentContentLength > maxContentLength) { + msg.release(); + ctx.writeAndFlush(TOO_LARGE_CLOSE.retainedDuplicate()).addListener(ChannelFutureListener.CLOSE); + } else { + ctx.fireChannelRead(msg); + } + } else { + msg.release(); + if (msg != LastHttpContent.EMPTY_LAST_CONTENT) { + ctx.close(); // there is no reliable recovery from unexpected content, closing channel + } + } + } + +} diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpServerTransport.java b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpServerTransport.java index 36c860f1fb90b..9ffa4b479be17 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpServerTransport.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpServerTransport.java @@ -381,7 +381,8 @@ protected HttpMessage createMessage(String[] initialLine) throws Exception { handlingSettings.maxContentLength(), httpPreRequest -> enabled.get() == false || ((httpPreRequest.rawPath().endsWith("/_bulk") == false) - || httpPreRequest.rawPath().startsWith("/_xpack/monitoring/_bulk")) + || httpPreRequest.rawPath().startsWith("/_xpack/monitoring/_bulk")), + decoder ); aggregator.setMaxCumulationBufferComponents(transport.maxCompositeBufferComponents); ch.pipeline() diff --git a/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpContentSizeHandlerTests.java 
b/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpContentSizeHandlerTests.java new file mode 100644 index 0000000000000..3f8fe0075689f --- /dev/null +++ b/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpContentSizeHandlerTests.java @@ -0,0 +1,238 @@ +/* + * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one + * or more contributor license agreements. Licensed under the "Elastic License + * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side + * Public License v 1"; you may not use this file except in compliance with, at + * your election, the "Elastic License 2.0", the "GNU Affero General Public + * License v3.0 only", or the "Server Side Public License, v 1". + */ + +package org.elasticsearch.http.netty4; + +import io.netty.buffer.ByteBuf; +import io.netty.buffer.Unpooled; +import io.netty.channel.embedded.EmbeddedChannel; +import io.netty.handler.codec.http.DefaultHttpContent; +import io.netty.handler.codec.http.DefaultHttpRequest; +import io.netty.handler.codec.http.DefaultLastHttpContent; +import io.netty.handler.codec.http.FullHttpResponse; +import io.netty.handler.codec.http.HttpContent; +import io.netty.handler.codec.http.HttpHeaderNames; +import io.netty.handler.codec.http.HttpHeaderValues; +import io.netty.handler.codec.http.HttpMethod; +import io.netty.handler.codec.http.HttpObject; +import io.netty.handler.codec.http.HttpRequest; +import io.netty.handler.codec.http.HttpRequestDecoder; +import io.netty.handler.codec.http.HttpRequestEncoder; +import io.netty.handler.codec.http.HttpResponseStatus; +import io.netty.handler.codec.http.HttpUtil; +import io.netty.handler.codec.http.HttpVersion; +import io.netty.handler.codec.http.LastHttpContent; + +import org.elasticsearch.test.ESTestCase; + +import java.util.Arrays; + +public class Netty4HttpContentSizeHandlerTests extends ESTestCase { + + private static final int MAX_CONTENT_LENGTH = 1024; + 
private static final int OVERSIZED_LENGTH = MAX_CONTENT_LENGTH + 1; + private static final int REPS = 1000; + private EmbeddedChannel channel; + private EmbeddedChannel encoder; // channel to encode HTTP objects into bytes + + private static HttpContent httpContent(int size) { + return new DefaultHttpContent(Unpooled.wrappedBuffer(randomByteArrayOfLength(size))); + } + + private static LastHttpContent lastHttpContent(int size) { + return new DefaultLastHttpContent(Unpooled.wrappedBuffer(randomByteArrayOfLength(size))); + } + + private HttpRequest httpRequest() { + return new DefaultHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.POST, "/"); + } + + // encodes multiple HTTP objects into single ByteBuf + private ByteBuf encode(HttpObject... objs) { + var out = Unpooled.compositeBuffer(); + Arrays.stream(objs).forEach(encoder::writeOutbound); + while (encoder.outboundMessages().isEmpty() == false) { + out.addComponent(true, encoder.readOutbound()); + } + return out; + } + + @Override + public void setUp() throws Exception { + super.setUp(); + var decoder = new HttpRequestDecoder(); + encoder = new EmbeddedChannel(new HttpRequestEncoder()); + channel = new EmbeddedChannel(decoder, new Netty4HttpContentSizeHandler(decoder, MAX_CONTENT_LENGTH)); + } + + /** + * Assert that handler replies 100-continue for acceptable request and pass request further. 
+ */ + public void testContinue() { + for (var i = 0; i < REPS; i++) { + var sendRequest = httpRequest(); + HttpUtil.set100ContinueExpected(sendRequest, true); + channel.writeInbound(encode(sendRequest)); + assertEquals("should send back 100-continue", Netty4HttpContentSizeHandler.CONTINUE, channel.readOutbound()); + var recvRequest = (HttpRequest) channel.readInbound(); + assertNotNull(recvRequest); + assertFalse(HttpUtil.is100ContinueExpected(recvRequest)); + channel.writeInbound(encode(LastHttpContent.EMPTY_LAST_CONTENT)); + assertEquals(LastHttpContent.EMPTY_LAST_CONTENT, channel.readInbound()); + } + } + + /** + * Assert that handler pass through acceptable request. + */ + public void testWithoutContinue() { + for (var i = 0; i < REPS; i++) { + var sendRequest = httpRequest(); + channel.writeInbound(encode(sendRequest)); + assertNull("should not receive response", channel.readOutbound()); + assertNotNull("request should pass", channel.readInbound()); + channel.writeInbound(encode(LastHttpContent.EMPTY_LAST_CONTENT)); + assertEquals(LastHttpContent.EMPTY_LAST_CONTENT, channel.readInbound()); + } + } + + /** + * Assert that handler pass through request and content for acceptable request. 
+ */ + public void testContinueWithContent() { + for (var i = 0; i < REPS; i++) { + var sendRequest = httpRequest(); + HttpUtil.set100ContinueExpected(sendRequest, true); + HttpUtil.setContentLength(sendRequest, MAX_CONTENT_LENGTH); + var sendContent = lastHttpContent(MAX_CONTENT_LENGTH); + channel.writeInbound(encode(sendRequest, sendContent)); + var resp = (FullHttpResponse) channel.readOutbound(); + assertEquals("should send back 100-continue", Netty4HttpContentSizeHandler.CONTINUE, resp); + resp.release(); + var recvRequest = (HttpRequest) channel.readInbound(); + assertNotNull(recvRequest); + var recvContent = (HttpContent) channel.readInbound(); + assertNotNull(recvContent); + assertEquals(MAX_CONTENT_LENGTH, recvContent.content().readableBytes()); + recvContent.release(); + } + } + + /** + * Assert that handler return 417 Expectation Failed and closes channel on request + * with "Expect" header other than "100-Continue". + */ + public void testExpectationFailed() { + var sendRequest = httpRequest(); + sendRequest.headers().set(HttpHeaderNames.EXPECT, randomValueOtherThan(HttpHeaderValues.CONTINUE, ESTestCase::randomIdentifier)); + channel.writeInbound(encode(sendRequest)); + var resp = (FullHttpResponse) channel.readOutbound(); + assertEquals(HttpResponseStatus.EXPECTATION_FAILED, resp.status()); + assertFalse(channel.isOpen()); + resp.release(); + } + + /** + * Assert that handler returns 413 Request Entity Too Large for oversized request + * and does not close channel if following content is not present. 
+ */ + public void testEntityTooLarge() { + for (var i = 0; i < REPS; i++) { + var sendRequest = httpRequest(); + HttpUtil.set100ContinueExpected(sendRequest, true); + HttpUtil.setContentLength(sendRequest, OVERSIZED_LENGTH); + channel.writeInbound(encode(sendRequest, LastHttpContent.EMPTY_LAST_CONTENT)); + var resp = (FullHttpResponse) channel.readOutbound(); + assertEquals(HttpResponseStatus.REQUEST_ENTITY_TOO_LARGE, resp.status()); + assertNull("request should not pass", channel.readInbound()); + assertTrue("should not close channel", channel.isOpen()); + resp.release(); + } + } + + /** + * Mixed load of oversized and normal requests with Exepct:100-Continue. + */ + public void testMixedContent() { + for (int i = 0; i < REPS; i++) { + var isOversized = randomBoolean(); + var sendRequest = httpRequest(); + HttpUtil.set100ContinueExpected(sendRequest, true); + if (isOversized) { + HttpUtil.setContentLength(sendRequest, OVERSIZED_LENGTH); + channel.writeInbound(encode(sendRequest)); + var resp = (FullHttpResponse) channel.readOutbound(); + assertEquals(HttpResponseStatus.REQUEST_ENTITY_TOO_LARGE, resp.status()); + channel.writeInbound(encode(LastHttpContent.EMPTY_LAST_CONTENT)); // terminate + assertNull(channel.readInbound()); + resp.release(); + } else { + var normalSize = between(1, MAX_CONTENT_LENGTH); + HttpUtil.setContentLength(sendRequest, normalSize); + channel.writeInbound(encode(sendRequest)); + var resp = (FullHttpResponse) channel.readOutbound(); + assertEquals(HttpResponseStatus.CONTINUE, resp.status()); + resp.release(); + var sendContent = lastHttpContent(normalSize); + channel.writeInbound(encode(sendContent)); + var recvRequest = (HttpRequest) channel.readInbound(); + var recvContent = (LastHttpContent) channel.readInbound(); + assertEquals("content length header should match", normalSize, HttpUtil.getContentLength(recvRequest)); + assertFalse("should remove expect header", HttpUtil.is100ContinueExpected(recvRequest)); + assertEquals("actual 
content size should match", normalSize, recvContent.content().readableBytes()); + recvContent.release(); + } + } + } + + /** + * Assert that handler returns 413 Request Entity Too Large and close channel for + * oversized request with content. + */ + public void testEntityTooLargeWithContentWithoutExpect() { + var sendRequest = httpRequest(); + HttpUtil.setContentLength(sendRequest, OVERSIZED_LENGTH); + var unexpectedContent = lastHttpContent(OVERSIZED_LENGTH); + channel.writeInbound(encode(sendRequest, unexpectedContent)); + var resp = (FullHttpResponse) channel.readOutbound(); + assertEquals(HttpResponseStatus.REQUEST_ENTITY_TOO_LARGE, resp.status()); + assertFalse(channel.isOpen()); + resp.release(); + } + + /** + * Assert that handler return 413 Request Entity Too Large and closes channel for oversized + * requests with chunked content. + */ + public void testEntityTooLargeWithChunkedContent() { + var sendRequest = httpRequest(); + HttpUtil.setTransferEncodingChunked(sendRequest, true); + channel.writeInbound(encode(sendRequest)); + assertTrue("request should pass", channel.readInbound() instanceof HttpRequest); + + int contentBytesSent = 0; + do { + var thisPartSize = between(1, MAX_CONTENT_LENGTH * 2); + channel.writeInbound(encode(httpContent(thisPartSize))); + contentBytesSent += thisPartSize; + + if (contentBytesSent <= MAX_CONTENT_LENGTH) { + ((HttpContent) channel.readInbound()).release(); + } else { + break; + } + } while (true); + + var resp = (FullHttpResponse) channel.readOutbound(); + assertEquals("should respond with 413", HttpResponseStatus.REQUEST_ENTITY_TOO_LARGE, resp.status()); + assertFalse("should close channel", channel.isOpen()); + resp.release(); + } + +}