diff --git a/modules/transport-netty4/src/internalClusterTest/java/org/elasticsearch/http/netty4/Netty4IncrementalRequestHandlingIT.java b/modules/transport-netty4/src/internalClusterTest/java/org/elasticsearch/http/netty4/Netty4IncrementalRequestHandlingIT.java index d714de86b3d8b..33bf7ea0536b6 100644 --- a/modules/transport-netty4/src/internalClusterTest/java/org/elasticsearch/http/netty4/Netty4IncrementalRequestHandlingIT.java +++ b/modules/transport-netty4/src/internalClusterTest/java/org/elasticsearch/http/netty4/Netty4IncrementalRequestHandlingIT.java @@ -54,6 +54,7 @@ import org.elasticsearch.common.util.CollectionUtils; import org.elasticsearch.features.NodeFeature; import org.elasticsearch.http.HttpBodyTracer; +import org.elasticsearch.http.HttpHandlingSettings; import org.elasticsearch.http.HttpServerTransport; import org.elasticsearch.http.HttpTransportSettings; import org.elasticsearch.plugins.ActionPlugin; @@ -92,15 +93,13 @@ @ESIntegTestCase.ClusterScope(numDataNodes = 1) public class Netty4IncrementalRequestHandlingIT extends ESNetty4IntegTestCase { - private static final int MAX_CONTENT_LENGTH = ByteSizeUnit.MB.toIntBytes(50); - @Override protected Settings nodeSettings(int nodeOrdinal, Settings otherSettings) { Settings.Builder builder = Settings.builder().put(super.nodeSettings(nodeOrdinal, otherSettings)); builder.put( HttpTransportSettings.SETTING_HTTP_MAX_CONTENT_LENGTH.getKey(), - new ByteSizeValue(MAX_CONTENT_LENGTH, ByteSizeUnit.BYTES) + new ByteSizeValue(ByteSizeUnit.MB.toIntBytes(50), ByteSizeUnit.BYTES) ); return builder.build(); } @@ -140,7 +139,7 @@ public void testReceiveAllChunks() throws Exception { var opaqueId = opaqueId(reqNo); // this dataset will be compared with one on server side - var dataSize = randomIntBetween(1024, MAX_CONTENT_LENGTH); + var dataSize = randomIntBetween(1024, maxContentLength()); var sendData = Unpooled.wrappedBuffer(randomByteArrayOfLength(dataSize)); sendData.retain(); ctx.clientChannel.writeAndFlush(fullHttpRequest(opaqueId, sendData)); @@ -248,7 +247,7 @@ public void testServerExceptionMidStream() throws Exception { public void testClientBackpressure() throws Exception { try (var ctx = setupClientCtx()) { var opaqueId = opaqueId(0); - var payloadSize = MAX_CONTENT_LENGTH; + var payloadSize = maxContentLength(); var totalParts = 10; var partSize = payloadSize / totalParts; ctx.clientChannel.writeAndFlush(httpRequest(opaqueId, payloadSize)); @@ -290,7 +289,7 @@ public void test100Continue() throws Exception { try (var ctx = setupClientCtx()) { for (int reqNo = 0; reqNo < randomIntBetween(2, 10); reqNo++) { var id = opaqueId(reqNo); - var acceptableContentLength = randomIntBetween(0, MAX_CONTENT_LENGTH); + var acceptableContentLength = randomIntBetween(0, maxContentLength()); // send request header and await 100-continue var req = httpRequest(id, acceptableContentLength); @@ -322,7 +321,7 @@ public void test413TooLargeOnExpect100Continue() throws Exception { try (var ctx = setupClientCtx()) { for (int reqNo = 0; reqNo < randomIntBetween(2, 10); reqNo++) { var id = opaqueId(reqNo); - var oversized = MAX_CONTENT_LENGTH + 1; + var oversized = maxContentLength() + 1; // send request header and await 413 too large var req = httpRequest(id, oversized); @@ -338,28 +337,32 @@ public void test413TooLargeOnExpect100Continue() throws Exception { } } - // ensures that oversized chunked encoded request has maxContentLength limit and returns 413 - public void testOversizedChunkedEncoding() throws Exception { + // ensures that oversized chunked 
encoded request has no limits at http layer + // rest handler is responsible for oversized requests + public void testOversizedChunkedEncodingNoLimits() throws Exception { try (var ctx = setupClientCtx()) { - var id = opaqueId(0); - var contentSize = MAX_CONTENT_LENGTH + 1; - var content = randomByteArrayOfLength(contentSize); - var is = new ByteBufInputStream(Unpooled.wrappedBuffer(content)); - var chunkedIs = new ChunkedStream(is); - var httpChunkedIs = new HttpChunkedInput(chunkedIs, LastHttpContent.EMPTY_LAST_CONTENT); - var req = httpRequest(id, 0); - HttpUtil.setTransferEncodingChunked(req, true); - - ctx.clientChannel.pipeline().addLast(new ChunkedWriteHandler()); - ctx.clientChannel.writeAndFlush(req); - ctx.clientChannel.writeAndFlush(httpChunkedIs); - var handler = ctx.awaitRestChannelAccepted(id); - var consumed = handler.readAllBytes(); - assertTrue(consumed <= MAX_CONTENT_LENGTH); - - var resp = (FullHttpResponse) safePoll(ctx.clientRespQueue); - assertEquals(HttpResponseStatus.REQUEST_ENTITY_TOO_LARGE, resp.status()); - resp.release(); + for (var reqNo = 0; reqNo < randomIntBetween(2, 10); reqNo++) { + var id = opaqueId(reqNo); + var contentSize = maxContentLength() + 1; + var content = randomByteArrayOfLength(contentSize); + var is = new ByteBufInputStream(Unpooled.wrappedBuffer(content)); + var chunkedIs = new ChunkedStream(is); + var httpChunkedIs = new HttpChunkedInput(chunkedIs, LastHttpContent.EMPTY_LAST_CONTENT); + var req = httpRequest(id, 0); + HttpUtil.setTransferEncodingChunked(req, true); + + ctx.clientChannel.pipeline().addLast(new ChunkedWriteHandler()); + ctx.clientChannel.writeAndFlush(req); + ctx.clientChannel.writeAndFlush(httpChunkedIs); + var handler = ctx.awaitRestChannelAccepted(id); + var consumed = handler.readAllBytes(); + assertEquals(contentSize, consumed); + handler.sendResponse(new RestResponse(RestStatus.OK, "")); + + var resp = (FullHttpResponse) safePoll(ctx.clientRespQueue); + assertEquals(HttpResponseStatus.OK, resp.status()); + resp.release(); + } } } @@ -370,7 +373,7 @@ public void testBadRequestReleaseQueuedChunks() throws Exception { try (var ctx = setupClientCtx()) { for (var reqNo = 0; reqNo < randomIntBetween(2, 10); reqNo++) { var id = opaqueId(reqNo); - var contentSize = randomIntBetween(0, MAX_CONTENT_LENGTH); + var contentSize = randomIntBetween(0, maxContentLength()); var req = httpRequest(id, contentSize); var content = randomContent(contentSize, true); @@ -406,7 +409,7 @@ public void testHttpClientStats() throws Exception { for (var reqNo = 0; reqNo < randomIntBetween(2, 10); reqNo++) { var id = opaqueId(reqNo); - var contentSize = randomIntBetween(0, MAX_CONTENT_LENGTH); + var contentSize = randomIntBetween(0, maxContentLength()); totalBytesSent += contentSize; ctx.clientChannel.writeAndFlush(httpRequest(id, contentSize)); ctx.clientChannel.writeAndFlush(randomContent(contentSize, true)); @@ -486,6 +489,10 @@ private void assertHttpBodyLogging(Function test) throws Exceptio } } + private int maxContentLength() { + return HttpHandlingSettings.fromSettings(internalCluster().getInstance(Settings.class)).maxContentLength(); + } + private String opaqueId(int reqNo) { return getTestName() + "-" + reqNo; } @@ -655,22 +662,14 @@ void sendResponse(RestResponse response) { int readBytes(int bytes) { var consumed = 0; if (recvLast == false) { - stream.next(); - while (consumed < bytes && streamClosed == false) { - try { - var recvChunk = recvChunks.poll(10, TimeUnit.MILLISECONDS); - if (recvChunk != null) { - consumed += 
recvChunk.chunk.length(); - recvChunk.chunk.close(); - if (recvChunk.isLast) { - recvLast = true; - break; - } - stream.next(); - } - } catch (InterruptedException e) { - Thread.currentThread().interrupt(); - throw new AssertionError(e); + while (consumed < bytes) { + stream.next(); + var recvChunk = safePoll(recvChunks); + consumed += recvChunk.chunk.length(); + recvChunk.chunk.close(); + if (recvChunk.isLast) { + recvLast = true; + break; } } } diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpAggregator.java b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpAggregator.java index 0294b4626496c..021ce09e0ed8e 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpAggregator.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpAggregator.java @@ -11,10 +11,13 @@ import io.netty.channel.ChannelHandlerContext; import io.netty.handler.codec.http.FullHttpRequest; +import io.netty.handler.codec.http.FullHttpResponse; +import io.netty.handler.codec.http.HttpContent; import io.netty.handler.codec.http.HttpObject; import io.netty.handler.codec.http.HttpObjectAggregator; import io.netty.handler.codec.http.HttpRequest; -import io.netty.handler.codec.http.HttpRequestDecoder; +import io.netty.handler.codec.http.HttpResponseStatus; +import io.netty.handler.codec.http.HttpUtil; import org.elasticsearch.http.HttpPreRequest; import org.elasticsearch.http.netty4.internal.HttpHeadersAuthenticatorUtils; @@ -24,19 +27,18 @@ /** * A wrapper around {@link HttpObjectAggregator}. Provides optional content aggregation based on * predicate. {@link HttpObjectAggregator} also handles Expect: 100-continue and oversized content. - * Provides content size handling for non-aggregated requests too. + * Unfortunately, Netty does not provide handlers for oversized messages beyond HttpObjectAggregator. 
 */ public class Netty4HttpAggregator extends HttpObjectAggregator { private static final Predicate<HttpPreRequest> IGNORE_TEST = (req) -> req.uri().startsWith("/_test/request-stream") == false; private final Predicate<HttpPreRequest> decider; - private final Netty4HttpContentSizeHandler streamContentSizeHandler; private boolean aggregating = true; + private boolean ignoreContentAfterContinueResponse = false; - public Netty4HttpAggregator(int maxContentLength, Predicate<HttpPreRequest> decider, HttpRequestDecoder decoder) { + public Netty4HttpAggregator(int maxContentLength, Predicate<HttpPreRequest> decider) { super(maxContentLength); this.decider = decider; - this.streamContentSizeHandler = new Netty4HttpContentSizeHandler(decoder, maxContentLength); } @Override @@ -49,7 +51,34 @@ public void channelRead(ChannelHandlerContext ctx, Object msg) throws Exception if (aggregating || msg instanceof FullHttpRequest) { super.channelRead(ctx, msg); } else { - streamContentSizeHandler.channelRead(ctx, msg); + handle(ctx, (HttpObject) msg); + } + } + + private void handle(ChannelHandlerContext ctx, HttpObject msg) { + if (msg instanceof HttpRequest request) { + var continueResponse = newContinueResponse(request, maxContentLength(), ctx.pipeline()); + if (continueResponse != null) { + // there are 3 responses expected: 100, 413, 417 + // on 100 we pass the request along and reply to the client to continue + // on 413/417 we ignore the following content + ctx.writeAndFlush(continueResponse); + var resp = (FullHttpResponse) continueResponse; + if (resp.status() != HttpResponseStatus.CONTINUE) { + ignoreContentAfterContinueResponse = true; + return; + } + HttpUtil.set100ContinueExpected(request, false); + } + ignoreContentAfterContinueResponse = false; + ctx.fireChannelRead(msg); + } else { + var httpContent = (HttpContent) msg; + if (ignoreContentAfterContinueResponse) { + httpContent.release(); + } else { + ctx.fireChannelRead(msg); + } } } } diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpContentSizeHandler.java b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpContentSizeHandler.java deleted file mode 100644 index 2b322fefa1262..0000000000000 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpContentSizeHandler.java +++ /dev/null @@ -1,171 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the "Elastic License - * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side - * Public License v 1"; you may not use this file except in compliance with, at - * your election, the "Elastic License 2.0", the "GNU Affero General Public - * License v3.0 only", or the "Server Side Public License, v 1".
- */ - -package org.elasticsearch.http.netty4; - -import io.netty.buffer.Unpooled; -import io.netty.channel.ChannelFutureListener; -import io.netty.channel.ChannelHandlerContext; -import io.netty.channel.ChannelInboundHandlerAdapter; -import io.netty.handler.codec.http.DefaultFullHttpResponse; -import io.netty.handler.codec.http.DefaultHttpHeaders; -import io.netty.handler.codec.http.EmptyHttpHeaders; -import io.netty.handler.codec.http.FullHttpResponse; -import io.netty.handler.codec.http.HttpContent; -import io.netty.handler.codec.http.HttpHeaderNames; -import io.netty.handler.codec.http.HttpHeaderValues; -import io.netty.handler.codec.http.HttpObject; -import io.netty.handler.codec.http.HttpRequest; -import io.netty.handler.codec.http.HttpRequestDecoder; -import io.netty.handler.codec.http.HttpResponseStatus; -import io.netty.handler.codec.http.HttpUtil; -import io.netty.handler.codec.http.HttpVersion; -import io.netty.handler.codec.http.LastHttpContent; - -import org.elasticsearch.core.SuppressForbidden; - -import static io.netty.handler.codec.http.HttpHeaderNames.CONNECTION; -import static io.netty.handler.codec.http.HttpHeaderNames.CONTENT_LENGTH; - -/** - * Provides handling for 'Expect' header and content size. Implements HTTP1.1 spec. - * Allows {@code Expect: 100-continue} header only. Other 'Expect' headers will be rejected with - * {@code 417 Expectation Failed} reason. - *
- * <p>
- * Replies {@code 100 Continue} to requests with allowed maxContentLength.
- * <p>
- * Replies {@code 413 Request Entity Too Large} when content size exceeds maxContentLength.
- *
- * Channel can be reused for requests with "Expect:100-Continue" header that exceed allowed content length,
- * as long as request does not include content. If oversized request already contains content then
- * we cannot safely proceed and connection will be closed.
- * <br>
- * TODO: move to RestController to allow content limits per RestHandler.
- * Ideally we should be able to handle Continue and oversized request in the RestController.
- * <ul>
- *   <li>
- *      100 Continue is interim response, means RestChannel will send 2 responses for a single request. See
- *      <a href="https://www.rfc-editor.org/rfc/rfc9110.html#status.100">rfc9110.html#status.100</a>
- *   </li>
- *   <li>
- *      RestChannel should be able to close underlying HTTP channel connection.
- *   </li>
- * </ul>
- */ -@SuppressForbidden(reason = "use of default ChannelFutureListener's CLOSE and CLOSE_ON_FAILURE") -public class Netty4HttpContentSizeHandler extends ChannelInboundHandlerAdapter { - - // copied from netty's HttpObjectAggregator - static final FullHttpResponse CONTINUE = new DefaultFullHttpResponse( - HttpVersion.HTTP_1_1, - HttpResponseStatus.CONTINUE, - Unpooled.EMPTY_BUFFER - ); - static final FullHttpResponse EXPECTATION_FAILED_CLOSE = new DefaultFullHttpResponse( - HttpVersion.HTTP_1_1, - HttpResponseStatus.EXPECTATION_FAILED, - Unpooled.EMPTY_BUFFER, - new DefaultHttpHeaders().add(CONTENT_LENGTH, 0).add(CONNECTION, HttpHeaderValues.CLOSE), - EmptyHttpHeaders.INSTANCE - ); - static final FullHttpResponse TOO_LARGE_CLOSE = new DefaultFullHttpResponse( - HttpVersion.HTTP_1_1, - HttpResponseStatus.REQUEST_ENTITY_TOO_LARGE, - Unpooled.EMPTY_BUFFER, - new DefaultHttpHeaders().add(CONTENT_LENGTH, 0).add(CONNECTION, HttpHeaderValues.CLOSE), - EmptyHttpHeaders.INSTANCE - ); - static final FullHttpResponse TOO_LARGE = new DefaultFullHttpResponse( - HttpVersion.HTTP_1_1, - HttpResponseStatus.REQUEST_ENTITY_TOO_LARGE, - Unpooled.EMPTY_BUFFER, - new DefaultHttpHeaders().add(CONTENT_LENGTH, 0), - EmptyHttpHeaders.INSTANCE - ); - - private final int maxContentLength; - private final HttpRequestDecoder decoder; // need to reset decoder after sending 413 - private int currentContentLength; // chunked encoding does not provide content length, need to track actual length - private boolean contentExpected; - - public Netty4HttpContentSizeHandler(HttpRequestDecoder decoder, int maxContentLength) { - this.maxContentLength = maxContentLength; - this.decoder = decoder; - } - - @Override - public void channelRead(ChannelHandlerContext ctx, Object msg) { - assert msg instanceof HttpObject; - if (msg instanceof HttpRequest request) { - handleRequest(ctx, request); - } else { - handleContent(ctx, (HttpContent) msg); - } - } - - private void handleRequest(ChannelHandlerContext ctx, HttpRequest request) { - contentExpected = false; - if (request.decoderResult().isFailure()) { - ctx.fireChannelRead(request); - return; - } - - final var expectValue = request.headers().get(HttpHeaderNames.EXPECT); - boolean isContinueExpected = false; - // Only "Expect: 100-Continue" header is supported - if (expectValue != null) { - if (HttpHeaderValues.CONTINUE.toString().equalsIgnoreCase(expectValue)) { - isContinueExpected = true; - } else { - ctx.writeAndFlush(EXPECTATION_FAILED_CLOSE.retainedDuplicate()).addListener(ChannelFutureListener.CLOSE); - return; - } - } - - boolean isOversized = HttpUtil.getContentLength(request, -1) > maxContentLength; - if (isOversized) { - if (isContinueExpected) { - // Client is allowed to send content without waiting for Continue. - // See https://www.rfc-editor.org/rfc/rfc9110.html#section-10.1.1-11.3 - // this content will result in HttpRequestDecoder failure and send downstream - decoder.reset(); - ctx.writeAndFlush(TOO_LARGE.retainedDuplicate()).addListener(ChannelFutureListener.CLOSE_ON_FAILURE); - } else { - // Client is sending oversized content, we cannot safely take it. Closing channel. 
- ctx.writeAndFlush(TOO_LARGE_CLOSE.retainedDuplicate()).addListener(ChannelFutureListener.CLOSE); - } - } else { - contentExpected = true; - currentContentLength = 0; - if (isContinueExpected) { - ctx.writeAndFlush(CONTINUE.retainedDuplicate()); - HttpUtil.set100ContinueExpected(request, false); - } - ctx.fireChannelRead(request); - } - } - - private void handleContent(ChannelHandlerContext ctx, HttpContent msg) { - if (contentExpected) { - currentContentLength += msg.content().readableBytes(); - if (currentContentLength > maxContentLength) { - msg.release(); - ctx.writeAndFlush(TOO_LARGE_CLOSE.retainedDuplicate()).addListener(ChannelFutureListener.CLOSE); - } else { - ctx.fireChannelRead(msg); - } - } else { - msg.release(); - if (msg != LastHttpContent.EMPTY_LAST_CONTENT) { - ctx.close(); // there is no reliable recovery from unexpected content, closing channel - } - } - } - -} diff --git a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpServerTransport.java b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpServerTransport.java index 9ffa4b479be17..36c860f1fb90b 100644 --- a/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpServerTransport.java +++ b/modules/transport-netty4/src/main/java/org/elasticsearch/http/netty4/Netty4HttpServerTransport.java @@ -381,8 +381,7 @@ protected HttpMessage createMessage(String[] initialLine) throws Exception { handlingSettings.maxContentLength(), httpPreRequest -> enabled.get() == false || ((httpPreRequest.rawPath().endsWith("/_bulk") == false) - || httpPreRequest.rawPath().startsWith("/_xpack/monitoring/_bulk")), - decoder + || httpPreRequest.rawPath().startsWith("/_xpack/monitoring/_bulk")) ); aggregator.setMaxCumulationBufferComponents(transport.maxCompositeBufferComponents); ch.pipeline() diff --git a/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpContentSizeHandlerTests.java b/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpContentSizeHandlerTests.java deleted file mode 100644 index 3f8fe0075689f..0000000000000 --- a/modules/transport-netty4/src/test/java/org/elasticsearch/http/netty4/Netty4HttpContentSizeHandlerTests.java +++ /dev/null @@ -1,238 +0,0 @@ -/* - * Copyright Elasticsearch B.V. and/or licensed to Elasticsearch B.V. under one - * or more contributor license agreements. Licensed under the "Elastic License - * 2.0", the "GNU Affero General Public License v3.0 only", and the "Server Side - * Public License v 1"; you may not use this file except in compliance with, at - * your election, the "Elastic License 2.0", the "GNU Affero General Public - * License v3.0 only", or the "Server Side Public License, v 1". 
- */ - -package org.elasticsearch.http.netty4; - -import io.netty.buffer.ByteBuf; -import io.netty.buffer.Unpooled; -import io.netty.channel.embedded.EmbeddedChannel; -import io.netty.handler.codec.http.DefaultHttpContent; -import io.netty.handler.codec.http.DefaultHttpRequest; -import io.netty.handler.codec.http.DefaultLastHttpContent; -import io.netty.handler.codec.http.FullHttpResponse; -import io.netty.handler.codec.http.HttpContent; -import io.netty.handler.codec.http.HttpHeaderNames; -import io.netty.handler.codec.http.HttpHeaderValues; -import io.netty.handler.codec.http.HttpMethod; -import io.netty.handler.codec.http.HttpObject; -import io.netty.handler.codec.http.HttpRequest; -import io.netty.handler.codec.http.HttpRequestDecoder; -import io.netty.handler.codec.http.HttpRequestEncoder; -import io.netty.handler.codec.http.HttpResponseStatus; -import io.netty.handler.codec.http.HttpUtil; -import io.netty.handler.codec.http.HttpVersion; -import io.netty.handler.codec.http.LastHttpContent; - -import org.elasticsearch.test.ESTestCase; - -import java.util.Arrays; - -public class Netty4HttpContentSizeHandlerTests extends ESTestCase { - - private static final int MAX_CONTENT_LENGTH = 1024; - private static final int OVERSIZED_LENGTH = MAX_CONTENT_LENGTH + 1; - private static final int REPS = 1000; - private EmbeddedChannel channel; - private EmbeddedChannel encoder; // channel to encode HTTP objects into bytes - - private static HttpContent httpContent(int size) { - return new DefaultHttpContent(Unpooled.wrappedBuffer(randomByteArrayOfLength(size))); - } - - private static LastHttpContent lastHttpContent(int size) { - return new DefaultLastHttpContent(Unpooled.wrappedBuffer(randomByteArrayOfLength(size))); - } - - private HttpRequest httpRequest() { - return new DefaultHttpRequest(HttpVersion.HTTP_1_1, HttpMethod.POST, "/"); - } - - // encodes multiple HTTP objects into single ByteBuf - private ByteBuf encode(HttpObject... objs) { - var out = Unpooled.compositeBuffer(); - Arrays.stream(objs).forEach(encoder::writeOutbound); - while (encoder.outboundMessages().isEmpty() == false) { - out.addComponent(true, encoder.readOutbound()); - } - return out; - } - - @Override - public void setUp() throws Exception { - super.setUp(); - var decoder = new HttpRequestDecoder(); - encoder = new EmbeddedChannel(new HttpRequestEncoder()); - channel = new EmbeddedChannel(decoder, new Netty4HttpContentSizeHandler(decoder, MAX_CONTENT_LENGTH)); - } - - /** - * Assert that handler replies 100-continue for acceptable request and pass request further. - */ - public void testContinue() { - for (var i = 0; i < REPS; i++) { - var sendRequest = httpRequest(); - HttpUtil.set100ContinueExpected(sendRequest, true); - channel.writeInbound(encode(sendRequest)); - assertEquals("should send back 100-continue", Netty4HttpContentSizeHandler.CONTINUE, channel.readOutbound()); - var recvRequest = (HttpRequest) channel.readInbound(); - assertNotNull(recvRequest); - assertFalse(HttpUtil.is100ContinueExpected(recvRequest)); - channel.writeInbound(encode(LastHttpContent.EMPTY_LAST_CONTENT)); - assertEquals(LastHttpContent.EMPTY_LAST_CONTENT, channel.readInbound()); - } - } - - /** - * Assert that handler pass through acceptable request. 
- */ - public void testWithoutContinue() { - for (var i = 0; i < REPS; i++) { - var sendRequest = httpRequest(); - channel.writeInbound(encode(sendRequest)); - assertNull("should not receive response", channel.readOutbound()); - assertNotNull("request should pass", channel.readInbound()); - channel.writeInbound(encode(LastHttpContent.EMPTY_LAST_CONTENT)); - assertEquals(LastHttpContent.EMPTY_LAST_CONTENT, channel.readInbound()); - } - } - - /** - * Assert that handler pass through request and content for acceptable request. - */ - public void testContinueWithContent() { - for (var i = 0; i < REPS; i++) { - var sendRequest = httpRequest(); - HttpUtil.set100ContinueExpected(sendRequest, true); - HttpUtil.setContentLength(sendRequest, MAX_CONTENT_LENGTH); - var sendContent = lastHttpContent(MAX_CONTENT_LENGTH); - channel.writeInbound(encode(sendRequest, sendContent)); - var resp = (FullHttpResponse) channel.readOutbound(); - assertEquals("should send back 100-continue", Netty4HttpContentSizeHandler.CONTINUE, resp); - resp.release(); - var recvRequest = (HttpRequest) channel.readInbound(); - assertNotNull(recvRequest); - var recvContent = (HttpContent) channel.readInbound(); - assertNotNull(recvContent); - assertEquals(MAX_CONTENT_LENGTH, recvContent.content().readableBytes()); - recvContent.release(); - } - } - - /** - * Assert that handler return 417 Expectation Failed and closes channel on request - * with "Expect" header other than "100-Continue". - */ - public void testExpectationFailed() { - var sendRequest = httpRequest(); - sendRequest.headers().set(HttpHeaderNames.EXPECT, randomValueOtherThan(HttpHeaderValues.CONTINUE, ESTestCase::randomIdentifier)); - channel.writeInbound(encode(sendRequest)); - var resp = (FullHttpResponse) channel.readOutbound(); - assertEquals(HttpResponseStatus.EXPECTATION_FAILED, resp.status()); - assertFalse(channel.isOpen()); - resp.release(); - } - - /** - * Assert that handler returns 413 Request Entity Too Large for oversized request - * and does not close channel if following content is not present. - */ - public void testEntityTooLarge() { - for (var i = 0; i < REPS; i++) { - var sendRequest = httpRequest(); - HttpUtil.set100ContinueExpected(sendRequest, true); - HttpUtil.setContentLength(sendRequest, OVERSIZED_LENGTH); - channel.writeInbound(encode(sendRequest, LastHttpContent.EMPTY_LAST_CONTENT)); - var resp = (FullHttpResponse) channel.readOutbound(); - assertEquals(HttpResponseStatus.REQUEST_ENTITY_TOO_LARGE, resp.status()); - assertNull("request should not pass", channel.readInbound()); - assertTrue("should not close channel", channel.isOpen()); - resp.release(); - } - } - - /** - * Mixed load of oversized and normal requests with Exepct:100-Continue. 
- */ - public void testMixedContent() { - for (int i = 0; i < REPS; i++) { - var isOversized = randomBoolean(); - var sendRequest = httpRequest(); - HttpUtil.set100ContinueExpected(sendRequest, true); - if (isOversized) { - HttpUtil.setContentLength(sendRequest, OVERSIZED_LENGTH); - channel.writeInbound(encode(sendRequest)); - var resp = (FullHttpResponse) channel.readOutbound(); - assertEquals(HttpResponseStatus.REQUEST_ENTITY_TOO_LARGE, resp.status()); - channel.writeInbound(encode(LastHttpContent.EMPTY_LAST_CONTENT)); // terminate - assertNull(channel.readInbound()); - resp.release(); - } else { - var normalSize = between(1, MAX_CONTENT_LENGTH); - HttpUtil.setContentLength(sendRequest, normalSize); - channel.writeInbound(encode(sendRequest)); - var resp = (FullHttpResponse) channel.readOutbound(); - assertEquals(HttpResponseStatus.CONTINUE, resp.status()); - resp.release(); - var sendContent = lastHttpContent(normalSize); - channel.writeInbound(encode(sendContent)); - var recvRequest = (HttpRequest) channel.readInbound(); - var recvContent = (LastHttpContent) channel.readInbound(); - assertEquals("content length header should match", normalSize, HttpUtil.getContentLength(recvRequest)); - assertFalse("should remove expect header", HttpUtil.is100ContinueExpected(recvRequest)); - assertEquals("actual content size should match", normalSize, recvContent.content().readableBytes()); - recvContent.release(); - } - } - } - - /** - * Assert that handler returns 413 Request Entity Too Large and close channel for - * oversized request with content. - */ - public void testEntityTooLargeWithContentWithoutExpect() { - var sendRequest = httpRequest(); - HttpUtil.setContentLength(sendRequest, OVERSIZED_LENGTH); - var unexpectedContent = lastHttpContent(OVERSIZED_LENGTH); - channel.writeInbound(encode(sendRequest, unexpectedContent)); - var resp = (FullHttpResponse) channel.readOutbound(); - assertEquals(HttpResponseStatus.REQUEST_ENTITY_TOO_LARGE, resp.status()); - assertFalse(channel.isOpen()); - resp.release(); - } - - /** - * Assert that handler return 413 Request Entity Too Large and closes channel for oversized - * requests with chunked content. - */ - public void testEntityTooLargeWithChunkedContent() { - var sendRequest = httpRequest(); - HttpUtil.setTransferEncodingChunked(sendRequest, true); - channel.writeInbound(encode(sendRequest)); - assertTrue("request should pass", channel.readInbound() instanceof HttpRequest); - - int contentBytesSent = 0; - do { - var thisPartSize = between(1, MAX_CONTENT_LENGTH * 2); - channel.writeInbound(encode(httpContent(thisPartSize))); - contentBytesSent += thisPartSize; - - if (contentBytesSent <= MAX_CONTENT_LENGTH) { - ((HttpContent) channel.readInbound()).release(); - } else { - break; - } - } while (true); - - var resp = (FullHttpResponse) channel.readOutbound(); - assertEquals("should respond with 413", HttpResponseStatus.REQUEST_ENTITY_TOO_LARGE, resp.status()); - assertFalse("should close channel", channel.isOpen()); - resp.release(); - } - -}
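
A quick way to observe the Expect: 100-continue branch that Netty4HttpAggregator#handle now covers for non-aggregated requests is a plain JDK HTTP client probe. The sketch below is illustrative only and not part of the patch; the node address, the /_bulk path (which only reaches the non-aggregating, streaming path when incremental bulk handling is enabled on the node), and the 50mb limit (the value the integration test configures for http.max_content_length; the stock default is 100mb) are assumptions to adjust for the cluster under test.

    import java.net.URI;
    import java.net.http.HttpClient;
    import java.net.http.HttpRequest;
    import java.net.http.HttpResponse;

    public class Expect100ContinueProbe {
        public static void main(String[] args) throws Exception {
            // Hypothetical local node; adjust host, port and path for the cluster under test.
            URI uri = URI.create("http://localhost:9200/_bulk");
            // Declared Content-Length just above the assumed 50mb http.max_content_length.
            byte[] oversized = new byte[51 * 1024 * 1024];

            HttpRequest request = HttpRequest.newBuilder(uri)
                .header("Content-Type", "application/x-ndjson")
                .expectContinue(true) // ask the server to vet the headers before the body is sent
                .POST(HttpRequest.BodyPublishers.ofByteArray(oversized))
                .build();

            // With an oversized Content-Length plus Expect: 100-continue, newContinueResponse()
            // should produce 413 Request Entity Too Large instead of 100 Continue, and the
            // aggregator then releases any content that still arrives on the wire.
            HttpResponse<String> response = HttpClient.newHttpClient()
                .send(request, HttpResponse.BodyHandlers.ofString());
            System.out.println("status = " + response.statusCode());
        }
    }

Without the Expect header, the same oversized body is no longer rejected at the HTTP layer for streamed requests; enforcing a limit becomes the REST handler's responsibility, which is what testOversizedChunkedEncodingNoLimits asserts.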