From 8e2cd7f590381ff77458ec6a44f0d3b83779eb53 Mon Sep 17 00:00:00 2001 From: Ferenc Erdelyi Date: Wed, 17 Sep 2025 13:31:31 +0200 Subject: [PATCH] HADOOP-19695. Add dual-stack/IPv6 Support to HttpServer2 To enable dual-stack or IPv6 support, use InetAddress.getAllByName(hostname) to resolve the IP addresses of a host. When the system property java.net.preferIPv4Stack is set to true, only IPv4 addresses are returned, and any IPv6 addresses are ignored, so no extra check is needed to exclude IPv6. When java.net.preferIPv4Stack is false, both IPv4 and IPv6 addresses may be returned, and any IPv6 addresses will also be added as connectors. To disable IPv4, you need to configure the OS at the system level. --- .../org/apache/hadoop/http/HttpServer2.java | 40 +++++++++++++++---- .../apache/hadoop/http/TestHttpServer.java | 22 ++++++++++ .../datanode/web/DatanodeHttpServer.java | 4 +- 3 files changed, 57 insertions(+), 9 deletions(-) diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java index 3592ecdc9c94c..597bec55ecccc 100644 --- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java +++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java @@ -23,6 +23,7 @@ import java.io.InterruptedIOException; import java.io.PrintStream; import java.net.BindException; +import java.net.InetAddress; import java.net.InetSocketAddress; import java.net.MalformedURLException; import java.net.URI; @@ -549,25 +550,50 @@ public HttpServer2 build() throws IOException { } for (URI ep : endpoints) { - final ServerConnector connector; + // + // To enable dual-stack or IPv6 support, use InetAddress + // .getAllByName(hostname) to resolve the IP addresses of a host. 
+ // When the system property java.net.preferIPv4Stack is set to true, + // only IPv4 addresses are returned, and any IPv6 addresses are + // ignored, so no extra check is needed to exclude IPv6. + // When java.net.preferIPv4Stack is false, both IPv4 and IPv6 + // addresses may be returned, and any IPv6 addresses will also be + // added as connectors. + // To disable IPv4, you need to configure the OS at the system level. + // + InetAddress[] addresses = InetAddress.getAllByName(ep.getHost()); + server = addConnectors( + ep, addresses, server, httpConfig, backlogSize, idleTimeout); + } + server.loadListeners(); + return server; + } + + @VisibleForTesting + HttpServer2 addConnectors( + URI ep, InetAddress[] addresses, HttpServer2 server, + HttpConfiguration httpConfig, int backlogSize, int idleTimeout){ + for (InetAddress addr : addresses) { + ServerConnector connector; String scheme = ep.getScheme(); if (HTTP_SCHEME.equals(scheme)) { - connector = createHttpChannelConnector(server.webServer, - httpConfig); + connector = createHttpChannelConnector( + server.webServer, httpConfig); } else if (HTTPS_SCHEME.equals(scheme)) { - connector = createHttpsChannelConnector(server.webServer, - httpConfig); + connector = createHttpsChannelConnector( + server.webServer, httpConfig); } else { throw new HadoopIllegalArgumentException( "unknown scheme for endpoint:" + ep); } - connector.setHost(ep.getHost()); + LOG.debug("Adding connector to WebServer for address {}", + addr.getHostAddress()); + connector.setHost(addr.getHostAddress()); connector.setPort(ep.getPort() == -1 ? 
0 : ep.getPort()); connector.setAcceptQueueSize(backlogSize); connector.setIdleTimeout(idleTimeout); server.addListener(connector); } - server.loadListeners(); return server; } diff --git a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServer.java b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServer.java index d59aa34a65688..ed57a1241fbd0 100644 --- a/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServer.java +++ b/hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServer.java @@ -30,6 +30,7 @@ import org.apache.hadoop.security.UserGroupInformation; import org.apache.hadoop.security.authorize.AccessControlList; +import org.eclipse.jetty.server.HttpConfiguration; import org.eclipse.jetty.server.ServerConnector; import org.eclipse.jetty.server.handler.StatisticsHandler; import org.eclipse.jetty.util.ajax.JSON; @@ -56,6 +57,7 @@ import java.io.PrintWriter; import java.lang.reflect.Field; import java.net.HttpURLConnection; +import java.net.InetAddress; import java.net.URI; import java.net.URL; import java.util.Arrays; @@ -635,6 +637,26 @@ public void testRequiresAuthorizationAccess() throws Exception { assertFalse(HttpServer2.isInstrumentationAccessAllowed(context, request, response)); } + @Test + public void testAddConnectors() throws Exception { + HttpServer2.Builder builder = new HttpServer2.Builder() + .setName("test").setConf(new Configuration()).setFindPort(false); + URI endpoint = URI.create("http://testaddress.com:8080/my-app"); + InetAddress[] addresses = new InetAddress[2]; + // IPv4 test address + addresses[0] = InetAddress.getByName("192.168.1.100"); + // IPv6 test address + addresses[1] = InetAddress.getByName("fd00::1"); + HttpConfiguration httpConfig = new HttpConfiguration(); + final int backlogSize = 2048; + final int idleTimeout = 1000; + + server = builder.addConnectors( + endpoint, addresses, server, httpConfig, 
backlogSize, idleTimeout); // the expected value is 3: the loopback address and the two addresses assertEquals(3, server.getListeners().toArray().length); } @Test public void testBindAddress() throws Exception { checkBindAddress("localhost", 0, false).stop(); // hang onto this one for a bit more testing diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/DatanodeHttpServer.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/DatanodeHttpServer.java index d2bac9015b7a0..bd8c7378e8a32 100644 --- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/DatanodeHttpServer.java +++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/DatanodeHttpServer.java @@ -79,11 +79,11 @@ public class DatanodeHttpServer implements Closeable { static final Logger LOG = LoggerFactory.getLogger(DatanodeHttpServer.class); // HttpServer threads are only used for the web UI and basic servlets, so // set them to the minimum possible - private static final int HTTP_SELECTOR_THREADS = 1; + private static final int HTTP_SELECTOR_THREADS = 2; private static final int HTTP_ACCEPTOR_THREADS = 1; // Jetty 9.4.x: Adding one more thread to HTTP_MAX_THREADS. private static final int HTTP_MAX_THREADS = - HTTP_SELECTOR_THREADS + HTTP_ACCEPTOR_THREADS + 2; + HTTP_SELECTOR_THREADS + HTTP_ACCEPTOR_THREADS + 5; private final HttpServer2 infoServer; private final EventLoopGroup bossGroup; private final EventLoopGroup workerGroup;