
Commit e14a395

HADOOP-19695. Add dual-stack/IPv6 support to HttpServer2

To enable dual-stack or IPv6 support, HttpServer2 now resolves each endpoint's host with InetAddress.getAllByName(hostname) and adds a connector for every address returned. When the system property java.net.preferIPv4Stack is set to true, only IPv4 addresses are returned and IPv6 addresses are ignored, so no extra check is needed to exclude IPv6. When java.net.preferIPv4Stack is false, both IPv4 and IPv6 addresses may be returned, and the IPv6 addresses are added as connectors as well. Disabling IPv4 entirely requires configuration at the OS level.
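The resolution behavior described above can be checked with a few lines of standalone Java (an illustrative sketch, not part of the patch):

    import java.net.InetAddress;

    public class DualStackProbe {
      public static void main(String[] args) throws Exception {
        String host = args.length > 0 ? args[0] : "localhost";
        // With -Djava.net.preferIPv4Stack=true only IPv4 addresses are
        // returned; without it, both IPv4 and IPv6 addresses may appear.
        for (InetAddress addr : InetAddress.getAllByName(host)) {
          System.out.println(addr.getHostAddress());
        }
      }
    }

Running it with and without -Djava.net.preferIPv4Stack=true on a dual-stack machine shows the difference.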
1 parent 01c9d4b commit e14a395

3 files changed, +77 -28 lines


hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/http/HttpServer2.java

Lines changed: 33 additions & 7 deletions

@@ -23,6 +23,7 @@
 import java.io.InterruptedIOException;
 import java.io.PrintStream;
 import java.net.BindException;
+import java.net.InetAddress;
 import java.net.InetSocketAddress;
 import java.net.MalformedURLException;
 import java.net.URI;
@@ -549,25 +550,50 @@ public HttpServer2 build() throws IOException {
       }

       for (URI ep : endpoints) {
-        final ServerConnector connector;
+        //
+        // To enable dual-stack or IPv6 support, use InetAddress
+        // .getAllByName(hostname) to resolve the IP addresses of a host.
+        // When the system property java.net.preferIPv4Stack is set to true,
+        // only IPv4 addresses are returned, and any IPv6 addresses are
+        // ignored, so no extra check is needed to exclude IPv6.
+        // When java.net.preferIPv4Stack is false, both IPv4 and IPv6
+        // addresses may be returned, and any IPv6 addresses will also be
+        // added as connectors.
+        // To disable IPv4, you need to configure the OS at the system level.
+        //
+        InetAddress[] addresses = InetAddress.getAllByName(ep.getHost());
+        server = addConnectors(
+            ep, addresses, server, httpConfig, backlogSize, idleTimeout);
+      }
+      server.loadListeners();
+      return server;
+    }
+
+    @VisibleForTesting
+    HttpServer2 addConnectors(
+        URI ep, InetAddress[] addresses, HttpServer2 server,
+        HttpConfiguration httpConfig, int backlogSize, int idleTimeout){
+      for (InetAddress addr : addresses) {
+        ServerConnector connector;
         String scheme = ep.getScheme();
         if (HTTP_SCHEME.equals(scheme)) {
-          connector = createHttpChannelConnector(server.webServer,
-              httpConfig);
+          connector = createHttpChannelConnector(
+              server.webServer, httpConfig);
         } else if (HTTPS_SCHEME.equals(scheme)) {
-          connector = createHttpsChannelConnector(server.webServer,
-              httpConfig);
+          connector = createHttpsChannelConnector(
+              server.webServer, httpConfig);
         } else {
           throw new HadoopIllegalArgumentException(
               "unknown scheme for endpoint:" + ep);
         }
-        connector.setHost(ep.getHost());
+        LOG.debug("Adding connector to WebServer for address {}",
+            addr.getHostAddress());
+        connector.setHost(addr.getHostAddress());
         connector.setPort(ep.getPort() == -1 ? 0 : ep.getPort());
         connector.setAcceptQueueSize(backlogSize);
         connector.setIdleTimeout(idleTimeout);
         server.addListener(connector);
       }
-      server.loadListeners();
       return server;
     }
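With this change, build() creates one connector per address the endpoint's host resolves to, rather than one per endpoint. A minimal usage sketch (the hostname is illustrative and assumed to publish both A and AAAA records; not part of the patch):

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.http.HttpServer2;

    public class DualStackServerDemo {
      public static void main(String[] args) throws Exception {
        // "dualstack.example.com" is a hypothetical host with both an
        // A (IPv4) and an AAAA (IPv6) record; build() now adds one
        // Jetty connector per address the host resolves to.
        HttpServer2 server = new HttpServer2.Builder()
            .setName("dualstack-demo")
            .setConf(new Configuration())
            .addEndpoint(URI.create("http://dualstack.example.com:9870"))
            .build();
        server.start();
      }
    }

On a dual-stack host this yields two listeners for the single endpoint, one bound to the IPv4 address and one to the IPv6 address.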

hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/http/TestHttpServer.java

Lines changed: 22 additions & 0 deletions

@@ -30,6 +30,7 @@
 import org.apache.hadoop.security.UserGroupInformation;
 import org.apache.hadoop.security.authorize.AccessControlList;

+import org.eclipse.jetty.server.HttpConfiguration;
 import org.eclipse.jetty.server.ServerConnector;
 import org.eclipse.jetty.server.handler.StatisticsHandler;
 import org.eclipse.jetty.util.ajax.JSON;
@@ -56,6 +57,7 @@
 import java.io.PrintWriter;
 import java.lang.reflect.Field;
 import java.net.HttpURLConnection;
+import java.net.InetAddress;
 import java.net.URI;
 import java.net.URL;
 import java.util.Arrays;
@@ -635,6 +637,26 @@ public void testRequiresAuthorizationAccess() throws Exception {
     assertFalse(HttpServer2.isInstrumentationAccessAllowed(context, request, response));
   }

+  @Test
+  public void testAddConnectors() throws Exception {
+    HttpServer2.Builder builder = new HttpServer2.Builder()
+        .setName("test").setConf(new Configuration()).setFindPort(false);
+    URI endpoint = URI.create("http://testaddress.com:8080/my-app");
+    InetAddress[] addresses = new InetAddress[2];
+    // IPv4 test address
+    addresses[0] = InetAddress.getByName("192.168.1.100");
+    // IPv6 test address
+    addresses[1] = InetAddress.getByName("fd00::1");
+    HttpConfiguration httpConfig = new HttpConfiguration();
+    final int backlogSize = 2048;
+    final int idleTimeout = 1000;
+
+    server = builder.addConnectors(
+        endpoint, addresses, server, httpConfig, backlogSize, idleTimeout);
+    //the expected value is 3: the loopback address and the two addresses
+    assertEquals(server.getListeners().toArray().length, 3);
+  }
+
   @Test public void testBindAddress() throws Exception {
     checkBindAddress("localhost", 0, false).stop();
     // hang onto this one for a bit more testing
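A side note on the fixture (an observation, not part of the patch): both test addresses are IP literals, which InetAddress parses locally without consulting DNS, so the test does not depend on name resolution:

    import java.net.InetAddress;

    public class LiteralAddressDemo {
      public static void main(String[] args) throws Exception {
        // Literal IP strings are only validated and parsed; no DNS lookup runs.
        InetAddress v4 = InetAddress.getByName("192.168.1.100");
        InetAddress v6 = InetAddress.getByName("fd00::1");
        System.out.println(v4.getHostAddress()); // 192.168.1.100
        System.out.println(v6.getHostAddress()); // fd00:0:0:0:0:0:0:1
      }
    }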

hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/web/DatanodeHttpServer.java

Lines changed: 22 additions & 21 deletions

@@ -17,6 +17,23 @@
  */
 package org.apache.hadoop.hdfs.server.datanode.web;

+import java.io.Closeable;
+import java.io.IOException;
+import java.lang.reflect.Constructor;
+import java.lang.reflect.InvocationTargetException;
+import java.lang.reflect.Method;
+import java.net.BindException;
+import java.net.InetSocketAddress;
+import java.net.SocketAddress;
+import java.net.SocketException;
+import java.net.URI;
+import java.nio.channels.ServerSocketChannel;
+import java.security.GeneralSecurityException;
+import java.util.Enumeration;
+import java.util.Map;
+import javax.servlet.FilterConfig;
+import javax.servlet.ServletContext;
+
 import io.netty.bootstrap.ServerBootstrap;
 import io.netty.channel.ChannelFactory;
 import io.netty.channel.ChannelFuture;
@@ -25,14 +42,17 @@
 import io.netty.channel.ChannelOption;
 import io.netty.channel.ChannelPipeline;
 import io.netty.channel.EventLoopGroup;
+import io.netty.channel.WriteBufferWaterMark;
 import io.netty.channel.nio.NioEventLoopGroup;
 import io.netty.channel.socket.SocketChannel;
 import io.netty.channel.socket.nio.NioServerSocketChannel;
-import io.netty.channel.WriteBufferWaterMark;
 import io.netty.handler.codec.http.HttpRequestDecoder;
 import io.netty.handler.codec.http.HttpResponseEncoder;
 import io.netty.handler.ssl.SslHandler;
 import io.netty.handler.stream.ChunkedWriteHandler;
+import org.slf4j.Logger;
+import org.slf4j.LoggerFactory;
+
 import org.apache.hadoop.conf.Configuration;
 import org.apache.hadoop.fs.permission.FsPermission;
 import org.apache.hadoop.hdfs.DFSConfigKeys;
@@ -46,25 +66,6 @@
 import org.apache.hadoop.net.NetUtils;
 import org.apache.hadoop.security.authorize.AccessControlList;
 import org.apache.hadoop.security.ssl.SSLFactory;
-import org.slf4j.Logger;
-import org.slf4j.LoggerFactory;
-
-import javax.servlet.FilterConfig;
-import javax.servlet.ServletContext;
-import java.io.Closeable;
-import java.io.IOException;
-import java.lang.reflect.Constructor;
-import java.lang.reflect.InvocationTargetException;
-import java.lang.reflect.Method;
-import java.net.BindException;
-import java.net.InetSocketAddress;
-import java.net.SocketAddress;
-import java.net.SocketException;
-import java.net.URI;
-import java.nio.channels.ServerSocketChannel;
-import java.security.GeneralSecurityException;
-import java.util.Enumeration;
-import java.util.Map;

 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_ADMIN;
 import static org.apache.hadoop.hdfs.DFSConfigKeys.DFS_DATANODE_HTTPS_ADDRESS_DEFAULT;
@@ -83,7 +84,7 @@ public class DatanodeHttpServer implements Closeable {
   private static final int HTTP_ACCEPTOR_THREADS = 1;
   // Jetty 9.4.x: Adding one more thread to HTTP_MAX_THREADS.
   private static final int HTTP_MAX_THREADS =
-      HTTP_SELECTOR_THREADS + HTTP_ACCEPTOR_THREADS + 2;
+      HTTP_SELECTOR_THREADS + HTTP_ACCEPTOR_THREADS + 8;
   private final HttpServer2 infoServer;
   private final EventLoopGroup bossGroup;
   private final EventLoopGroup workerGroup;
