
Commit 0c435e3

KAFKA-18353 Remove zk config control.plane.listener.name (apache#18329)
Reviewers: Chia-Ping Tsai <[email protected]>
1 parent 746ab4d commit 0c435e3
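
For context, the removed property only ever applied to ZooKeeper-mode brokers. Below is a minimal illustrative sketch (not taken from this patch; the CONTROLLER listener name and host are assumed examples) of the configuration it enabled, expressed the way the Kafka tests build broker properties:

    val props = new java.util.Properties()
    // A dedicated listener for requests arriving from the controller, kept separate from inter-broker traffic.
    props.put("listeners", "PLAINTEXT://broker1:9092,CONTROLLER://broker1:9094")
    props.put("advertised.listeners", "PLAINTEXT://broker1:9092,CONTROLLER://broker1:9094")
    props.put("listener.security.protocol.map", "PLAINTEXT:PLAINTEXT,CONTROLLER:PLAINTEXT")
    props.put("inter.broker.listener.name", "PLAINTEXT")
    // Removed by this commit; when it was unset, controller requests were already served by the data-plane.
    props.put("control.plane.listener.name", "CONTROLLER")

With the property gone, controller-to-broker traffic always uses the inter-broker listener, as the ControllerChannelManager and NodeToControllerChannelManager changes below show.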

8 files changed (+8, -184 lines)


core/src/main/scala/kafka/controller/ControllerChannelManager.scala

Lines changed: 2 additions & 2 deletions
@@ -116,8 +116,8 @@ class ControllerChannelManager(controllerEpoch: () => Int,
   private def addNewBroker(broker: Broker): Unit = {
     val messageQueue = new LinkedBlockingQueue[QueueItem]
     debug(s"Controller ${config.brokerId} trying to connect to broker ${broker.id}")
-    val controllerToBrokerListenerName = config.controlPlaneListenerName.getOrElse(config.interBrokerListenerName)
-    val controllerToBrokerSecurityProtocol = config.controlPlaneSecurityProtocol.getOrElse(config.interBrokerSecurityProtocol)
+    val controllerToBrokerListenerName = config.interBrokerListenerName
+    val controllerToBrokerSecurityProtocol = config.interBrokerSecurityProtocol
     val brokerNode = broker.node(controllerToBrokerListenerName)
     val logContext = new LogContext(s"[Controller id=${config.brokerId}, targetBrokerId=${brokerNode.idString}] ")
     val (networkClient, reconfigurableChannelBuilder) = {
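
In effect (an illustrative sketch, not additional patch code), the listener used for controller-to-broker requests in ZooKeeper mode loses its optional override and becomes a direct lookup:

    // Before: prefer control.plane.listener.name when configured, otherwise fall back.
    // val controllerToBrokerListenerName = config.controlPlaneListenerName.getOrElse(config.interBrokerListenerName)
    // After: the inter-broker listener is used unconditionally.
    val controllerToBrokerListenerName = config.interBrokerListenerName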

core/src/main/scala/kafka/network/SocketServer.scala

Lines changed: 4 additions & 95 deletions
@@ -69,13 +69,6 @@ import scala.util.control.ControlThrowable
  * It is possible to configure multiple data-planes by specifying multiple "," separated endpoints for "listeners" in KafkaConfig.
  *   Acceptor has N Processor threads that each have their own selector and read requests from sockets
  *   M Handler threads that handle requests and produce responses back to the processor threads for writing.
- * - control-plane :
- *   - Handles requests from controller. This is optional and can be configured by specifying "control.plane.listener.name".
- *     If not configured, the controller requests are handled by the data-plane.
- *   - The threading model is
- *     1 Acceptor thread that handles new connections
- *     Acceptor has 1 Processor thread that has its own selector and read requests from the socket.
- *     1 Handler thread that handles requests and produces responses back to the processor thread for writing.
  */
 class SocketServer(
   val config: KafkaConfig,
@@ -105,10 +98,6 @@ class SocketServer(
   // data-plane
   private[network] val dataPlaneAcceptors = new ConcurrentHashMap[EndPoint, DataPlaneAcceptor]()
   val dataPlaneRequestChannel = new RequestChannel(maxQueuedRequests, DataPlaneAcceptor.MetricPrefix, time, apiVersionManager.newRequestMetrics)
-  // control-plane
-  private[network] var controlPlaneAcceptorOpt: Option[ControlPlaneAcceptor] = None
-  val controlPlaneRequestChannelOpt: Option[RequestChannel] = config.controlPlaneListenerName.map(_ =>
-    new RequestChannel(20, ControlPlaneAcceptor.MetricPrefix, time, apiVersionManager.newRequestMetrics))
 
   private[this] val nextProcessorId: AtomicInteger = new AtomicInteger(0)
   val connectionQuotas = new ConnectionQuotas(config, time, metrics)
@@ -137,17 +126,7 @@ class SocketServer(
       }.sum / dataPlaneProcessors.size
     }
   })
-  if (config.requiresZookeeper) {
-    metricsGroup.newGauge(s"${ControlPlaneAcceptor.MetricPrefix}NetworkProcessorAvgIdlePercent", () => SocketServer.this.synchronized {
-      val controlPlaneProcessorOpt = controlPlaneAcceptorOpt.map(a => a.processors(0))
-      val ioWaitRatioMetricName = controlPlaneProcessorOpt.map { p =>
-        metrics.metricName("io-wait-ratio", MetricsGroup, p.metricTags)
-      }
-      ioWaitRatioMetricName.map { metricName =>
-        Option(metrics.metric(metricName)).fold(0.0)(m => Math.min(m.metricValue.asInstanceOf[Double], 1.0))
-      }.getOrElse(Double.NaN)
-    })
-  }
+
   metricsGroup.newGauge("MemoryPoolAvailable", () => memoryPool.availableMemory)
   metricsGroup.newGauge("MemoryPoolUsed", () => memoryPool.size() - memoryPool.availableMemory)
   metricsGroup.newGauge(s"${DataPlaneAcceptor.MetricPrefix}ExpiredConnectionsKilledCount", () => SocketServer.this.synchronized {
@@ -159,17 +138,6 @@ class SocketServer(
       Option(metrics.metric(metricName)).fold(0.0)(m => m.metricValue.asInstanceOf[Double])
     }.sum
   })
-  if (config.requiresZookeeper) {
-    metricsGroup.newGauge(s"${ControlPlaneAcceptor.MetricPrefix}ExpiredConnectionsKilledCount", () => SocketServer.this.synchronized {
-      val controlPlaneProcessorOpt = controlPlaneAcceptorOpt.map(a => a.processors(0))
-      val expiredConnectionsKilledCountMetricNames = controlPlaneProcessorOpt.map { p =>
-        metrics.metricName("expired-connections-killed-count", MetricsGroup, p.metricTags)
-      }
-      expiredConnectionsKilledCountMetricNames.map { metricName =>
-        Option(metrics.metric(metricName)).fold(0.0)(m => m.metricValue.asInstanceOf[Double])
-      }.getOrElse(0.0)
-    })
-  }
 
   // Create acceptors and processors for the statically configured endpoints when the
   // SocketServer is constructed. Note that this just opens the ports and creates the data
@@ -178,7 +146,6 @@ class SocketServer(
     if (apiVersionManager.listenerType.equals(ListenerType.CONTROLLER)) {
       config.controllerListeners.foreach(createDataPlaneAcceptorAndProcessors)
     } else {
-      config.controlPlaneListener.foreach(createControlPlaneAcceptorAndProcessor)
       config.dataPlaneListeners.foreach(createDataPlaneAcceptorAndProcessors)
     }
 
@@ -232,16 +199,14 @@ class SocketServer(
     }
 
     info("Enabling request processing.")
-    controlPlaneAcceptorOpt.foreach(chainAcceptorFuture)
     dataPlaneAcceptors.values().forEach(chainAcceptorFuture)
     FutureUtils.chainFuture(CompletableFuture.allOf(authorizerFutures.values.toArray: _*),
       allAuthorizerFuturesComplete)
 
     // Construct a future that will be completed when all Acceptors have been successfully started.
     // Alternately, if any of them fail to start, this future will be completed exceptionally.
-    val allAcceptors = dataPlaneAcceptors.values().asScala.toSeq ++ controlPlaneAcceptorOpt
     val enableFuture = new CompletableFuture[Void]
-    FutureUtils.chainFuture(CompletableFuture.allOf(allAcceptors.map(_.startedFuture).toArray: _*), enableFuture)
+    FutureUtils.chainFuture(CompletableFuture.allOf(dataPlaneAcceptors.values().asScala.toArray.map(_.startedFuture): _*), enableFuture)
     enableFuture
   }
 
@@ -251,36 +216,20 @@ class SocketServer(
     }
     val parsedConfigs = config.valuesFromThisConfigWithPrefixOverride(endpoint.listenerName.configPrefix)
     connectionQuotas.addListener(config, endpoint.listenerName)
-    val isPrivilegedListener = controlPlaneRequestChannelOpt.isEmpty &&
-      config.interBrokerListenerName == endpoint.listenerName
+    val isPrivilegedListener = config.interBrokerListenerName == endpoint.listenerName
     val dataPlaneAcceptor = createDataPlaneAcceptor(endpoint, isPrivilegedListener, dataPlaneRequestChannel)
     config.addReconfigurable(dataPlaneAcceptor)
     dataPlaneAcceptor.configure(parsedConfigs)
     dataPlaneAcceptors.put(endpoint, dataPlaneAcceptor)
     info(s"Created data-plane acceptor and processors for endpoint : ${endpoint.listenerName}")
   }
 
-  private def createControlPlaneAcceptorAndProcessor(endpoint: EndPoint): Unit = synchronized {
-    if (stopped) {
-      throw new RuntimeException("Can't create new control plane acceptor and processor: SocketServer is stopped.")
-    }
-    connectionQuotas.addListener(config, endpoint.listenerName)
-    val controlPlaneAcceptor = createControlPlaneAcceptor(endpoint, controlPlaneRequestChannelOpt.get)
-    controlPlaneAcceptor.addProcessors(1)
-    controlPlaneAcceptorOpt = Some(controlPlaneAcceptor)
-    info(s"Created control-plane acceptor and processor for endpoint : ${endpoint.listenerName}")
-  }
-
   private def endpoints = config.listeners.map(l => l.listenerName -> l).toMap
 
   protected def createDataPlaneAcceptor(endPoint: EndPoint, isPrivilegedListener: Boolean, requestChannel: RequestChannel): DataPlaneAcceptor = {
     new DataPlaneAcceptor(this, endPoint, config, nodeId, connectionQuotas, time, isPrivilegedListener, requestChannel, metrics, credentialProvider, logContext, memoryPool, apiVersionManager)
   }
 
-  private def createControlPlaneAcceptor(endPoint: EndPoint, requestChannel: RequestChannel): ControlPlaneAcceptor = {
-    new ControlPlaneAcceptor(this, endPoint, config, nodeId, connectionQuotas, time, requestChannel, metrics, credentialProvider, logContext, memoryPool, apiVersionManager)
-  }
-
   /**
    * Stop processing requests and new connections.
   */
@@ -289,11 +238,8 @@ class SocketServer(
     stopped = true
     info("Stopping socket server request processors")
     dataPlaneAcceptors.asScala.values.foreach(_.beginShutdown())
-    controlPlaneAcceptorOpt.foreach(_.beginShutdown())
     dataPlaneAcceptors.asScala.values.foreach(_.close())
-    controlPlaneAcceptorOpt.foreach(_.close())
     dataPlaneRequestChannel.clear()
-    controlPlaneRequestChannelOpt.foreach(_.clear())
     info("Stopped socket server request processors")
   }
 }
@@ -309,7 +255,6 @@ class SocketServer(
   this.synchronized {
     stopProcessingRequests()
     dataPlaneRequestChannel.shutdown()
-    controlPlaneRequestChannelOpt.foreach(_.shutdown())
     connectionQuotas.close()
   }
   info("Shutdown completed")
@@ -321,7 +266,7 @@ class SocketServer(
     if (acceptor != null) {
       acceptor.localPort
     } else {
-      controlPlaneAcceptorOpt.map(_.localPort).getOrElse(throw new KafkaException("Could not find listenerName : " + listenerName + " in data-plane or control-plane"))
+      throw new KafkaException("Could not find listenerName : " + listenerName + " in data-plane.")
     }
   } catch {
     case e: Exception =>
@@ -528,42 +473,6 @@ class DataPlaneAcceptor(socketServer: SocketServer,
   }
 }
 
-object ControlPlaneAcceptor {
-  val ThreadPrefix = "control-plane"
-  val MetricPrefix = "ControlPlane"
-}
-
-class ControlPlaneAcceptor(socketServer: SocketServer,
-                           endPoint: EndPoint,
-                           config: KafkaConfig,
-                           nodeId: Int,
-                           connectionQuotas: ConnectionQuotas,
-                           time: Time,
-                           requestChannel: RequestChannel,
-                           metrics: Metrics,
-                           credentialProvider: CredentialProvider,
-                           logContext: LogContext,
-                           memoryPool: MemoryPool,
-                           apiVersionManager: ApiVersionManager)
-  extends Acceptor(socketServer,
-    endPoint,
-    config,
-    nodeId,
-    connectionQuotas,
-    time,
-    true,
-    requestChannel,
-    metrics,
-    credentialProvider,
-    logContext,
-    memoryPool,
-    apiVersionManager) {
-
-  override def metricPrefix(): String = ControlPlaneAcceptor.MetricPrefix
-  override def threadPrefix(): String = ControlPlaneAcceptor.ThreadPrefix
-
-}
-
 /**
  * Thread that accepts and configures new connections. There is one of these per endpoint.
  */

core/src/main/scala/kafka/server/KafkaConfig.scala

Lines changed: 0 additions & 40 deletions
@@ -489,8 +489,6 @@ class KafkaConfig private(doLog: Boolean, val props: util.Map[_, _])
 
   def interBrokerListenerName = getInterBrokerListenerNameAndSecurityProtocol._1
   def interBrokerSecurityProtocol = getInterBrokerListenerNameAndSecurityProtocol._2
-  def controlPlaneListenerName = getControlPlaneListenerNameAndSecurityProtocol.map { case (listenerName, _) => listenerName }
-  def controlPlaneSecurityProtocol = getControlPlaneListenerNameAndSecurityProtocol.map { case (_, securityProtocol) => securityProtocol }
   def saslMechanismInterBrokerProtocol = getString(BrokerSecurityConfigs.SASL_MECHANISM_INTER_BROKER_PROTOCOL_CONFIG)
   val saslInterBrokerHandshakeRequestEnable = interBrokerProtocolVersion.isSaslInterBrokerHandshakeRequestEnabled
 
@@ -567,16 +565,9 @@ class KafkaConfig private(doLog: Boolean, val props: util.Map[_, _])
 
   def saslMechanismControllerProtocol: String = getString(KRaftConfigs.SASL_MECHANISM_CONTROLLER_PROTOCOL_CONFIG)
 
-  def controlPlaneListener: Option[EndPoint] = {
-    controlPlaneListenerName.map { listenerName =>
-      listeners.filter(endpoint => endpoint.listenerName.value() == listenerName.value()).head
-    }
-  }
-
   def dataPlaneListeners: Seq[EndPoint] = {
     listeners.filterNot { listener =>
       val name = listener.listenerName.value()
-      name.equals(getString(SocketServerConfigs.CONTROL_PLANE_LISTENER_NAME_CONFIG)) ||
       controllerListenerNames.contains(name)
     }
   }
@@ -625,19 +616,6 @@ class KafkaConfig private(doLog: Boolean, val props: util.Map[_, _])
     }
   }
 
-  private def getControlPlaneListenerNameAndSecurityProtocol: Option[(ListenerName, SecurityProtocol)] = {
-    Option(getString(SocketServerConfigs.CONTROL_PLANE_LISTENER_NAME_CONFIG)) match {
-      case Some(name) =>
-        val listenerName = ListenerName.normalised(name)
-        val securityProtocol = effectiveListenerSecurityProtocolMap.getOrElse(listenerName,
-          throw new ConfigException(s"Listener with ${listenerName.value} defined in " +
-            s"${SocketServerConfigs.CONTROL_PLANE_LISTENER_NAME_CONFIG} not found in ${SocketServerConfigs.LISTENER_SECURITY_PROTOCOL_MAP_CONFIG}."))
-        Some(listenerName, securityProtocol)
-
-      case None => None
-    }
-  }
-
   private def getSecurityProtocol(protocolName: String, configName: String): SecurityProtocol = {
     try SecurityProtocol.forName(protocolName)
     catch {
@@ -721,10 +699,6 @@ class KafkaConfig private(doLog: Boolean, val props: util.Map[_, _])
     }
   }
 
-  def validateControlPlaneListenerEmptyForKRaft(): Unit = {
-    require(controlPlaneListenerName.isEmpty,
-      s"${SocketServerConfigs.CONTROL_PLANE_LISTENER_NAME_CONFIG} is not supported in KRaft mode.")
-  }
   def validateControllerQuorumVotersMustContainNodeIdForKRaftController(): Unit = {
     require(voterIds.isEmpty || voterIds.contains(nodeId),
       s"If ${KRaftConfigs.PROCESS_ROLES_CONFIG} contains the 'controller' role, the node id $nodeId must be included in the set of voters ${QuorumConfig.QUORUM_VOTERS_CONFIG}=${voterIds.asScala.toSet}")
@@ -746,7 +720,6 @@ class KafkaConfig private(doLog: Boolean, val props: util.Map[_, _])
     if (processRoles == Set(ProcessRole.BrokerRole)) {
       // KRaft broker-only
       validateQuorumVotersAndQuorumBootstrapServerForKRaft()
-      validateControlPlaneListenerEmptyForKRaft()
       // nodeId must not appear in controller.quorum.voters
       require(!voterIds.contains(nodeId),
         s"If ${KRaftConfigs.PROCESS_ROLES_CONFIG} contains just the 'broker' role, the node id $nodeId must not be included in the set of voters ${QuorumConfig.QUORUM_VOTERS_CONFIG}=${voterIds.asScala.toSet}")
@@ -771,7 +744,6 @@ class KafkaConfig private(doLog: Boolean, val props: util.Map[_, _])
     } else if (processRoles == Set(ProcessRole.ControllerRole)) {
       // KRaft controller-only
       validateQuorumVotersAndQuorumBootstrapServerForKRaft()
-      validateControlPlaneListenerEmptyForKRaft()
       // listeners should only contain listeners also enumerated in the controller listener
       require(
         effectiveAdvertisedControllerListeners.size == listeners.size,
@@ -790,7 +762,6 @@ class KafkaConfig private(doLog: Boolean, val props: util.Map[_, _])
     } else if (isKRaftCombinedMode) {
      // KRaft combined broker and controller
       validateQuorumVotersAndQuorumBootstrapServerForKRaft()
-      validateControlPlaneListenerEmptyForKRaft()
       validateControllerQuorumVotersMustContainNodeIdForKRaftController()
       validateAdvertisedControllerListenersNonEmptyForKRaftController()
       validateControllerListenerNamesMustAppearInListenersForKRaftController()
@@ -822,17 +793,6 @@ class KafkaConfig private(doLog: Boolean, val props: util.Map[_, _])
       s"${SocketServerConfigs.ADVERTISED_LISTENERS_CONFIG} cannot use the nonroutable meta-address 0.0.0.0. "+
       s"Use a routable IP address.")
 
-    // validate control.plane.listener.name config
-    if (controlPlaneListenerName.isDefined) {
-      require(advertisedBrokerListenerNames.contains(controlPlaneListenerName.get),
-        s"${SocketServerConfigs.CONTROL_PLANE_LISTENER_NAME_CONFIG} must be a listener name defined in ${SocketServerConfigs.ADVERTISED_LISTENERS_CONFIG}. " +
-        s"The valid options based on currently configured listeners are ${advertisedBrokerListenerNames.map(_.value).mkString(",")}")
-      // controlPlaneListenerName should be different from interBrokerListenerName
-      require(!controlPlaneListenerName.get.value().equals(interBrokerListenerName.value()),
-        s"${SocketServerConfigs.CONTROL_PLANE_LISTENER_NAME_CONFIG}, when defined, should have a different value from the inter broker listener name. " +
-        s"Currently they both have the value ${controlPlaneListenerName.get}")
-    }
-
     if (groupCoordinatorConfig.offsetTopicCompressionType == CompressionType.ZSTD)
       require(interBrokerProtocolVersion.highestSupportedRecordVersion().value >= IBP_2_1_IV0.highestSupportedRecordVersion().value,
         "offsets.topic.compression.codec zstd can only be used when inter.broker.protocol.version " +

core/src/main/scala/kafka/server/NodeToControllerChannelManager.scala

Lines changed: 2 additions & 2 deletions
@@ -58,8 +58,8 @@ class MetadataCacheControllerNodeProvider(
   val quorumControllerNodeProvider: () => Option[ControllerInformation]
 ) extends ControllerNodeProvider {
 
-  private val zkControllerListenerName = config.controlPlaneListenerName.getOrElse(config.interBrokerListenerName)
-  private val zkControllerSecurityProtocol = config.controlPlaneSecurityProtocol.getOrElse(config.interBrokerSecurityProtocol)
+  private val zkControllerListenerName = config.interBrokerListenerName
+  private val zkControllerSecurityProtocol = config.interBrokerSecurityProtocol
   private val zkControllerSaslMechanism = config.saslMechanismInterBrokerProtocol
 
   val emptyZkControllerInfo = ControllerInformation(

core/src/test/scala/unit/kafka/network/SocketServerTest.scala

Lines changed: 0 additions & 3 deletions
@@ -104,7 +104,6 @@ class SocketServerTest {
     logLevelToRestore = kafkaLogger.getLevel
     Configurator.setLevel(kafkaLogger.getName, Level.TRACE)
 
-    assertTrue(server.controlPlaneRequestChannelOpt.isEmpty)
   }
 
   @AfterEach
@@ -1542,8 +1541,6 @@ class SocketServerTest {
     val testableServer = new TestableSocketServer(time = time)
     testableServer.enableRequestProcessing(Map.empty).get(1, TimeUnit.MINUTES)
 
-    assertTrue(testableServer.controlPlaneRequestChannelOpt.isEmpty)
-
     val proxyServer = new ProxyServer(testableServer)
     try {
       val testableSelector = testableServer.testableSelector
core/src/test/scala/unit/kafka/server/KafkaConfigTest.scala

Lines changed: 0 additions & 17 deletions
@@ -275,23 +275,6 @@ class KafkaConfigTest {
     assertEquals(SecurityProtocol.SASL_SSL, controllerEndpoint.securityProtocol)
   }
 
-  @Test
-  def testControlPlaneListenerNameNotAllowedWithKRaft(): Unit = {
-    val props = new Properties()
-    props.setProperty(KRaftConfigs.PROCESS_ROLES_CONFIG, "broker,controller")
-    props.setProperty(SocketServerConfigs.LISTENERS_CONFIG, "PLAINTEXT://localhost:9092,SSL://localhost:9093")
-    props.setProperty(KRaftConfigs.CONTROLLER_LISTENER_NAMES_CONFIG, "SSL")
-    props.setProperty(KRaftConfigs.NODE_ID_CONFIG, "2")
-    props.setProperty(QuorumConfig.QUORUM_VOTERS_CONFIG, "2@localhost:9093")
-    props.setProperty(SocketServerConfigs.CONTROL_PLANE_LISTENER_NAME_CONFIG, "SSL")
-
-    assertFalse(isValidKafkaConfig(props))
-    assertBadConfigContainingMessage(props, "control.plane.listener.name is not supported in KRaft mode.")
-
-    props.remove(SocketServerConfigs.CONTROL_PLANE_LISTENER_NAME_CONFIG)
-    KafkaConfig.fromProps(props)
-  }
-
   @Test
   def testControllerListenerDefinedForKRaftController(): Unit = {
     val props = new Properties()

core/src/test/scala/unit/kafka/server/SaslApiVersionsRequestTest.scala

Lines changed: 0 additions & 1 deletion
@@ -51,7 +51,6 @@ object SaslApiVersionsRequestTest {
 
     // Configure control plane listener to make sure we have separate listeners for testing.
     val serverProperties = new java.util.HashMap[String, String]()
-    serverProperties.put(SocketServerConfigs.CONTROL_PLANE_LISTENER_NAME_CONFIG, controlPlaneListenerName)
     serverProperties.put(SocketServerConfigs.LISTENER_SECURITY_PROTOCOL_MAP_CONFIG, s"$controlPlaneListenerName:$securityProtocol,$securityProtocol:$securityProtocol")
     serverProperties.put("listeners", s"$securityProtocol://localhost:0,$controlPlaneListenerName://localhost:0")
     serverProperties.put(SocketServerConfigs.ADVERTISED_LISTENERS_CONFIG, s"$securityProtocol://localhost:0,$controlPlaneListenerName://localhost:0")
