@@ -17,13 +17,15 @@
 
 package org.apache.spark
 
+import java.util.{Map => JMap}
 import java.util.concurrent.ConcurrentHashMap
 
 import scala.collection.JavaConverters._
 import scala.collection.mutable.LinkedHashSet
 
 import org.apache.avro.{Schema, SchemaNormalization}
 
+import org.apache.spark.deploy.history.config._
 import org.apache.spark.internal.Logging
 import org.apache.spark.internal.config._
 import org.apache.spark.serializer.KryoSerializer
@@ -370,7 +372,7 @@ class SparkConf(loadDefaults: Boolean) extends Cloneable with Logging with Seria
 
   /** Get a parameter as an Option */
   def getOption(key: String): Option[String] = {
-    Option(settings.get(key)).orElse(getDeprecatedConfig(key, this))
+    Option(settings.get(key)).orElse(getDeprecatedConfig(key, settings))
   }
 
   /** Get an optional value, applying variable substitution. */
@@ -622,7 +624,7 @@ private[spark] object SparkConf extends Logging {
       AlternateConfig("spark.history.updateInterval", "1.3")),
     "spark.history.fs.cleaner.interval" -> Seq(
       AlternateConfig("spark.history.fs.cleaner.interval.seconds", "1.4")),
-    "spark.history.fs.cleaner.maxAge" -> Seq(
+    MAX_LOG_AGE_S.key -> Seq(
       AlternateConfig("spark.history.fs.cleaner.maxAge.seconds", "1.4")),
     "spark.yarn.am.waitTime" -> Seq(
       AlternateConfig("spark.yarn.applicationMaster.waitTries", "1.3",
@@ -663,8 +665,10 @@ private[spark] object SparkConf extends Logging {
       AlternateConfig("spark.yarn.jar", "2.0")),
     "spark.yarn.access.hadoopFileSystems" -> Seq(
       AlternateConfig("spark.yarn.access.namenodes", "2.2")),
-    "spark.maxRemoteBlockSizeFetchToMem" -> Seq(
-      AlternateConfig("spark.reducer.maxReqSizeShuffleToMem", "2.3"))
+    MAX_REMOTE_BLOCK_SIZE_FETCH_TO_MEM.key -> Seq(
+      AlternateConfig("spark.reducer.maxReqSizeShuffleToMem", "2.3")),
+    LISTENER_BUS_EVENT_QUEUE_CAPACITY.key -> Seq(
+      AlternateConfig("spark.scheduler.listenerbus.eventqueue.size", "2.3"))
   )
 
   /**
@@ -704,9 +708,9 @@ private[spark] object SparkConf extends Logging {
    * Looks for available deprecated keys for the given config option, and return the first
    * value available.
    */
-  def getDeprecatedConfig(key: String, conf: SparkConf): Option[String] = {
+  def getDeprecatedConfig(key: String, conf: JMap[String, String]): Option[String] = {
     configsWithAlternatives.get(key).flatMap { alts =>
-      alts.collectFirst { case alt if conf.contains(alt.key) =>
+      alts.collectFirst { case alt if conf.containsKey(alt.key) =>
         val value = conf.get(alt.key)
         if (alt.translation != null) alt.translation(value) else value
       }
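Since getDeprecatedConfig now takes a plain JMap[String, String], it operates on whatever backing map it is handed (here the ConcurrentHashMap behind settings), and the alt.translation hook still rewrites legacy values on the way out. A sketch of that translation path, assuming the stock configsWithAlternatives table, where the pre-1.4 Kryo buffer key (not shown in this diff) carries a megabytes-to-kilobytes translation:

import org.apache.spark.SparkConf

// Set the legacy key, whose value was specified in megabytes.
val conf = new SparkConf(loadDefaults = false)
  .set("spark.kryoserializer.buffer.mb", "0.064")

// Reading the current key goes through alt.translation, which converts
// the legacy megabyte value into the new unit-suffixed form.
assert(conf.getOption("spark.kryoserializer.buffer") == Some("64k"))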