Skip to content

Commit 8204dc1

Browse files
wangjiaochunsrowen
authored and committed
[SPARK-27141][YARN] Use ConfigEntry for hardcoded configs for Yarn
## What changes were proposed in this pull request? There is some hardcode configs in code, I think it best to modify。 ## How was this patch tested? Existing tests Closes apache#24103 from wangjiaochun/yarnHardCode. Authored-by: 10087686 <[email protected]> Signed-off-by: Sean Owen <[email protected]>
1 parent 174531c commit 8204dc1

File tree

6 files changed

+25
-22
lines changed

6 files changed

+25
-22
lines changed

resource-managers/yarn/src/main/scala/org/apache/spark/deploy/yarn/ApplicationMaster.scala

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -296,8 +296,8 @@ private[spark] class ApplicationMaster(
296296
Option(appAttemptId.getApplicationId.toString), None).setCurrentContext()
297297

298298
val driverRef = clientRpcEnv.setupEndpointRef(
299-
RpcAddress(sparkConf.get("spark.driver.host"),
300-
sparkConf.get("spark.driver.port").toInt),
299+
RpcAddress(sparkConf.get(DRIVER_HOST_ADDRESS),
300+
sparkConf.get(DRIVER_PORT)),
301301
YarnSchedulerBackend.ENDPOINT_NAME)
302302
// The client-mode AM doesn't listen for incoming connections, so report an invalid port.
303303
registerAM(Utils.localHostName, -1, sparkConf,

resource-managers/yarn/src/test/scala/org/apache/spark/deploy/yarn/ApplicationMasterSuite.scala

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -20,6 +20,7 @@ package org.apache.spark.deploy.yarn
2020
import org.apache.hadoop.yarn.conf.YarnConfiguration
2121

2222
import org.apache.spark.{SparkConf, SparkFunSuite}
23+
import org.apache.spark.deploy.yarn.config._
2324

2425
class ApplicationMasterSuite extends SparkFunSuite {
2526

@@ -28,7 +29,7 @@ class ApplicationMasterSuite extends SparkFunSuite {
2829
val port = 18080
2930
val sparkConf = new SparkConf()
3031

31-
sparkConf.set("spark.yarn.historyServer.address",
32+
sparkConf.set(HISTORY_SERVER_ADDRESS,
3233
"http://${hadoopconf-yarn.resourcemanager.hostname}:${spark.history.ui.port}")
3334
val yarnConf = new YarnConfiguration()
3435
yarnConf.set("yarn.resourcemanager.hostname", host)

resource-managers/yarn/src/test/scala/org/apache/spark/deploy/yarn/BaseYarnClusterSuite.scala

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -149,7 +149,7 @@ abstract class BaseYarnClusterSuite
149149
launcher.setSparkHome(sys.props("spark.test.home"))
150150
.setMaster("yarn")
151151
.setDeployMode(deployMode)
152-
.setConf("spark.executor.instances", "1")
152+
.setConf(EXECUTOR_INSTANCES.key, "1")
153153
.setPropertiesFile(propsFile)
154154
.addAppArgs(appArgs.toArray: _*)
155155

resource-managers/yarn/src/test/scala/org/apache/spark/deploy/yarn/YarnAllocatorSuite.scala

Lines changed: 9 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -33,6 +33,7 @@ import org.scalatest.{BeforeAndAfterEach, Matchers}
3333
import org.apache.spark.{SecurityManager, SparkConf, SparkFunSuite}
3434
import org.apache.spark.deploy.yarn.YarnSparkHadoopUtil._
3535
import org.apache.spark.deploy.yarn.config._
36+
import org.apache.spark.internal.config._
3637
import org.apache.spark.rpc.RpcEndpointRef
3738
import org.apache.spark.scheduler.SplitInfo
3839
import org.apache.spark.util.ManualClock
@@ -48,8 +49,8 @@ class MockResolver extends SparkRackResolver {
4849
class YarnAllocatorSuite extends SparkFunSuite with Matchers with BeforeAndAfterEach {
4950
val conf = new YarnConfiguration()
5051
val sparkConf = new SparkConf()
51-
sparkConf.set("spark.driver.host", "localhost")
52-
sparkConf.set("spark.driver.port", "4040")
52+
sparkConf.set(DRIVER_HOST_ADDRESS, "localhost")
53+
sparkConf.set(DRIVER_PORT, 4040)
5354
sparkConf.set(SPARK_JARS, Seq("notarealjar.jar"))
5455
sparkConf.set("spark.yarn.launchContainers", "false")
5556

@@ -95,9 +96,9 @@ class YarnAllocatorSuite extends SparkFunSuite with Matchers with BeforeAndAfter
9596
"--class", "SomeClass")
9697
val sparkConfClone = sparkConf.clone()
9798
sparkConfClone
98-
.set("spark.executor.instances", maxExecutors.toString)
99-
.set("spark.executor.cores", "5")
100-
.set("spark.executor.memory", "2048")
99+
.set(EXECUTOR_INSTANCES, maxExecutors)
100+
.set(EXECUTOR_CORES, 5)
101+
.set(EXECUTOR_MEMORY, 2048L)
101102

102103
for ((name, value) <- additionalConfigs) {
103104
sparkConfClone.set(name, value)
@@ -394,7 +395,7 @@ class YarnAllocatorSuite extends SparkFunSuite with Matchers with BeforeAndAfter
394395
}
395396

396397
test("window based failure executor counting") {
397-
sparkConf.set("spark.yarn.executor.failuresValidityInterval", "100s")
398+
sparkConf.set(EXECUTOR_ATTEMPT_FAILURE_VALIDITY_INTERVAL_MS, 100 * 1000L)
398399
val handler = createAllocator(4)
399400

400401
handler.updateResourceRequests()
@@ -444,8 +445,8 @@ class YarnAllocatorSuite extends SparkFunSuite with Matchers with BeforeAndAfter
444445
maxExecutors,
445446
rmClientSpy,
446447
Map(
447-
"spark.yarn.blacklist.executor.launch.blacklisting.enabled" -> "true",
448-
"spark.blacklist.application.maxFailedExecutorsPerNode" -> "0"))
448+
YARN_EXECUTOR_LAUNCH_BLACKLIST_ENABLED.key -> "true",
449+
MAX_FAILED_EXEC_PER_NODE.key -> "0"))
449450
handler.updateResourceRequests()
450451

451452
val hosts = (0 until maxExecutors).map(i => s"host$i")

resource-managers/yarn/src/test/scala/org/apache/spark/deploy/yarn/YarnClusterSuite.scala

Lines changed: 9 additions & 9 deletions
Original file line numberDiff line numberDiff line change
@@ -98,10 +98,10 @@ class YarnClusterSuite extends BaseYarnClusterSuite {
9898
test("run Spark in yarn-client mode with different configurations, ensuring redaction") {
9999
testBasicYarnApp(true,
100100
Map(
101-
"spark.driver.memory" -> "512m",
102-
"spark.executor.cores" -> "1",
103-
"spark.executor.memory" -> "512m",
104-
"spark.executor.instances" -> "2",
101+
DRIVER_MEMORY.key -> "512m",
102+
EXECUTOR_CORES.key -> "1",
103+
EXECUTOR_MEMORY.key -> "512m",
104+
EXECUTOR_INSTANCES.key -> "2",
105105
// Sending some sensitive information, which we'll make sure gets redacted
106106
"spark.executorEnv.HADOOP_CREDSTORE_PASSWORD" -> YarnClusterDriver.SECRET_PASSWORD,
107107
"spark.yarn.appMasterEnv.HADOOP_CREDSTORE_PASSWORD" -> YarnClusterDriver.SECRET_PASSWORD
@@ -111,11 +111,11 @@ class YarnClusterSuite extends BaseYarnClusterSuite {
111111
test("run Spark in yarn-cluster mode with different configurations, ensuring redaction") {
112112
testBasicYarnApp(false,
113113
Map(
114-
"spark.driver.memory" -> "512m",
115-
"spark.driver.cores" -> "1",
116-
"spark.executor.cores" -> "1",
117-
"spark.executor.memory" -> "512m",
118-
"spark.executor.instances" -> "2",
114+
DRIVER_MEMORY.key -> "512m",
115+
DRIVER_CORES.key -> "1",
116+
EXECUTOR_CORES.key -> "1",
117+
EXECUTOR_MEMORY.key -> "512m",
118+
EXECUTOR_INSTANCES.key -> "2",
119119
// Sending some sensitive information, which we'll make sure gets redacted
120120
"spark.executorEnv.HADOOP_CREDSTORE_PASSWORD" -> YarnClusterDriver.SECRET_PASSWORD,
121121
"spark.yarn.appMasterEnv.HADOOP_CREDSTORE_PASSWORD" -> YarnClusterDriver.SECRET_PASSWORD

resource-managers/yarn/src/test/scala/org/apache/spark/network/yarn/YarnShuffleServiceSuite.scala

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -36,6 +36,7 @@ import org.scalatest.concurrent.Eventually._
3636

3737
import org.apache.spark.SecurityManager
3838
import org.apache.spark.SparkFunSuite
39+
import org.apache.spark.internal.config._
3940
import org.apache.spark.network.shuffle.ShuffleTestAccessor
4041
import org.apache.spark.network.shuffle.protocol.ExecutorShuffleInfo
4142
import org.apache.spark.util.Utils
@@ -52,7 +53,7 @@ class YarnShuffleServiceSuite extends SparkFunSuite with Matchers with BeforeAnd
5253
yarnConfig.set(YarnConfiguration.NM_AUX_SERVICES, "spark_shuffle")
5354
yarnConfig.set(YarnConfiguration.NM_AUX_SERVICE_FMT.format("spark_shuffle"),
5455
classOf[YarnShuffleService].getCanonicalName)
55-
yarnConfig.setInt("spark.shuffle.service.port", 0)
56+
yarnConfig.setInt(SHUFFLE_SERVICE_PORT.key, 0)
5657
yarnConfig.setBoolean(YarnShuffleService.STOP_ON_FAILURE_KEY, true)
5758
val localDir = Utils.createTempDir()
5859
yarnConfig.set(YarnConfiguration.NM_LOCAL_DIRS, localDir.getAbsolutePath)

0 commit comments

Comments
 (0)