Skip to content
This repository was archived by the owner on Jan 9, 2020. It is now read-only.

Commit a244db3

Browse files
committed
Use a pre-installed Minikube instance for integration tests.
This changes the integration test workflow to assume that Minikube is already installed on the testing machine. Previously, the integration tests downloaded Minikube in the build cycle, and started/stopped the Minikube VM on every test execution. However, this made it such that multiple integration tests cannot run concurrently. This commit allows multiple tests to share a single Minikube instance, and also requires users that run integration tests to have Minikube pre-installed. If the minikube instance has enough resources, multiple tests can run against it at the same time. Each test needs to use its own set of Docker images, so the docker image builder now tags images uniquely on every test execution.
1 parent 3c7dec5 commit a244db3

File tree

12 files changed

+140
-161
lines changed

12 files changed

+140
-161
lines changed

resource-managers/kubernetes/README.md

Lines changed: 4 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -42,10 +42,12 @@ Below is a list of the submodules for this cluster manager and what they do.
4242

4343
# Running the Kubernetes Integration Tests
4444

45-
Note that the integration test framework is currently being heavily revised and is subject to change.
46-
4745
Note that currently the integration tests only run with Java 8.
4846

47+
Integration tests firstly require installing [Minikube](https://kubernetes.io/docs/getting-started-guides/minikube/) on
48+
your machine. Refer to the Minikube documentation for instructions on how to install it. It is recommended to allocate at
49+
least 8 CPUs and 8GB of memory to the Minikube cluster.
50+
4951
Running any of the integration tests requires including `kubernetes-integration-tests` profile in the build command. In
5052
order to prepare the environment for running the integration tests, the `pre-integration-test` step must be run in Maven
5153
on the `resource-managers/kubernetes/integration-tests` module:

resource-managers/kubernetes/core/src/main/scala/org/apache/spark/deploy/k8s/config.scala

Lines changed: 3 additions & 0 deletions
Original file line number | Diff line number | Diff line change
@@ -502,6 +502,9 @@ package object config extends Logging {
502502

503503
private[spark] val KUBERNETES_NODE_SELECTOR_PREFIX = "spark.kubernetes.node.selector."
504504

505+
private[spark] val KUBERNETES_TEST_DOCKER_TAG_SYSTEM_PROPERTY =
506+
"spark.kubernetes.test.imageDockerTag"
507+
505508
private[spark] def resolveK8sMaster(rawMasterString: String): String = {
506509
if (!rawMasterString.startsWith("k8s://")) {
507510
throw new IllegalArgumentException("Master URL should start with k8s:// in Kubernetes mode.")

resource-managers/kubernetes/integration-tests/pom.xml

Lines changed: 0 additions & 31 deletions
Original file line number | Diff line number | Diff line change
@@ -324,37 +324,6 @@
324324
</execution>
325325
</executions>
326326
</plugin>
327-
<plugin>
328-
<groupId>com.googlecode.maven-download-plugin</groupId>
329-
<artifactId>download-maven-plugin</artifactId>
330-
<version>1.3.0</version>
331-
<executions>
332-
<execution>
333-
<id>download-minikube-linux</id>
334-
<phase>pre-integration-test</phase>
335-
<goals>
336-
<goal>wget</goal>
337-
</goals>
338-
<configuration>
339-
<url>https://storage.googleapis.com/minikube/releases/v0.16.0/minikube-linux-amd64</url>
340-
<outputDirectory>${project.build.directory}/minikube-bin/linux-amd64</outputDirectory>
341-
<outputFileName>minikube</outputFileName>
342-
</configuration>
343-
</execution>
344-
<execution>
345-
<id>download-minikube-darwin</id>
346-
<phase>pre-integration-test</phase>
347-
<goals>
348-
<goal>wget</goal>
349-
</goals>
350-
<configuration>
351-
<url>https://storage.googleapis.com/minikube/releases/v0.16.0/minikube-darwin-amd64</url>
352-
<outputDirectory>${project.build.directory}/minikube-bin/darwin-amd64</outputDirectory>
353-
<outputFileName>minikube</outputFileName>
354-
</configuration>
355-
</execution>
356-
</executions>
357-
</plugin>
358327
<plugin>
359328
<!-- Triggers scalatest plugin in the integration-test phase instead of
360329
the test phase, so that test jobs are copied over beforehand.

resource-managers/kubernetes/integration-tests/src/test/scala/org/apache/spark/deploy/k8s/integrationtest/KubernetesSuite.scala

Lines changed: 33 additions & 29 deletions
Original file line number | Diff line number | Diff line change
@@ -32,7 +32,7 @@ import org.apache.spark.{SparkConf, SparkFunSuite, SSLOptions}
3232
import org.apache.spark.deploy.k8s.SSLUtils
3333
import org.apache.spark.deploy.k8s.config._
3434
import org.apache.spark.deploy.k8s.integrationtest.backend.IntegrationTestBackendFactory
35-
import org.apache.spark.deploy.k8s.integrationtest.backend.minikube.Minikube
35+
import org.apache.spark.deploy.k8s.integrationtest.backend.minikube.{Minikube, MinikubeTestBackend}
3636
import org.apache.spark.deploy.k8s.integrationtest.constants.MINIKUBE_TEST_BACKEND
3737
import org.apache.spark.deploy.k8s.submit.{Client, ClientArguments, JavaMainAppResource, KeyAndCertPem, MainAppResource, PythonMainAppResource}
3838
import org.apache.spark.launcher.SparkLauncher
@@ -51,9 +51,13 @@ private[spark] class KubernetesSuite extends SparkFunSuite with BeforeAndAfter {
5151
testBackend.initialize()
5252
kubernetesTestComponents = new KubernetesTestComponents(testBackend.getKubernetesClient)
5353
resourceStagingServerLauncher = new ResourceStagingServerLauncher(
54-
kubernetesTestComponents.kubernetesClient.inNamespace(kubernetesTestComponents.namespace))
54+
kubernetesTestComponents
55+
.kubernetesClient
56+
.inNamespace(kubernetesTestComponents.namespace), testBackend.dockerImageTag())
5557
staticAssetServerLauncher = new StaticAssetServerLauncher(
56-
kubernetesTestComponents.kubernetesClient.inNamespace(kubernetesTestComponents.namespace))
58+
kubernetesTestComponents
59+
.kubernetesClient
60+
.inNamespace(kubernetesTestComponents.namespace), testBackend.dockerImageTag())
5761
}
5862

5963
override def afterAll(): Unit = {
@@ -62,8 +66,9 @@ private[spark] class KubernetesSuite extends SparkFunSuite with BeforeAndAfter {
6266

6367
before {
6468
sparkConf = kubernetesTestComponents.newSparkConf()
65-
.set(INIT_CONTAINER_DOCKER_IMAGE, s"spark-init:latest")
66-
.set(DRIVER_DOCKER_IMAGE, s"spark-driver:latest")
69+
.set(INIT_CONTAINER_DOCKER_IMAGE, tagImage("spark-init"))
70+
.set(DRIVER_DOCKER_IMAGE, tagImage("spark-driver"))
71+
.set(EXECUTOR_DOCKER_IMAGE, tagImage("spark-executor"))
6772
.set(s"${KUBERNETES_DRIVER_LABEL_PREFIX}spark-app-locator", APP_LOCATOR_LABEL)
6873
kubernetesTestComponents.createNamespace()
6974
}
@@ -73,14 +78,13 @@ private[spark] class KubernetesSuite extends SparkFunSuite with BeforeAndAfter {
7378
}
7479

7580
test("Run PySpark Job on file from SUBMITTER with --py-files") {
76-
assume(testBackend.name == MINIKUBE_TEST_BACKEND)
81+
assume(testBackend == MinikubeTestBackend)
7782

7883
launchStagingServer(SSLOptions(), None)
7984
sparkConf
80-
.set(DRIVER_DOCKER_IMAGE,
81-
System.getProperty("spark.docker.test.driverImage", "spark-driver-py:latest"))
82-
.set(EXECUTOR_DOCKER_IMAGE,
83-
System.getProperty("spark.docker.test.executorImage", "spark-executor-py:latest"))
85+
.set(DRIVER_DOCKER_IMAGE, tagImage("spark-driver-py"))
86+
.set(EXECUTOR_DOCKER_IMAGE, tagImage("spark-executor-py"))
87+
8488

8589
runPySparkPiAndVerifyCompletion(
8690
PYSPARK_PI_SUBMITTER_LOCAL_FILE_LOCATION,
@@ -89,27 +93,25 @@ private[spark] class KubernetesSuite extends SparkFunSuite with BeforeAndAfter {
8993
}
9094

9195
test("Run PySpark Job on file from CONTAINER with spark.jar defined") {
92-
assume(testBackend.name == MINIKUBE_TEST_BACKEND)
96+
assume(testBackend == MinikubeTestBackend)
9397

9498
sparkConf.setJars(Seq(CONTAINER_LOCAL_HELPER_JAR_PATH))
9599
sparkConf
96-
.set(DRIVER_DOCKER_IMAGE,
97-
System.getProperty("spark.docker.test.driverImage", "spark-driver-py:latest"))
98-
.set(EXECUTOR_DOCKER_IMAGE,
99-
System.getProperty("spark.docker.test.executorImage", "spark-executor-py:latest"))
100+
.set(DRIVER_DOCKER_IMAGE, tagImage("spark-driver-py"))
101+
.set(EXECUTOR_DOCKER_IMAGE, tagImage("spark-executor-py"))
100102

101103
runPySparkPiAndVerifyCompletion(PYSPARK_PI_CONTAINER_LOCAL_FILE_LOCATION, Seq.empty[String])
102104
}
103105

104106
test("Simple submission test with the resource staging server.") {
105-
assume(testBackend.name == MINIKUBE_TEST_BACKEND)
107+
assume(testBackend == MinikubeTestBackend)
106108

107109
launchStagingServer(SSLOptions(), None)
108110
runSparkPiAndVerifyCompletion(SUBMITTER_LOCAL_MAIN_APP_RESOURCE)
109111
}
110112

111113
test("Enable SSL on the resource staging server") {
112-
assume(testBackend.name == MINIKUBE_TEST_BACKEND)
114+
assume(testBackend == MinikubeTestBackend)
113115

114116
val keyStoreAndTrustStore = SSLUtils.generateKeyStoreTrustStorePair(
115117
ipAddress = Minikube.getMinikubeIp,
@@ -136,14 +138,14 @@ private[spark] class KubernetesSuite extends SparkFunSuite with BeforeAndAfter {
136138
}
137139

138140
test("Use container-local resources without the resource staging server") {
139-
assume(testBackend.name == MINIKUBE_TEST_BACKEND)
141+
assume(testBackend == MinikubeTestBackend)
140142

141143
sparkConf.setJars(Seq(CONTAINER_LOCAL_HELPER_JAR_PATH))
142144
runSparkPiAndVerifyCompletion(CONTAINER_LOCAL_MAIN_APP_RESOURCE)
143145
}
144146

145147
test("Dynamic executor scaling basic test") {
146-
assume(testBackend.name == MINIKUBE_TEST_BACKEND)
148+
assume(testBackend == MinikubeTestBackend)
147149

148150
launchStagingServer(SSLOptions(), None)
149151
createShuffleServiceDaemonSet()
@@ -163,7 +165,7 @@ private[spark] class KubernetesSuite extends SparkFunSuite with BeforeAndAfter {
163165
}
164166

165167
test("Use remote resources without the resource staging server.") {
166-
assume(testBackend.name == MINIKUBE_TEST_BACKEND)
168+
assume(testBackend == MinikubeTestBackend)
167169
val assetServerUri = staticAssetServerLauncher.launchStaticAssetServer()
168170
sparkConf.setJars(Seq(
169171
s"$assetServerUri/${EXAMPLES_JAR_FILE.getName}",
@@ -173,7 +175,7 @@ private[spark] class KubernetesSuite extends SparkFunSuite with BeforeAndAfter {
173175
}
174176

175177
test("Mix remote resources with submitted ones.") {
176-
assume(testBackend.name == MINIKUBE_TEST_BACKEND)
178+
assume(testBackend == MinikubeTestBackend)
177179
launchStagingServer(SSLOptions(), None)
178180
val assetServerUri = staticAssetServerLauncher.launchStaticAssetServer()
179181
sparkConf.setJars(Seq(
@@ -183,7 +185,7 @@ private[spark] class KubernetesSuite extends SparkFunSuite with BeforeAndAfter {
183185
}
184186

185187
test("Use key and certificate PEM files for TLS.") {
186-
assume(testBackend.name == MINIKUBE_TEST_BACKEND)
188+
assume(testBackend == MinikubeTestBackend)
187189
val keyAndCertificate = SSLUtils.generateKeyCertPemPair(Minikube.getMinikubeIp)
188190
launchStagingServer(
189191
SSLOptions(enabled = true),
@@ -195,7 +197,7 @@ private[spark] class KubernetesSuite extends SparkFunSuite with BeforeAndAfter {
195197
}
196198

197199
test("Use client key and client cert file when requesting executors") {
198-
assume(testBackend.name == MINIKUBE_TEST_BACKEND)
200+
assume(testBackend == MinikubeTestBackend)
199201
sparkConf.setJars(Seq(
200202
CONTAINER_LOCAL_MAIN_APP_RESOURCE,
201203
CONTAINER_LOCAL_HELPER_JAR_PATH))
@@ -212,7 +214,7 @@ private[spark] class KubernetesSuite extends SparkFunSuite with BeforeAndAfter {
212214
}
213215

214216
test("Added files should be placed in the driver's working directory.") {
215-
assume(testBackend.name == MINIKUBE_TEST_BACKEND)
217+
assume(testBackend == MinikubeTestBackend)
216218
launchStagingServer(SSLOptions(), None)
217219
val testExistenceFileTempDir = Utils.createTempDir(namePrefix = "test-existence-file-temp-dir")
218220
val testExistenceFile = new File(testExistenceFileTempDir, "input.txt")
@@ -230,7 +232,7 @@ private[spark] class KubernetesSuite extends SparkFunSuite with BeforeAndAfter {
230232
}
231233

232234
test("Setting JVM options on the driver and executors with spaces.") {
233-
assume(testBackend.name == MINIKUBE_TEST_BACKEND)
235+
assume(testBackend == MinikubeTestBackend)
234236
launchStagingServer(SSLOptions(), None)
235237
val driverJvmOptionsFile = storeJvmOptionsInTempFile(
236238
Map("simpleDriverConf" -> "simpleDriverConfValue",
@@ -260,7 +262,7 @@ private[spark] class KubernetesSuite extends SparkFunSuite with BeforeAndAfter {
260262
}
261263

262264
test("Submit small local files without the resource staging server.") {
263-
assume(testBackend.name == MINIKUBE_TEST_BACKEND)
265+
assume(testBackend == MinikubeTestBackend)
264266
sparkConf.setJars(Seq(CONTAINER_LOCAL_HELPER_JAR_PATH))
265267
val testExistenceFileTempDir = Utils.createTempDir(namePrefix = "test-existence-file-temp-dir")
266268
val testExistenceFile = new File(testExistenceFileTempDir, "input.txt")
@@ -278,15 +280,15 @@ private[spark] class KubernetesSuite extends SparkFunSuite with BeforeAndAfter {
278280
}
279281

280282
test("Use a very long application name.") {
281-
assume(testBackend.name == MINIKUBE_TEST_BACKEND)
283+
assume(testBackend == MinikubeTestBackend)
282284

283285
sparkConf.setJars(Seq(CONTAINER_LOCAL_HELPER_JAR_PATH)).setAppName("long" * 40)
284286
runSparkPiAndVerifyCompletion(CONTAINER_LOCAL_MAIN_APP_RESOURCE)
285287
}
286288

287289
private def launchStagingServer(
288290
resourceStagingServerSslOptions: SSLOptions, keyAndCertPem: Option[KeyAndCertPem]): Unit = {
289-
assume(testBackend.name == MINIKUBE_TEST_BACKEND)
291+
assume(testBackend == MinikubeTestBackend)
290292

291293
val resourceStagingServerPort = resourceStagingServerLauncher.launchStagingServer(
292294
resourceStagingServerSslOptions, keyAndCertPem)
@@ -368,7 +370,7 @@ private[spark] class KubernetesSuite extends SparkFunSuite with BeforeAndAfter {
368370
.endVolume()
369371
.addNewContainer()
370372
.withName("shuffle")
371-
.withImage("spark-shuffle:latest")
373+
.withImage(s"spark-shuffle:${testBackend.dockerImageTag()}")
372374
.withImagePullPolicy("IfNotPresent")
373375
.addNewVolumeMount()
374376
.withName("shuffle-dir")
@@ -404,6 +406,8 @@ private[spark] class KubernetesSuite extends SparkFunSuite with BeforeAndAfter {
404406
}
405407
propertiesFile
406408
}
409+
410+
private def tagImage(image: String): String = s"$image:${testBackend.dockerImageTag()}"
407411
}
408412

409413
private[spark] object KubernetesSuite {

resource-managers/kubernetes/integration-tests/src/test/scala/org/apache/spark/deploy/k8s/integrationtest/ResourceStagingServerLauncher.scala

Lines changed: 3 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -32,7 +32,8 @@ import org.apache.spark.util.Utils
3232
/**
3333
* Launches a pod that runs the resource staging server, exposing it over a NodePort.
3434
*/
35-
private[spark] class ResourceStagingServerLauncher(kubernetesClient: KubernetesClient) {
35+
private[spark] class ResourceStagingServerLauncher(
36+
kubernetesClient: KubernetesClient, dockerImageTag: String) {
3637

3738
private val SECRETS_ROOT_DIR = "/mnt/secrets/spark-staging"
3839
private val KEYSTORE_SECRET_KEY = "keyStore"
@@ -123,7 +124,7 @@ private[spark] class ResourceStagingServerLauncher(kubernetesClient: KubernetesC
123124
.endVolume()
124125
.addNewContainer()
125126
.withName("staging-server-container")
126-
.withImage("spark-resource-staging-server:latest")
127+
.withImage(s"spark-resource-staging-server:$dockerImageTag")
127128
.withImagePullPolicy("IfNotPresent")
128129
.withNewReadinessProbe()
129130
.withHttpGet(probePingHttpGet)

resource-managers/kubernetes/integration-tests/src/test/scala/org/apache/spark/deploy/k8s/integrationtest/StaticAssetServerLauncher.scala

Lines changed: 3 additions & 2 deletions
Original file line number | Diff line number | Diff line change
@@ -25,7 +25,8 @@ import org.apache.spark.util.Utils
2525
* Launches a simple HTTP server which provides jars that can be downloaded by Spark applications
2626
* in integration tests.
2727
*/
28-
private[spark] class StaticAssetServerLauncher(kubernetesClient: KubernetesClient) {
28+
private[spark] class StaticAssetServerLauncher(
29+
kubernetesClient: KubernetesClient, dockerImageTag: String) {
2930

3031
// Returns the HTTP Base URI of the server.
3132
def launchStaticAssetServer(): String = {
@@ -46,7 +47,7 @@ private[spark] class StaticAssetServerLauncher(kubernetesClient: KubernetesClien
4647
.withNewSpec()
4748
.addNewContainer()
4849
.withName("static-asset-server-container")
49-
.withImage("spark-integration-test-asset-server:latest")
50+
.withImage(s"spark-integration-test-asset-server:$dockerImageTag")
5051
.withImagePullPolicy("IfNotPresent")
5152
.withNewReadinessProbe()
5253
.withHttpGet(probePingHttpGet)

resource-managers/kubernetes/integration-tests/src/test/scala/org/apache/spark/deploy/k8s/integrationtest/backend/GCE/GCETestBackend.scala

Lines changed: 4 additions & 3 deletions
Original file line number | Diff line number | Diff line change
@@ -18,9 +18,8 @@ package org.apache.spark.deploy.k8s.integrationtest.backend.GCE
1818

1919
import io.fabric8.kubernetes.client.{ConfigBuilder, DefaultKubernetesClient}
2020

21-
import org.apache.spark.deploy.k8s.config.resolveK8sMaster
21+
import org.apache.spark.deploy.k8s.config._
2222
import org.apache.spark.deploy.k8s.integrationtest.backend.IntegrationTestBackend
23-
import org.apache.spark.deploy.k8s.integrationtest.constants.GCE_TEST_BACKEND
2423

2524
private[spark] class GCETestBackend(val master: String) extends IntegrationTestBackend {
2625
private var defaultClient: DefaultKubernetesClient = _
@@ -36,5 +35,7 @@ private[spark] class GCETestBackend(val master: String) extends IntegrationTestB
3635
defaultClient
3736
}
3837

39-
override def name(): String = GCE_TEST_BACKEND
38+
override def dockerImageTag(): String = {
39+
return System.getProperty(KUBERNETES_TEST_DOCKER_TAG_SYSTEM_PROPERTY, "latest")
40+
}
4041
}

resource-managers/kubernetes/integration-tests/src/test/scala/org/apache/spark/deploy/k8s/integrationtest/backend/IntegrationTestBackend.scala

Lines changed: 3 additions & 3 deletions
Original file line number | Diff line number | Diff line change
@@ -21,19 +21,19 @@ import io.fabric8.kubernetes.client.DefaultKubernetesClient
2121

2222
import org.apache.spark.deploy.k8s.integrationtest.backend.GCE.GCETestBackend
2323
import org.apache.spark.deploy.k8s.integrationtest.backend.minikube.{Minikube, MinikubeTestBackend}
24-
import org.apache.spark.deploy.k8s.integrationtest.docker.SparkDockerImageBuilder
24+
import org.apache.spark.deploy.k8s.integrationtest.docker.SparkDockerImageManager
2525

2626
private[spark] trait IntegrationTestBackend {
27-
def name(): String
2827
def initialize(): Unit
2928
def getKubernetesClient(): DefaultKubernetesClient
29+
def dockerImageTag(): String
3030
def cleanUp(): Unit = {}
3131
}
3232

3333
private[spark] object IntegrationTestBackendFactory {
3434
def getTestBackend(): IntegrationTestBackend = {
3535
Option(System.getProperty("spark.kubernetes.test.master"))
3636
.map(new GCETestBackend(_))
37-
.getOrElse(new MinikubeTestBackend())
37+
.getOrElse(MinikubeTestBackend)
3838
}
3939
}

0 commit comments

Comments (0)