-
Notifications
You must be signed in to change notification settings - Fork 9
Add initial integration test code #3
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Changes from 2 commits
e82d4c5
7c8612c
5a950eb
b14430e
0443af8
567ad3e
74c158b
bde1cf9
8624545
5585a0a
f793e38
069ae50
204c51f
6a93a9e
b3e4ee0
989a371
46d1f5f
5f88921
a68bd5f
0f8fad9
7bc4aa2
9ec071a
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -21,7 +21,7 @@ import java.nio.file.Paths | |
import java.util.UUID | ||
import java.util.regex.Pattern | ||
|
||
import com.google.common.io.{Files, PatternFilenameFilter} | ||
import com.google.common.io.PatternFilenameFilter | ||
import org.scalatest.{BeforeAndAfter, BeforeAndAfterAll, FunSuite} | ||
import org.scalatest.concurrent.{Eventually, PatienceConfiguration} | ||
import org.scalatest.time.{Minutes, Seconds, Span} | ||
|
@@ -31,6 +31,7 @@ import org.apache.spark.deploy.k8s.integrationtest.constants.MINIKUBE_TEST_BACKE | |
import org.apache.spark.deploy.k8s.integrationtest.constants.SPARK_DISTRO_PATH | ||
|
||
private[spark] class KubernetesSuite extends FunSuite with BeforeAndAfterAll with BeforeAndAfter { | ||
|
||
import KubernetesSuite._ | ||
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Add an empty line before the import. |
||
private val testBackend = IntegrationTestBackendFactory.getTestBackend() | ||
private val APP_LOCATOR_LABEL = UUID.randomUUID().toString.replaceAll("-", "") | ||
|
@@ -48,8 +49,6 @@ private[spark] class KubernetesSuite extends FunSuite with BeforeAndAfterAll wit | |
|
||
before { | ||
sparkAppConf = kubernetesTestComponents.newSparkAppConf() | ||
.set("spark.kubernetes.initcontainer.docker.image", "spark-init:latest") | ||
.set("spark.kubernetes.driver.docker.image", "spark-driver:latest") | ||
.set("spark.kubernetes.driver.label.spark-app-locator", APP_LOCATOR_LABEL) | ||
kubernetesTestComponents.createNamespace() | ||
} | ||
|
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -19,11 +19,12 @@ package org.apache.spark.deploy.k8s.integrationtest | |
import java.nio.file.Paths | ||
import java.util.UUID | ||
|
||
import io.fabric8.kubernetes.client.DefaultKubernetesClient | ||
import org.scalatest.concurrent.Eventually | ||
import scala.collection.mutable | ||
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Scala packages should be in a group after java packages. |
||
import scala.collection.JavaConverters._ | ||
|
||
import io.fabric8.kubernetes.client.DefaultKubernetesClient | ||
import org.scalatest.concurrent.Eventually | ||
|
||
import org.apache.spark.deploy.k8s.integrationtest.constants.SPARK_DISTRO_PATH | ||
|
||
private[spark] class KubernetesTestComponents(defaultClient: DefaultKubernetesClient) { | ||
|
@@ -56,6 +57,8 @@ private[spark] class KubernetesTestComponents(defaultClient: DefaultKubernetesCl | |
new SparkAppConf() | ||
.set("spark.master", s"k8s://${kubernetesClient.getMasterUrl}") | ||
.set("spark.kubernetes.namespace", namespace) | ||
// TODO: apache/spark#19995 is changing docker.image to container.image in these properties. | ||
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. FYI: the PR has been merged. There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. I see. I'll have to update my distro, make this change and test it again. There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. This is done by commit 989a371. |
||
// Update them once the PR is merged. | ||
.set("spark.kubernetes.driver.docker.image", | ||
System.getProperty("spark.docker.test.driverImage", "spark-driver:latest")) | ||
.set("spark.kubernetes.executor.docker.image", | ||
|
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -16,10 +16,10 @@ | |
*/ | ||
package org.apache.spark.deploy.k8s.integrationtest | ||
|
||
import java.io.{BufferedReader, InputStreamReader} | ||
import java.util.concurrent.TimeUnit | ||
|
||
import scala.collection.mutable.ArrayBuffer | ||
import scala.io.Source | ||
|
||
object ProcessUtils extends Logging { | ||
/** | ||
|
@@ -32,17 +32,13 @@ object ProcessUtils extends Logging { | |
val proc = pb.start() | ||
val outputLines = new ArrayBuffer[String] | ||
|
||
Utils.tryWithResource(new InputStreamReader(proc.getInputStream)) { procOutput => | ||
Utils.tryWithResource(new BufferedReader(procOutput)) { (bufferedOutput: BufferedReader) => | ||
var line: String = null | ||
do { | ||
line = bufferedOutput.readLine() | ||
if (line != null) { | ||
logInfo(line) | ||
outputLines += line | ||
} | ||
} while (line != null) | ||
Utils.tryWithResource(Source.fromInputStream(proc.getInputStream, "UTF-8")) { output => | ||
for (line <- output.getLines) { | ||
logInfo(line) | ||
outputLines += line | ||
} | ||
}{ | ||
output => output.close() | ||
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Is calling `close()` explicitly here necessary? There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Yes, it is, because the `Source` is not closed automatically otherwise. There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. It seems `Source` implements `Closeable`, so the explicit close block may not be needed. There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Ah, that is interesting. I think it became `Closeable` in a more recent Scala version.
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Ah, OK. There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. I actually managed to get rid of the close call by using the input stream as the resource. PTAL. There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. LGTM. |
||
} | ||
assert(proc.waitFor(timeout, TimeUnit.SECONDS), | ||
s"Timed out while executing ${fullCommand.mkString(" ")}") | ||
|
Original file line number | Diff line number | Diff line change |
---|---|---|
|
@@ -39,6 +39,9 @@ private[spark] object Minikube extends Logging { | |
|
||
private val MINIKUBE_STARTUP_TIMEOUT_SECONDS = 60 | ||
|
||
// NOTE: This and the following methods are synchronized to prevent deleteMinikube from | ||
// destroying the minikube VM while other methods try to use the VM. | ||
// Such a race condition can corrupt the VM or some VM provisioning tools like VirtualBox. | ||
def startMinikube(): Unit = synchronized { | ||
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Why are this and the following methods `synchronized`? There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. @mccheah would know better. I guess we want to prevent `deleteMinikube` from destroying the minikube VM while other methods try to use the VM, since such a race condition could corrupt the VM or VM provisioning tools like VirtualBox. Does this explanation make sense? If yes, I think we can leave a NOTE. There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. It makes sense to me. |
||
assert(MINIKUBE_EXECUTABLE_DEST.exists(), EXPECTED_DOWNLOADED_MINIKUBE_MESSAGE) | ||
if (getMinikubeStatus != MinikubeStatus.RUNNING) { | ||
|
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Just set it in the Dockerfile using: `RUN chmod +x /opt/entrypoint.sh`.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
I think that's better to be done by the upstream code. It's hard for this integration code to surgically do in-place edit of Dockerfile.
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
+1
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
The PR has merged now.