@@ -36,15 +36,15 @@ If you wish to use pre-built docker images, you may use the images published in
<tr><th>Component</th><th>Image</th></tr>
<tr>
<td>Spark Driver Image</td>
- <td><code>kubespark/spark-driver:v2.1.0-kubernetes-0.1.0-alpha.2</code></td>
+ <td><code>kubespark/spark-driver:v2.1.0-kubernetes-0.2.0</code></td>
</tr>
<tr>
<td>Spark Executor Image</td>
- <td><code>kubespark/spark-executor:v2.1.0-kubernetes-0.1.0-alpha.2</code></td>
+ <td><code>kubespark/spark-executor:v2.1.0-kubernetes-0.2.0</code></td>
</tr>
<tr>
<td>Spark Initialization Image</td>
- <td><code>kubespark/spark-init:v2.1.0-kubernetes-0.1.0-alpha.2</code></td>
+ <td><code>kubespark/spark-init:v2.1.0-kubernetes-0.2.0</code></td>
</tr>
</table>
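For example, to pre-pull these images onto a node (a minimal sketch; it assumes Docker is available locally and that the `kubespark` tags in the table above are published on Docker Hub):

```bash
# Pre-pull the published images so pods do not have to fetch them at launch time.
docker pull kubespark/spark-driver:v2.1.0-kubernetes-0.2.0
docker pull kubespark/spark-executor:v2.1.0-kubernetes-0.2.0
docker pull kubespark/spark-init:v2.1.0-kubernetes-0.2.0
```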
@@ -76,9 +76,9 @@ are set up as described above:
--kubernetes-namespace default \
--conf spark.executor.instances=5 \
--conf spark.app.name=spark-pi \
- --conf spark.kubernetes.driver.docker.image=kubespark/spark-driver:v2.1.0-kubernetes-0.1.0-alpha.2 \
- --conf spark.kubernetes.executor.docker.image=kubespark/spark-executor:v2.1.0-kubernetes-0.1.0-alpha.2 \
- --conf spark.kubernetes.initcontainer.docker.image=kubespark/spark-init:v2.1.0-kubernetes-0.1.0-alpha.2 \
+ --conf spark.kubernetes.driver.docker.image=kubespark/spark-driver:v2.1.0-kubernetes-0.2.0 \
+ --conf spark.kubernetes.executor.docker.image=kubespark/spark-executor:v2.1.0-kubernetes-0.2.0 \
+ --conf spark.kubernetes.initcontainer.docker.image=kubespark/spark-init:v2.1.0-kubernetes-0.2.0 \
local:///opt/spark/examples/jars/spark_examples_2.11-2.2.0.jar
The Spark master, specified either via passing the `--master` command line argument to `spark-submit` or by setting
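As a sketch of the two ways to set the master (the API server host and port here are placeholders for your cluster):

```bash
# Option 1: pass the master URL on the command line; the k8s:// prefix
# selects the Kubernetes scheduler backend.
bin/spark-submit --master k8s://https://<k8s-apiserver-host>:<k8s-apiserver-port> ...

# Option 2: set it in the application's configuration instead.
--conf spark.master=k8s://https://<k8s-apiserver-host>:<k8s-apiserver-port>
```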
@@ -125,9 +125,9 @@ and then you can compute the value of Pi as follows:
--kubernetes-namespace default \
--conf spark.executor.instances=5 \
--conf spark.app.name=spark-pi \
- --conf spark.kubernetes.driver.docker.image=kubespark/spark-driver:v2.1.0-kubernetes-0.1.0-alpha.2 \
- --conf spark.kubernetes.executor.docker.image=kubespark/spark-executor:v2.1.0-kubernetes-0.1.0-alpha.2 \
- --conf spark.kubernetes.initcontainer.docker.image=kubespark/spark-init:v2.1.0-kubernetes-0.1.0-alpha.2 \
+ --conf spark.kubernetes.driver.docker.image=kubespark/spark-driver:v2.1.0-kubernetes-0.2.0 \
+ --conf spark.kubernetes.executor.docker.image=kubespark/spark-executor:v2.1.0-kubernetes-0.2.0 \
+ --conf spark.kubernetes.initcontainer.docker.image=kubespark/spark-init:v2.1.0-kubernetes-0.2.0 \
--conf spark.kubernetes.resourceStagingServer.uri=http://<address-of-any-cluster-node>:31000 \
examples/jars/spark_examples_2.11-2.2.0.jar
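Note that the application jar here is given as a path on the submitting machine rather than a `local://` URI, so it is shipped to the cluster through the resource staging server configured just above. A sketch contrasting the two forms:

```bash
# Jar already baked into the driver/executor Docker images: no upload, use local://.
local:///opt/spark/examples/jars/spark_examples_2.11-2.2.0.jar

# Jar on the submitting machine: uploaded via spark.kubernetes.resourceStagingServer.uri.
examples/jars/spark_examples_2.11-2.2.0.jar
```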
@@ -168,9 +168,9 @@ If our local proxy were listening on port 8001, we would have our submission loo
--kubernetes-namespace default \
--conf spark.executor.instances=5 \
--conf spark.app.name=spark-pi \
- --conf spark.kubernetes.driver.docker.image=kubespark/spark-driver:v2.1.0-kubernetes-0.1.0-alpha.2 \
- --conf spark.kubernetes.executor.docker.image=kubespark/spark-executor:v2.1.0-kubernetes-0.1.0-alpha.2 \
- --conf spark.kubernetes.initcontainer.docker.image=kubespark/spark-init:v2.1.0-kubernetes-0.1.0-alpha.2 \
+ --conf spark.kubernetes.driver.docker.image=kubespark/spark-driver:v2.1.0-kubernetes-0.2.0 \
+ --conf spark.kubernetes.executor.docker.image=kubespark/spark-executor:v2.1.0-kubernetes-0.2.0 \
+ --conf spark.kubernetes.initcontainer.docker.image=kubespark/spark-init:v2.1.0-kubernetes-0.2.0 \
local:///opt/spark/examples/jars/spark_examples_2.11-2.2.0.jar
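The local proxy referenced in this hunk can be started with `kubectl proxy` (a sketch; 8001 is kubectl's default listening port):

```bash
# Start a local proxy to the Kubernetes API server (binds 127.0.0.1:8001 by default).
kubectl proxy

# Then point spark-submit at the proxy rather than at the API server directly.
--master k8s://http://127.0.0.1:8001
```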
Communication between Spark and Kubernetes clusters is performed using the fabric8 kubernetes-client library.
@@ -284,9 +284,9 @@ communicate with the resource staging server over TLS. The trustStore can be set
--kubernetes-namespace default \
--conf spark.executor.instances=5 \
--conf spark.app.name=spark-pi \
- --conf spark.kubernetes.driver.docker.image=kubespark/spark-driver:v2.1.0-kubernetes-0.1.0-alpha.2 \
- --conf spark.kubernetes.executor.docker.image=kubespark/spark-executor:v2.1.0-kubernetes-0.1.0-alpha.2 \
- --conf spark.kubernetes.initcontainer.docker.image=kubespark/spark-init:v2.1.0-kubernetes-0.1.0-alpha.2 \
+ --conf spark.kubernetes.driver.docker.image=kubespark/spark-driver:v2.1.0-kubernetes-0.2.0 \
+ --conf spark.kubernetes.executor.docker.image=kubespark/spark-executor:v2.1.0-kubernetes-0.2.0 \
+ --conf spark.kubernetes.initcontainer.docker.image=kubespark/spark-init:v2.1.0-kubernetes-0.2.0 \
--conf spark.kubernetes.resourceStagingServer.uri=https://<address-of-any-cluster-node>:31000 \
--conf spark.ssl.kubernetes.resourceStagingServer.enabled=true \
--conf spark.ssl.kubernetes.resourceStagingServer.clientCertPem=/home/myuser/cert.pem \
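If the staging server's certificate is not signed by a trusted CA, the submission client also needs a trustStore, as the hunk header above notes. A hedged sketch, reusing the `spark.ssl.kubernetes.resourceStagingServer` namespace from this example; the paths and password are placeholders:

```bash
# Trust the resource staging server's certificate via a client-side trustStore.
--conf spark.ssl.kubernetes.resourceStagingServer.trustStore=/home/myuser/trustStore.jks \
--conf spark.ssl.kubernetes.resourceStagingServer.trustStorePassword=changeit \
--conf spark.ssl.kubernetes.resourceStagingServer.trustStoreType=JKS \
```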