This repository was archived by the owner on Jan 9, 2020. It is now read-only.

Commit 55d12b5

fixed Integration tests and modified HADOOP_CONF_DIR variable to be FILE_DIR for Volume mount
1 parent 0935968 commit 55d12b5
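
In short: the HADOOP_CONF_DIR environment variable exported into the container previously combined the volume mount path with the volume name; it now points directly at the directory where the config-map volume is mounted. A minimal before/after sketch; the constant names come from the diff below, but the literal values here are assumptions for illustration only:

// Assumed values; the real constants live in
// org.apache.spark.deploy.kubernetes.constants and are not shown in this commit.
val HADOOP_FILE_DIR = "/etc/hadoop/conf"      // assumed mount path of the Hadoop config volume
val HADOOP_FILE_VOLUME = "hadoop-properties"  // assumed volume name (a name, not a subdirectory)

val before = s"$HADOOP_FILE_DIR/$HADOOP_FILE_VOLUME" // "/etc/hadoop/conf/hadoop-properties": not where the files land
val after  = HADOOP_FILE_DIR                         // "/etc/hadoop/conf": the actual mount directory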

File tree: 4 files changed (+172, -3 lines)

resource-managers/kubernetes/core/src/main/scala/org/apache/spark/deploy/kubernetes/HadoopConfBootstrap.scala

Lines changed: 5 additions & 2 deletions

@@ -21,6 +21,8 @@ import java.io.File
 import io.fabric8.kubernetes.api.model.{ContainerBuilder, KeyToPathBuilder, PodBuilder}
 
 import org.apache.spark.deploy.kubernetes.constants._
+import org.apache.spark.internal.Logging
+
 
 /**
  * This is separated out from the HadoopConf steps API because this component can be reused to
@@ -38,12 +40,13 @@ private[spark] trait HadoopConfBootstrap {
 
 private[spark] class HadoopConfBootstrapImpl(
     hadoopConfConfigMapName: String,
-    hadoopConfigFiles: Array[File]) extends HadoopConfBootstrap {
+    hadoopConfigFiles: Array[File]) extends HadoopConfBootstrap with Logging {
 
   override def bootstrapMainContainerAndVolumes(
     originalPodWithMainContainer: PodWithMainContainer)
     : PodWithMainContainer = {
     import scala.collection.JavaConverters._
+    logInfo("HADOOP_CONF_DIR defined. Mounting HDFS specific .xml files")
     val keyPaths = hadoopConfigFiles.map(file =>
       new KeyToPathBuilder().withKey(file.toPath.getFileName.toString)
         .withPath(file.toPath.getFileName.toString).build()).toList
@@ -66,7 +69,7 @@ private[spark] class HadoopConfBootstrapImpl(
         .endVolumeMount()
         .addNewEnv()
           .withName(HADOOP_CONF_DIR)
-          .withValue(s"$HADOOP_FILE_DIR/$HADOOP_FILE_VOLUME")
+          .withValue(HADOOP_FILE_DIR)
         .endEnv()
       .build()
     PodWithMainContainer(
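
For context, a hypothetical caller of this bootstrap might look like the sketch below. This is not code from the commit: the config-map name and the originalPod/mainContainer values are assumptions.

import java.io.File

// Hedged sketch: gather the .xml files from a submission-side HADOOP_CONF_DIR
// and apply the bootstrap. `originalPod` and `mainContainer` are assumed to exist.
val confFiles: Array[File] = new File(sys.env("HADOOP_CONF_DIR"))
  .listFiles()
  .filter(_.getName.endsWith(".xml"))
val bootstrap = new HadoopConfBootstrapImpl("hadoop-conf-map", confFiles)
val withHadoopConf = bootstrap.bootstrapMainContainerAndVolumes(
  PodWithMainContainer(originalPod, mainContainer))
// The returned pod mounts each file via a KeyToPath entry and exports
// HADOOP_CONF_DIR=HADOOP_FILE_DIR, matching the change above.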

resource-managers/kubernetes/integration-tests/src/test/scala/org/apache/spark/deploy/kubernetes/integrationtest/KubernetesSuite.scala

Lines changed: 1 addition & 1 deletion

@@ -74,7 +74,7 @@ private[spark] class KubernetesSuite extends SparkFunSuite with BeforeAndAfter {
 
   test("Include HADOOP_CONF for HDFS based jobs ") {
     assume(testBackend.name == MINIKUBE_TEST_BACKEND)
-
+    sparkConf.setJars(Seq(CONTAINER_LOCAL_HELPER_JAR_PATH))
     runSparkPiAndVerifyCompletion(CONTAINER_LOCAL_MAIN_APP_RESOURCE)
   }
 
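
The fix replaces a blank line with an explicit setJars call, so the helper jar is now shipped with the job and its classes can presumably resolve inside the driver and executor containers. A sketch of the resulting configuration; CONTAINER_LOCAL_HELPER_JAR_PATH is a constant defined elsewhere in the test suite, and its value (a local:// path inside the image) is assumed:

// Sketch only: what the test's SparkConf ends up carrying.
sparkConf.setJars(Seq(CONTAINER_LOCAL_HELPER_JAR_PATH))
sparkConf.getOption("spark.jars").foreach(jars => println(s"spark.jars = $jars"))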

Lines changed: 34 additions & 0 deletions

@@ -0,0 +1,34 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+-->
+
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration>
+  <property>
+    <name>hadoop.security.authentication</name>
+    <value>kerberos</value>
+  </property>
+
+  <property>
+    <name>hadoop.security.authorization</name>
+    <value>true</value>
+  </property>
+
+  <property>
+    <name>fs.defaultFS</name>
+    <value>hdfs://nn.default.svc.cluster.local:9000</value>
+  </property>
+</configuration>
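
These are the properties conventionally kept in core-site.xml: Kerberos authentication, service-level authorization, and a default filesystem pointing at a namenode Service inside the cluster. A minimal sketch (not from this commit) of how a client picks these settings up once HADOOP_CONF_DIR points at the mount directory; "/etc/hadoop/conf" stands in for the assumed mount path:

import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.Path
import org.apache.hadoop.security.UserGroupInformation

// Hadoop normally loads core-site.xml/hdfs-site.xml from the classpath; here
// the mounted copies are added explicitly for clarity.
val conf = new Configuration()
conf.addResource(new Path("/etc/hadoop/conf/core-site.xml"))
conf.addResource(new Path("/etc/hadoop/conf/hdfs-site.xml"))
UserGroupInformation.setConfiguration(conf)
assert(UserGroupInformation.isSecurityEnabled) // true: hadoop.security.authentication=kerberos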
Lines changed: 132 additions & 0 deletions

@@ -0,0 +1,132 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+<!--
+  Licensed under the Apache License, Version 2.0 (the "License");
+  you may not use this file except in compliance with the License.
+  You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+  Unless required by applicable law or agreed to in writing, software
+  distributed under the License is distributed on an "AS IS" BASIS,
+  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  See the License for the specific language governing permissions and
+  limitations under the License. See accompanying LICENSE file.
+-->
+
+<!-- Put site-specific property overrides in this file. -->
+
+<configuration>
+
+  <property>
+    <name>dfs.replication</name>
+    <value>1</value>
+  </property>
+
+  <!-- General HDFS security config -->
+  <property>
+    <name>dfs.permissions</name>
+    <value>true</value>
+  </property>
+  <property>
+    <name>dfs.block.access.token.enable</name>
+    <value>true</value>
+  </property>
+
+  <!-- NameNode security config -->
+  <property>
+    <name>dfs.namenode.keytab.file</name>
+    <value>/var/keytabs/hdfs.keytab</value>
+  </property>
+  <property>
+    <name>dfs.namenode.kerberos.principal</name>
+    <value>hdfs/[email protected]</value>
+  </property>
+  <property>
+    <name>dfs.namenode.kerberos.internal.spnego.principal</name>
+    <value>HTTP/[email protected]</value>
+  </property>
+
+
+  <!-- For testing, we want tokens to expire FAST -->
+  <property>
+    <name>dfs.namenode.delegation.token.max-lifetime</name>
+    <value>18000000</value> <!-- 300 minutes -->
+  </property>
+  <property>
+    <name>dfs.namenode.delegation.token.renew-interval</name>
+    <value>1800000</value> <!-- 30 minutes -->
+  </property>
+
+
+
+  <!-- DataNode security config -->
+  <property>
+    <name>dfs.data.transfer.protection</name>
+    <value>integrity</value>
+  </property>
+
+  <property>
+    <name>dfs.datanode.address</name>
+    <value>0.0.0.0:10019</value>
+  </property>
+
+  <property>
+    <name>dfs.datanode.http.address</name>
+    <value>0.0.0.0:10022</value>
+  </property>
+
+  <property>
+    <name>dfs.http.policy</name>
+    <value>HTTPS_ONLY</value>
+  </property>
+
+
+  <property>
+    <name>dfs.namenode.keytab.file</name>
+    <value>/var/keytabs/hdfs.keytab</value>
+  </property>
+  <property>
+    <name>dfs.namenode.kerberos.principal</name>
+    <value>hdfs/[email protected]</value>
+  </property>
+  <property>
+    <name>dfs.namenode.kerberos.internal.spnego.principal</name>
+    <value>HTTP/[email protected]</value>
+  </property>
+
+  <!-- prevent those errors -->
+  <property>
+    <name>dfs.namenode.datanode.registration.ip-hostname-check</name>
+    <value>false</value>
+  </property>
+
+
+  <property>
+    <name>dfs.datanode.data.dir.perm</name>
+    <value>700</value>
+  </property>
+  <property>
+    <name>dfs.datanode.keytab.file</name>
+    <value>/var/keytabs/hdfs.keytab</value> <!-- path to the HDFS keytab -->
+  </property>
+  <property>
+    <name>dfs.datanode.kerberos.principal</name>
+    <value>hdfs/[email protected]</value>
+  </property>
+
+  <!-- Web Authentication config -->
+  <property>
+    <name>dfs.webhdfs.enabled</name>
+    <value>true</value>
+  </property>
+  <property>
+    <name>dfs.web.authentication.kerberos.principal</name>
+    <value>HTTP/[email protected]</value>
+  </property>
+  <property>
+    <name>dfs.web.authentication.kerberos.keytab</name>
+    <value>/var/keytabs/hdfs.keytab</value> <!-- path to the HTTP keytab -->
+  </property>
+
+</configuration>
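
Note the delegation-token values are in milliseconds: 18,000,000 ms is the 300-minute max lifetime and 1,800,000 ms the 30-minute renew interval, deliberately short so the tests exercise token expiry and renewal. A hedged sketch of logging in against this configuration from the shared keytab; the Kerberos principals are redacted as "[email protected]" in this page's rendering, so a placeholder is used:

import org.apache.hadoop.security.UserGroupInformation

// Sketch only: authenticate with the keytab configured above before touching HDFS.
// Replace the placeholder principal with the real one (redacted on this page).
UserGroupInformation.loginUserFromKeytab(
  "hdfs/<namenode-host>@<REALM>",
  "/var/keytabs/hdfs.keytab")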

0 commit comments