@@ -18,22 +18,23 @@ package org.apache.spark.scheduler.cluster.kubernetes
 
 import java.util.concurrent.{ExecutorService, ScheduledExecutorService, TimeUnit}
 
+import scala.collection.JavaConverters._
+import scala.concurrent.Future
+
 import io.fabric8.kubernetes.api.model.{DoneablePod, Pod, PodBuilder, PodList}
 import io.fabric8.kubernetes.client.{KubernetesClient, Watch, Watcher}
 import io.fabric8.kubernetes.client.Watcher.Action
 import io.fabric8.kubernetes.client.dsl.{FilterWatchListDeletable, MixedOperation, NonNamespaceOperation, PodResource}
 import org.mockito.{AdditionalAnswers, ArgumentCaptor, Mock, MockitoAnnotations}
 import org.mockito.Matchers.{any, eq => mockitoEq}
-import org.mockito.Mockito.{doNothing, never, times, verify, when}
+import org.mockito.Mockito.{mock => _, _}
 import org.scalatest.BeforeAndAfter
 import org.scalatest.mock.MockitoSugar._
-import scala.collection.JavaConverters._
-import scala.concurrent.Future
 
 import org.apache.spark.{SparkConf, SparkContext, SparkFunSuite}
 import org.apache.spark.deploy.kubernetes.config._
 import org.apache.spark.deploy.kubernetes.constants._
-import org.apache.spark.rpc.{RpcAddress, RpcCallContext, RpcEndpoint, RpcEndpointAddress, RpcEndpointRef, RpcEnv, RpcTimeout}
+import org.apache.spark.rpc._
 import org.apache.spark.scheduler.{ExecutorExited, LiveListenerBus, SlaveLost, TaskSchedulerImpl}
 import org.apache.spark.scheduler.cluster.CoarseGrainedClusterMessages.{RegisterExecutor, RemoveExecutor}
 import org.apache.spark.scheduler.cluster.CoarseGrainedSchedulerBackend
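A note on the new Mockito import: import org.mockito.Mockito.{mock => _, _} uses Scala's rename-to-underscore syntax to import every member of org.mockito.Mockito except mock, which would otherwise clash with the mock already in scope from org.scalatest.mock.MockitoSugar._. A minimal sketch of the mechanism, using a hypothetical Greeter object (not part of this suite):

  object Greeter {
    def hello(): String = "hello"
    def bye(): String = "bye"
  }

  // Wildcard import that excludes bye: hello() resolves normally,
  // but bye() no longer compiles because it was renamed away.
  import Greeter.{bye => _, _}
  println(hello())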
@@ -174,7 +175,7 @@ private[spark] class KubernetesClusterSchedulerBackendSuite
   }
 
   test("Basic lifecycle expectations when starting and stopping the scheduler.") {
-    val scheduler = newSchedulerBackend(true)
+    val scheduler = newSchedulerBackend()
     scheduler.start()
     assert(executorPodsWatcherArgument.getValue != null)
     assert(allocatorRunnable.getValue != null)
@@ -186,7 +187,7 @@ private[spark] class KubernetesClusterSchedulerBackendSuite
     sparkConf
       .set(KUBERNETES_ALLOCATION_BATCH_SIZE, 2)
       .set(org.apache.spark.internal.config.EXECUTOR_INSTANCES, 2)
-    val scheduler = newSchedulerBackend(true)
+    val scheduler = newSchedulerBackend()
     scheduler.start()
     requestExecutorRunnable.getValue.run()
     expectPodCreationWithId(1, FIRST_EXECUTOR_POD)
@@ -201,7 +202,7 @@ private[spark] class KubernetesClusterSchedulerBackendSuite
     sparkConf
       .set(KUBERNETES_ALLOCATION_BATCH_SIZE, 2)
       .set(org.apache.spark.internal.config.EXECUTOR_INSTANCES, 2)
-    val scheduler = newSchedulerBackend(true)
+    val scheduler = newSchedulerBackend()
     scheduler.start()
     requestExecutorRunnable.getValue.run()
     expectPodCreationWithId(1, FIRST_EXECUTOR_POD)
@@ -219,7 +220,7 @@ private[spark] class KubernetesClusterSchedulerBackendSuite
     sparkConf
       .set(KUBERNETES_ALLOCATION_BATCH_SIZE, 1)
      .set(org.apache.spark.internal.config.EXECUTOR_INSTANCES, 2)
-    val scheduler = newSchedulerBackend(true)
+    val scheduler = newSchedulerBackend()
     scheduler.start()
     requestExecutorRunnable.getValue.run()
     when(podOperations.create(any(classOf[Pod])))
@@ -243,7 +244,7 @@ private[spark] class KubernetesClusterSchedulerBackendSuite
     sparkConf
       .set(KUBERNETES_ALLOCATION_BATCH_SIZE, 1)
       .set(org.apache.spark.internal.config.EXECUTOR_INSTANCES, 1)
-    val scheduler = newSchedulerBackend(true)
+    val scheduler = newSchedulerBackend()
     scheduler.start()
     requestExecutorRunnable.getValue.run()
     when(podOperations.create(any(classOf[Pod])))
@@ -280,7 +281,7 @@ private[spark] class KubernetesClusterSchedulerBackendSuite
     sparkConf
       .set(KUBERNETES_ALLOCATION_BATCH_SIZE, 1)
       .set(org.apache.spark.internal.config.EXECUTOR_INSTANCES, 1)
-    val scheduler = newSchedulerBackend(true)
+    val scheduler = newSchedulerBackend()
     scheduler.start()
     expectPodCreationWithId(1, FIRST_EXECUTOR_POD)
     when(podOperations.create(any(classOf[Pod]))).thenAnswer(AdditionalAnswers.returnsFirstArg())
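The stubbing in the context line above relies on AdditionalAnswers.returnsFirstArg(), which makes the mocked create call hand back whatever Pod the backend submitted, so later assertions can inspect that pod. A self-contained sketch of the pattern, with a hypothetical PodStore trait standing in for the suite's podOperations mock:

  import io.fabric8.kubernetes.api.model.Pod
  import org.mockito.AdditionalAnswers
  import org.mockito.Matchers.any
  import org.mockito.Mockito.{mock, when}

  trait PodStore { def create(pod: Pod): Pod }

  val store = mock(classOf[PodStore])
  // Echo the submitted pod back unchanged, mimicking an API server
  // that accepts the pod exactly as the backend constructed it.
  when(store.create(any(classOf[Pod]))).thenAnswer(AdditionalAnswers.returnsFirstArg())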
@@ -315,7 +316,7 @@ private[spark] class KubernetesClusterSchedulerBackendSuite
     sparkConf
       .set(KUBERNETES_ALLOCATION_BATCH_SIZE, 1)
       .set(org.apache.spark.internal.config.EXECUTOR_INSTANCES, 1)
-    val scheduler = newSchedulerBackend(true)
+    val scheduler = newSchedulerBackend()
     scheduler.start()
     expectPodCreationWithId(1, FIRST_EXECUTOR_POD)
     when(podOperations.create(any(classOf[Pod]))).thenAnswer(AdditionalAnswers.returnsFirstArg())
@@ -340,7 +341,7 @@ private[spark] class KubernetesClusterSchedulerBackendSuite
       RemoveExecutor("1", SlaveLost("Executor lost for unknown reasons.")))
   }
 
-  private def newSchedulerBackend(externalShuffle: Boolean): KubernetesClusterSchedulerBackend = {
+  private def newSchedulerBackend(): KubernetesClusterSchedulerBackend = {
     new KubernetesClusterSchedulerBackend(
       taskSchedulerImpl,
       rpcEnv,