@@ -32,6 +32,7 @@ import (
 	"k8s.io/klog"
 	"k8s.io/kubernetes/test/e2e/framework"
 	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
+	e2erc "k8s.io/kubernetes/test/e2e/framework/rc"
 	testutils "k8s.io/kubernetes/test/utils"
 	imageutils "k8s.io/kubernetes/test/utils/image"
@@ -345,8 +346,8 @@ var _ = framework.KubeDescribe("Cluster size autoscaler scalability [Slow]", fun
 		totalMemReservation := unschedulableMemReservation * unschedulablePodReplicas
 		timeToWait := 5 * time.Minute
 		podsConfig := reserveMemoryRCConfig(f, "unschedulable-pod", unschedulablePodReplicas, totalMemReservation, timeToWait)
-		framework.RunRC(*podsConfig) // Ignore error (it will occur because pods are unschedulable)
-		defer framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, podsConfig.Name)
+		e2erc.RunRC(*podsConfig) // Ignore error (it will occur because pods are unschedulable)
+		defer e2erc.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, podsConfig.Name)

 		// Ensure that no new nodes have been added so far.
 		readyNodeCount, _ := e2enode.TotalReady(f.ClientSet)
@@ -379,7 +380,7 @@ func simpleScaleUpTestWithTolerance(f *framework.Framework, config *scaleUpTestC
 	// run rc based on config
 	ginkgo.By(fmt.Sprintf("Running RC %v from config", config.extraPods.Name))
 	start := time.Now()
-	framework.ExpectNoError(framework.RunRC(*config.extraPods))
+	framework.ExpectNoError(e2erc.RunRC(*config.extraPods))
 	// check results
 	if tolerateMissingNodeCount > 0 {
 		// Tolerate some number of nodes not to be created.
@@ -397,7 +398,7 @@ func simpleScaleUpTestWithTolerance(f *framework.Framework, config *scaleUpTestC
 	}
 	timeTrack(start, fmt.Sprintf("Scale up to %v", config.expectedResult.nodes))
 	return func() error {
-		return framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, config.extraPods.Name)
+		return e2erc.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, config.extraPods.Name)
 	}
 }
@@ -475,10 +476,10 @@ func createHostPortPodsWithMemory(f *framework.Framework, id string, replicas, p
 		HostPorts:  map[string]int{"port1": port},
 		MemRequest: request,
 	}
-	err := framework.RunRC(*config)
+	err := e2erc.RunRC(*config)
 	framework.ExpectNoError(err)
 	return func() error {
-		return framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, id)
+		return e2erc.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, id)
 	}
 }
@@ -515,10 +516,10 @@ func distributeLoad(f *framework.Framework, namespace string, id string, podDist
 	framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, f.ClientSet))
 	// Create the target RC
 	rcConfig := reserveMemoryRCConfig(f, id, totalPods, totalPods*podMemRequestMegabytes, timeout)
-	framework.ExpectNoError(framework.RunRC(*rcConfig))
+	framework.ExpectNoError(e2erc.RunRC(*rcConfig))
 	framework.ExpectNoError(waitForAllCaPodsReadyInNamespace(f, f.ClientSet))
 	return func() error {
-		return framework.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, id)
+		return e2erc.DeleteRCAndWaitForGC(f.ClientSet, f.Namespace.Name, id)
 	}
 }
0 commit comments