@@ -31,8 +31,6 @@ import (
 	"testing"
 	"time"
 
-	"github.com/google/go-cmp/cmp"
-
 	v1 "k8s.io/api/core/v1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/apimachinery/pkg/api/meta"
@@ -764,31 +762,34 @@ func initTestOutput(tb testing.TB) io.Writer {
 	return output
 }
 
-type cleanupKeyType struct{}
+var perfSchedulingLabelFilter = flag.String("perf-scheduling-label-filter", "performance", "comma-separated list of labels which a testcase must have (no prefix or +) or must not have (-), used by BenchmarkPerfScheduling")
 
-var cleanupKey = cleanupKeyType{}
+func setupTestCase(t testing.TB, tc *testCase, output io.Writer, outOfTreePluginRegistry frameworkruntime.Registry) (informers.SharedInformerFactory, ktesting.TContext) {
+	tCtx := ktesting.Init(t, initoption.PerTestOutput(*useTestingLog))
 
-// shouldCleanup returns true if a function should clean up resource in the
-// apiserver when the test is done. This is true for unit tests (etcd and
-// apiserver get reused) and false for benchmarks (each benchmark starts with a
-// clean state, so cleaning up just wastes time).
-//
-// The default if not explicitly set in the context is true.
-func shouldCleanup(ctx context.Context) bool {
-	val := ctx.Value(cleanupKey)
-	if enabled, ok := val.(bool); ok {
-		return enabled
+	// Ensure that there are no leaked
+	// goroutines. They could influence
+	// performance of the next benchmark.
+	// This must *after* RedirectKlog
+	// because then during cleanup, the
+	// test will wait for goroutines to
+	// quit *before* restoring klog settings.
+	framework.GoleakCheck(t)
+
+	// Now that we are ready to run, start
+	// etcd.
+	framework.StartEtcd(t, output)
+
+	for feature, flag := range tc.FeatureGates {
+		featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, feature, flag)
 	}
-	return true
-}
 
-// withCleanup sets whether cleaning up resources in the apiserver
-// should be done. The default is true.
-func withCleanup(tCtx ktesting.TContext, enabled bool) ktesting.TContext {
-	return ktesting.WithValue(tCtx, cleanupKey, enabled)
-}
+	// 30 minutes should be plenty enough even for the 5000-node tests.
+	timeout := 30 * time.Minute
+	tCtx = ktesting.WithTimeout(tCtx, timeout, fmt.Sprintf("timed out after the %s per-test timeout", timeout))
 
-var perfSchedulingLabelFilter = flag.String("perf-scheduling-label-filter", "performance", "comma-separated list of labels which a testcase must have (no prefix or +) or must not have (-), used by BenchmarkPerfScheduling")
+	return setupClusterForWorkload(tCtx, tc.SchedulerConfigPath, tc.FeatureGates, outOfTreePluginRegistry)
+}
 
 // RunBenchmarkPerfScheduling runs the scheduler performance tests.
 //
@@ -821,33 +822,8 @@ func RunBenchmarkPerfScheduling(b *testing.B, outOfTreePluginRegistry frameworkr
 					if !enabled(*perfSchedulingLabelFilter, append(tc.Labels, w.Labels...)...) {
 						b.Skipf("disabled by label filter %q", *perfSchedulingLabelFilter)
 					}
-					tCtx := ktesting.Init(b, initoption.PerTestOutput(*useTestingLog))
-
-					// Ensure that there are no leaked
-					// goroutines. They could influence
-					// performance of the next benchmark.
-					// This must *after* RedirectKlog
-					// because then during cleanup, the
-					// test will wait for goroutines to
-					// quit *before* restoring klog settings.
-					framework.GoleakCheck(b)
-
-					// Now that we are ready to run, start
-					// etcd.
-					framework.StartEtcd(b, output)
-
-					// 30 minutes should be plenty enough even for the 5000-node tests.
-					timeout := 30 * time.Minute
-					tCtx = ktesting.WithTimeout(tCtx, timeout, fmt.Sprintf("timed out after the %s per-test timeout", timeout))
-
-					for feature, flag := range tc.FeatureGates {
-						featuregatetesting.SetFeatureGateDuringTest(b, utilfeature.DefaultFeatureGate, feature, flag)
-					}
-					informerFactory, tCtx := setupClusterForWorkload(tCtx, tc.SchedulerConfigPath, tc.FeatureGates, outOfTreePluginRegistry)
 
-					// No need to clean up, each benchmark testcase starts with an empty
-					// etcd database.
-					tCtx = withCleanup(tCtx, false)
+					informerFactory, tCtx := setupTestCase(b, tc, output, outOfTreePluginRegistry)
 
 					results := runWorkload(tCtx, tc, w, informerFactory)
 					dataItems.DataItems = append(dataItems.DataItems, results...)
@@ -889,16 +865,6 @@ func RunBenchmarkPerfScheduling(b *testing.B, outOfTreePluginRegistry frameworkr
 
 var testSchedulingLabelFilter = flag.String("test-scheduling-label-filter", "integration-test", "comma-separated list of labels which a testcase must have (no prefix or +) or must not have (-), used by TestScheduling")
 
-type schedulerConfig struct {
-	schedulerConfigPath string
-	featureGates        map[featuregate.Feature]bool
-}
-
-func (c schedulerConfig) equals(tc *testCase) bool {
-	return c.schedulerConfigPath == tc.SchedulerConfigPath &&
-		cmp.Equal(c.featureGates, tc.FeatureGates)
-}
-
 func loadSchedulerConfig(file string) (*config.KubeSchedulerConfiguration, error) {
 	data, err := os.ReadFile(file)
 	if err != nil {
@@ -997,7 +963,6 @@ func runWorkload(tCtx ktesting.TContext, tc *testCase, w *workload, informerFact
 			b.ReportMetric(duration.Seconds(), "runtime_seconds")
 		})
 	}
-	cleanup := shouldCleanup(tCtx)
 
 	// Disable error checking of the sampling interval length in the
 	// throughput collector by default. When running benchmarks, report
@@ -1028,11 +993,6 @@ func runWorkload(tCtx ktesting.TContext, tc *testCase, w *workload, informerFact
 	// All namespaces listed in numPodsScheduledPerNamespace will be cleaned up.
 	numPodsScheduledPerNamespace := make(map[string]int)
 
-	if cleanup {
-		// This must run before controllers get shut down.
-		defer cleanupWorkload(tCtx, tc, numPodsScheduledPerNamespace)
-	}
-
 	for opIndex, op := range unrollWorkloadTemplate(tCtx, tc.WorkloadTemplate, w) {
 		realOp, err := op.realOp.patchParams(w)
 		if err != nil {
@@ -1052,13 +1012,6 @@ func runWorkload(tCtx ktesting.TContext, tc *testCase, w *workload, informerFact
 			if err := nodePreparer.PrepareNodes(tCtx, nextNodeIndex); err != nil {
 				tCtx.Fatalf("op %d: %v", opIndex, err)
 			}
-			if cleanup {
-				defer func() {
-					if err := nodePreparer.CleanupNodes(tCtx); err != nil {
-						tCtx.Fatalf("failed to clean up nodes, error: %v", err)
-					}
-				}()
-			}
 			nextNodeIndex += concreteOp.Count
 
 		case *createNamespacesOp:
@@ -1333,51 +1286,6 @@ func runWorkload(tCtx ktesting.TContext, tc *testCase, w *workload, informerFact
 	return dataItems
 }
 
-// cleanupWorkload ensures that everything is removed from the API server that
-// might have been created by runWorkload. This must be done before starting
-// the next workload because otherwise it might stumble over previously created
-// objects. For example, the namespaces are the same in different workloads, so
-// not deleting them would cause the next one to fail with "cannot create
-// namespace: already exists".
-//
-// Calling cleanupWorkload can be skipped if it is known that the next workload
-// will run with a fresh etcd instance.
-func cleanupWorkload(tCtx ktesting.TContext, tc *testCase, numPodsScheduledPerNamespace map[string]int) {
-	deleteNow := *metav1.NewDeleteOptions(0)
-	for namespace := range numPodsScheduledPerNamespace {
-		// Pods have to be deleted explicitly, with no grace period. Normally
-		// kubelet will set the DeletionGracePeriodSeconds to zero when it's okay
-		// to remove a deleted pod, but we don't run kubelet...
-		if err := tCtx.Client().CoreV1().Pods(namespace).DeleteCollection(tCtx, deleteNow, metav1.ListOptions{}); err != nil {
-			tCtx.Fatalf("failed to delete pods in namespace %q: %v", namespace, err)
-		}
-		if err := tCtx.Client().CoreV1().Namespaces().Delete(tCtx, namespace, deleteNow); err != nil {
-			tCtx.Fatalf("Deleting Namespace %q in numPodsScheduledPerNamespace: %v", namespace, err)
-		}
-	}
-
-	// We need to wait here because even with deletion timestamp set,
-	// actually removing a namespace can take some time (garbage collecting
-	// other generated object like secrets, etc.) and we don't want to
-	// start the next workloads while that cleanup is still going on.
-	if err := wait.PollUntilContextTimeout(tCtx, time.Second, 5*time.Minute, false, func(ctx context.Context) (bool, error) {
-		namespaces, err := tCtx.Client().CoreV1().Namespaces().List(ctx, metav1.ListOptions{})
-		if err != nil {
-			return false, err
-		}
-		for _, namespace := range namespaces.Items {
-			if _, ok := numPodsScheduledPerNamespace[namespace.Name]; ok {
-				// A namespace created by the workload, need to wait.
-				return false, nil
-			}
-		}
-		// All namespaces gone.
-		return true, nil
-	}); err != nil {
-		tCtx.Fatalf("failed while waiting for namespace removal: %v", err)
-	}
-}
-
 func createNamespaceIfNotPresent(tCtx ktesting.TContext, namespace string, podsPerNamespace *map[string]int) {
 	if _, ok := (*podsPerNamespace)[namespace]; !ok {
 		// The namespace has not created yet.
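
Not part of the diff above: a minimal sketch of how a non-benchmark caller could use the new setupTestCase helper, assuming the same testcase/workload loop that RunBenchmarkPerfScheduling uses. The function name RunIntegrationPerfScheduling and the getTestCases/configFile helpers are assumptions for illustration; only setupTestCase, initTestOutput, enabled, testSchedulingLabelFilter, and runWorkload come from the code above.

func RunIntegrationPerfScheduling(t *testing.T, outOfTreePluginRegistry frameworkruntime.Registry) {
	// Hypothetical illustration only; mirrors the benchmark loop shown in the diff.
	testCases, err := getTestCases(configFile) // getTestCases/configFile assumed, as on the benchmark path
	if err != nil {
		t.Fatal(err)
	}
	output := initTestOutput(t)
	for _, tc := range testCases {
		t.Run(tc.Name, func(t *testing.T) {
			for _, w := range tc.Workloads {
				t.Run(w.Name, func(t *testing.T) {
					if !enabled(*testSchedulingLabelFilter, append(tc.Labels, w.Labels...)...) {
						t.Skipf("disabled by label filter %q", *testSchedulingLabelFilter)
					}
					// One call now covers logging setup, the goleak check, etcd startup,
					// feature gates, the per-test timeout, and cluster setup.
					informerFactory, tCtx := setupTestCase(t, tc, output, outOfTreePluginRegistry)
					runWorkload(tCtx, tc, w, informerFactory)
				})
			}
		})
	}
}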