@@ -554,43 +554,6 @@ var _ = framework.SIGDescribe("node")("DRA", feature.DynamicResourceAllocation,
 			wg.Wait()
 		})
 
-		f.It("supports sharing a claim sequentially", f.WithSlow(), func(ctx context.Context) {
-			var objects []klog.KMetadata
-			objects = append(objects, b.externalClaim())
-
-			// This test used to test usage of the claim by one pod
-			// at a time. After removing the "not sharable"
-			// feature, we have to create more pods than supported
-			// at the same time to get the same effect.
-			numPods := resourceapi.ResourceClaimReservedForMaxSize + 10
-			pods := make([]*v1.Pod, numPods)
-			for i := 0; i < numPods; i++ {
-				pod := b.podExternal()
-				pods[i] = pod
-				objects = append(objects, pod)
-			}
-
-			b.create(ctx, objects...)
-
-			// We don't know the order. All that matters is that all of them get scheduled eventually.
-			f.Timeouts.PodStartSlow *= time.Duration(numPods)
-			var wg sync.WaitGroup
-			wg.Add(numPods)
-			for i := 0; i < numPods; i++ {
-				pod := pods[i]
-				go func() {
-					defer ginkgo.GinkgoRecover()
-					defer wg.Done()
-					b.testPod(ctx, f.ClientSet, pod, expectedEnv...)
-					// We need to delete each running pod, otherwise the others cannot use the claim.
-					err := f.ClientSet.CoreV1().Pods(pod.Namespace).Delete(ctx, pod.Name, metav1.DeleteOptions{})
-					framework.ExpectNoError(err, "delete pod")
-					framework.ExpectNoError(e2epod.WaitForPodNotFoundInNamespace(ctx, f.ClientSet, pod.Name, pod.Namespace, f.Timeouts.PodStartSlow))
-				}()
-			}
-			wg.Wait()
-		})
-
 		ginkgo.It("retries pod scheduling after creating device class", func(ctx context.Context) {
 			var objects []klog.KMetadata
 			pod, template := b.podInline()
@@ -667,7 +630,7 @@ var _ = framework.SIGDescribe("node")("DRA", feature.DynamicResourceAllocation,
 	// The following tests only make sense when there is more than one node.
 	// They get skipped when there's only one node.
 	multiNodeTests := func() {
-		nodes := NewNodes(f, 2, 8)
+		nodes := NewNodes(f, 3, 8)
 
 		ginkgo.Context("with different ResourceSlices", func() {
 			firstDevice := "pre-defined-device-01"
@@ -790,6 +753,117 @@ var _ = framework.SIGDescribe("node")("DRA", feature.DynamicResourceAllocation,
 				}
 			})
 		})
+
+		ginkgo.Context("with network-attached resources", func() {
+			driver := NewDriver(f, nodes, networkResources)
+			b := newBuilder(f, driver)
+
+			f.It("supports sharing a claim sequentially", f.WithSlow(), func(ctx context.Context) {
+				var objects []klog.KMetadata
+				objects = append(objects, b.externalClaim())
+
+				// This test used to test usage of the claim by one pod
+				// at a time. After removing the "not sharable"
+				// feature and bumping up the maximum number of
+				// consumers this is now a stress test which runs
+				// the maximum number of pods per claim in parallel.
+				// This only works on clusters with >= 3 nodes.
+				numMaxPods := resourceapi.ResourceClaimReservedForMaxSize
+				ginkgo.By(fmt.Sprintf("Creating %d pods sharing the same claim", numMaxPods))
+				pods := make([]*v1.Pod, numMaxPods)
+				for i := 0; i < numMaxPods; i++ {
+					pod := b.podExternal()
+					pods[i] = pod
+					objects = append(objects, pod)
+				}
+				b.create(ctx, objects...)
+
+				timeout := f.Timeouts.PodStartSlow * time.Duration(numMaxPods)
+				ensureDuration := f.Timeouts.PodStart // Don't check for too long, even if it is less precise.
+				podIsPending := gomega.HaveField("Spec.NodeName", gomega.BeEmpty())
+				waitForPodScheduled := func(pod *v1.Pod) {
+					ginkgo.GinkgoHelper()
+					gomega.Eventually(ctx, framework.GetObject(f.ClientSet.CoreV1().Pods(pod.Namespace).Get, pod.Name, metav1.GetOptions{})).
+						WithTimeout(timeout).
+						WithPolling(10 * time.Second).
+						ShouldNot(podIsPending, "Pod should get scheduled.")
+				}
+				ensurePodNotScheduled := func(pod *v1.Pod) {
+					ginkgo.GinkgoHelper()
+					gomega.Consistently(ctx, framework.GetObject(f.ClientSet.CoreV1().Pods(pod.Namespace).Get, pod.Name, metav1.GetOptions{})).
+						WithTimeout(ensureDuration).
+						WithPolling(10 * time.Second).
+						Should(podIsPending, "Pod should remain pending.")
+				}
+
+				// We don't know the order. All that matters is that all of them get scheduled eventually.
+				ginkgo.By(fmt.Sprintf("Waiting for %d pods to be scheduled", numMaxPods))
+				f.Timeouts.PodStartSlow *= time.Duration(numMaxPods)
+				var wg sync.WaitGroup
+				wg.Add(numMaxPods)
+				for i := 0; i < numMaxPods; i++ {
+					pod := pods[i]
+					go func() {
+						defer ginkgo.GinkgoRecover()
+						defer wg.Done()
+						waitForPodScheduled(pod)
+					}()
+				}
+				wg.Wait()
+
+				numMorePods := 10
+				ginkgo.By(fmt.Sprintf("Creating %d additional pods for the same claim", numMorePods))
+				morePods := make([]*v1.Pod, numMorePods)
+				objects = nil
+				for i := 0; i < numMorePods; i++ {
+					pod := b.podExternal()
+					morePods[i] = pod
+					objects = append(objects, pod)
+				}
+				b.create(ctx, objects...)
+
+				// None of the additional pods can run because of the ReservedFor limit.
+				ginkgo.By(fmt.Sprintf("Check for %s that the additional pods don't get scheduled", ensureDuration))
+				wg.Add(numMorePods)
+				for i := 0; i < numMorePods; i++ {
+					pod := morePods[i]
+					go func() {
+						defer ginkgo.GinkgoRecover()
+						defer wg.Done()
+						ensurePodNotScheduled(pod)
+					}()
+				}
+				wg.Wait()
+
+				// We need to delete each running pod, otherwise the new ones cannot use the claim.
+				ginkgo.By(fmt.Sprintf("Deleting the initial %d pods", numMaxPods))
+				wg.Add(numMaxPods)
+				for i := 0; i < numMaxPods; i++ {
+					pod := pods[i]
+					go func() {
+						defer ginkgo.GinkgoRecover()
+						defer wg.Done()
+						err := f.ClientSet.CoreV1().Pods(pod.Namespace).Delete(ctx, pod.Name, metav1.DeleteOptions{})
+						framework.ExpectNoError(err, "delete pod")
+						framework.ExpectNoError(e2epod.WaitForPodNotFoundInNamespace(ctx, f.ClientSet, pod.Name, pod.Namespace, f.Timeouts.PodStartSlow))
+					}()
+				}
+				wg.Wait()
+
+				// Now those should also run - eventually...
+				ginkgo.By(fmt.Sprintf("Waiting for the additional %d pods to be scheduled", numMorePods))
+				wg.Add(numMorePods)
+				for i := 0; i < numMorePods; i++ {
+					pod := morePods[i]
+					go func() {
+						defer ginkgo.GinkgoRecover()
+						defer wg.Done()
+						waitForPodScheduled(pod)
+					}()
+				}
+				wg.Wait()
+			})
+		})
 	}
 
 	ginkgo.Context("on single node", func() {
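The added test distinguishes "pending" from "scheduled" purely by whether Spec.NodeName has been set, polling with gomega's Eventually/Consistently. The sketch below isolates that pattern outside the e2e framework; the package name, helper names, and the getPod type are illustrative assumptions, whereas in the test itself the getter closures come from framework.GetObject against f.ClientSet.

```go
// Sketch only: helper names and the getPod type are made up for illustration;
// the diff above builds equivalent closures inline with framework.GetObject.
package sketch

import (
	"context"
	"time"

	"github.com/onsi/gomega"
	v1 "k8s.io/api/core/v1"
)

// getPod returns the current state of a pod, e.g. a closure around a clientset Get call.
type getPod func(ctx context.Context) (*v1.Pod, error)

// podIsPending matches a pod that the scheduler has not bound to a node yet.
var podIsPending = gomega.HaveField("Spec.NodeName", gomega.BeEmpty())

// expectScheduled polls until the pod has been assigned a node, failing after timeout.
func expectScheduled(ctx context.Context, g gomega.Gomega, get getPod, timeout time.Duration) {
	g.Eventually(ctx, get).
		WithTimeout(timeout).
		WithPolling(10 * time.Second).
		ShouldNot(podIsPending, "Pod should get scheduled.")
}

// expectStillPending asserts for a bounded duration that the pod stays unscheduled,
// which is how the test above checks that the ReservedFor limit blocks extra consumers.
func expectStillPending(ctx context.Context, g gomega.Gomega, get getPod, duration time.Duration) {
	g.Consistently(ctx, get).
		WithTimeout(duration).
		WithPolling(10 * time.Second).
		Should(podIsPending, "Pod should remain pending.")
}
```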