@@ -39,8 +39,10 @@ import (
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/util/validation"
 	applyv1 "k8s.io/client-go/applyconfigurations/core/v1"
 	"k8s.io/client-go/kubernetes"
+	"k8s.io/dynamic-resource-allocation/resourceslice"
 	"k8s.io/klog/v2"
 	"k8s.io/kubernetes/test/e2e/feature"
 	"k8s.io/kubernetes/test/e2e/framework"
@@ -727,6 +729,97 @@ var _ = framework.SIGDescribe("node")("DRA", feature.DynamicResourceAllocation,
 	// TODO (https://github.com/kubernetes/kubernetes/issues/123699): move most of the test below into `testDriver` so that they get
 	// executed with different parameters.
 
+	ginkgo.Context("ResourceSlice Controller", func() {
+		// This is a stress test for creating many large slices.
+		// Each slice is as large as API limits allow.
+		//
+		// Could become a conformance test because it only depends
+		// on the apiserver.
+		f.It("creates slices", func(ctx context.Context) {
+			// Define desired resource slices.
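+			// The unique per-test namespace name doubles as the driver name, so the slices created here stay separate from those of other tests.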
+			driverName := f.Namespace.Name
+			numSlices := 100
+			devicePrefix := "dev-"
+			domainSuffix := ".example.com"
+			poolName := "network-attached"
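+			// Pad the attribute domain, names, and values to their maximum allowed lengths so that each device is as large as the API permits.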
+			domain := strings.Repeat("x", 63 /* TODO(pohly): add to API */ -len(domainSuffix)) + domainSuffix
+			stringValue := strings.Repeat("v", resourceapi.DeviceAttributeMaxValueLength)
+			pool := resourceslice.Pool{
+				Slices: make([]resourceslice.Slice, numSlices),
+			}
+			for i := 0; i < numSlices; i++ {
+				devices := make([]resourceapi.Device, resourceapi.ResourceSliceMaxDevices)
+				for e := 0; e < resourceapi.ResourceSliceMaxDevices; e++ {
+					device := resourceapi.Device{
+						Name: devicePrefix + strings.Repeat("x", validation.DNS1035LabelMaxLength-len(devicePrefix)-4) + fmt.Sprintf("%04d", e),
+						Basic: &resourceapi.BasicDevice{
+							Attributes: make(map[resourceapi.QualifiedName]resourceapi.DeviceAttribute, resourceapi.ResourceSliceMaxAttributesAndCapacitiesPerDevice),
+						},
+					}
+					for j := 0; j < resourceapi.ResourceSliceMaxAttributesAndCapacitiesPerDevice; j++ {
+						name := resourceapi.QualifiedName(domain + "/" + strings.Repeat("x", resourceapi.DeviceMaxIDLength-4) + fmt.Sprintf("%04d", j))
+						device.Basic.Attributes[name] = resourceapi.DeviceAttribute{
+							StringValue: &stringValue,
+						}
+					}
+					devices[e] = device
+				}
+				pool.Slices[i].Devices = devices
+			}
+			resources := &resourceslice.DriverResources{
+				Pools: map[string]resourceslice.Pool{poolName: pool},
+			}
+
+			ginkgo.By("Creating slices")
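+			// A short mutation cache TTL keeps the Consistently checks below, which wait for twice the TTL, reasonably quick.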
+			mutationCacheTTL := 10 * time.Second
+			controller, err := resourceslice.StartController(ctx, resourceslice.Options{
+				DriverName:       driverName,
+				KubeClient:       f.ClientSet,
+				Resources:        resources,
+				MutationCacheTTL: &mutationCacheTTL,
+			})
+			framework.ExpectNoError(err, "start controller")
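+			// On test teardown, stop the controller and delete every slice published for this driver, selected via the driver-name field selector.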
+			ginkgo.DeferCleanup(func(ctx context.Context) {
+				controller.Stop()
+				err := f.ClientSet.ResourceV1alpha3().ResourceSlices().DeleteCollection(ctx, metav1.DeleteOptions{}, metav1.ListOptions{
+					FieldSelector: resourceapi.ResourceSliceSelectorDriver + "=" + driverName,
+				})
+				framework.ExpectNoError(err, "delete resource slices")
+			})
+
+			// Eventually we should have all desired slices.
+			listSlices := framework.ListObjects(f.ClientSet.ResourceV1alpha3().ResourceSlices().List, metav1.ListOptions{
+				FieldSelector: resourceapi.ResourceSliceSelectorDriver + "=" + driverName,
+			})
+			gomega.Eventually(ctx, listSlices).WithTimeout(time.Minute).Should(gomega.HaveField("Items", gomega.HaveLen(numSlices)))
+
+			// Verify state.
+			expectSlices, err := listSlices(ctx)
+			framework.ExpectNoError(err)
+			gomega.Expect(expectSlices.Items).ShouldNot(gomega.BeEmpty())
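+			// Sanity-check the size: "as large as API limits allow" should translate into at least 600 KiB of encoded protobuf per slice, while still staying below 1 MiB.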
+			framework.Logf("Protobuf size of one slice is %d bytes = %d KB.", expectSlices.Items[0].Size(), expectSlices.Items[0].Size()/1024)
+			gomega.Expect(expectSlices.Items[0].Size()).Should(gomega.BeNumerically(">=", 600*1024), "ResourceSlice size")
+			gomega.Expect(expectSlices.Items[0].Size()).Should(gomega.BeNumerically("<", 1024*1024), "ResourceSlice size")
+			expectStats := resourceslice.Stats{NumCreates: int64(numSlices)}
+			gomega.Expect(controller.GetStats()).Should(gomega.Equal(expectStats))
+
+			// No further changes expected now, even after checking again.
+			gomega.Consistently(ctx, controller.GetStats).WithTimeout(2 * mutationCacheTTL).Should(gomega.Equal(expectStats))
+
+			// Ask the controller to delete all slices except for one empty slice.
+			ginkgo.By("Deleting slices")
+			resources = resources.DeepCopy()
+			resources.Pools[poolName] = resourceslice.Pool{Slices: []resourceslice.Slice{{}}}
+			controller.Update(resources)
+
+			// One empty slice should remain, after removing the full ones and adding the empty one.
+			emptySlice := gomega.HaveField("Spec.Devices", gomega.BeEmpty())
+			gomega.Eventually(ctx, listSlices).WithTimeout(time.Minute).Should(gomega.HaveField("Items", gomega.ConsistOf(emptySlice)))
+			expectStats = resourceslice.Stats{NumCreates: int64(numSlices) + 1, NumDeletes: int64(numSlices)}
+			gomega.Consistently(ctx, controller.GetStats).WithTimeout(2 * mutationCacheTTL).Should(gomega.Equal(expectStats))
+		})
+	})
+
 	ginkgo.Context("cluster", func() {
 		nodes := NewNodes(f, 1, 1)
 		driver := NewDriver(f, nodes, networkResources)