Commit 35029fe

Add PredicateSnapshot benchmark for scheduling with DRA objects.
1 parent 3fdf196 commit 35029fe

2 files changed: +403 −2 lines changed
Lines changed: 397 additions & 0 deletions
@@ -0,0 +1,397 @@
/*
Copyright 2025 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package predicate

import (
    "fmt"
    "testing"

    "github.com/google/uuid"
    apiv1 "k8s.io/api/core/v1"
    resourceapi "k8s.io/api/resource/v1beta1"
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/types"
    "k8s.io/apiserver/pkg/util/feature"
    drasnapshot "k8s.io/autoscaler/cluster-autoscaler/simulator/dynamicresources/snapshot"
    drautils "k8s.io/autoscaler/cluster-autoscaler/simulator/dynamicresources/utils"
    "k8s.io/autoscaler/cluster-autoscaler/simulator/framework"
    featuretesting "k8s.io/component-base/featuregate/testing"
    "k8s.io/kubernetes/pkg/features"

    . "k8s.io/autoscaler/cluster-autoscaler/utils/test"
)

func createTestResourceSlice(nodeName string, devicesPerSlice int, slicesPerNode int, driver string, device resourceapi.BasicDevice) *resourceapi.ResourceSlice {
    sliceId := uuid.New().String()
    name := fmt.Sprintf("rs-%s", sliceId)
    uid := types.UID(fmt.Sprintf("rs-%s-uid", sliceId))
    devices := make([]resourceapi.Device, devicesPerSlice)
    for deviceIndex := 0; deviceIndex < devicesPerSlice; deviceIndex++ {
        deviceName := fmt.Sprintf("rs-dev-%s-%d", sliceId, deviceIndex)
        deviceCopy := device
        devices[deviceIndex] = resourceapi.Device{Name: deviceName, Basic: &deviceCopy}
    }

    return &resourceapi.ResourceSlice{
        ObjectMeta: metav1.ObjectMeta{Name: name, UID: uid},
        Spec: resourceapi.ResourceSliceSpec{
            NodeName: nodeName,
            Driver:   driver,
            Pool: resourceapi.ResourcePool{
                Name:               nodeName,
                ResourceSliceCount: int64(slicesPerNode),
            },
            Devices: devices,
        },
    }
}

func createTestResourceClaim(requestsPerClaim int, devicesPerRequest int, driver string, deviceClass string) *resourceapi.ResourceClaim {
    claimId := uuid.New().String()
    name := fmt.Sprintf("claim-%s", claimId)
    uid := types.UID(fmt.Sprintf("claim-%s-uid", claimId))
    expression := fmt.Sprintf(`device.driver == "%s"`, driver)

    requests := make([]resourceapi.DeviceRequest, requestsPerClaim)
    for requestIndex := 0; requestIndex < requestsPerClaim; requestIndex++ {
        requests[requestIndex] = resourceapi.DeviceRequest{
            Name:            fmt.Sprintf("deviceRequest-%d", requestIndex),
            DeviceClassName: deviceClass,
            Selectors:       []resourceapi.DeviceSelector{{CEL: &resourceapi.CELDeviceSelector{Expression: expression}}},
            AllocationMode:  resourceapi.DeviceAllocationModeExactCount,
            Count:           int64(devicesPerRequest),
        }
    }

    return &resourceapi.ResourceClaim{
        ObjectMeta: metav1.ObjectMeta{Name: name, UID: uid, Namespace: "default"},
        Spec: resourceapi.ResourceClaimSpec{
            Devices: resourceapi.DeviceClaim{Requests: requests},
        },
    }
}

// allocateResourceSlicesForClaim attempts to allocate devices from the provided ResourceSlices
// to satisfy the requests in the given ResourceClaim. It iterates through the claim's device
// requests and, for each request, tries to find enough available devices in the provided slices.
//
// The function returns a new ResourceClaim object with the allocation result (if successful)
// and a boolean indicating whether all requests in the claim were satisfied.
//
// If not all requests can be satisfied with the given slices, the returned ResourceClaim will
// have a partial or empty allocation, and the boolean will be false.
// The original ResourceClaim object is not modified.
func allocateResourceSlicesForClaim(claim *resourceapi.ResourceClaim, nodeName string, slices ...*resourceapi.ResourceSlice) (*resourceapi.ResourceClaim, bool) {
    allocatedDevices := make([]resourceapi.DeviceRequestAllocationResult, 0, len(claim.Spec.Devices.Requests))
    sliceIndex, deviceIndex := 0, 0
    requestSatisfied := true

allocationLoop:
    for _, request := range claim.Spec.Devices.Requests {
        for devicesRequired := request.Count; devicesRequired > 0; devicesRequired-- {
            // Skip resource slices until we find one with at least a single device available.
            for sliceIndex < len(slices) && deviceIndex >= len(slices[sliceIndex].Spec.Devices) {
                sliceIndex++
                deviceIndex = 0
            }

            // If the previous loop couldn't find a resource slice containing at least a single
            // device while the resource claim still has pending device requests, terminate the
            // allocation loop and indicate that the claim wasn't fully satisfied.
            if sliceIndex >= len(slices) {
                requestSatisfied = false
                break allocationLoop
            }

            slice := slices[sliceIndex]
            device := slice.Spec.Devices[deviceIndex]
            deviceAllocation := resourceapi.DeviceRequestAllocationResult{
                Request: request.Name,
                Driver:  slice.Spec.Driver,
                Pool:    slice.Spec.Pool.Name,
                Device:  device.Name,
            }

            allocatedDevices = append(allocatedDevices, deviceAllocation)
            deviceIndex++
        }
    }

    allocation := &resourceapi.AllocationResult{
        NodeSelector: selectorForNode(nodeName),
        Devices:      resourceapi.DeviceAllocationResult{Results: allocatedDevices},
    }

    return drautils.TestClaimWithAllocation(claim, allocation), requestSatisfied
}

func selectorForNode(node string) *apiv1.NodeSelector {
    return &apiv1.NodeSelector{
        NodeSelectorTerms: []apiv1.NodeSelectorTerm{
            {
                MatchFields: []apiv1.NodeSelectorRequirement{
                    {
                        Key:      "metadata.name",
                        Operator: apiv1.NodeSelectorOpIn,
                        Values:   []string{node},
                    },
                },
            },
        },
    }
}

// BenchmarkScheduleRevert measures the performance of scheduling pods that interact with the Dynamic
// Resource Allocation (DRA) API onto nodes within a cluster snapshot, followed by snapshot manipulation
// operations (fork, commit, revert).
//
// The benchmark runs a matrix of configurations, varying:
//   - The number of nodes in the initial snapshot.
//   - The number of pods being scheduled, categorized by whether they use shared or pod-owned ResourceClaims.
//   - The number of snapshot operations (Fork, Commit, Revert) performed before/after scheduling.
//
// For each configuration and snapshot type, the benchmark performs the following steps:
//   1. Initializes a cluster snapshot with a predefined set of nodes, ResourceSlices, DeviceClasses,
//      and pre-allocated ResourceClaims (both shared and pod-owned).
//   2. Iterates through a subset of the nodes based on the configuration.
//   3. For each node:
//      a. Performs the configured number of snapshot Forks.
//      b. Adds the node's NodeInfo (including its ResourceSlices) to the snapshot.
//      c. Schedules the configured number of pods that reference a shared ResourceClaim onto the node.
//      d. Schedules the configured number of pods that reference their own pre-allocated pod-owned ResourceClaims onto the node.
//      e. Performs the configured number of snapshot Commits.
//      f. Performs the configured number of snapshot Reverts.
//
// This benchmark helps evaluate the efficiency of:
//   - Scheduling pods with different types of DRA claims.
//   - Adding nodes with DRA resources to the snapshot.
//   - The overhead of snapshot Fork, Commit, and Revert operations, especially in scenarios involving DRA objects.
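//
// A typical invocation (using the standard Go benchmark tooling; shown for illustration only) is,
// from this package's directory:
//
//    go test -run='^$' -bench=BenchmarkScheduleRevert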
func BenchmarkScheduleRevert(b *testing.B) {
    featuretesting.SetFeatureGateDuringTest(b, feature.DefaultFeatureGate, features.DynamicResourceAllocation, true)

    const maxNodesCount = 100
    const devicesPerSlice = 100
    const maxPodsCount = 100
    const deviceClassName = "defaultClass"
    const driverName = "driver.foo.com"
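
    // Configuration names encode "<nodesCount>x<podsPerNode>[x<ownedClaimPods>]/<claim kind>/<snapshot ops>";
    // e.g. "100x32/SharedClaims/ForkRevert" schedules 32 shared-claim pods onto each of 100 nodes and
    // performs one Fork and one Revert per node.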
    configurations := map[string]struct {
        nodesCount int

        sharedClaimPods int
        ownedClaimPods  int
        forks           int
        commits         int
        reverts         int
    }{
        // SHARED CLAIMS
        "100x32/SharedClaims/ForkRevert":           {sharedClaimPods: 32, nodesCount: 100, forks: 1, reverts: 1},
        "100x32/SharedClaims/ForkCommit":           {sharedClaimPods: 32, nodesCount: 100, forks: 1, commits: 1},
        "100x32/SharedClaims/ForkForkCommitRevert": {sharedClaimPods: 32, nodesCount: 100, forks: 2, reverts: 1, commits: 1},
        "100x32/SharedClaims/Fork":                 {sharedClaimPods: 32, nodesCount: 100, forks: 1},
        "100x32/SharedClaims/Fork5Revert5":         {sharedClaimPods: 32, nodesCount: 100, forks: 5, reverts: 5},
        "100x1/SharedClaims/ForkRevert":            {sharedClaimPods: 1, nodesCount: 100, forks: 1, reverts: 1},
        "100x1/SharedClaims/ForkCommit":            {sharedClaimPods: 1, nodesCount: 100, forks: 1, commits: 1},
        "100x1/SharedClaims/ForkForkCommitRevert":  {sharedClaimPods: 1, nodesCount: 100, forks: 2, reverts: 1, commits: 1},
        "100x1/SharedClaims/Fork":                  {sharedClaimPods: 1, nodesCount: 100, forks: 1},
        "100x1/SharedClaims/Fork5Revert5":          {sharedClaimPods: 1, nodesCount: 100, forks: 5, reverts: 5},
        "10x32/SharedClaims/ForkRevert":            {sharedClaimPods: 32, nodesCount: 10, forks: 1, reverts: 1},
        "10x32/SharedClaims/ForkCommit":            {sharedClaimPods: 32, nodesCount: 10, forks: 1, commits: 1},
        "10x32/SharedClaims/ForkForkCommitRevert":  {sharedClaimPods: 32, nodesCount: 10, forks: 2, reverts: 1, commits: 1},
        "10x32/SharedClaims/Fork":                  {sharedClaimPods: 32, nodesCount: 10, forks: 1},
        "10x32/SharedClaims/Fork5Revert5":          {sharedClaimPods: 32, nodesCount: 10, forks: 5, reverts: 5},
        "10x1/SharedClaims/ForkRevert":             {sharedClaimPods: 1, nodesCount: 10, forks: 1, reverts: 1},
        "10x1/SharedClaims/ForkCommit":             {sharedClaimPods: 1, nodesCount: 10, forks: 1, commits: 1},
        "10x1/SharedClaims/ForkForkCommitRevert":   {sharedClaimPods: 1, nodesCount: 10, forks: 2, reverts: 1, commits: 1},
        "10x1/SharedClaims/Fork":                   {sharedClaimPods: 1, nodesCount: 10, forks: 1},
        "10x1/SharedClaims/Fork5Revert5":           {sharedClaimPods: 1, nodesCount: 10, forks: 5, reverts: 5},
        // POD OWNED CLAIMS
        "100x100/OwnedClaims/ForkRevert":           {ownedClaimPods: 100, nodesCount: 100, forks: 1, reverts: 1},
        "100x100/OwnedClaims/ForkCommit":           {ownedClaimPods: 100, nodesCount: 100, forks: 1, commits: 1},
        "100x100/OwnedClaims/ForkForkCommitRevert": {ownedClaimPods: 100, nodesCount: 100, forks: 2, reverts: 1, commits: 1},
        "100x100/OwnedClaims/Fork":                 {ownedClaimPods: 100, nodesCount: 100, forks: 1},
        "100x100/OwnedClaims/Fork5Revert5":         {ownedClaimPods: 100, nodesCount: 100, forks: 5, reverts: 5},
        "100x1/OwnedClaims/ForkRevert":             {ownedClaimPods: 1, nodesCount: 100, forks: 1, reverts: 1},
        "100x1/OwnedClaims/ForkCommit":             {ownedClaimPods: 1, nodesCount: 100, forks: 1, commits: 1},
        "100x1/OwnedClaims/ForkForkCommitRevert":   {ownedClaimPods: 1, nodesCount: 100, forks: 2, reverts: 1, commits: 1},
        "100x1/OwnedClaims/Fork":                   {ownedClaimPods: 1, nodesCount: 100, forks: 1},
        "100x1/OwnedClaims/Fork5Revert5":           {ownedClaimPods: 1, nodesCount: 100, forks: 5, reverts: 5},
        "10x100/OwnedClaims/ForkRevert":            {ownedClaimPods: 100, nodesCount: 10, forks: 1, reverts: 1},
        "10x100/OwnedClaims/ForkCommit":            {ownedClaimPods: 100, nodesCount: 10, forks: 1, commits: 1},
        "10x100/OwnedClaims/ForkForkCommitRevert":  {ownedClaimPods: 100, nodesCount: 10, forks: 2, reverts: 1, commits: 1},
        "10x100/OwnedClaims/Fork":                  {ownedClaimPods: 100, nodesCount: 10, forks: 1},
        "10x100/OwnedClaims/Fork5Revert5":          {ownedClaimPods: 100, nodesCount: 10, forks: 5, reverts: 5},
        "10x1/OwnedClaims/ForkRevert":              {ownedClaimPods: 1, nodesCount: 10, forks: 1, reverts: 1},
        "10x1/OwnedClaims/ForkCommit":              {ownedClaimPods: 1, nodesCount: 10, forks: 1, commits: 1},
        "10x1/OwnedClaims/ForkForkCommitRevert":    {ownedClaimPods: 1, nodesCount: 10, forks: 2, reverts: 1, commits: 1},
        "10x1/OwnedClaims/Fork":                    {ownedClaimPods: 1, nodesCount: 10, forks: 1},
        "10x1/OwnedClaims/Fork5Revert5":            {ownedClaimPods: 1, nodesCount: 10, forks: 5, reverts: 5},
        // MIXED CLAIMS
        "100x32x50/MixedClaims/ForkRevert":           {sharedClaimPods: 32, ownedClaimPods: 50, nodesCount: 100, forks: 1, reverts: 1},
        "100x32x50/MixedClaims/ForkCommit":           {sharedClaimPods: 32, ownedClaimPods: 50, nodesCount: 100, forks: 1, commits: 1},
        "100x32x50/MixedClaims/ForkForkCommitRevert": {sharedClaimPods: 32, ownedClaimPods: 50, nodesCount: 100, forks: 2, reverts: 1, commits: 1},
        "100x32x50/MixedClaims/Fork":                 {sharedClaimPods: 32, ownedClaimPods: 50, nodesCount: 100, forks: 1},
        "100x32x50/MixedClaims/Fork5Revert5":         {sharedClaimPods: 32, ownedClaimPods: 50, nodesCount: 100, forks: 5, reverts: 5},
        "100x1x1/MixedClaims/ForkRevert":             {sharedClaimPods: 1, ownedClaimPods: 1, nodesCount: 100, forks: 1, reverts: 1},
        "100x1x1/MixedClaims/ForkCommit":             {sharedClaimPods: 1, ownedClaimPods: 1, nodesCount: 100, forks: 1, commits: 1},
        "100x1x1/MixedClaims/ForkForkCommitRevert":   {sharedClaimPods: 1, ownedClaimPods: 1, nodesCount: 100, forks: 2, reverts: 1, commits: 1},
        "100x1x1/MixedClaims/Fork":                   {sharedClaimPods: 1, ownedClaimPods: 1, nodesCount: 100, forks: 1},
        "100x1x1/MixedClaims/Fork5Revert5":           {sharedClaimPods: 1, ownedClaimPods: 1, nodesCount: 100, forks: 5, reverts: 5},
        "10x32x50/MixedClaims/ForkRevert":            {sharedClaimPods: 32, ownedClaimPods: 50, nodesCount: 10, forks: 1, reverts: 1},
        "10x32x50/MixedClaims/ForkCommit":            {sharedClaimPods: 32, ownedClaimPods: 50, nodesCount: 10, forks: 1, commits: 1},
        "10x32x50/MixedClaims/ForkForkCommitRevert":  {sharedClaimPods: 32, ownedClaimPods: 50, nodesCount: 10, forks: 2, reverts: 1, commits: 1},
        "10x32x50/MixedClaims/Fork":                  {sharedClaimPods: 32, ownedClaimPods: 50, nodesCount: 10, forks: 1},
        "10x32x50/MixedClaims/Fork5Revert5":          {sharedClaimPods: 32, ownedClaimPods: 50, nodesCount: 10, forks: 5, reverts: 5},
        "10x1x1/MixedClaims/ForkRevert":              {sharedClaimPods: 1, ownedClaimPods: 1, nodesCount: 10, forks: 1, reverts: 1},
        "10x1x1/MixedClaims/ForkCommit":              {sharedClaimPods: 1, ownedClaimPods: 1, nodesCount: 10, forks: 1, commits: 1},
        "10x1x1/MixedClaims/ForkForkCommitRevert":    {sharedClaimPods: 1, ownedClaimPods: 1, nodesCount: 10, forks: 2, reverts: 1, commits: 1},
        "10x1x1/MixedClaims/Fork":                    {sharedClaimPods: 1, ownedClaimPods: 1, nodesCount: 10, forks: 1},
        "10x1x1/MixedClaims/Fork5Revert5":            {sharedClaimPods: 1, ownedClaimPods: 1, nodesCount: 10, forks: 5, reverts: 5},
    }

    devicesClasses := map[string]*resourceapi.DeviceClass{
        deviceClassName: {ObjectMeta: metav1.ObjectMeta{Name: deviceClassName, UID: "defaultClassUid"}},
    }

    nodeInfos := make([]*framework.NodeInfo, maxNodesCount)
    sharedClaims := make([]*resourceapi.ResourceClaim, maxNodesCount)
    ownedClaims := make([][]*resourceapi.ResourceClaim, maxNodesCount)
    owningPods := make([][]*apiv1.Pod, maxNodesCount)
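    // Pre-build the fixtures once: for every potential node, a NodeInfo with its ResourceSlice,
    // an allocated shared claim, and per-pod owned claims together with their owning pods.
    // The configurations above only exercise subsets of these fixtures inside the timed loop.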
    for nodeIndex := 0; nodeIndex < maxNodesCount; nodeIndex++ {
        nodeName := fmt.Sprintf("node-%d", nodeIndex)
        node := BuildTestNode(nodeName, 10000, 10000)
        nodeSlice := createTestResourceSlice(node.Name, devicesPerSlice, 1, driverName, resourceapi.BasicDevice{})
        nodeInfo := framework.NewNodeInfo(node, []*resourceapi.ResourceSlice{nodeSlice})

        sharedClaim := createTestResourceClaim(devicesPerSlice, 1, driverName, deviceClassName)
        sharedClaim, satisfied := allocateResourceSlicesForClaim(sharedClaim, nodeName, nodeSlice)
        if !satisfied {
            b.Errorf("Error during setup, claim allocation cannot be satisfied")
        }

        claimsOnNode := make([]*resourceapi.ResourceClaim, maxPodsCount)
        podsOnNode := make([]*apiv1.Pod, maxPodsCount)
        for podIndex := 0; podIndex < maxPodsCount; podIndex++ {
            podName := fmt.Sprintf("pod-%d-%d", nodeIndex, podIndex)
            ownedClaim := createTestResourceClaim(1, 1, driverName, deviceClassName)
            pod := BuildTestPod(
                podName,
                1,
                1,
                WithResourceClaim(ownedClaim.Name, ownedClaim.Name, ""),
            )

            ownedClaim = drautils.TestClaimWithPodOwnership(pod, ownedClaim)
            ownedClaim, satisfied := allocateResourceSlicesForClaim(ownedClaim, nodeName, nodeSlice)
            if !satisfied {
                b.Errorf("Error during setup, claim allocation cannot be satisfied")
            }

            podsOnNode[podIndex] = pod
            claimsOnNode[podIndex] = ownedClaim
        }

        nodeInfos[nodeIndex] = nodeInfo
        sharedClaims[nodeIndex] = sharedClaim
        ownedClaims[nodeIndex] = claimsOnNode
        owningPods[nodeIndex] = podsOnNode
    }

    b.ResetTimer()
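    // `snapshots` (mapping snapshot implementation names to factory functions) is not defined in
    // this file; it is assumed to come from elsewhere in this package's tests.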
    for snapshotName, snapshotFactory := range snapshots {
        b.Run(snapshotName, func(b *testing.B) {
            for cfgName, cfg := range configurations {
                b.Run(cfgName, func(b *testing.B) {
                    for i := 0; i < b.N; i++ {
                        snapshot, err := snapshotFactory()
                        if err != nil {
                            b.Errorf("Failed to create a snapshot: %v", err)
                        }

                        draSnapshot := drasnapshot.NewSnapshot(
                            nil,
                            nil,
                            nil,
                            devicesClasses,
                        )

                        draSnapshot.AddClaims(sharedClaims)
                        for nodeIndex := 0; nodeIndex < cfg.nodesCount; nodeIndex++ {
                            draSnapshot.AddClaims(ownedClaims[nodeIndex])
                        }

                        err = snapshot.SetClusterState(nil, nil, draSnapshot)
                        if err != nil {
                            b.Errorf("Failed to set cluster state: %v", err)
                        }
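
                        // For each node in the configuration: perform the configured Forks, add the
                        // node (with its ResourceSlices), schedule the shared-claim and owned-claim
                        // pods onto it, then perform the configured Commits and Reverts.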
                        for nodeIndex := 0; nodeIndex < cfg.nodesCount; nodeIndex++ {
                            nodeInfo := nodeInfos[nodeIndex]
                            for i := 0; i < cfg.forks; i++ {
                                snapshot.Fork()
                            }

                            err := snapshot.AddNodeInfo(nodeInfo)
                            if err != nil {
                                b.Errorf("Failed to add node info to snapshot: %v", err)
                            }

                            sharedClaim := sharedClaims[nodeIndex]
                            for podIndex := 0; podIndex < cfg.sharedClaimPods; podIndex++ {
                                pod := BuildTestPod(
                                    fmt.Sprintf("pod-%d", podIndex),
                                    1,
                                    1,
                                    WithResourceClaim(sharedClaim.Name, sharedClaim.Name, ""),
                                )

                                err := snapshot.SchedulePod(pod, nodeInfo.Node().Name)
                                if err != nil {
                                    b.Errorf(
                                        "Failed to schedule a pod %s to node %s: %v",
                                        pod.Name,
                                        nodeInfo.Node().Name,
                                        err,
                                    )
                                }
                            }

                            for podIndex := 0; podIndex < cfg.ownedClaimPods; podIndex++ {
                                owningPod := owningPods[nodeIndex][podIndex]
                                err := snapshot.SchedulePod(owningPod, nodeInfo.Node().Name)
                                if err != nil {
                                    b.Errorf(
                                        "Failed to schedule a pod %s to node %s: %v",
                                        owningPod.Name,
                                        nodeInfo.Node().Name,
                                        err,
                                    )
                                }
                            }

                            for i := 0; i < cfg.commits; i++ {
                                snapshot.Commit()
                            }

                            for i := 0; i < cfg.reverts; i++ {
                                snapshot.Revert()
                            }
                        }
                    }
                })
            }
        })
    }
}
