Commit 814a6f2

remove FakeVolumeBinderConfig and test new statuses and states

1 parent c413836 commit 814a6f2

File tree

3 files changed: +246 additions, -53 deletions


pkg/controller/volume/scheduling/scheduler_binder.go

Lines changed: 0 additions & 2 deletions
@@ -256,8 +256,6 @@ func (b *volumeBinder) FindPodVolumes(pod *v1.Pod, boundClaims, claimsToBind []*
 
 	// Check PV node affinity on bound volumes
 	if len(boundClaims) > 0 {
-		// TODO if node affinity does not match, we should
-		// UnschedulableAndUnresolvable error back to scheduler framework
 		boundVolumesSatisfied, err = b.checkBoundClaims(boundClaims, node, podName)
 		if err != nil {
 			return nil, err
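
The TODO removed above is what the rest of this commit delivers on the plugin side: a node-affinity mismatch on a bound volume now reaches the scheduler framework as UnschedulableAndUnresolvable rather than a bare error. A minimal sketch of that mapping, assuming the framework's Status.AppendReason helper and the scheduling.ConflictReason values exercised by the test below (the plugin's actual Filter body may differ):

	// Imports assumed: scheduling "k8s.io/kubernetes/pkg/controller/volume/scheduling",
	// framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1".
	// Turn volume-binder conflict reasons into a framework status.
	func conflictsToStatus(reasons []scheduling.ConflictReason, err error) *framework.Status {
		if err != nil {
			return framework.NewStatus(framework.Error, err.Error())
		}
		if len(reasons) > 0 {
			status := framework.NewStatus(framework.UnschedulableAndUnresolvable)
			for _, reason := range reasons {
				status.AppendReason(string(reason))
			}
			return status
		}
		return nil // a nil status tells the framework the node passes
	}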

pkg/scheduler/framework/plugins/volumebinding/BUILD

Lines changed: 7 additions & 0 deletions
@@ -36,8 +36,15 @@ go_test(
     srcs = ["volume_binding_test.go"],
     embed = [":go_default_library"],
     deps = [
+        "//pkg/controller/volume/persistentvolume/util:go_default_library",
         "//pkg/controller/volume/scheduling:go_default_library",
+        "//pkg/scheduler/apis/config:go_default_library",
         "//pkg/scheduler/framework/v1alpha1:go_default_library",
         "//staging/src/k8s.io/api/core/v1:go_default_library",
+        "//staging/src/k8s.io/api/storage/v1:go_default_library",
+        "//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
+        "//staging/src/k8s.io/client-go/informers:go_default_library",
+        "//staging/src/k8s.io/client-go/kubernetes/fake:go_default_library",
+        "//vendor/k8s.io/utils/pointer:go_default_library",
     ],
 )
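
Each new dep above mirrors a Go import added to volume_binding_test.go in the next diff; Bazel labels under //staging/src and //vendor resolve to plain k8s.io import paths. An illustrative (not exhaustive) mapping:

	import (
		metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" // //staging/src/k8s.io/apimachinery/pkg/apis/meta/v1
		"k8s.io/client-go/kubernetes/fake"            // //staging/src/k8s.io/client-go/kubernetes/fake
		"k8s.io/kubernetes/pkg/scheduler/apis/config" // //pkg/scheduler/apis/config
		"k8s.io/utils/pointer"                        // //vendor/k8s.io/utils/pointer
	)

In the Kubernetes repo these BUILD entries are typically regenerated with hack/update-bazel.sh rather than edited by hand.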

pkg/scheduler/framework/plugins/volumebinding/volume_binding_test.go

Lines changed: 239 additions & 51 deletions
@@ -18,99 +18,287 @@ package volumebinding
 
 import (
 	"context"
-	"fmt"
 	"reflect"
 	"testing"
 
 	v1 "k8s.io/api/core/v1"
+	storagev1 "k8s.io/api/storage/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/client-go/informers"
+	"k8s.io/client-go/kubernetes/fake"
+	pvutil "k8s.io/kubernetes/pkg/controller/volume/persistentvolume/util"
 	"k8s.io/kubernetes/pkg/controller/volume/scheduling"
+	"k8s.io/kubernetes/pkg/scheduler/apis/config"
 	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
+	"k8s.io/utils/pointer"
 )
 
-func TestVolumeBinding(t *testing.T) {
-	findErr := fmt.Errorf("find err")
-	volState := v1.PodSpec{
-		Volumes: []v1.Volume{
-			{
-				VolumeSource: v1.VolumeSource{
-					PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{},
+var (
+	immediate            = storagev1.VolumeBindingImmediate
+	waitForFirstConsumer = storagev1.VolumeBindingWaitForFirstConsumer
+	immediateSC          = &storagev1.StorageClass{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: "immediate-sc",
+		},
+		VolumeBindingMode: &immediate,
+	}
+	waitSC = &storagev1.StorageClass{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: "wait-sc",
+		},
+		VolumeBindingMode: &waitForFirstConsumer,
+	}
+)
+
+func makePV(name string) *v1.PersistentVolume {
+	return &v1.PersistentVolume{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: name,
+		},
+	}
+}
+
+func addPVNodeAffinity(pv *v1.PersistentVolume, volumeNodeAffinity *v1.VolumeNodeAffinity) *v1.PersistentVolume {
+	pv.Spec.NodeAffinity = volumeNodeAffinity
+	return pv
+}
+
+func makePVC(name string, boundPVName string, storageClassName string) *v1.PersistentVolumeClaim {
+	pvc := &v1.PersistentVolumeClaim{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      name,
+			Namespace: v1.NamespaceDefault,
+		},
+		Spec: v1.PersistentVolumeClaimSpec{
+			StorageClassName: pointer.StringPtr(storageClassName),
+		},
+	}
+	if boundPVName != "" {
+		pvc.Spec.VolumeName = boundPVName
+		metav1.SetMetaDataAnnotation(&pvc.ObjectMeta, pvutil.AnnBindCompleted, "true")
+	}
+	return pvc
+}
+
+func makePod(name string, pvcNames []string) *v1.Pod {
+	p := &v1.Pod{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      name,
+			Namespace: v1.NamespaceDefault,
+		},
+	}
+	p.Spec.Volumes = make([]v1.Volume, 0)
+	for _, pvcName := range pvcNames {
+		p.Spec.Volumes = append(p.Spec.Volumes, v1.Volume{
+			VolumeSource: v1.VolumeSource{
+				PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
+					ClaimName: pvcName,
 				},
 			},
-		},
+		})
 	}
+	return p
+}
+
+func TestVolumeBinding(t *testing.T) {
 	table := []struct {
-		name               string
-		pod                *v1.Pod
-		node               *v1.Node
-		volumeBinderConfig *scheduling.FakeVolumeBinderConfig
-		wantStatus         *framework.Status
+		name                    string
+		pod                     *v1.Pod
+		node                    *v1.Node
+		pvcs                    []*v1.PersistentVolumeClaim
+		pvs                     []*v1.PersistentVolume
+		wantPreFilterStatus     *framework.Status
+		wantStateAfterPreFilter *stateData
+		wantFilterStatus        *framework.Status
 	}{
 		{
-			name:       "nothing",
-			pod:        &v1.Pod{},
-			node:       &v1.Node{},
-			wantStatus: nil,
+			name: "pod has not pvcs",
+			pod:  makePod("pod-a", nil),
+			node: &v1.Node{},
+			wantStateAfterPreFilter: &stateData{
+				skip: true,
+			},
 		},
 		{
 			name: "all bound",
-			pod:  &v1.Pod{Spec: volState},
+			pod:  makePod("pod-a", []string{"pvc-a"}),
+			node: &v1.Node{},
+			pvcs: []*v1.PersistentVolumeClaim{
+				makePVC("pvc-a", "pv-a", waitSC.Name),
+			},
+			pvs: []*v1.PersistentVolume{
+				makePV("pv-a"),
+			},
+			wantStateAfterPreFilter: &stateData{
+				boundClaims: []*v1.PersistentVolumeClaim{
+					makePVC("pvc-a", "pv-a", waitSC.Name),
+				},
+				claimsToBind: []*v1.PersistentVolumeClaim{},
+			},
+		},
+		{
+			name: "immediate claims not bound",
+			pod:  makePod("pod-a", []string{"pvc-a"}),
 			node: &v1.Node{},
-			volumeBinderConfig: &scheduling.FakeVolumeBinderConfig{
-				AllBound: true,
+			pvcs: []*v1.PersistentVolumeClaim{
+				makePVC("pvc-a", "", immediateSC.Name),
 			},
-			wantStatus: nil,
+			wantPreFilterStatus: framework.NewStatus(framework.UnschedulableAndUnresolvable, "pod has unbound immediate PersistentVolumeClaims"),
 		},
 		{
-			name: "unbound/no matches",
-			pod:  &v1.Pod{Spec: volState},
+			name: "unbound claims no matches",
+			pod:  makePod("pod-a", []string{"pvc-a"}),
 			node: &v1.Node{},
-			volumeBinderConfig: &scheduling.FakeVolumeBinderConfig{
-				FindReasons: []scheduling.ConflictReason{scheduling.ErrReasonBindConflict},
+			pvcs: []*v1.PersistentVolumeClaim{
+				makePVC("pvc-a", "", waitSC.Name),
 			},
-			wantStatus: framework.NewStatus(framework.UnschedulableAndUnresolvable, string(scheduling.ErrReasonBindConflict)),
+			wantStateAfterPreFilter: &stateData{
+				boundClaims: []*v1.PersistentVolumeClaim{},
+				claimsToBind: []*v1.PersistentVolumeClaim{
+					makePVC("pvc-a", "", waitSC.Name),
+				},
+			},
+			wantFilterStatus: framework.NewStatus(framework.UnschedulableAndUnresolvable, string(scheduling.ErrReasonBindConflict)),
 		},
 		{
 			name: "bound and unbound unsatisfied",
-			pod:  &v1.Pod{Spec: volState},
-			node: &v1.Node{},
-			volumeBinderConfig: &scheduling.FakeVolumeBinderConfig{
-				FindReasons: []scheduling.ConflictReason{scheduling.ErrReasonBindConflict, scheduling.ErrReasonNodeConflict},
+			pod:  makePod("pod-a", []string{"pvc-a", "pvc-b"}),
+			node: &v1.Node{
+				ObjectMeta: metav1.ObjectMeta{
+					Labels: map[string]string{
+						"foo": "barbar",
+					},
+				},
 			},
-			wantStatus: framework.NewStatus(framework.UnschedulableAndUnresolvable, string(scheduling.ErrReasonBindConflict), string(scheduling.ErrReasonNodeConflict)),
+			pvcs: []*v1.PersistentVolumeClaim{
+				makePVC("pvc-a", "pv-a", waitSC.Name),
+				makePVC("pvc-b", "", waitSC.Name),
+			},
+			pvs: []*v1.PersistentVolume{
+				addPVNodeAffinity(makePV("pv-a"), &v1.VolumeNodeAffinity{
+					Required: &v1.NodeSelector{
+						NodeSelectorTerms: []v1.NodeSelectorTerm{
+							{
+								MatchExpressions: []v1.NodeSelectorRequirement{
+									{
+										Key:      "foo",
+										Operator: v1.NodeSelectorOpIn,
+										Values:   []string{"bar"},
+									},
+								},
+							},
+						},
+					},
+				}),
+			},
+			wantStateAfterPreFilter: &stateData{
+				boundClaims: []*v1.PersistentVolumeClaim{
+					makePVC("pvc-a", "pv-a", waitSC.Name),
+				},
+				claimsToBind: []*v1.PersistentVolumeClaim{
+					makePVC("pvc-b", "", waitSC.Name),
+				},
+			},
+			wantFilterStatus: framework.NewStatus(framework.UnschedulableAndUnresolvable, string(scheduling.ErrReasonNodeConflict), string(scheduling.ErrReasonBindConflict)),
 		},
 		{
-			name:               "unbound/found matches/bind succeeds",
-			pod:                &v1.Pod{Spec: volState},
-			node:               &v1.Node{},
-			volumeBinderConfig: &scheduling.FakeVolumeBinderConfig{},
-			wantStatus:         nil,
+			name:                "pvc not found",
+			pod:                 makePod("pod-a", []string{"pvc-a"}),
+			node:                &v1.Node{},
+			wantPreFilterStatus: framework.NewStatus(framework.Error, `error getting PVC "default/pvc-a": could not find v1.PersistentVolumeClaim "default/pvc-a"`),
+			wantFilterStatus:    nil,
 		},
 		{
-			name: "predicate error",
-			pod:  &v1.Pod{Spec: volState},
+			name: "pv not found",
+			pod:  makePod("pod-a", []string{"pvc-a"}),
 			node: &v1.Node{},
-			volumeBinderConfig: &scheduling.FakeVolumeBinderConfig{
-				FindErr: findErr,
+			pvcs: []*v1.PersistentVolumeClaim{
+				makePVC("pvc-a", "pv-a", waitSC.Name),
+			},
+			wantPreFilterStatus: nil,
+			wantStateAfterPreFilter: &stateData{
+				boundClaims: []*v1.PersistentVolumeClaim{
+					makePVC("pvc-a", "pv-a", waitSC.Name),
+				},
+				claimsToBind: []*v1.PersistentVolumeClaim{},
 			},
-			wantStatus: framework.NewStatus(framework.Error, findErr.Error()),
+			wantFilterStatus: framework.NewStatus(framework.Error, `could not find v1.PersistentVolume "pv-a"`),
 		},
 	}
 
 	for _, item := range table {
 		t.Run(item.name, func(t *testing.T) {
+			ctx := context.Background()
+			client := fake.NewSimpleClientset()
+			informerFactory := informers.NewSharedInformerFactory(client, 0)
+			opts := []framework.Option{
+				framework.WithClientSet(client),
+				framework.WithInformerFactory(informerFactory),
+			}
+			fh, err := framework.NewFramework(nil, nil, nil, opts...)
+			if err != nil {
+				t.Fatal(err)
+			}
+			pl, err := New(&config.VolumeBindingArgs{
+				BindTimeoutSeconds: 300,
+			}, fh)
+			if err != nil {
+				t.Fatal(err)
+			}
+
+			// Start informer factory after initialization
+			informerFactory.Start(ctx.Done())
+
+			// Feed testing data and wait for them to be synced
+			client.StorageV1().StorageClasses().Create(ctx, immediateSC, metav1.CreateOptions{})
+			client.StorageV1().StorageClasses().Create(ctx, waitSC, metav1.CreateOptions{})
+			if item.node != nil {
+				client.CoreV1().Nodes().Create(ctx, item.node, metav1.CreateOptions{})
+			}
+			if len(item.pvcs) > 0 {
+				for _, pvc := range item.pvcs {
+					client.CoreV1().PersistentVolumeClaims(pvc.Namespace).Create(ctx, pvc, metav1.CreateOptions{})
+				}
+			}
+			if len(item.pvs) > 0 {
+				for _, pv := range item.pvs {
+					client.CoreV1().PersistentVolumes().Create(ctx, pv, metav1.CreateOptions{})
+				}
+			}
+			caches := informerFactory.WaitForCacheSync(ctx.Done())
+			for _, synced := range caches {
+				if !synced {
+					t.Errorf("error waiting for informer cache sync")
+				}
+			}
+
+			// Verify
+			p := pl.(*VolumeBinding)
 			nodeInfo := framework.NewNodeInfo()
 			nodeInfo.SetNode(item.node)
-			fakeVolumeBinder := scheduling.NewFakeVolumeBinder(item.volumeBinderConfig)
-			p := &VolumeBinding{
-				Binder: fakeVolumeBinder,
-			}
 			state := framework.NewCycleState()
-			p.PreFilter(context.Background(), state, item.pod)
-			gotStatus := p.Filter(context.Background(), state, item.pod, nodeInfo)
-			if !reflect.DeepEqual(gotStatus, item.wantStatus) {
-				t.Errorf("status does not match: %v, want: %v", gotStatus, item.wantStatus)
+			t.Logf("call PreFilter and check status")
+			gotPreFilterStatus := p.PreFilter(ctx, state, item.pod)
+			if !reflect.DeepEqual(gotPreFilterStatus, item.wantPreFilterStatus) {
+				t.Errorf("filter prefilter status does not match: %v, want: %v", gotPreFilterStatus, item.wantPreFilterStatus)
+			}
+			if !gotPreFilterStatus.IsSuccess() {
+				// scheduler framework will skip Filter if PreFilter fails
+				return
+			}
+			t.Logf("check state after prefilter phase")
+			stateData, err := getStateData(state)
+			if err != nil {
+				t.Fatal(err)
+			}
+			if !reflect.DeepEqual(stateData, item.wantStateAfterPreFilter) {
+				t.Errorf("state got after prefilter does not match: %v, want: %v", stateData, item.wantStateAfterPreFilter)
+			}
+			t.Logf("call Filter and check status")
+			gotStatus := p.Filter(ctx, state, item.pod, nodeInfo)
+			if !reflect.DeepEqual(gotStatus, item.wantFilterStatus) {
+				t.Errorf("filter status does not match: %v, want: %v", gotStatus, item.wantFilterStatus)
 			}
-
 		})
 	}
 }
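
For reference when reading the assertions above: the test compares the cycle state written by PreFilter (read back via getStateData) against the wantStateAfterPreFilter fixture. A rough outline of stateData as implied by those fixtures; the authoritative definition lives in volume_binding.go and may carry additional fields:

	// Inferred from the test fixtures above, not the canonical definition.
	type stateData struct {
		skip         bool                        // pod has no PVCs, so later phases can no-op
		boundClaims  []*v1.PersistentVolumeClaim // claims already bound to a PV
		claimsToBind []*v1.PersistentVolumeClaim // unbound claims the binder must match on the node
	}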
