Commit dc2099e

Merge pull request kubernetes#87055 from denkensk/clean-unused-predicate-error
Cleanup unused predicate error types.
2 parents: 942b526 + 0bab010

3 files changed (+25, -71 lines)

pkg/scheduler/algorithm/predicates/error.go

Lines changed: 2 additions & 20 deletions
```diff
@@ -37,35 +37,17 @@ var (
 	ErrPodNotMatchHostName = NewPredicateFailureError("HostName", "node(s) didn't match the requested hostname")
 	// ErrPodNotFitsHostPorts is used for PodFitsHostPorts predicate error.
 	ErrPodNotFitsHostPorts = NewPredicateFailureError("PodFitsHostPorts", "node(s) didn't have free ports for the requested pod ports")
-	// ErrNodeUnderMemoryPressure is used for NodeUnderMemoryPressure predicate error.
-	ErrNodeUnderMemoryPressure = NewPredicateFailureError("NodeUnderMemoryPressure", "node(s) had memory pressure")
-	// ErrNodeUnderDiskPressure is used for NodeUnderDiskPressure predicate error.
-	ErrNodeUnderDiskPressure = NewPredicateFailureError("NodeUnderDiskPressure", "node(s) had disk pressure")
-	// ErrNodeUnderPIDPressure is used for NodeUnderPIDPressure predicate error.
-	ErrNodeUnderPIDPressure = NewPredicateFailureError("NodeUnderPIDPressure", "node(s) had pid pressure")
-	// ErrNodeNotReady is used for NodeNotReady predicate error.
-	ErrNodeNotReady = NewPredicateFailureError("NodeNotReady", "node(s) were not ready")
-	// ErrNodeNetworkUnavailable is used for NodeNetworkUnavailable predicate error.
-	ErrNodeNetworkUnavailable = NewPredicateFailureError("NodeNetworkUnavailable", "node(s) had unavailable network")
 	// ErrNodeUnknownCondition is used for NodeUnknownCondition predicate error.
 	ErrNodeUnknownCondition = NewPredicateFailureError("NodeUnknownCondition", "node(s) had unknown conditions")
-	// ErrFakePredicate is used for test only. The fake predicates returning false also returns error
-	// as ErrFakePredicate.
-	ErrFakePredicate = NewPredicateFailureError("FakePredicateError", "Nodes failed the fake predicate")
 )
 
 var unresolvablePredicateFailureErrors = map[PredicateFailureReason]struct{}{
 	ErrNodeSelectorNotMatch: {},
 	ErrPodNotMatchHostName:  {},
 	// Node conditions won't change when scheduler simulates removal of preemption victims.
 	// So, it is pointless to try nodes that have not been able to host the pod due to node
-	// conditions. These include ErrNodeNotReady, ErrNodeUnderPIDPressure, ErrNodeUnderMemoryPressure, ....
-	ErrNodeNotReady:            {},
-	ErrNodeNetworkUnavailable:  {},
-	ErrNodeUnderDiskPressure:   {},
-	ErrNodeUnderPIDPressure:    {},
-	ErrNodeUnderMemoryPressure: {},
-	ErrNodeUnknownCondition:    {},
+	// conditions.
+	ErrNodeUnknownCondition: {},
 }
 
 // UnresolvablePredicateExists checks if there is at least one unresolvable predicate failure reason.
```
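For context, here is a minimal sketch of how this shrinking set is typically consulted when the scheduler decides whether preemption could help on a node. It lives in the same package as the map above; the helper name and boolean signature are illustrative assumptions, not the exact upstream UnresolvablePredicateExists code:

```go
// unresolvableReasonExists reports whether any of the given failure
// reasons is in the unresolvablePredicateFailureErrors set, i.e. whether
// retrying this node after preempting pods would still be pointless.
func unresolvableReasonExists(reasons []PredicateFailureReason) bool {
	for _, reason := range reasons {
		if _, ok := unresolvablePredicateFailureErrors[reason]; ok {
			return true
		}
	}
	return false
}
```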

pkg/scheduler/core/generic_scheduler_test.go

Lines changed: 20 additions & 48 deletions
```diff
@@ -27,7 +27,7 @@ import (
 	"testing"
 	"time"
 
-	"k8s.io/api/core/v1"
+	v1 "k8s.io/api/core/v1"
 	"k8s.io/apimachinery/pkg/api/resource"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime"
@@ -65,6 +65,8 @@ var (
 	errPrioritize = fmt.Errorf("priority map encounters an error")
 )
 
+const ErrReasonFake = "Nodes failed the fake predicate"
+
 type trueFilterPlugin struct{}
 
 // Name returns name of the plugin.
@@ -91,7 +93,7 @@ func (pl *falseFilterPlugin) Name() string {
 
 // Filter invoked at the filter extension point.
 func (pl *falseFilterPlugin) Filter(_ context.Context, _ *framework.CycleState, pod *v1.Pod, nodeInfo *nodeinfo.NodeInfo) *framework.Status {
-	return framework.NewStatus(framework.Unschedulable, algorithmpredicates.ErrFakePredicate.GetReason())
+	return framework.NewStatus(framework.Unschedulable, ErrReasonFake)
 }
 
 // NewFalseFilterPlugin initializes a falseFilterPlugin and returns it.
@@ -115,7 +117,7 @@ func (pl *matchFilterPlugin) Filter(_ context.Context, _ *framework.CycleState,
 	if pod.Name == node.Name {
 		return nil
 	}
-	return framework.NewStatus(framework.Unschedulable, algorithmpredicates.ErrFakePredicate.GetReason())
+	return framework.NewStatus(framework.Unschedulable, ErrReasonFake)
 }
 
 // NewMatchFilterPlugin initializes a matchFilterPlugin and returns it.
@@ -135,7 +137,7 @@ func (pl *noPodsFilterPlugin) Filter(_ context.Context, _ *framework.CycleState,
 	if len(nodeInfo.Pods()) == 0 {
 		return nil
 	}
-	return framework.NewStatus(framework.Unschedulable, algorithmpredicates.ErrFakePredicate.GetReason())
+	return framework.NewStatus(framework.Unschedulable, ErrReasonFake)
 }
 
 // NewNoPodsFilterPlugin initializes a noPodsFilterPlugin and returns it.
@@ -391,8 +393,8 @@ func TestGenericScheduler(t *testing.T) {
 			Pod:         &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "2", UID: types.UID("2")}},
 			NumAllNodes: 2,
 			FilteredNodesStatuses: framework.NodeToStatusMap{
-				"machine1": framework.NewStatus(framework.Unschedulable, algorithmpredicates.ErrFakePredicate.GetReason()),
-				"machine2": framework.NewStatus(framework.Unschedulable, algorithmpredicates.ErrFakePredicate.GetReason()),
+				"machine1": framework.NewStatus(framework.Unschedulable, ErrReasonFake),
+				"machine2": framework.NewStatus(framework.Unschedulable, ErrReasonFake),
 			},
 		},
 	},
@@ -464,9 +466,9 @@ func TestGenericScheduler(t *testing.T) {
 			Pod:         &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "2", UID: types.UID("2")}},
 			NumAllNodes: 3,
 			FilteredNodesStatuses: framework.NodeToStatusMap{
-				"3": framework.NewStatus(framework.Unschedulable, algorithmpredicates.ErrFakePredicate.GetReason()),
-				"2": framework.NewStatus(framework.Unschedulable, algorithmpredicates.ErrFakePredicate.GetReason()),
-				"1": framework.NewStatus(framework.Unschedulable, algorithmpredicates.ErrFakePredicate.GetReason()),
+				"3": framework.NewStatus(framework.Unschedulable, ErrReasonFake),
+				"2": framework.NewStatus(framework.Unschedulable, ErrReasonFake),
+				"1": framework.NewStatus(framework.Unschedulable, ErrReasonFake),
 			},
 		},
 	},
@@ -494,8 +496,8 @@ func TestGenericScheduler(t *testing.T) {
 			Pod:         &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "2", UID: types.UID("2")}},
 			NumAllNodes: 2,
 			FilteredNodesStatuses: framework.NodeToStatusMap{
-				"1": framework.NewStatus(framework.Unschedulable, algorithmpredicates.ErrFakePredicate.GetReason()),
-				"2": framework.NewStatus(framework.Unschedulable, algorithmpredicates.ErrFakePredicate.GetReason()),
+				"1": framework.NewStatus(framework.Unschedulable, ErrReasonFake),
+				"2": framework.NewStatus(framework.Unschedulable, ErrReasonFake),
 			},
 		},
 	},
@@ -860,7 +862,7 @@ func TestFindFitAllError(t *testing.T) {
 				t.Errorf("failed to find node %v in %v", node.Name, nodeToStatusMap)
 			}
 			reasons := status.Reasons()
-			if len(reasons) != 1 || reasons[0] != algorithmpredicates.ErrFakePredicate.GetReason() {
+			if len(reasons) != 1 || reasons[0] != ErrReasonFake {
 				t.Errorf("unexpected failure reasons: %v", reasons)
 			}
 		})
@@ -896,7 +898,7 @@ func TestFindFitSomeError(t *testing.T) {
 				t.Errorf("failed to find node %v in %v", node.Name, nodeToStatusMap)
 			}
 			reasons := status.Reasons()
-			if len(reasons) != 1 || reasons[0] != algorithmpredicates.ErrFakePredicate.GetReason() {
+			if len(reasons) != 1 || reasons[0] != ErrReasonFake {
 				t.Errorf("unexpected failures: %v", reasons)
 			}
 		})
@@ -985,24 +987,6 @@ func makeNode(node string, milliCPU, memory int64) *v1.Node {
 	}
 }
 
-func TestHumanReadableFitError(t *testing.T) {
-	err := &FitError{
-		Pod:         &v1.Pod{ObjectMeta: metav1.ObjectMeta{Name: "2", UID: types.UID("2")}},
-		NumAllNodes: 3,
-		FilteredNodesStatuses: framework.NodeToStatusMap{
-			"1": framework.NewStatus(framework.Unschedulable, algorithmpredicates.ErrNodeUnderMemoryPressure.GetReason()),
-			"2": framework.NewStatus(framework.Unschedulable, algorithmpredicates.ErrNodeUnderDiskPressure.GetReason()),
-			"3": framework.NewStatus(framework.Unschedulable, algorithmpredicates.ErrNodeUnderDiskPressure.GetReason()),
-		},
-	}
-	if strings.Contains(err.Error(), "0/3 nodes are available") {
-		if strings.Contains(err.Error(), "2 node(s) had disk pressure") && strings.Contains(err.Error(), "1 node(s) had memory pressure") {
-			return
-		}
-	}
-	t.Errorf("Error message doesn't have all the information content: [" + err.Error() + "]")
-}
-
 // The point of this test is to show that you:
 // - get the same priority for a zero-request pod as for a pod with the defaults requests,
 //   both when the zero-request pod is already on the machine and when the zero-request pod
```
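The deleted TestHumanReadableFitError exercised FitError's aggregated message (per its own assertions, something like "0/3 nodes are available: 2 node(s) had disk pressure, 1 node(s) had memory pressure"), and it cannot compile once the node-pressure error types are gone. A rough sketch of the grouping it covered, illustrative rather than the exact FitError.Error code:

```go
// groupReasons tallies identical failure reasons across nodes, the way
// FitError's message groups them into "2 node(s) had disk pressure" etc.
// framework.NodeToStatusMap and Status.Reasons() are the same types the
// test file above uses.
func groupReasons(statuses framework.NodeToStatusMap) map[string]int {
	counts := make(map[string]int)
	for _, status := range statuses {
		for _, reason := range status.Reasons() {
			counts[reason]++
		}
	}
	return counts
}
```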
```diff
@@ -1903,29 +1887,17 @@ func TestNodesWherePreemptionMightHelp(t *testing.T) {
 		{
 			name: "Mix of failed predicates works fine",
 			nodesStatuses: framework.NodeToStatusMap{
-				"machine1": framework.NewStatus(framework.UnschedulableAndUnresolvable, algorithmpredicates.ErrNodeUnderDiskPressure.GetReason()),
-				"machine2": framework.NewStatus(framework.UnschedulableAndUnresolvable, volumerestrictions.ErrReasonDiskConflict),
-				"machine3": framework.NewStatus(framework.Unschedulable, algorithmpredicates.NewInsufficientResourceError(v1.ResourceMemory, 1000, 600, 400).GetReason()),
-			},
-			expected: map[string]bool{"machine3": true, "machine4": true},
-		},
-		{
-			name: "Node condition errors should be considered unresolvable",
-			nodesStatuses: framework.NodeToStatusMap{
-				"machine1": framework.NewStatus(framework.UnschedulableAndUnresolvable, algorithmpredicates.ErrNodeUnderDiskPressure.GetReason()),
-				"machine2": framework.NewStatus(framework.UnschedulableAndUnresolvable, algorithmpredicates.ErrNodeUnderPIDPressure.GetReason()),
-				"machine3": framework.NewStatus(framework.UnschedulableAndUnresolvable, algorithmpredicates.ErrNodeUnderMemoryPressure.GetReason()),
+				"machine1": framework.NewStatus(framework.UnschedulableAndUnresolvable, volumerestrictions.ErrReasonDiskConflict),
+				"machine2": framework.NewStatus(framework.Unschedulable, algorithmpredicates.NewInsufficientResourceError(v1.ResourceMemory, 1000, 600, 400).GetReason()),
 			},
-			expected: map[string]bool{"machine4": true},
+			expected: map[string]bool{"machine2": true, "machine3": true, "machine4": true},
 		},
 		{
 			name: "Node condition errors should be considered unresolvable",
 			nodesStatuses: framework.NodeToStatusMap{
-				"machine1": framework.NewStatus(framework.UnschedulableAndUnresolvable, algorithmpredicates.ErrNodeNotReady.GetReason()),
-				"machine2": framework.NewStatus(framework.UnschedulableAndUnresolvable, algorithmpredicates.ErrNodeNetworkUnavailable.GetReason()),
-				"machine3": framework.NewStatus(framework.UnschedulableAndUnresolvable, nodeunschedulable.ErrReasonUnknownCondition),
+				"machine1": framework.NewStatus(framework.UnschedulableAndUnresolvable, nodeunschedulable.ErrReasonUnknownCondition),
 			},
-			expected: map[string]bool{"machine4": true},
+			expected: map[string]bool{"machine2": true, "machine3": true, "machine4": true},
 		},
 		{
 			name: "ErrVolume... errors should not be tried as it indicates that the pod is unschedulable due to no matching volumes for pod on node",
```

pkg/scheduler/framework/plugins/serviceaffinity/service_affinity.go

Lines changed: 3 additions & 3 deletions
```diff
@@ -38,8 +38,8 @@ const (
 	// Using the name of the plugin will likely help us avoid collisions with other plugins.
 	preFilterStateKey = "PreFilter" + Name
 
-	// ErrServiceAffinityViolated is used for CheckServiceAffinity predicate error.
-	ErrServiceAffinityViolated = "node(s) didn't match service affinity"
+	// ErrReason is used for CheckServiceAffinity predicate error.
+	ErrReason = "node(s) didn't match service affinity"
 )
 
 // Args holds the args that are used to configure the plugin.
@@ -277,7 +277,7 @@ func (pl *ServiceAffinity) Filter(ctx context.Context, cycleState *framework.Cyc
 		return nil
 	}
 
-	return framework.NewStatus(framework.Unschedulable, ErrServiceAffinityViolated)
+	return framework.NewStatus(framework.Unschedulable, ErrReason)
 }
 
 // Score invoked at the Score extension point.
```
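The shorter name reads naturally at call sites outside the package, where it is already qualified by the package name. A hypothetical external caller (framework import path assumed from this repository's layout at the time):

```go
import (
	"k8s.io/kubernetes/pkg/scheduler/framework/plugins/serviceaffinity"
	framework "k8s.io/kubernetes/pkg/scheduler/framework/v1alpha1"
)

// With the rename, the qualified identifier serviceaffinity.ErrReason is
// self-describing, unlike the old serviceaffinity.ErrServiceAffinityViolated,
// which repeated the package name.
var status = framework.NewStatus(framework.Unschedulable, serviceaffinity.ErrReason)
```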
