
Commit 5103297

Clarify that we're flaking on any occurrence of alert, not failing
1 parent f990e70 commit 5103297

3 files changed: +13 -14 lines changed


pkg/monitortestlibrary/allowedalerts/all.go
Lines changed: 4 additions & 4 deletions

@@ -19,10 +19,10 @@ func AllAlertTests(jobType *platformidentification.JobType, etcdAllowance AlertT
 	// In CI firing is rare, but does happen a few times a month. Pending however occurs all the time, which implies operators are routinely in
 	// a state they're not supposed to be during upgrade.
 	// https://github.com/openshift/enhancements/blob/cb81452fddf86c1099acd87610b88369cd6192db/dev-guide/cluster-version-operator/dev/clusteroperator.md#there-are-a-set-of-guarantees-components-are-expected-to-honor-in-return
-	ret = append(ret, newAlert("Cluster Version Operator", "ClusterOperatorDown", jobType).pending().alwaysFail().toTests()...)
-	ret = append(ret, newAlert("Cluster Version Operator", "ClusterOperatorDegraded", jobType).pending().alwaysFail().toTests()...)
-	ret = append(ret, newAlert("Cluster Version Operator", "ClusterOperatorDown", jobType).firing().alwaysFail().toTests()...)
-	ret = append(ret, newAlert("Cluster Version Operator", "ClusterOperatorDegraded", jobType).firing().alwaysFail().toTests()...)
+	ret = append(ret, newAlert("Cluster Version Operator", "ClusterOperatorDown", jobType).pending().alwaysFlake().toTests()...)
+	ret = append(ret, newAlert("Cluster Version Operator", "ClusterOperatorDegraded", jobType).pending().alwaysFlake().toTests()...)
+	ret = append(ret, newAlert("Cluster Version Operator", "ClusterOperatorDown", jobType).firing().alwaysFlake().toTests()...)
+	ret = append(ret, newAlert("Cluster Version Operator", "ClusterOperatorDegraded", jobType).firing().alwaysFlake().toTests()...)

 	ret = append(ret, newAlert("etcd", "etcdMembersDown", jobType).pending().neverFail().toTests()...)
 	ret = append(ret, newAlert("etcd", "etcdMembersDown", jobType).firing().toTests()...)

pkg/monitortestlibrary/allowedalerts/basic_alert.go
Lines changed: 2 additions & 2 deletions

@@ -111,8 +111,8 @@ func (a *alertBuilder) neverFail() *alertBuilder {
 	return a
 }

-func (a *alertBuilder) alwaysFail() *alertBuilder {
-	a.allowanceCalculator = alwaysFail()
+func (a *alertBuilder) alwaysFlake() *alertBuilder {
+	a.allowanceCalculator = alwaysFlake()
 	return a
 }


pkg/monitortestlibrary/allowedalerts/matches.go
Lines changed: 7 additions & 8 deletions

@@ -55,20 +55,19 @@ func getClosestPercentilesValues(key historicaldata2.AlertDataKey) (historicalda
 	return getCurrentResults().BestMatchDuration(key)
 }

-func alwaysFail() AlertTestAllowanceCalculator {
-	return &alwaysFailAllowance{}
+func alwaysFlake() AlertTestAllowanceCalculator {
+	return &alwaysFlakeAllowance{}
 }

-// alwaysFailAllowance is for alerts we want to fail a test if they occur at all.
-type alwaysFailAllowance struct {
+// alwaysFlakeAllowance is for alerts we want to flake a test if they occur at all.
+type alwaysFlakeAllowance struct {
 }

-func (d *alwaysFailAllowance) FailAfter(key historicaldata2.AlertDataKey) (time.Duration, error) {
-	// TODO: right now we're just flaking until we're certain this doesn't happen too often. Once we're sure,
-	// change to 1 second.
+func (d *alwaysFlakeAllowance) FailAfter(key historicaldata2.AlertDataKey) (time.Duration, error) {
+	// make it effectively impossible for a test failure here, we only want flakes
 	return 24 * time.Hour, nil
 }

-func (d *alwaysFailAllowance) FlakeAfter(key historicaldata2.AlertDataKey) time.Duration {
+func (d *alwaysFlakeAllowance) FlakeAfter(key historicaldata2.AlertDataKey) time.Duration {
 	return 1 * time.Second
 }
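
For illustration only, here is a small, self-contained sketch (not part of this commit) of how a harness could interpret the two thresholds an AlertTestAllowanceCalculator returns. The classify helper and its names are hypothetical; the 24-hour and 1-second values mirror alwaysFlakeAllowance above, so any alert observed for a realistic duration can only flake the test, never fail it.

package main

import (
	"fmt"
	"time"
)

// classify is a hypothetical helper: given how long an alert was observed and
// the calculator's two thresholds, decide whether the test passes, flakes, or fails.
func classify(observed, failAfter, flakeAfter time.Duration) string {
	switch {
	case observed > failAfter:
		return "fail"
	case observed > flakeAfter:
		return "flake"
	default:
		return "pass"
	}
}

func main() {
	// Values mirroring alwaysFlakeAllowance: FailAfter is 24h, FlakeAfter is 1s.
	failAfter := 24 * time.Hour
	flakeAfter := 1 * time.Second

	fmt.Println(classify(5*time.Minute, failAfter, flakeAfter)) // flake
	fmt.Println(classify(0, failAfter, flakeAfter))             // pass
}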
