Skip to content

Commit cccc9f2

Browse files
committed
Adding a new test case for the Ceph analyzers
1 parent ff43538 commit cccc9f2

File tree

2 files changed

+41
-4
lines changed

2 files changed

+41
-4
lines changed

pkg/analyze/ceph.go

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -228,7 +228,7 @@ func detailedCephMessage(outcomeMessage string, status CephStatus) string {
228228

229229
if status.PgMap.TotalBytes > 0 {
230230
pgUsage := 100 * float64(status.PgMap.UsedBytes) / float64(status.PgMap.TotalBytes)
231-
msg = append(msg, fmt.Sprintf("PG storage usage is %.1f%%.", pgUsage))
231+
msg = append(msg, fmt.Sprintf("PG storage usage is %.1f%%", pgUsage))
232232
}
233233

234234
if status.Health.Checks != nil {

pkg/analyze/ceph_test.go

Lines changed: 40 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -56,7 +56,7 @@ func Test_cephStatus(t *testing.T) {
5656
IsWarn: true,
5757
IsFail: false,
5858
Title: "Ceph Status",
59-
Message: "Ceph status is HEALTH_WARN. 5/5 OSDs up. OSD disk is nearly full. PG storage usage is 85.0%.",
59+
Message: "Ceph status is HEALTH_WARN\n5/5 OSDs up\nOSD disk is nearly full\nPG storage usage is 85.0%",
6060
URI: "https://rook.io/docs/rook/v1.4/ceph-common-issues.html",
6161
IconKey: "rook",
6262
IconURI: "https://troubleshoot.sh/images/analyzer-icons/rook.svg?w=11&h=16",
@@ -89,7 +89,7 @@ func Test_cephStatus(t *testing.T) {
8989
IsWarn: false,
9090
IsFail: true,
9191
Title: "Ceph Status",
92-
Message: "Ceph status is HEALTH_ERR. 4/5 OSDs up. OSD disk is full. PG storage usage is 95.0%.",
92+
Message: "Ceph status is HEALTH_ERR\n4/5 OSDs up\nOSD disk is full\nPG storage usage is 95.0%",
9393
URI: "https://rook.io/docs/rook/v1.4/ceph-common-issues.html",
9494
IconKey: "rook",
9595
IconURI: "https://troubleshoot.sh/images/analyzer-icons/rook.svg?w=11&h=16",
@@ -172,7 +172,7 @@ func Test_cephStatus(t *testing.T) {
172172
IsWarn: false,
173173
IsFail: true,
174174
Title: "Ceph Status",
175-
Message: "custom message WARN. 5/5 OSDs up. OSD disk is nearly full. PG storage usage is 85.0%.",
175+
Message: "custom message WARN\n5/5 OSDs up\nOSD disk is nearly full\nPG storage usage is 85.0%",
176176
URI: "custom uri WARN",
177177
IconKey: "rook",
178178
IconURI: "https://troubleshoot.sh/images/analyzer-icons/rook.svg?w=11&h=16",
@@ -218,6 +218,43 @@ func Test_cephStatus(t *testing.T) {
218218
}
219219
}`,
220220
},
221+
{
222+
name: "warn case with multiple health status messages",
223+
analyzer: troubleshootv1beta2.CephStatusAnalyze{},
224+
expectResult: AnalyzeResult{
225+
IsPass: false,
226+
IsWarn: true,
227+
IsFail: false,
228+
Title: "Ceph Status",
229+
Message: "Ceph status is HEALTH_WARN\nPOOL_NO_REDUNDANCY: 11 pool(s) have no replicas configured\nPOOL_PG_NUM_NOT_POWER_OF_TWO: 8 pool(s) have non-power-of-two pg_num",
230+
URI: "https://rook.io/docs/rook/v1.4/ceph-common-issues.html",
231+
IconKey: "rook",
232+
IconURI: "https://troubleshoot.sh/images/analyzer-icons/rook.svg?w=11&h=16",
233+
},
234+
filePath: "ceph/status.json",
235+
file: `{
236+
"fsid": "96a8178c-6aa2-4adf-a309-9e8869a79611",
237+
"health": {
238+
"status": "HEALTH_WARN",
239+
"checks": {
240+
"POOL_NO_REDUNDANCY": {
241+
"severity": "HEALTH_WARN",
242+
"summary": {
243+
"message": "11 pool(s) have no replicas configured",
244+
"count": 11
245+
},
246+
"muted": false
247+
},
248+
"POOL_PG_NUM_NOT_POWER_OF_TWO": {
249+
"severity": "HEALTH_WARN",
250+
"summary": {
251+
"message": "8 pool(s) have non-power-of-two pg_num"
252+
}
253+
}
254+
}
255+
}
256+
}`,
257+
},
221258
}
222259

223260
for _, test := range tests {

0 commit comments

Comments (0)