Commit ab28e72
Hyesoo Yu authored and opsiff committed
mm: slub: Print the broken data before restoring them
[ Upstream commit ed5ec2e ]

Previously, the restore occurred after printing the object in slub. After
commit 47d911b ("slab: make check_object() more consistent"), the bytes are
printed after the restore. The information about the bytes before the
restore is highly valuable for debugging purposes. For instance, in the
event of a cache issue, it displays the byte patterns by breaking them down
into 64-byte units. Without this information, we can only speculate on how
the object was broken. Hence the corrupted regions should be printed prior
to the restoration process.

However, if an object is broken in multiple places, the same log may be
output multiple times. Therefore the slub log is reported only once to
prevent redundant printing, by passing a parameter that indicates whether
an error has occurred previously.

Signed-off-by: Hyesoo Yu <[email protected]>
Reviewed-by: Harry Yoo <[email protected]>
Signed-off-by: Vlastimil Babka <[email protected]>
Stable-dep-of: b4efccec8d06 ("mm/slub: avoid accessing metadata when pointer is invalid in object_err()")
Signed-off-by: Sasha Levin <[email protected]>
Signed-off-by: Greg Kroah-Hartman <[email protected]>
(cherry picked from commit 20a54a8db4dd85a30e2005081ab386f0c4cb3d3d)
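To make the report-once flow concrete, here is a minimal, self-contained user-space sketch of the pattern this commit applies. It is an illustrative analogue, not the kernel code: check_region(), dump_object() and the 0x5a poison value are hypothetical stand-ins for check_bytes_and_report(), object_err() and SLUB's poison bytes. The corrupted bytes are printed before memset() restores them, and the full object dump is emitted only for the first corrupted region.

```c
#include <stdio.h>
#include <string.h>

/* Hex-dump the object exactly as it currently is, i.e. before any restore. */
static void dump_object(const unsigned char *obj, size_t size)
{
	printf("Object dump (before restore):");
	for (size_t i = 0; i < size; i++) {
		if (i % 16 == 0)
			printf("\n  %04zu:", i);
		printf(" %02x", obj[i]);
	}
	printf("\n");
}

/*
 * Check that [start, start + bytes) holds 'value'. On a mismatch, print the
 * broken range first, dump the whole object only when 'dump_obj' is set, and
 * only then restore the expected pattern. Returns 1 if clean, 0 if corrupted.
 */
static int check_region(unsigned char *object, size_t obj_size,
			const char *what, unsigned char *start,
			unsigned char value, size_t bytes, int dump_obj)
{
	unsigned char *fault = NULL;

	for (size_t i = 0; i < bytes; i++) {
		if (start[i] != value) {
			fault = start + i;
			break;
		}
	}
	if (!fault)
		return 1;

	printf("[%s overwritten] offset=%td, first byte 0x%02x instead of 0x%02x\n",
	       what, fault - object, fault[0], value);
	if (dump_obj)
		dump_object(object, obj_size);

	/* Restore only after everything has been reported. */
	memset(start, value, bytes);
	return 0;
}

int main(void)
{
	unsigned char obj[32];
	int ret = 1;	/* 1 = nothing reported yet, mirrors 'ret' in check_object() */

	memset(obj, 0x5a, sizeof(obj));
	obj[3] = 0x00;	/* corrupt two separate regions of the same object */
	obj[20] = 0xff;

	/* Pass 'ret' as dump_obj: only the first failure dumps the full object. */
	if (!check_region(obj, sizeof(obj), "Left half", obj, 0x5a, 16, ret))
		ret = 0;
	if (!check_region(obj, sizeof(obj), "Right half", obj + 16, 0x5a, 16, ret))
		ret = 0;

	return ret ? 0 : 1;
}
```

Running this prints both overwritten ranges but only one object dump, which is the deduplication the new slab_obj_print parameter provides.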
1 parent 4ef86c9 commit ab28e72

File tree

1 file changed: +14 -18 lines changed

mm/slub.c

Lines changed: 14 additions & 18 deletions
@@ -1191,8 +1191,8 @@ static void restore_bytes(struct kmem_cache *s, char *message, u8 data,
 
 static pad_check_attributes int
 check_bytes_and_report(struct kmem_cache *s, struct slab *slab,
-		       u8 *object, char *what,
-		       u8 *start, unsigned int value, unsigned int bytes)
+		       u8 *object, char *what, u8 *start, unsigned int value,
+		       unsigned int bytes, bool slab_obj_print)
 {
 	u8 *fault;
 	u8 *end;
@@ -1211,10 +1211,11 @@ check_bytes_and_report(struct kmem_cache *s, struct slab *slab,
 	if (slab_add_kunit_errors())
 		goto skip_bug_print;
 
-	slab_bug(s, "%s overwritten", what);
-	pr_err("0x%p-0x%p @offset=%tu. First byte 0x%x instead of 0x%x\n",
-					fault, end - 1, fault - addr,
-					fault[0], value);
+	pr_err("[%s overwritten] 0x%p-0x%p @offset=%tu. First byte 0x%x instead of 0x%x\n",
+	       what, fault, end - 1, fault - addr, fault[0], value);
+
+	if (slab_obj_print)
+		object_err(s, slab, object, "Object corrupt");
 
 skip_bug_print:
 	restore_bytes(s, what, value, fault, end);
@@ -1278,7 +1279,7 @@ static int check_pad_bytes(struct kmem_cache *s, struct slab *slab, u8 *p)
 		return 1;
 
 	return check_bytes_and_report(s, slab, p, "Object padding",
-			p + off, POISON_INUSE, size_from_object(s) - off);
+			p + off, POISON_INUSE, size_from_object(s) - off, true);
 }
 
 /* Check the pad bytes at the end of a slab page */
@@ -1328,11 +1329,11 @@ static int check_object(struct kmem_cache *s, struct slab *slab,
 
 	if (s->flags & SLAB_RED_ZONE) {
 		if (!check_bytes_and_report(s, slab, object, "Left Redzone",
-			object - s->red_left_pad, val, s->red_left_pad))
+			object - s->red_left_pad, val, s->red_left_pad, ret))
 			ret = 0;
 
 		if (!check_bytes_and_report(s, slab, object, "Right Redzone",
-			endobject, val, s->inuse - s->object_size))
+			endobject, val, s->inuse - s->object_size, ret))
 			ret = 0;
 
 		if (slub_debug_orig_size(s) && val == SLUB_RED_ACTIVE) {
@@ -1341,15 +1342,15 @@ static int check_object(struct kmem_cache *s, struct slab *slab,
 			if (s->object_size > orig_size &&
 			    !check_bytes_and_report(s, slab, object,
 				"kmalloc Redzone", p + orig_size,
-				val, s->object_size - orig_size)) {
+				val, s->object_size - orig_size, ret)) {
 				ret = 0;
 			}
 		}
 	} else {
 		if ((s->flags & SLAB_POISON) && s->object_size < s->inuse) {
 			if (!check_bytes_and_report(s, slab, p, "Alignment padding",
 				endobject, POISON_INUSE,
-				s->inuse - s->object_size))
+				s->inuse - s->object_size, ret))
 				ret = 0;
 		}
 	}
@@ -1365,11 +1366,11 @@ static int check_object(struct kmem_cache *s, struct slab *slab,
 		if (kasan_meta_size < s->object_size - 1 &&
 		    !check_bytes_and_report(s, slab, p, "Poison",
 				p + kasan_meta_size, POISON_FREE,
-				s->object_size - kasan_meta_size - 1))
+				s->object_size - kasan_meta_size - 1, ret))
 			ret = 0;
 		if (kasan_meta_size < s->object_size &&
 		    !check_bytes_and_report(s, slab, p, "End Poison",
-				p + s->object_size - 1, POISON_END, 1))
+				p + s->object_size - 1, POISON_END, 1, ret))
 			ret = 0;
 	}
 	/*
@@ -1395,11 +1396,6 @@ static int check_object(struct kmem_cache *s, struct slab *slab,
 		ret = 0;
 	}
 
-	if (!ret && !slab_in_kunit_test()) {
-		print_trailer(s, slab, object);
-		add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
-	}
-
 	return ret;
 }
 
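A note on the call sites in the hunks above: in check_object(), ret starts at 1 and is cleared to 0 as soon as any check fails, so passing it as the new slab_obj_print argument means only the first failing check on a given object calls object_err() (which prints the trailer and taints the kernel); subsequent failures on the same object report only their own byte range. check_pad_bytes() passes true unconditionally, and the print_trailer()/add_taint() block at the end of check_object() is dropped because the same information is now emitted from check_bytes_and_report() before restore_bytes() overwrites the corrupted bytes.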