Commit 47d911b

cmzxotehcaster authored and committed
slab: make check_object() more consistent
Currently check_object() calls check_bytes_and_report() multiple times to check every section of the object it cares about: the left and right redzones, object poison, padding poison and the freepointer. It aborts the checking process and returns 0 as soon as it finds an error.

There are two inconsistencies in check_object(): the alignment padding check and the object padding check only print error messages but don't return 0 to tell callers that something is wrong and needs to be handled. Please see alloc_debug_processing() and free_debug_processing() for details.

We want to run all checks without skipping any, so use a local variable "ret" to save the result of each check, and change check_bytes_and_report() to only report the specific error finding. Then, at the end of check_object(), print the trailer once if any check found an error.

Suggested-by: Vlastimil Babka <[email protected]>
Signed-off-by: Chengming Zhou <[email protected]>
Reviewed-by: Vlastimil Babka <[email protected]>
Signed-off-by: Vlastimil Babka <[email protected]>
1 parent 4d2bcef commit 47d911b
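The resulting control flow, run every check, record any failure, and report only once at the end, can be illustrated with a small standalone sketch. This is not the kernel source: the check_* helpers, the printf() stand-in for the trailer/taint reporting, and main() are hypothetical placeholders that only mirror the accumulate-then-report pattern of the patched check_object().

/*
 * Minimal sketch of the accumulate-then-report pattern adopted by
 * check_object(). All helpers below are hypothetical stand-ins for the
 * individual check_bytes_and_report() calls and for the trailer/taint
 * reporting; only the control flow mirrors the patch.
 */
#include <stdbool.h>
#include <stdio.h>

static bool check_left_redzone(const void *obj)  { (void)obj; return true; }
static bool check_right_redzone(const void *obj) { (void)obj; return false; } /* simulated corruption */
static bool check_poison(const void *obj)        { (void)obj; return true; }
static bool check_padding(const void *obj)       { (void)obj; return false; } /* simulated corruption */

static int check_object_sketch(const void *obj)
{
	int ret = 1;

	/* Run every check and record failures instead of returning early. */
	if (!check_left_redzone(obj))
		ret = 0;
	if (!check_right_redzone(obj))
		ret = 0;
	if (!check_poison(obj))
		ret = 0;
	if (!check_padding(obj))
		ret = 0;

	/* Report once, at the end, only if at least one check failed. */
	if (!ret)
		printf("object %p: printing trailer once for all failures\n", (void *)obj);

	return ret;
}

int main(void)
{
	char object[32] = { 0 };

	/* Callers (cf. alloc/free_debug_processing()) only need the 0/1 result. */
	return check_object_sketch(object) ? 0 : 1;
}

Before this change, an early return after the first failing section left later corruption in the same object unreported, and the two padding checks never influenced the return value at all; accumulating into "ret" addresses both.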

File tree

1 file changed (+41, -21 lines)

mm/slub.c

Lines changed: 41 additions & 21 deletions
@@ -788,8 +788,24 @@ static bool slab_add_kunit_errors(void)
 	kunit_put_resource(resource);
 	return true;
 }
+
+static bool slab_in_kunit_test(void)
+{
+	struct kunit_resource *resource;
+
+	if (!kunit_get_current_test())
+		return false;
+
+	resource = kunit_find_named_resource(current->kunit_test, "slab_errors");
+	if (!resource)
+		return false;
+
+	kunit_put_resource(resource);
+	return true;
+}
 #else
 static inline bool slab_add_kunit_errors(void) { return false; }
+static inline bool slab_in_kunit_test(void) { return false; }
 #endif
 
 static inline unsigned int size_from_object(struct kmem_cache *s)
@@ -1190,8 +1206,6 @@ static int check_bytes_and_report(struct kmem_cache *s, struct slab *slab,
 	pr_err("0x%p-0x%p @offset=%tu. First byte 0x%x instead of 0x%x\n",
 					fault, end - 1, fault - addr,
 					fault[0], value);
-	print_trailer(s, slab, object);
-	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
 
 skip_bug_print:
 	restore_bytes(s, what, value, fault, end);
@@ -1300,15 +1314,16 @@ static int check_object(struct kmem_cache *s, struct slab *slab,
 	u8 *p = object;
 	u8 *endobject = object + s->object_size;
 	unsigned int orig_size, kasan_meta_size;
+	int ret = 1;
 
 	if (s->flags & SLAB_RED_ZONE) {
 		if (!check_bytes_and_report(s, slab, object, "Left Redzone",
 			object - s->red_left_pad, val, s->red_left_pad))
-			return 0;
+			ret = 0;
 
 		if (!check_bytes_and_report(s, slab, object, "Right Redzone",
 			endobject, val, s->inuse - s->object_size))
-			return 0;
+			ret = 0;
 
 		if (slub_debug_orig_size(s) && val == SLUB_RED_ACTIVE) {
 			orig_size = get_orig_size(s, object);
@@ -1317,14 +1332,15 @@ static int check_object(struct kmem_cache *s, struct slab *slab,
 			    !check_bytes_and_report(s, slab, object,
 				"kmalloc Redzone", p + orig_size,
 				val, s->object_size - orig_size)) {
-				return 0;
+				ret = 0;
 			}
 		}
 	} else {
 		if ((s->flags & SLAB_POISON) && s->object_size < s->inuse) {
-			check_bytes_and_report(s, slab, p, "Alignment padding",
+			if (!check_bytes_and_report(s, slab, p, "Alignment padding",
 				endobject, POISON_INUSE,
-				s->inuse - s->object_size);
+				s->inuse - s->object_size))
+				ret = 0;
 		}
 	}
 
@@ -1340,37 +1356,41 @@ static int check_object(struct kmem_cache *s, struct slab *slab,
 		    !check_bytes_and_report(s, slab, p, "Poison",
 				p + kasan_meta_size, POISON_FREE,
 				s->object_size - kasan_meta_size - 1))
-			return 0;
+			ret = 0;
 		if (kasan_meta_size < s->object_size &&
 		    !check_bytes_and_report(s, slab, p, "End Poison",
 				p + s->object_size - 1, POISON_END, 1))
-			return 0;
+			ret = 0;
 		}
 		/*
 		 * check_pad_bytes cleans up on its own.
 		 */
-		check_pad_bytes(s, slab, p);
+		if (!check_pad_bytes(s, slab, p))
+			ret = 0;
 	}
 
-	if (!freeptr_outside_object(s) && val == SLUB_RED_ACTIVE)
-		/*
-		 * Object and freepointer overlap. Cannot check
-		 * freepointer while object is allocated.
-		 */
-		return 1;
-
-	/* Check free pointer validity */
-	if (!check_valid_pointer(s, slab, get_freepointer(s, p))) {
+	/*
+	 * Cannot check freepointer while object is allocated if
+	 * object and freepointer overlap.
+	 */
+	if ((freeptr_outside_object(s) || val != SLUB_RED_ACTIVE) &&
+	    !check_valid_pointer(s, slab, get_freepointer(s, p))) {
 		object_err(s, slab, p, "Freepointer corrupt");
 		/*
 		 * No choice but to zap it and thus lose the remainder
 		 * of the free objects in this slab. May cause
 		 * another error because the object count is now wrong.
 		 */
		set_freepointer(s, p, NULL);
-		return 0;
+		ret = 0;
 	}
-	return 1;
+
+	if (!ret && !slab_in_kunit_test()) {
+		print_trailer(s, slab, object);
+		add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
+	}
+
+	return ret;
 }
 
 static int check_slab(struct kmem_cache *s, struct slab *slab)
