Commit b3c3424

thejh authored and tehcaster committed
kasan: catch invalid free before SLUB reinitializes the object
Currently, when KASAN is combined with init-on-free behavior, the
initialization happens before KASAN's "invalid free" checks.

More importantly, a subsequent commit will want to RCU-delay the actual
SLUB freeing of an object, and we'd like KASAN to still validate
synchronously that freeing the object is permitted. (Otherwise this
change will make the existing testcase kmem_cache_invalid_free fail.)

So add a new KASAN hook that allows KASAN to pre-validate a
kmem_cache_free() operation before SLUB actually starts modifying the
object or its metadata.

Inside KASAN, this:

- moves checks from poison_slab_object() into check_slab_allocation()
- moves kasan_arch_is_ready() up into callers of poison_slab_object()
- removes the "ip" argument of poison_slab_object() and __kasan_slab_free()
  (since those functions no longer do any reporting)

Acked-by: Vlastimil Babka <[email protected]> #slub
Reviewed-by: Andrey Konovalov <[email protected]>
Signed-off-by: Jann Horn <[email protected]>
Signed-off-by: Vlastimil Babka <[email protected]>
1 parent 4e1c44b commit b3c3424
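
As a hedged illustration (not part of this commit) of the kind of buggy free the new pre-check must keep reporting synchronously, in the spirit of the kmem_cache_invalid_free testcase mentioned in the message. The cache name, size, and the invalid_free_example() wrapper are made-up placeholders:

#include <linux/slab.h>

static void invalid_free_example(void)
{
        struct kmem_cache *cache;
        char *p;

        /* Placeholder cache; any regular slab cache would do. */
        cache = kmem_cache_create("example_cache", 200, 0, 0, NULL);
        if (!cache)
                return;

        p = kmem_cache_alloc(cache, GFP_KERNEL);
        if (!p)
                goto out;

        /*
         * Invalid free: the pointer does not point to the start of the
         * object. With this patch, kasan_slab_pre_free() reports it from
         * slab_free_hook() before SLUB modifies the object, which keeps
         * the report synchronous even once the actual freeing is
         * RCU-delayed by a later commit.
         */
        kmem_cache_free(cache, p + 1);

        /* The rejected free leaves the object allocated; clean it up. */
        kmem_cache_free(cache, p);
out:
        kmem_cache_destroy(cache);
}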

File tree

3 files changed: 94 additions & 28 deletions

include/linux/kasan.h
Lines changed: 51 additions & 3 deletions

@@ -175,13 +175,55 @@ static __always_inline void * __must_check kasan_init_slab_obj(
 	return (void *)object;
 }
 
-bool __kasan_slab_free(struct kmem_cache *s, void *object,
-			unsigned long ip, bool init);
+bool __kasan_slab_pre_free(struct kmem_cache *s, void *object,
+				unsigned long ip);
+/**
+ * kasan_slab_pre_free - Check whether freeing a slab object is safe.
+ * @object: Object to be freed.
+ *
+ * This function checks whether freeing the given object is safe. It may
+ * check for double-free and invalid-free bugs and report them.
+ *
+ * This function is intended only for use by the slab allocator.
+ *
+ * @Return true if freeing the object is unsafe; false otherwise.
+ */
+static __always_inline bool kasan_slab_pre_free(struct kmem_cache *s,
+						void *object)
+{
+	if (kasan_enabled())
+		return __kasan_slab_pre_free(s, object, _RET_IP_);
+	return false;
+}
+
+bool __kasan_slab_free(struct kmem_cache *s, void *object, bool init);
+/**
+ * kasan_slab_free - Poison, initialize, and quarantine a slab object.
+ * @object: Object to be freed.
+ * @init: Whether to initialize the object.
+ *
+ * This function informs that a slab object has been freed and is not
+ * supposed to be accessed anymore, except for objects in
+ * SLAB_TYPESAFE_BY_RCU caches.
+ *
+ * For KASAN modes that have integrated memory initialization
+ * (kasan_has_integrated_init() == true), this function also initializes
+ * the object's memory. For other modes, the @init argument is ignored.
+ *
+ * This function might also take ownership of the object to quarantine it.
+ * When this happens, KASAN will defer freeing the object to a later
+ * stage and handle it internally until then. The return value indicates
+ * whether KASAN took ownership of the object.
+ *
+ * This function is intended only for use by the slab allocator.
+ *
+ * @Return true if KASAN took ownership of the object; false otherwise.
+ */
 static __always_inline bool kasan_slab_free(struct kmem_cache *s,
 						void *object, bool init)
 {
 	if (kasan_enabled())
-		return __kasan_slab_free(s, object, _RET_IP_, init);
+		return __kasan_slab_free(s, object, init);
 	return false;
 }
 
@@ -371,6 +413,12 @@ static inline void *kasan_init_slab_obj(struct kmem_cache *cache,
 {
 	return (void *)object;
 }
+
+static inline bool kasan_slab_pre_free(struct kmem_cache *s, void *object)
+{
+	return false;
+}
+
 static inline bool kasan_slab_free(struct kmem_cache *s, void *object, bool init)
 {
 	return false;
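
Taken together, the kernel-doc above implies the following call pattern on the allocator side. A minimal sketch under that assumption; example_free_hook() is a made-up name that mirrors SLUB's slab_free_hook():

#include <linux/kasan.h>
#include <linux/slab.h>

static bool example_free_hook(struct kmem_cache *s, void *object, bool init)
{
        /* Synchronous validation: double-free/invalid-free is reported here. */
        if (kasan_slab_pre_free(s, object))
                return false;   /* buggy free: leave the object untouched */

        /*
         * Poison (and possibly initialize) the object; returns true if KASAN
         * quarantined it and will hand it back to the allocator later.
         */
        if (kasan_slab_free(s, object, init))
                return false;

        return true;            /* the allocator may free the object now */
}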

mm/kasan/common.c
Lines changed: 36 additions & 25 deletions

@@ -208,53 +208,59 @@ void * __must_check __kasan_init_slab_obj(struct kmem_cache *cache,
 	return (void *)object;
 }
 
-static inline bool poison_slab_object(struct kmem_cache *cache, void *object,
-				      unsigned long ip, bool init)
+/* Returns true when freeing the object is not safe. */
+static bool check_slab_allocation(struct kmem_cache *cache, void *object,
+				  unsigned long ip)
 {
-	void *tagged_object;
-
-	if (!kasan_arch_is_ready())
-		return false;
+	void *tagged_object = object;
 
-	tagged_object = object;
 	object = kasan_reset_tag(object);
 
 	if (unlikely(nearest_obj(cache, virt_to_slab(object), object) != object)) {
 		kasan_report_invalid_free(tagged_object, ip, KASAN_REPORT_INVALID_FREE);
 		return true;
 	}
 
-	/* RCU slabs could be legally used after free within the RCU period. */
-	if (unlikely(cache->flags & SLAB_TYPESAFE_BY_RCU))
-		return false;
-
 	if (!kasan_byte_accessible(tagged_object)) {
 		kasan_report_invalid_free(tagged_object, ip, KASAN_REPORT_DOUBLE_FREE);
 		return true;
 	}
 
+	return false;
+}
+
+static inline void poison_slab_object(struct kmem_cache *cache, void *object,
+				      bool init)
+{
+	void *tagged_object = object;
+
+	object = kasan_reset_tag(object);
+
+	/* RCU slabs could be legally used after free within the RCU period. */
+	if (unlikely(cache->flags & SLAB_TYPESAFE_BY_RCU))
+		return;
+
 	kasan_poison(object, round_up(cache->object_size, KASAN_GRANULE_SIZE),
 			KASAN_SLAB_FREE, init);
 
 	if (kasan_stack_collection_enabled())
 		kasan_save_free_info(cache, tagged_object);
+}
 
-	return false;
+bool __kasan_slab_pre_free(struct kmem_cache *cache, void *object,
+				unsigned long ip)
+{
+	if (!kasan_arch_is_ready() || is_kfence_address(object))
+		return false;
+	return check_slab_allocation(cache, object, ip);
 }
 
-bool __kasan_slab_free(struct kmem_cache *cache, void *object,
-				unsigned long ip, bool init)
+bool __kasan_slab_free(struct kmem_cache *cache, void *object, bool init)
 {
-	if (is_kfence_address(object))
+	if (!kasan_arch_is_ready() || is_kfence_address(object))
 		return false;
 
-	/*
-	 * If the object is buggy, do not let slab put the object onto the
-	 * freelist. The object will thus never be allocated again and its
-	 * metadata will never get released.
-	 */
-	if (poison_slab_object(cache, object, ip, init))
-		return true;
+	poison_slab_object(cache, object, init);
 
 	/*
 	 * If the object is put into quarantine, do not let slab put the object
@@ -504,11 +510,16 @@ bool __kasan_mempool_poison_object(void *ptr, unsigned long ip)
 		return true;
 	}
 
-	if (is_kfence_address(ptr))
-		return false;
+	if (is_kfence_address(ptr) || !kasan_arch_is_ready())
+		return true;
 
 	slab = folio_slab(folio);
-	return !poison_slab_object(slab->slab_cache, ptr, ip, false);
+
+	if (check_slab_allocation(slab->slab_cache, ptr, ip))
+		return false;
+
+	poison_slab_object(slab->slab_cache, ptr, false);
+	return true;
 }
 
 void __kasan_mempool_unpoison_object(void *ptr, size_t size, unsigned long ip)
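
The checks moved into check_slab_allocation() still catch double-frees before SLUB touches the object. A hedged sketch, loosely following KASAN's existing kmem_cache_double_free testcase; double_free_example() and its caller-supplied cache are placeholders, not code from this commit:

#include <linux/slab.h>

static void double_free_example(struct kmem_cache *cache)
{
        char *p = kmem_cache_alloc(cache, GFP_KERNEL);

        if (!p)
                return;

        kmem_cache_free(cache, p);
        /*
         * Second free: kasan_byte_accessible() fails inside
         * check_slab_allocation(), so __kasan_slab_pre_free() reports a
         * double-free before SLUB modifies the already-freed object.
         */
        kmem_cache_free(cache, p);
}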

mm/slub.c
Lines changed: 7 additions & 0 deletions

@@ -2226,6 +2226,13 @@ bool slab_free_hook(struct kmem_cache *s, void *x, bool init)
 	if (kfence_free(x))
 		return false;
 
+	/*
+	 * Give KASAN a chance to notice an invalid free operation before we
+	 * modify the object.
+	 */
+	if (kasan_slab_pre_free(s, x))
+		return false;
+
 	/*
 	 * As memory initialization might be integrated into KASAN,
 	 * kasan_slab_free and initialization memset's must be
