Skip to content

Commit c2092c1

Browse files
committed
mm/slub: Finish struct page to struct slab conversion
Update comments mentioning pages to mention slabs where appropriate. Also some goto labels.

Signed-off-by: Vlastimil Babka <[email protected]>
Reviewed-by: Roman Gushchin <[email protected]>
1 parent bb192ed commit c2092c1

File tree

2 files changed

+53
-54
lines changed

2 files changed

+53
-54
lines changed

include/linux/slub_def.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -99,7 +99,7 @@ struct kmem_cache {
9999
#ifdef CONFIG_SLUB_CPU_PARTIAL
100100
/* Number of per cpu partial objects to keep around */
101101
unsigned int cpu_partial;
102-
/* Number of per cpu partial pages to keep around */
102+
/* Number of per cpu partial slabs to keep around */
103103
unsigned int cpu_partial_slabs;
104104
#endif
105105
struct kmem_cache_order_objects oo;

mm/slub.c

Lines changed: 52 additions & 53 deletions
Original file line numberDiff line numberDiff line change
@@ -48,7 +48,7 @@
4848
* 1. slab_mutex (Global Mutex)
4949
* 2. node->list_lock (Spinlock)
5050
* 3. kmem_cache->cpu_slab->lock (Local lock)
51-
* 4. slab_lock(page) (Only on some arches or for debugging)
51+
* 4. slab_lock(slab) (Only on some arches or for debugging)
5252
* 5. object_map_lock (Only for debugging)
5353
*
5454
* slab_mutex
@@ -64,19 +64,19 @@
6464
*
6565
* The slab_lock is only used for debugging and on arches that do not
6666
* have the ability to do a cmpxchg_double. It only protects:
67-
* A. page->freelist -> List of object free in a page
68-
* B. page->inuse -> Number of objects in use
69-
* C. page->objects -> Number of objects in page
70-
* D. page->frozen -> frozen state
67+
* A. slab->freelist -> List of free objects in a slab
68+
* B. slab->inuse -> Number of objects in use
69+
* C. slab->objects -> Number of objects in slab
70+
* D. slab->frozen -> frozen state
7171
*
7272
* Frozen slabs
7373
*
7474
* If a slab is frozen then it is exempt from list management. It is not
7575
* on any list except per cpu partial list. The processor that froze the
76-
* slab is the one who can perform list operations on the page. Other
76+
* slab is the one who can perform list operations on the slab. Other
7777
* processors may put objects onto the freelist but the processor that
7878
* froze the slab is the only one that can retrieve the objects from the
79-
* page's freelist.
79+
* slab's freelist.
8080
*
8181
* list_lock
8282
*
@@ -135,7 +135,7 @@
135135
* minimal so we rely on the page allocators per cpu caches for
136136
* fast frees and allocs.
137137
*
138-
* page->frozen The slab is frozen and exempt from list processing.
138+
* slab->frozen The slab is frozen and exempt from list processing.
139139
* This means that the slab is dedicated to a purpose
140140
* such as satisfying allocations for a specific
141141
* processor. Objects may be freed in the slab while
@@ -250,7 +250,7 @@ static inline bool kmem_cache_has_cpu_partial(struct kmem_cache *s)
250250

251251
#define OO_SHIFT 16
252252
#define OO_MASK ((1 << OO_SHIFT) - 1)
253-
#define MAX_OBJS_PER_PAGE 32767 /* since page.objects is u15 */
253+
#define MAX_OBJS_PER_PAGE 32767 /* since slab.objects is u15 */
254254

255255
/* Internal SLUB flags */
256256
/* Poison object */
@@ -423,8 +423,8 @@ static void slub_set_cpu_partial(struct kmem_cache *s, unsigned int nr_objects)
423423

424424
/*
425425
* We take the number of objects but actually limit the number of
426-
* pages on the per cpu partial list, in order to limit excessive
427-
* growth of the list. For simplicity we assume that the pages will
426+
* slabs on the per cpu partial list, in order to limit excessive
427+
* growth of the list. For simplicity we assume that the slabs will
428428
* be half-full.
429429
*/
430430
nr_slabs = DIV_ROUND_UP(nr_objects * 2, oo_objects(s->oo));
@@ -594,9 +594,9 @@ static inline bool slab_add_kunit_errors(void) { return false; }
594594
#endif
595595

596596
/*
597-
* Determine a map of object in use on a page.
597+
* Determine a map of objects in use in a slab.
598598
*
599-
* Node listlock must be held to guarantee that the page does
599+
* Node listlock must be held to guarantee that the slab does
600600
* not vanish from under us.
601601
*/
602602
static unsigned long *get_map(struct kmem_cache *s, struct slab *slab)
@@ -1139,7 +1139,7 @@ static int check_slab(struct kmem_cache *s, struct slab *slab)
11391139
}
11401140

11411141
/*
1142-
* Determine if a certain object on a page is on the freelist. Must hold the
1142+
* Determine if a certain object in a slab is on the freelist. Must hold the
11431143
* slab lock to guarantee that the chains are in a consistent state.
11441144
*/
11451145
static int on_freelist(struct kmem_cache *s, struct slab *slab, void *search)
@@ -2184,7 +2184,7 @@ static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
21842184
}
21852185

21862186
/*
2187-
* Get a page from somewhere. Search in increasing NUMA distances.
2187+
* Get a slab from somewhere. Search in increasing NUMA distances.
21882188
*/
21892189
static void *get_any_partial(struct kmem_cache *s, gfp_t flags,
21902190
struct slab **ret_slab)
@@ -2248,7 +2248,7 @@ static void *get_any_partial(struct kmem_cache *s, gfp_t flags,
22482248
}
22492249

22502250
/*
2251-
* Get a partial page, lock it and return it.
2251+
* Get a partial slab, lock it and return it.
22522252
*/
22532253
static void *get_partial(struct kmem_cache *s, gfp_t flags, int node,
22542254
struct slab **ret_slab)
@@ -2340,7 +2340,7 @@ static void init_kmem_cache_cpus(struct kmem_cache *s)
23402340
}
23412341

23422342
/*
2343-
* Finishes removing the cpu slab. Merges cpu's freelist with page's freelist,
2343+
* Finishes removing the cpu slab. Merges cpu's freelist with slab's freelist,
23442344
* unfreezes the slabs and puts it on the proper list.
23452345
* Assumes the slab has been already safely taken away from kmem_cache_cpu
23462346
* by the caller.
@@ -2387,18 +2387,18 @@ static void deactivate_slab(struct kmem_cache *s, struct slab *slab,
23872387
}
23882388

23892389
/*
2390-
* Stage two: Unfreeze the page while splicing the per-cpu
2391-
* freelist to the head of page's freelist.
2390+
* Stage two: Unfreeze the slab while splicing the per-cpu
2391+
* freelist to the head of slab's freelist.
23922392
*
2393-
* Ensure that the page is unfrozen while the list presence
2393+
* Ensure that the slab is unfrozen while the list presence
23942394
* reflects the actual number of objects during unfreeze.
23952395
*
23962396
* We setup the list membership and then perform a cmpxchg
2397-
* with the count. If there is a mismatch then the page
2398-
* is not unfrozen but the page is on the wrong list.
2397+
* with the count. If there is a mismatch then the slab
2398+
* is not unfrozen but the slab is on the wrong list.
23992399
*
24002400
* Then we restart the process which may have to remove
2401-
* the page from the list that we just put it on again
2401+
* the slab from the list that we just put it on again
24022402
* because the number of objects in the slab may have
24032403
* changed.
24042404
*/
@@ -2426,9 +2426,8 @@ static void deactivate_slab(struct kmem_cache *s, struct slab *slab,
24262426
if (!lock) {
24272427
lock = 1;
24282428
/*
2429-
* Taking the spinlock removes the possibility
2430-
* that acquire_slab() will see a slab page that
2431-
* is frozen
2429+
* Taking the spinlock removes the possibility that
2430+
* acquire_slab() will see a slab that is frozen
24322431
*/
24332432
spin_lock_irqsave(&n->list_lock, flags);
24342433
}
@@ -2569,8 +2568,8 @@ static void unfreeze_partials_cpu(struct kmem_cache *s,
25692568
}
25702569

25712570
/*
2572-
* Put a page that was just frozen (in __slab_free|get_partial_node) into a
2573-
* partial page slot if available.
2571+
* Put a slab that was just frozen (in __slab_free|get_partial_node) into a
2572+
* partial slab slot if available.
25742573
*
25752574
* If we did not find a slot then simply move all the partials to the
25762575
* per node partial list.
@@ -2841,12 +2840,12 @@ static inline bool pfmemalloc_match(struct slab *slab, gfp_t gfpflags)
28412840
}
28422841

28432842
/*
2844-
* Check the page->freelist of a page and either transfer the freelist to the
2845-
* per cpu freelist or deactivate the page.
2843+
* Check the slab->freelist and either transfer the freelist to the
2844+
* per cpu freelist or deactivate the slab.
28462845
*
2847-
* The page is still frozen if the return value is not NULL.
2846+
* The slab is still frozen if the return value is not NULL.
28482847
*
2849-
* If this function returns NULL then the page has been unfrozen.
2848+
* If this function returns NULL then the slab has been unfrozen.
28502849
*/
28512850
static inline void *get_freelist(struct kmem_cache *s, struct slab *slab)
28522851
{
@@ -2902,7 +2901,7 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
29022901

29032902
stat(s, ALLOC_SLOWPATH);
29042903

2905-
reread_page:
2904+
reread_slab:
29062905

29072906
slab = READ_ONCE(c->slab);
29082907
if (!slab) {
@@ -2939,11 +2938,11 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
29392938
if (unlikely(!pfmemalloc_match(slab, gfpflags)))
29402939
goto deactivate_slab;
29412940

2942-
/* must check again c->page in case we got preempted and it changed */
2941+
/* must check again c->slab in case we got preempted and it changed */
29432942
local_lock_irqsave(&s->cpu_slab->lock, flags);
29442943
if (unlikely(slab != c->slab)) {
29452944
local_unlock_irqrestore(&s->cpu_slab->lock, flags);
2946-
goto reread_page;
2945+
goto reread_slab;
29472946
}
29482947
freelist = c->freelist;
29492948
if (freelist)
@@ -2966,8 +2965,8 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
29662965

29672966
/*
29682967
* freelist is pointing to the list of objects to be used.
2969-
* page is pointing to the page from which the objects are obtained.
2970-
* That page must be frozen for per cpu allocations to work.
2968+
* slab is pointing to the slab from which the objects are obtained.
2969+
* That slab must be frozen for per cpu allocations to work.
29712970
*/
29722971
VM_BUG_ON(!c->slab->frozen);
29732972
c->freelist = get_freepointer(s, freelist);
@@ -2980,7 +2979,7 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
29802979
local_lock_irqsave(&s->cpu_slab->lock, flags);
29812980
if (slab != c->slab) {
29822981
local_unlock_irqrestore(&s->cpu_slab->lock, flags);
2983-
goto reread_page;
2982+
goto reread_slab;
29842983
}
29852984
freelist = c->freelist;
29862985
c->slab = NULL;
@@ -2994,7 +2993,7 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
29942993
local_lock_irqsave(&s->cpu_slab->lock, flags);
29952994
if (unlikely(c->slab)) {
29962995
local_unlock_irqrestore(&s->cpu_slab->lock, flags);
2997-
goto reread_page;
2996+
goto reread_slab;
29982997
}
29992998
if (unlikely(!slub_percpu_partial(c))) {
30002999
local_unlock_irqrestore(&s->cpu_slab->lock, flags);
@@ -3013,7 +3012,7 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
30133012

30143013
freelist = get_partial(s, gfpflags, node, &slab);
30153014
if (freelist)
3016-
goto check_new_page;
3015+
goto check_new_slab;
30173016

30183017
slub_put_cpu_ptr(s->cpu_slab);
30193018
slab = new_slab(s, gfpflags, node);
@@ -3025,15 +3024,15 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
30253024
}
30263025

30273026
/*
3028-
* No other reference to the page yet so we can
3027+
* No other reference to the slab yet so we can
30293028
* muck around with it freely without cmpxchg
30303029
*/
30313030
freelist = slab->freelist;
30323031
slab->freelist = NULL;
30333032

30343033
stat(s, ALLOC_SLAB);
30353034

3036-
check_new_page:
3035+
check_new_slab:
30373036

30383037
if (kmem_cache_debug(s)) {
30393038
if (!alloc_debug_processing(s, slab, freelist, addr)) {
@@ -3055,7 +3054,7 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
30553054
*/
30563055
goto return_single;
30573056

3058-
retry_load_page:
3057+
retry_load_slab:
30593058

30603059
local_lock_irqsave(&s->cpu_slab->lock, flags);
30613060
if (unlikely(c->slab)) {
@@ -3072,7 +3071,7 @@ static void *___slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
30723071

30733072
stat(s, CPUSLAB_FLUSH);
30743073

3075-
goto retry_load_page;
3074+
goto retry_load_slab;
30763075
}
30773076
c->slab = slab;
30783077

@@ -3169,9 +3168,9 @@ static __always_inline void *slab_alloc_node(struct kmem_cache *s,
31693168
/*
31703169
* Irqless object alloc/free algorithm used here depends on sequence
31713170
* of fetching cpu_slab's data. tid should be fetched before anything
3172-
* on c to guarantee that object and page associated with previous tid
3171+
* on c to guarantee that object and slab associated with previous tid
31733172
* won't be used with current tid. If we fetch tid first, object and
3174-
* page could be one associated with next tid and our alloc/free
3173+
* slab could be one associated with next tid and our alloc/free
31753174
* request will be failed. In this case, we will retry. So, no problem.
31763175
*/
31773176
barrier();
@@ -3295,7 +3294,7 @@ EXPORT_SYMBOL(kmem_cache_alloc_node_trace);
32953294
* have a longer lifetime than the cpu slabs in most processing loads.
32963295
*
32973296
* So we still attempt to reduce cache line usage. Just take the slab
3298-
* lock and free the item. If there is no additional partial page
3297+
* lock and free the item. If there is no additional partial slab
32993298
* handling required then we can return immediately.
33003299
*/
33013300
static void __slab_free(struct kmem_cache *s, struct slab *slab,
@@ -3373,7 +3372,7 @@ static void __slab_free(struct kmem_cache *s, struct slab *slab,
33733372
stat(s, FREE_FROZEN);
33743373
} else if (new.frozen) {
33753374
/*
3376-
* If we just froze the page then put it onto the
3375+
* If we just froze the slab then put it onto the
33773376
* per cpu partial list.
33783377
*/
33793378
put_cpu_partial(s, slab, 1);
@@ -3427,7 +3426,7 @@ static void __slab_free(struct kmem_cache *s, struct slab *slab,
34273426
* with all sorts of special processing.
34283427
*
34293428
* Bulk free of a freelist with several objects (all pointing to the
3430-
* same page) possible by specifying head and tail ptr, plus objects
3429+
* same slab) possible by specifying head and tail ptr, plus objects
34313430
* count (cnt). Bulk free indicated by tail pointer being set.
34323431
*/
34333432
static __always_inline void do_slab_free(struct kmem_cache *s,
@@ -4213,7 +4212,7 @@ static int kmem_cache_open(struct kmem_cache *s, slab_flags_t flags)
42134212
#endif
42144213

42154214
/*
4216-
* The larger the object size is, the more pages we want on the partial
4215+
* The larger the object size is, the more slabs we want on the partial
42174216
* list to avoid pounding the page allocator excessively.
42184217
*/
42194218
set_min_partial(s, ilog2(s->size) / 2);
@@ -4598,12 +4597,12 @@ static int __kmem_cache_do_shrink(struct kmem_cache *s)
45984597
* Build lists of slabs to discard or promote.
45994598
*
46004599
* Note that concurrent frees may occur while we hold the
4601-
* list_lock. page->inuse here is the upper limit.
4600+
* list_lock. slab->inuse here is the upper limit.
46024601
*/
46034602
list_for_each_entry_safe(slab, t, &n->partial, slab_list) {
46044603
int free = slab->objects - slab->inuse;
46054604

4606-
/* Do not reread page->inuse */
4605+
/* Do not reread slab->inuse */
46074606
barrier();
46084607

46094608
/* We do not keep full slabs on the list */
@@ -5482,7 +5481,7 @@ static ssize_t slabs_cpu_partial_show(struct kmem_cache *s, char *buf)
54825481
slabs += slab->slabs;
54835482
}
54845483

5485-
/* Approximate half-full pages , see slub_set_cpu_partial() */
5484+
/* Approximate half-full slabs, see slub_set_cpu_partial() */
54865485
objects = (slabs * oo_objects(s->oo)) / 2;
54875486
len += sysfs_emit_at(buf, len, "%d(%d)", objects, slabs);
54885487

0 commit comments

Comments (0)