@@ -91,14 +91,14 @@
  * The partially empty slabs cached on the CPU partial list are used
  * for performance reasons, which speeds up the allocation process.
  * These slabs are not frozen, but are also exempt from list management,
- * by clearing the PG_workingset flag when moving out of the node
+ * by clearing the SL_partial flag when moving out of the node
  * partial list. Please see __slab_free() for more details.
  *
  * To sum up, the current scheme is:
- * - node partial slab: PG_Workingset && !frozen
- * - cpu partial slab: !PG_Workingset && !frozen
- * - cpu slab: !PG_Workingset && frozen
- * - full slab: !PG_Workingset && !frozen
+ * - node partial slab: SL_partial && !frozen
+ * - cpu partial slab: !SL_partial && !frozen
+ * - cpu slab: !SL_partial && frozen
+ * - full slab: !SL_partial && !frozen
  *
  * list_lock
  *
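The state table above is worth dwelling on: the four states differ only in the SL_partial flag and the frozen bit, and a cpu partial slab is indistinguishable from a full slab from those two bits alone. A minimal userspace sketch of the classification (illustrative only, not kernel code; struct toy_slab and its fields are invented for this example):

#include <stdbool.h>
#include <stdio.h>

/* Toy model of the two bits the comment above describes. */
struct toy_slab {
	bool partial;	/* SL_partial: on the per-node partial list */
	bool frozen;	/* frozen: owned by a CPU as the active cpu slab */
};

/* Classify a slab per the scheme in the comment. A cpu partial slab
 * and a full slab look identical from these two bits alone. */
static const char *toy_state(const struct toy_slab *s)
{
	if (s->partial && !s->frozen)
		return "node partial slab";
	if (!s->partial && s->frozen)
		return "cpu slab";
	return "cpu partial slab or full slab";
}

int main(void)
{
	struct toy_slab a = { .partial = true,  .frozen = false };
	struct toy_slab b = { .partial = false, .frozen = true };

	printf("%s\n", toy_state(&a));	/* node partial slab */
	printf("%s\n", toy_state(&b));	/* cpu slab */
	return 0;
}

The ambiguity is consistent with the comment: neither a cpu partial slab nor a full slab participates in node partial list management, so the flags do not need to distinguish them.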
@@ -186,13 +186,15 @@
 /**
  * enum slab_flags - How the slab flags bits are used.
  * @SL_locked: Is locked with slab_lock()
+ * @SL_partial: On the per-node partial list
  *
  * The slab flags share space with the page flags but some bits have
  * different interpretations. The high bits are used for information
  * like zone/node/section.
  */
 enum slab_flags {
 	SL_locked = PG_locked,
+	SL_partial = PG_workingset,	/* Historical reasons for this bit */
 };

 /*
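The "Historical reasons for this bit" remark refers to SL_partial reusing the bit position of PG_workingset. A standalone sketch of the aliasing pattern (the bit values and helpers here are invented for illustration; the real bit numbers come from include/linux/page-flags.h):

#include <assert.h>
#include <stdio.h>

/* Stand-in for the page flag bit numbers. */
enum toy_pageflags {
	PG_locked,
	PG_workingset,
};

/* Same pattern as the hunk above: the slab flag is just another
 * name for the page flag bit it historically occupied. */
enum toy_slab_flags {
	SL_locked  = PG_locked,
	SL_partial = PG_workingset,
};

int main(void)
{
	unsigned long flags = 0;

	/* Setting the slab-flavoured name sets the page-flavoured bit,
	 * because they are the same bit position. */
	flags |= 1UL << SL_partial;
	assert(flags & (1UL << PG_workingset));
	printf("SL_partial aliases bit %d\n", SL_partial);
	return 0;
}

Because the enumerators are plain integers naming bit positions, the rename costs nothing at runtime; it only changes which name the slab code uses for the bit.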
@@ -2729,23 +2731,19 @@ static void discard_slab(struct kmem_cache *s, struct slab *slab)
 	free_slab(s, slab);
 }

-/*
- * SLUB reuses PG_workingset bit to keep track of whether it's on
- * the per-node partial list.
- */
 static inline bool slab_test_node_partial(const struct slab *slab)
 {
-	return folio_test_workingset(slab_folio(slab));
+	return test_bit(SL_partial, &slab->flags);
 }

 static inline void slab_set_node_partial(struct slab *slab)
 {
-	set_bit(PG_workingset, folio_flags(slab_folio(slab), 0));
+	set_bit(SL_partial, &slab->flags);
 }

 static inline void slab_clear_node_partial(struct slab *slab)
 {
-	clear_bit(PG_workingset, folio_flags(slab_folio(slab), 0));
+	clear_bit(SL_partial, &slab->flags);
 }

 /*
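For readers outside the kernel tree, a rough userspace approximation of what the three rewritten helpers do, assuming an invented struct toy_slab and ignoring that the kernel's set_bit()/clear_bit() are atomic:

#include <stdbool.h>
#include <stdio.h>

/* Invented stand-in for struct slab; only the flags word matters here. */
struct toy_slab {
	unsigned long flags;
};

enum { SL_partial = 1 };	/* arbitrary bit number for this sketch */

/* Non-atomic approximations of test_bit()/set_bit()/clear_bit(). */
static bool slab_test_node_partial(const struct toy_slab *slab)
{
	return slab->flags & (1UL << SL_partial);
}

static void slab_set_node_partial(struct toy_slab *slab)
{
	slab->flags |= 1UL << SL_partial;
}

static void slab_clear_node_partial(struct toy_slab *slab)
{
	slab->flags &= ~(1UL << SL_partial);
}

int main(void)
{
	struct toy_slab s = { .flags = 0 };

	slab_set_node_partial(&s);
	printf("partial: %d\n", slab_test_node_partial(&s));	/* 1 */
	slab_clear_node_partial(&s);
	printf("partial: %d\n", slab_test_node_partial(&s));	/* 0 */
	return 0;
}

The visible effect of the hunk is that the helpers now operate on slab->flags directly through the SL_partial name, rather than casting to a folio and going through the workingset page-flag accessors.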