 * Internal slab definitions
 */

+/* Reuses the bits in struct page */
+struct slab {
+	unsigned long __page_flags;
+	union {
+		struct list_head slab_list;
+		struct {	/* Partial pages */
+			struct slab *next;
+#ifdef CONFIG_64BIT
+			int slabs;	/* Nr of slabs left */
+#else
+			short int slabs;
+#endif
+		};
+		struct rcu_head rcu_head;
+	};
+	struct kmem_cache *slab_cache; /* not slob */
+	/* Double-word boundary */
+	void *freelist;		/* first free object */
+	union {
+		void *s_mem;	/* slab: first object */
+		unsigned long counters;	/* SLUB */
+		struct {	/* SLUB */
+			unsigned inuse:16;
+			unsigned objects:15;
+			unsigned frozen:1;
+		};
+	};
+
+	union {
+		unsigned int active;	/* SLAB */
+		int units;		/* SLOB */
+	};
+	atomic_t __page_refcount;
+#ifdef CONFIG_MEMCG
+	unsigned long memcg_data;
+#endif
+};
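The anonymous unions above are what let each allocator reinterpret the same words of struct page: SLUB, for instance, treats `counters` and the `inuse`/`objects`/`frozen` bitfields as two views of one word. A minimal sketch of that overlay trick, assuming a made-up `slab_dump_counters()` debug helper that is not part of this patch:

```c
/* Illustrative only: mirror the counters/bitfield union locally. */
static void slab_dump_counters(const struct slab *slab)
{
	union {
		unsigned long counters;
		struct {
			unsigned inuse:16;
			unsigned objects:15;
			unsigned frozen:1;
		};
	} snap;

	snap.counters = slab->counters;	/* unsynchronized one-word snapshot */
	pr_info("inuse=%u objects=%u frozen=%u\n",
		snap.inuse, snap.objects, snap.frozen);
}
```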
+
+#define SLAB_MATCH(pg, sl) \
+	static_assert(offsetof(struct page, pg) == offsetof(struct slab, sl))
+SLAB_MATCH(flags, __page_flags);
+SLAB_MATCH(compound_head, slab_list);	/* Ensure bit 0 is clear */
+SLAB_MATCH(slab_list, slab_list);
+SLAB_MATCH(rcu_head, rcu_head);
+SLAB_MATCH(slab_cache, slab_cache);
+SLAB_MATCH(s_mem, s_mem);
+SLAB_MATCH(active, active);
+SLAB_MATCH(_refcount, __page_refcount);
+#ifdef CONFIG_MEMCG
+SLAB_MATCH(memcg_data, memcg_data);
+#endif
+#undef SLAB_MATCH
+static_assert(sizeof(struct slab) <= sizeof(struct page));
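Each SLAB_MATCH() line is a compile-time proof that a struct slab field sits at exactly the same offset as its struct page counterpart, so the type-punning between the two stays safe. The first invocation, for example, expands to:

```c
/* Expansion of SLAB_MATCH(flags, __page_flags): */
static_assert(offsetof(struct page, flags) ==
	      offsetof(struct slab, __page_flags));
```

If a later change to either structure moves a field, the build fails here instead of silently corrupting struct page state at runtime.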
+
+/**
+ * folio_slab - Converts from folio to slab.
+ * @folio: The folio.
+ *
+ * Currently struct slab is a different representation of a folio where
+ * folio_test_slab() is true.
+ *
+ * Return: The slab which contains this folio.
+ */
+#define folio_slab(folio) (_Generic((folio), \
+	const struct folio *: (const struct slab *)(folio), \
+	struct folio *: (struct slab *)(folio)))
+
+/**
+ * slab_folio - The folio allocated for a slab
+ * @slab: The slab.
+ *
+ * Slabs are allocated as folios that contain the individual objects and use
+ * some fields in the first struct page of the folio - those fields are now
+ * accessed by struct slab. It is occasionally necessary to convert back to
+ * a folio in order to communicate with the rest of the mm. Please use this
+ * helper instead of casting yourself, as the implementation may change in
+ * the future.
+ */
+#define slab_folio(s) (_Generic((s), \
+	const struct slab *: (const struct folio *)s, \
+	struct slab *: (struct folio *)s))
+
+/**
+ * page_slab - Converts from first struct page to slab.
+ * @p: The first (either head of compound or single) page of slab.
+ *
+ * A temporary wrapper to convert struct page to struct slab in situations
+ * where we know the page is the compound head, or a single order-0 page.
+ *
+ * Long-term ideally everything would work with struct slab directly or go
+ * through folio to struct slab.
+ *
+ * Return: The slab which contains this page.
+ */
+#define page_slab(p) (_Generic((p), \
+	const struct page *: (const struct slab *)(p), \
+	struct page *: (struct slab *)(p)))
+
+/**
+ * slab_page - The first struct page allocated for a slab
+ * @slab: The slab.
+ *
+ * A convenience wrapper for converting slab to the first struct page of the
+ * underlying folio, to communicate with code not yet converted to folio or
+ * struct slab.
+ */
+#define slab_page(s) folio_page(slab_folio(s), 0)
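Because all four helpers go through _Generic, the conversions cost nothing at runtime and preserve constness: a const struct slab * can only yield a const struct folio *. A round-trip sketch; `slab_conversions_demo()` is an illustrative name, not kernel API:

```c
/* Illustrative only: three views of the same underlying memory. */
static void slab_conversions_demo(struct slab *slab)
{
	struct folio *folio = slab_folio(slab);	/* slab -> folio */
	struct page *page = slab_page(slab);	/* slab -> head page */

	/* ... call mm code that still wants a folio or a page ... */

	VM_BUG_ON(folio_slab(folio) != slab);	/* folio -> slab */
	VM_BUG_ON(page_slab(page) != slab);	/* page -> slab */
}
```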
+
+/*
+ * If network-based swap is enabled, sl*b must keep track of whether pages
+ * were allocated from pfmemalloc reserves.
+ */
+static inline bool slab_test_pfmemalloc(const struct slab *slab)
+{
+	return folio_test_active((struct folio *)slab_folio(slab));
+}
+
+static inline void slab_set_pfmemalloc(struct slab *slab)
+{
+	folio_set_active(slab_folio(slab));
+}
+
+static inline void slab_clear_pfmemalloc(struct slab *slab)
+{
+	folio_clear_active(slab_folio(slab));
+}
+
+static inline void __slab_clear_pfmemalloc(struct slab *slab)
+{
+	__folio_clear_active(slab_folio(slab));
+}
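These helpers park the pfmemalloc bit on the folio's active flag, which is free for reuse because slab folios are never placed on the LRU lists; the __ variant is the non-atomic clear for teardown paths where nobody else can see the slab anymore. A sketch of how the pair is meant to be used across a slab's lifetime, with made-up function names:

```c
/* Illustrative only: record and drop the pfmemalloc state. */
static void sketch_account_new_slab(struct slab *slab)
{
	/* Remember that the backing pages came from reserves. */
	if (page_is_pfmemalloc(slab_page(slab)))
		slab_set_pfmemalloc(slab);
}

static void sketch_teardown_slab(struct slab *slab)
{
	/* Non-atomic clear: the slab is no longer reachable by others. */
	__slab_clear_pfmemalloc(slab);
}
```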
+
+static inline void *slab_address(const struct slab *slab)
+{
+	return folio_address(slab_folio(slab));
+}
+
+static inline int slab_nid(const struct slab *slab)
+{
+	return folio_nid(slab_folio(slab));
+}
+
+static inline pg_data_t *slab_pgdat(const struct slab *slab)
+{
+	return folio_pgdat(slab_folio(slab));
+}
+
+static inline struct slab *virt_to_slab(const void *addr)
+{
+	struct folio *folio = virt_to_folio(addr);
+
+	if (!folio_test_slab(folio))
+		return NULL;
+
+	return folio_slab(folio);
+}
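virt_to_slab() is the entry point for kfree()-style paths: map an arbitrary object address back to its slab, and from there to the owning cache. A minimal sketch, assuming a hypothetical `cache_from_obj_addr()` helper:

```c
/* Illustrative only: object pointer -> owning kmem_cache. */
static struct kmem_cache *cache_from_obj_addr(const void *obj)
{
	struct slab *slab = virt_to_slab(obj);

	if (!slab)	/* not a slab object, e.g. a large kmalloc page */
		return NULL;
	return slab->slab_cache;
}
```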
+
+static inline int slab_order(const struct slab *slab)
+{
+	return folio_order((struct folio *)slab_folio(slab));
+}
+
+static inline size_t slab_size(const struct slab *slab)
+{
+	return PAGE_SIZE << slab_order(slab);
+}
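So an order-1 slab on a 4 KiB-page system spans 8192 bytes. Together with slab_address() this pins down the slab's memory range; a hypothetical one-liner, not part of the patch:

```c
/* Illustrative only: first byte past the slab's memory. */
static inline void *slab_end(const struct slab *slab)
{
	return slab_address(slab) + slab_size(slab);
}
```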
+
#ifdef CONFIG_SLOB
/*
 * Common fields provided in kmem_cache by all slab allocators