Skip to content

Commit fc0eac5

Browse files
brauner
authored and tehcaster committed
slab: pull kmem_cache_open() into do_kmem_cache_create()
do_kmem_cache_create() is the only caller and we're going to pass down struct kmem_cache_args in a follow-up patch.

Reviewed-by: Kees Cook <[email protected]>
Reviewed-by: Jens Axboe <[email protected]>
Reviewed-by: Mike Rapoport (Microsoft) <[email protected]>
Reviewed-by: Vlastimil Babka <[email protected]>
Reviewed-by: Roman Gushchin <[email protected]>
Signed-off-by: Christian Brauner <[email protected]>
Signed-off-by: Vlastimil Babka <[email protected]>
1 parent 34410a9 commit fc0eac5

File tree

1 file changed

+62
-70
lines changed

1 file changed

+62
-70
lines changed

mm/slub.c

Lines changed: 62 additions & 70 deletions
Original file line numberDiff line numberDiff line change
@@ -5290,65 +5290,6 @@ static int calculate_sizes(struct kmem_cache *s)
52905290
return !!oo_objects(s->oo);
52915291
}
52925292

5293-
/*
 * Set up an already-allocated kmem_cache: compute object/slab sizes, tune
 * partial-list limits and per-cpu state, and allocate the per-node and
 * per-cpu structures.
 *
 * Returns 0 on success, -EINVAL on any failure. On failure, any partially
 * initialized state is torn down via __kmem_cache_release() before
 * returning, so the caller does not need to clean up.
 *
 * NOTE(review): removed by this commit — its body was folded into
 * do_kmem_cache_create(), its only caller.
 */
static int kmem_cache_open(struct kmem_cache *s, slab_flags_t flags)
{
	s->flags = kmem_cache_flags(flags, s->name);
#ifdef CONFIG_SLAB_FREELIST_HARDENED
	/* Per-cache random value used to harden freelist pointers. */
	s->random = get_random_long();
#endif

	if (!calculate_sizes(s))
		goto error;
	if (disable_higher_order_debug) {
		/*
		 * Disable debugging flags that store metadata if the min slab
		 * order increased.
		 */
		if (get_order(s->size) > get_order(s->object_size)) {
			s->flags &= ~DEBUG_METADATA_FLAGS;
			s->offset = 0;
			/* Recompute sizes now that the debug metadata is gone. */
			if (!calculate_sizes(s))
				goto error;
		}
	}

#ifdef system_has_freelist_aba
	if (system_has_freelist_aba() && !(s->flags & SLAB_NO_CMPXCHG)) {
		/* Enable fast mode */
		s->flags |= __CMPXCHG_DOUBLE;
	}
#endif

	/*
	 * The larger the object size is, the more slabs we want on the partial
	 * list to avoid pounding the page allocator excessively.
	 */
	s->min_partial = min_t(unsigned long, MAX_PARTIAL, ilog2(s->size) / 2);
	s->min_partial = max_t(unsigned long, MIN_PARTIAL, s->min_partial);

	set_cpu_partial(s);

#ifdef CONFIG_NUMA
	s->remote_node_defrag_ratio = 1000;
#endif

	/* Initialize the pre-computed randomized freelist if slab is up */
	if (slab_state >= UP) {
		if (init_cache_random_seq(s))
			goto error;
	}

	if (!init_kmem_cache_nodes(s))
		goto error;

	/* alloc_kmem_cache_cpus() returning nonzero is the success path. */
	if (alloc_kmem_cache_cpus(s))
		return 0;

error:
	__kmem_cache_release(s);
	return -EINVAL;
}
5351-
53525293
static void list_slab_objects(struct kmem_cache *s, struct slab *slab,
53535294
const char *text)
53545295
{
@@ -5904,26 +5845,77 @@ __kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
59045845

59055846
/*
 * Fully initialize and register an already-allocated kmem_cache.
 *
 * This is the post-commit version: the former kmem_cache_open() body was
 * pulled in here (it was the only caller), replacing the two-step
 * open-then-register flow with a single function using one unified error
 * path.
 *
 * Returns 0 on success or a negative errno (-EINVAL for sizing/allocation
 * failures, or whatever sysfs_slab_add() returned). On any failure, the
 * out: label tears down partial state via __kmem_cache_release() before
 * returning, so callers need no extra cleanup.
 */
int do_kmem_cache_create(struct kmem_cache *s, slab_flags_t flags)
{
	/* Default to failure; every early goto out relies on this. */
	int err = -EINVAL;

	s->flags = kmem_cache_flags(flags, s->name);
#ifdef CONFIG_SLAB_FREELIST_HARDENED
	/* Per-cache random value used to harden freelist pointers. */
	s->random = get_random_long();
#endif

	if (!calculate_sizes(s))
		goto out;
	if (disable_higher_order_debug) {
		/*
		 * Disable debugging flags that store metadata if the min slab
		 * order increased.
		 */
		if (get_order(s->size) > get_order(s->object_size)) {
			s->flags &= ~DEBUG_METADATA_FLAGS;
			s->offset = 0;
			/* Recompute sizes now that the debug metadata is gone. */
			if (!calculate_sizes(s))
				goto out;
		}
	}

#ifdef system_has_freelist_aba
	if (system_has_freelist_aba() && !(s->flags & SLAB_NO_CMPXCHG)) {
		/* Enable fast mode */
		s->flags |= __CMPXCHG_DOUBLE;
	}
#endif

	/*
	 * The larger the object size is, the more slabs we want on the partial
	 * list to avoid pounding the page allocator excessively.
	 */
	s->min_partial = min_t(unsigned long, MAX_PARTIAL, ilog2(s->size) / 2);
	s->min_partial = max_t(unsigned long, MIN_PARTIAL, s->min_partial);

	set_cpu_partial(s);

#ifdef CONFIG_NUMA
	s->remote_node_defrag_ratio = 1000;
#endif

	/* Initialize the pre-computed randomized freelist if slab is up */
	if (slab_state >= UP) {
		if (init_cache_random_seq(s))
			goto out;
	}

	if (!init_kmem_cache_nodes(s))
		goto out;

	if (!alloc_kmem_cache_cpus(s))
		goto out;

	/* Mutex is not taken during early boot */
	if (slab_state <= UP) {
		/* Early boot: skip sysfs/debugfs registration; success. */
		err = 0;
		goto out;
	}

	err = sysfs_slab_add(s);
	if (err)
		goto out;

	if (s->flags & SLAB_STORE_USER)
		debugfs_slab_add(s);

out:
	/* Single exit: release everything iff we are failing. */
	if (err)
		__kmem_cache_release(s);
	return err;
}
59285920

59295921
#ifdef SLAB_SUPPORTS_SYSFS

0 commit comments

Comments
 (0)