@@ -5290,65 +5290,6 @@ static int calculate_sizes(struct kmem_cache *s)
        return !!oo_objects(s->oo);
}

-static int kmem_cache_open(struct kmem_cache *s, slab_flags_t flags)
-{
-        s->flags = kmem_cache_flags(flags, s->name);
-#ifdef CONFIG_SLAB_FREELIST_HARDENED
-        s->random = get_random_long();
-#endif
-
-        if (!calculate_sizes(s))
-                goto error;
-        if (disable_higher_order_debug) {
-                /*
-                 * Disable debugging flags that store metadata if the min slab
-                 * order increased.
-                 */
-                if (get_order(s->size) > get_order(s->object_size)) {
-                        s->flags &= ~DEBUG_METADATA_FLAGS;
-                        s->offset = 0;
-                        if (!calculate_sizes(s))
-                                goto error;
-                }
-        }
-
-#ifdef system_has_freelist_aba
-        if (system_has_freelist_aba() && !(s->flags & SLAB_NO_CMPXCHG)) {
-                /* Enable fast mode */
-                s->flags |= __CMPXCHG_DOUBLE;
-        }
-#endif
-
-        /*
-         * The larger the object size is, the more slabs we want on the partial
-         * list to avoid pounding the page allocator excessively.
-         */
-        s->min_partial = min_t(unsigned long, MAX_PARTIAL, ilog2(s->size) / 2);
-        s->min_partial = max_t(unsigned long, MIN_PARTIAL, s->min_partial);
-
-        set_cpu_partial(s);
-
-#ifdef CONFIG_NUMA
-        s->remote_node_defrag_ratio = 1000;
-#endif
-
-        /* Initialize the pre-computed randomized freelist if slab is up */
-        if (slab_state >= UP) {
-                if (init_cache_random_seq(s))
-                        goto error;
-        }
-
-        if (!init_kmem_cache_nodes(s))
-                goto error;
-
-        if (alloc_kmem_cache_cpus(s))
-                return 0;
-
-error:
-        __kmem_cache_release(s);
-        return -EINVAL;
-}
-
static void list_slab_objects(struct kmem_cache *s, struct slab *slab,
                              const char *text)
{
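The min_partial computation being moved out of kmem_cache_open() is worth checking by hand: ilog2(size)/2 grows slowly with object size and is then clamped into [MIN_PARTIAL, MAX_PARTIAL]. A minimal userspace sketch, assuming the MIN_PARTIAL = 5 and MAX_PARTIAL = 10 values defined in mm/slub.c, with a hypothetical ilog2_approx() standing in for the kernel's ilog2() macro:

#include <stdio.h>

/* Assumed bounds; mm/slub.c defines MIN_PARTIAL as 5 and MAX_PARTIAL as 10. */
#define MIN_PARTIAL 5
#define MAX_PARTIAL 10

/* Hypothetical stand-in for the kernel's ilog2(): floor(log2(x)). */
static unsigned long ilog2_approx(unsigned long x)
{
        unsigned long log = 0;

        while (x >>= 1)
                log++;
        return log;
}

int main(void)
{
        unsigned long sizes[] = { 8, 64, 512, 4096, 65536 };

        for (int i = 0; i < 5; i++) {
                /* Same arithmetic as the min_t()/max_t() pair in the diff. */
                unsigned long min_partial = ilog2_approx(sizes[i]) / 2;

                if (min_partial > MAX_PARTIAL)
                        min_partial = MAX_PARTIAL;
                if (min_partial < MIN_PARTIAL)
                        min_partial = MIN_PARTIAL;

                printf("object size %6lu -> min_partial %lu\n",
                       sizes[i], min_partial);
        }
        return 0;
}

For the sizes above this prints 5, 5, 5, 6 and 8: small caches keep the floor of five partial slabs per node, while larger objects keep a few more so the allocator does not pound the page allocator on every miss.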
@@ -5904,26 +5845,77 @@ __kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
int do_kmem_cache_create(struct kmem_cache *s, slab_flags_t flags)
{
-        int err;
+        int err = -EINVAL;

-        err = kmem_cache_open(s, flags);
-        if (err)
-                return err;
+        s->flags = kmem_cache_flags(flags, s->name);
+#ifdef CONFIG_SLAB_FREELIST_HARDENED
+        s->random = get_random_long();
+#endif
+
+        if (!calculate_sizes(s))
+                goto out;
+        if (disable_higher_order_debug) {
+                /*
+                 * Disable debugging flags that store metadata if the min slab
+                 * order increased.
+                 */
+                if (get_order(s->size) > get_order(s->object_size)) {
+                        s->flags &= ~DEBUG_METADATA_FLAGS;
+                        s->offset = 0;
+                        if (!calculate_sizes(s))
+                                goto out;
+                }
+        }
+
+#ifdef system_has_freelist_aba
+        if (system_has_freelist_aba() && !(s->flags & SLAB_NO_CMPXCHG)) {
+                /* Enable fast mode */
+                s->flags |= __CMPXCHG_DOUBLE;
+        }
+#endif
+
+        /*
+         * The larger the object size is, the more slabs we want on the partial
+         * list to avoid pounding the page allocator excessively.
+         */
+        s->min_partial = min_t(unsigned long, MAX_PARTIAL, ilog2(s->size) / 2);
+        s->min_partial = max_t(unsigned long, MIN_PARTIAL, s->min_partial);
+
+        set_cpu_partial(s);
+
+#ifdef CONFIG_NUMA
+        s->remote_node_defrag_ratio = 1000;
+#endif
+
+        /* Initialize the pre-computed randomized freelist if slab is up */
+        if (slab_state >= UP) {
+                if (init_cache_random_seq(s))
+                        goto out;
+        }
+
+        if (!init_kmem_cache_nodes(s))
+                goto out;
+
+        if (!alloc_kmem_cache_cpus(s))
+                goto out;

        /* Mutex is not taken during early boot */
-        if (slab_state <= UP)
-                return 0;
+        if (slab_state <= UP) {
+                err = 0;
+                goto out;
+        }

        err = sysfs_slab_add(s);
-        if (err) {
-                __kmem_cache_release(s);
-                return err;
-        }
+        if (err)
+                goto out;

        if (s->flags & SLAB_STORE_USER)
                debugfs_slab_add(s);

-        return 0;
+out:
+        if (err)
+                __kmem_cache_release(s);
+        return err;
}

#ifdef SLAB_SUPPORTS_SYSFS
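The net effect of the hunk above is a single-exit error-handling shape: err starts pessimistic at -EINVAL, every step jumps to out: on failure, and __kmem_cache_release() runs exactly once for any failure path. A minimal sketch of that idiom, with hypothetical setup_nodes()/setup_cpus()/release_all() helpers standing in for the real init and teardown calls:

#include <errno.h>
#include <stdbool.h>

/* Hypothetical stand-ins for init_kmem_cache_nodes()/alloc_kmem_cache_cpus()
 * and __kmem_cache_release(); each setup step returns true on success. */
static bool setup_nodes(void) { return true; }
static bool setup_cpus(void)  { return true; }
static void release_all(void) { /* undo any partial setup */ }

static int create(void)
{
        int err = -EINVAL;      /* pessimistic default, as in the diff */

        if (!setup_nodes())
                goto out;
        if (!setup_cpus())
                goto out;

        err = 0;                /* all steps succeeded */
out:
        if (err)
                release_all(); /* one cleanup point for every failure path */
        return err;
}

int main(void)
{
        return create();
}

Compared with the removed kmem_cache_open(), which mixed goto error with early returns and duplicated the __kmem_cache_release() call in its caller, the single label means a new failure case only needs a goto rather than its own cleanup code.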