@@ -1231,22 +1231,37 @@ int inet_ehash_locks_alloc(struct inet_hashinfo *hashinfo)
 {
 	unsigned int locksz = sizeof(spinlock_t);
 	unsigned int i, nblocks = 1;
+	spinlock_t *ptr = NULL;
 
-	if (locksz != 0) {
-		/* allocate 2 cache lines or at least one spinlock per cpu */
-		nblocks = max(2U * L1_CACHE_BYTES / locksz, 1U);
-		nblocks = roundup_pow_of_two(nblocks * num_possible_cpus());
+	if (locksz == 0)
+		goto set_mask;
 
-		/* no more locks than number of hash buckets */
-		nblocks = min(nblocks, hashinfo->ehash_mask + 1);
+	/* Allocate 2 cache lines or at least one spinlock per cpu. */
+	nblocks = max(2U * L1_CACHE_BYTES / locksz, 1U) * num_possible_cpus();
 
-		hashinfo->ehash_locks = kvmalloc_array(nblocks, locksz, GFP_KERNEL);
-		if (!hashinfo->ehash_locks)
-			return -ENOMEM;
+	/* At least one page per NUMA node. */
+	nblocks = max(nblocks, num_online_nodes() * PAGE_SIZE / locksz);
+
+	nblocks = roundup_pow_of_two(nblocks);
+
+	/* No more locks than number of hash buckets. */
+	nblocks = min(nblocks, hashinfo->ehash_mask + 1);
 
-		for (i = 0; i < nblocks; i++)
-			spin_lock_init(&hashinfo->ehash_locks[i]);
+	if (num_online_nodes() > 1) {
+		/* Use vmalloc() to allow NUMA policy to spread pages
+		 * on all available nodes if desired.
+		 */
+		ptr = vmalloc_array(nblocks, locksz);
+	}
+	if (!ptr) {
+		ptr = kvmalloc_array(nblocks, locksz, GFP_KERNEL);
+		if (!ptr)
+			return -ENOMEM;
 	}
+	for (i = 0; i < nblocks; i++)
+		spin_lock_init(&ptr[i]);
+	hashinfo->ehash_locks = ptr;
+set_mask:
 	hashinfo->ehash_locks_mask = nblocks - 1;
 	return 0;
 }
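For a rough feel of the sizing arithmetic in the new version of the function, the standalone C sketch below replays the same max/roundup/min chain in userspace. The cache-line size, page size, CPU and node counts, spinlock size, and ehash_mask are made-up assumptions, and roundup_pow_of_two(), max() and min() are reimplemented locally, so this only illustrates the arithmetic, not the kernel helpers or allocators.

/* Minimal userspace sketch of the lock-table sizing above.
 * All constants here are stand-in assumptions, not values read
 * from a running kernel.
 */
#include <stdio.h>

#define L1_CACHE_BYTES 64U	/* assumed cache-line size */
#define PAGE_SIZE      4096U	/* assumed page size */

static unsigned int roundup_pow_of_two(unsigned int n)
{
	unsigned int r = 1;

	while (r < n)
		r <<= 1;
	return r;
}

static unsigned int max_u(unsigned int a, unsigned int b) { return a > b ? a : b; }
static unsigned int min_u(unsigned int a, unsigned int b) { return a < b ? a : b; }

int main(void)
{
	unsigned int locksz = 4;		/* assumed sizeof(spinlock_t) */
	unsigned int cpus = 32, nodes = 2;	/* assumed topology */
	unsigned int ehash_mask = 65535;	/* assumed buckets - 1 */
	unsigned int nblocks;

	/* 2 cache lines or at least one spinlock per cpu */
	nblocks = max_u(2U * L1_CACHE_BYTES / locksz, 1U) * cpus;

	/* at least one page of locks per NUMA node */
	nblocks = max_u(nblocks, nodes * PAGE_SIZE / locksz);

	nblocks = roundup_pow_of_two(nblocks);

	/* no more locks than hash buckets */
	nblocks = min_u(nblocks, ehash_mask + 1);

	printf("nblocks = %u (%u bytes)\n", nblocks, nblocks * locksz);
	return 0;
}

With these assumed numbers the per-NUMA-node floor (two pages of locks, 2048 entries) dominates the per-CPU term (1024 entries), which is the case the added num_online_nodes() * PAGE_SIZE / locksz bound is meant to cover.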