@@ -1261,22 +1261,37 @@ int inet_ehash_locks_alloc(struct inet_hashinfo *hashinfo)
 {
 	unsigned int locksz = sizeof(spinlock_t);
 	unsigned int i, nblocks = 1;
+	spinlock_t *ptr = NULL;
 
-	if (locksz != 0) {
-		/* allocate 2 cache lines or at least one spinlock per cpu */
-		nblocks = max(2U * L1_CACHE_BYTES / locksz, 1U);
-		nblocks = roundup_pow_of_two(nblocks * num_possible_cpus());
+	if (locksz == 0)
+		goto set_mask;
 
-		/* no more locks than number of hash buckets */
-		nblocks = min(nblocks, hashinfo->ehash_mask + 1);
+	/* Allocate 2 cache lines or at least one spinlock per cpu. */
+	nblocks = max(2U * L1_CACHE_BYTES / locksz, 1U) * num_possible_cpus();
 
-		hashinfo->ehash_locks = kvmalloc_array(nblocks, locksz, GFP_KERNEL);
-		if (!hashinfo->ehash_locks)
-			return -ENOMEM;
+	/* At least one page per NUMA node. */
+	nblocks = max(nblocks, num_online_nodes() * PAGE_SIZE / locksz);
+
+	nblocks = roundup_pow_of_two(nblocks);
+
+	/* No more locks than number of hash buckets. */
+	nblocks = min(nblocks, hashinfo->ehash_mask + 1);
 
-		for (i = 0; i < nblocks; i++)
-			spin_lock_init(&hashinfo->ehash_locks[i]);
+	if (num_online_nodes() > 1) {
+		/* Use vmalloc() to allow NUMA policy to spread pages
+		 * on all available nodes if desired.
+		 */
+		ptr = vmalloc_array(nblocks, locksz);
+	}
+	if (!ptr) {
+		ptr = kvmalloc_array(nblocks, locksz, GFP_KERNEL);
+		if (!ptr)
+			return -ENOMEM;
 	}
+	for (i = 0; i < nblocks; i++)
+		spin_lock_init(&ptr[i]);
+	hashinfo->ehash_locks = ptr;
+set_mask:
 	hashinfo->ehash_locks_mask = nblocks - 1;
 	return 0;
 }
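For a rough sense of what the new sizing arithmetic yields, the standalone sketch below (not part of the commit) walks the same steps with assumed values: a 4-byte spinlock_t, 64-byte L1 cache lines, 4 KiB pages, 16 possible CPUs, 2 online NUMA nodes, and a 65536-bucket ehash table. The roundup_pow_of_two() helper and the open-coded max()/min() checks are plain stand-ins for the kernel macros; all numbers are hypothetical.

#include <stdio.h>

/* Assumed values, for illustration only. */
#define L1_CACHE_BYTES	64U
#define PAGE_SIZE	4096U

static unsigned int roundup_pow_of_two(unsigned int n)
{
	unsigned int r = 1;

	while (r < n)
		r <<= 1;
	return r;
}

int main(void)
{
	unsigned int locksz = 4;		/* assumed sizeof(spinlock_t) */
	unsigned int cpus = 16;			/* assumed num_possible_cpus() */
	unsigned int nodes = 2;			/* assumed num_online_nodes() */
	unsigned int ehash_mask = 65535;	/* assumed hashinfo->ehash_mask */
	unsigned int nblocks;

	/* Two cache lines, or at least one lock, per cpu:
	 * max(2 * 64 / 4, 1) * 16 = 32 * 16 = 512.
	 */
	nblocks = 2U * L1_CACHE_BYTES / locksz;
	if (nblocks < 1U)
		nblocks = 1U;
	nblocks *= cpus;

	/* At least one page per NUMA node: max(512, 2 * 4096 / 4) = 2048. */
	if (nblocks < nodes * PAGE_SIZE / locksz)
		nblocks = nodes * PAGE_SIZE / locksz;

	/* Already a power of two here, so this is a no-op: 2048. */
	nblocks = roundup_pow_of_two(nblocks);

	/* No more locks than hash buckets: min(2048, 65536) = 2048. */
	if (nblocks > ehash_mask + 1)
		nblocks = ehash_mask + 1;

	printf("nblocks = %u\n", nblocks);	/* prints "nblocks = 2048" */
	return 0;
}

With two online nodes the result spans at least one page per node, which is the case the commit's comments call out: the lock array is then allocated with vmalloc_array() so a NUMA memory policy can spread its pages across the available nodes.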