Skip to content

Commit 5c0e820

Browse files
edumazet authored and kuba-moo committed
net: factorize code in kmalloc_reserve()
All kmalloc_reserve() callers have to make the same computation, we can factorize them, to prepare following patch in the series. Signed-off-by: Eric Dumazet <[email protected]> Acked-by: Soheil Hassas Yeganeh <[email protected]> Acked-by: Paolo Abeni <[email protected]> Reviewed-by: Alexander Duyck <[email protected]> Signed-off-by: Jakub Kicinski <[email protected]>
1 parent 65998d2 commit 5c0e820

File tree

1 file changed

+11
-16
lines changed

1 file changed

+11
-16
lines changed

net/core/skbuff.c

Lines changed: 11 additions & 16 deletions
Original file line numberDiff line numberDiff line change
@@ -478,25 +478,28 @@ EXPORT_SYMBOL(napi_build_skb);
478478
* may be used. Otherwise, the packet data may be discarded until enough
479479
* memory is free
480480
*/
481-
static void *kmalloc_reserve(size_t size, gfp_t flags, int node,
481+
static void *kmalloc_reserve(unsigned int *size, gfp_t flags, int node,
482482
bool *pfmemalloc)
483483
{
484-
void *obj;
485484
bool ret_pfmemalloc = false;
485+
unsigned int obj_size;
486+
void *obj;
486487

488+
obj_size = SKB_HEAD_ALIGN(*size);
489+
*size = obj_size = kmalloc_size_roundup(obj_size);
487490
/*
488491
* Try a regular allocation, when that fails and we're not entitled
489492
* to the reserves, fail.
490493
*/
491-
obj = kmalloc_node_track_caller(size,
494+
obj = kmalloc_node_track_caller(obj_size,
492495
flags | __GFP_NOMEMALLOC | __GFP_NOWARN,
493496
node);
494497
if (obj || !(gfp_pfmemalloc_allowed(flags)))
495498
goto out;
496499

497500
/* Try again but now we are using pfmemalloc reserves */
498501
ret_pfmemalloc = true;
499-
obj = kmalloc_node_track_caller(size, flags, node);
502+
obj = kmalloc_node_track_caller(obj_size, flags, node);
500503

501504
out:
502505
if (pfmemalloc)
@@ -557,9 +560,7 @@ struct sk_buff *__alloc_skb(unsigned int size, gfp_t gfp_mask,
557560
* aligned memory blocks, unless SLUB/SLAB debug is enabled.
558561
* Both skb->head and skb_shared_info are cache line aligned.
559562
*/
560-
size = SKB_HEAD_ALIGN(size);
561-
size = kmalloc_size_roundup(size);
562-
data = kmalloc_reserve(size, gfp_mask, node, &pfmemalloc);
563+
data = kmalloc_reserve(&size, gfp_mask, node, &pfmemalloc);
563564
if (unlikely(!data))
564565
goto nodata;
565566
/* kmalloc_size_roundup() might give us more room than requested.
@@ -1933,9 +1934,7 @@ int pskb_expand_head(struct sk_buff *skb, int nhead, int ntail,
19331934
if (skb_pfmemalloc(skb))
19341935
gfp_mask |= __GFP_MEMALLOC;
19351936

1936-
size = SKB_HEAD_ALIGN(size);
1937-
size = kmalloc_size_roundup(size);
1938-
data = kmalloc_reserve(size, gfp_mask, NUMA_NO_NODE, NULL);
1937+
data = kmalloc_reserve(&size, gfp_mask, NUMA_NO_NODE, NULL);
19391938
if (!data)
19401939
goto nodata;
19411940
size = SKB_WITH_OVERHEAD(size);
@@ -6283,9 +6282,7 @@ static int pskb_carve_inside_header(struct sk_buff *skb, const u32 off,
62836282
if (skb_pfmemalloc(skb))
62846283
gfp_mask |= __GFP_MEMALLOC;
62856284

6286-
size = SKB_HEAD_ALIGN(size);
6287-
size = kmalloc_size_roundup(size);
6288-
data = kmalloc_reserve(size, gfp_mask, NUMA_NO_NODE, NULL);
6285+
data = kmalloc_reserve(&size, gfp_mask, NUMA_NO_NODE, NULL);
62896286
if (!data)
62906287
return -ENOMEM;
62916288
size = SKB_WITH_OVERHEAD(size);
@@ -6401,9 +6398,7 @@ static int pskb_carve_inside_nonlinear(struct sk_buff *skb, const u32 off,
64016398
if (skb_pfmemalloc(skb))
64026399
gfp_mask |= __GFP_MEMALLOC;
64036400

6404-
size = SKB_HEAD_ALIGN(size);
6405-
size = kmalloc_size_roundup(size);
6406-
data = kmalloc_reserve(size, gfp_mask, NUMA_NO_NODE, NULL);
6401+
data = kmalloc_reserve(&size, gfp_mask, NUMA_NO_NODE, NULL);
64076402
if (!data)
64086403
return -ENOMEM;
64096404
size = SKB_WITH_OVERHEAD(size);

0 commit comments

Comments (0)