
Commit fa2343e

Alexander Duyck authored and Jeff Kirsher committed
i40e/i40evf: Break i40e_fetch_rx_buffer up to allow for reuse of frag code
This patch is meant to clean up the code in preparation for us adding support for build_skb. Specifically we deconstruct i40e_fetch_rx_buffer into several functions so that those functions can later be reused when we add a path for build_skb. Specifically with this change we split out the code for adding a page to an existing skb.

Change-ID: Iab1efbab6b8b97cb60ab9fdd0be1d37a056a154d
Signed-off-by: Alexander Duyck <[email protected]>
Tested-by: Andrew Bowers <[email protected]>
Signed-off-by: Jeff Kirsher <[email protected]>
1 parent a0cfc31 commit fa2343e
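
The heart of the refactor shows up in i40e_clean_rx_irq() below: the old all-in-one i40e_fetch_rx_buffer() is replaced by a dispatch between two helpers, i40e_construct_skb() for the first buffer of a packet and i40e_add_rx_frag() for follow-on buffers. A minimal sketch of that dispatch, condensed from the diff (the surrounding descriptor handling is omitted):

        /* Condensed sketch of the new receive-path dispatch, not the full loop. */
        if (skb)
                /* an skb already exists for this packet; chain the page as a frag */
                i40e_add_rx_frag(rx_ring, rx_buffer, skb, size);
        else
                /* first buffer of the packet; allocate the skb and copy the headers */
                skb = i40e_construct_skb(rx_ring, rx_buffer, size);

        if (!skb) {
                /* allocation failed; leave the page with the ring and retry later */
                rx_ring->rx_stats.alloc_buff_failed++;
                rx_buffer->pagecnt_bias++;
                break;
        }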

File tree

2 files changed, +130 -146 lines changed


drivers/net/ethernet/intel/i40e/i40e_txrx.c

Lines changed: 65 additions & 73 deletions
@@ -1687,61 +1687,23 @@ static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer)
  * @size: packet length from rx_desc
  *
  * This function will add the data contained in rx_buffer->page to the skb.
- * This is done either through a direct copy if the data in the buffer is
- * less than the skb header size, otherwise it will just attach the page as
- * a frag to the skb.
+ * It will just attach the page as a frag to the skb.
  *
- * The function will then update the page offset if necessary and return
- * true if the buffer can be reused by the adapter.
+ * The function will then update the page offset.
  **/
 static void i40e_add_rx_frag(struct i40e_ring *rx_ring,
                              struct i40e_rx_buffer *rx_buffer,
                              struct sk_buff *skb,
                              unsigned int size)
 {
-       struct page *page = rx_buffer->page;
-       unsigned char *va = page_address(page) + rx_buffer->page_offset;
 #if (PAGE_SIZE < 8192)
        unsigned int truesize = I40E_RXBUFFER_2048;
 #else
-       unsigned int truesize = ALIGN(size, L1_CACHE_BYTES);
+       unsigned int truesize = SKB_DATA_ALIGN(size);
 #endif
-       unsigned int pull_len;
-
-       if (unlikely(skb_is_nonlinear(skb)))
-               goto add_tail_frag;
-
-       /* will the data fit in the skb we allocated? if so, just
-        * copy it as it is pretty small anyway
-        */
-       if (size <= I40E_RX_HDR_SIZE) {
-               memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
-
-               /* page is to be freed, increase pagecnt_bias instead of
-                * decreasing page count.
-                */
-               rx_buffer->pagecnt_bias++;
-               return;
-       }
-
-       /* we need the header to contain the greater of either
-        * ETH_HLEN or 60 bytes if the skb->len is less than
-        * 60 for skb_pad.
-        */
-       pull_len = eth_get_headlen(va, I40E_RX_HDR_SIZE);
-
-       /* align pull length to size of long to optimize
-        * memcpy performance
-        */
-       memcpy(__skb_put(skb, pull_len), va, ALIGN(pull_len, sizeof(long)));
-
-       /* update all of the pointers */
-       va += pull_len;
-       size -= pull_len;
 
-add_tail_frag:
-       skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
-                       (unsigned long)va & ~PAGE_MASK, size, truesize);
+       skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
+                       rx_buffer->page_offset, size, truesize);
 
        /* page is being used so we must update the page offset */
 #if (PAGE_SIZE < 8192)
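
The frag attach above now leans entirely on the page-offset bookkeeping that follows it (unchanged by this hunk, so it falls outside the diff context). For reference, the same pattern appears later in i40e_construct_skb(): with PAGE_SIZE < 8192 the driver splits each page into two 2K buffers and toggles between the halves, otherwise it advances through the page by the aligned size. A short sketch of that pattern:

        /* Sketch of the half-page reuse scheme used throughout this file. */
#if (PAGE_SIZE < 8192)
        /* two 2K buffers per page: flip to the other half */
        rx_buffer->page_offset ^= truesize;
#else
        /* larger pages: carve the next buffer further into the page */
        rx_buffer->page_offset += truesize;
#endif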
@@ -1781,45 +1743,66 @@ static struct i40e_rx_buffer *i40e_get_rx_buffer(struct i40e_ring *rx_ring,
 }
 
 /**
- * i40e_fetch_rx_buffer - Allocate skb and populate it
+ * i40e_construct_skb - Allocate skb and populate it
  * @rx_ring: rx descriptor ring to transact packets on
  * @rx_buffer: rx buffer to pull data from
  * @size: size of buffer to add to skb
  *
- * This function allocates an skb on the fly, and populates it with the page
- * data from the current receive descriptor, taking care to set up the skb
- * correctly, as well as handling calling the page recycle function if
- * necessary.
+ * This function allocates an skb. It then populates it with the page
+ * data from the current receive descriptor, taking care to set up the
+ * skb correctly.
  */
-static inline
-struct sk_buff *i40e_fetch_rx_buffer(struct i40e_ring *rx_ring,
-                                     struct i40e_rx_buffer *rx_buffer,
-                                     struct sk_buff *skb,
-                                     unsigned int size)
+static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring,
+                                          struct i40e_rx_buffer *rx_buffer,
+                                          unsigned int size)
 {
-       if (likely(!skb)) {
-               void *page_addr = page_address(rx_buffer->page) +
-                                 rx_buffer->page_offset;
+       void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
+#if (PAGE_SIZE < 8192)
+       unsigned int truesize = I40E_RXBUFFER_2048;
+#else
+       unsigned int truesize = SKB_DATA_ALIGN(size);
+#endif
+       unsigned int headlen;
+       struct sk_buff *skb;
 
-               /* prefetch first cache line of first page */
-               prefetch(page_addr);
+       /* prefetch first cache line of first page */
+       prefetch(va);
 #if L1_CACHE_BYTES < 128
-               prefetch(page_addr + L1_CACHE_BYTES);
+       prefetch(va + L1_CACHE_BYTES);
 #endif
 
-               /* allocate a skb to store the frags */
-               skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
-                                      I40E_RX_HDR_SIZE,
-                                      GFP_ATOMIC | __GFP_NOWARN);
-               if (unlikely(!skb)) {
-                       rx_ring->rx_stats.alloc_buff_failed++;
-                       rx_buffer->pagecnt_bias++;
-                       return NULL;
-               }
-       }
+       /* allocate a skb to store the frags */
+       skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
+                              I40E_RX_HDR_SIZE,
+                              GFP_ATOMIC | __GFP_NOWARN);
+       if (unlikely(!skb))
+               return NULL;
+
+       /* Determine available headroom for copy */
+       headlen = size;
+       if (headlen > I40E_RX_HDR_SIZE)
+               headlen = eth_get_headlen(va, I40E_RX_HDR_SIZE);
 
-       /* pull page into skb */
-       i40e_add_rx_frag(rx_ring, rx_buffer, skb, size);
+       /* align pull length to size of long to optimize memcpy performance */
+       memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long)));
+
+       /* update all of the pointers */
+       size -= headlen;
+       if (size) {
+               skb_add_rx_frag(skb, 0, rx_buffer->page,
+                               rx_buffer->page_offset + headlen,
+                               size, truesize);
+
+               /* buffer is used by skb, update page_offset */
+#if (PAGE_SIZE < 8192)
+               rx_buffer->page_offset ^= truesize;
+#else
+               rx_buffer->page_offset += truesize;
+#endif
+       } else {
+               /* buffer is unused, reset bias back to rx_buffer */
+               rx_buffer->pagecnt_bias++;
+       }
 
        return skb;
 }
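
The new i40e_construct_skb() keeps the old pull-header behaviour but states it more directly: up to I40E_RX_HDR_SIZE bytes of headers are copied into the skb's linear area, with eth_get_headlen() used to find a sensible header boundary, and any remaining payload is attached as page frag 0. A simplified sketch of that split, with the prefetch and allocation details dropped:

        /* Simplified sketch of the header/payload split in i40e_construct_skb(). */
        unsigned int headlen = size;

        if (headlen > I40E_RX_HDR_SIZE)
                headlen = eth_get_headlen(va, I40E_RX_HDR_SIZE);

        /* copy the headers into the linear area, rounded up to a long */
        memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long)));

        size -= headlen;
        if (size)
                /* the payload stays in the page and rides along as frag 0 */
                skb_add_rx_frag(skb, 0, rx_buffer->page,
                                rx_buffer->page_offset + headlen, size, truesize);
        else
                /* everything was copied, so hand the page back to the ring */
                rx_buffer->pagecnt_bias++;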
@@ -1944,9 +1927,18 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
 
                rx_buffer = i40e_get_rx_buffer(rx_ring, size);
 
-               skb = i40e_fetch_rx_buffer(rx_ring, rx_buffer, skb, size);
-               if (!skb)
+               /* retrieve a buffer from the ring */
+               if (skb)
+                       i40e_add_rx_frag(rx_ring, rx_buffer, skb, size);
+               else
+                       skb = i40e_construct_skb(rx_ring, rx_buffer, size);
+
+               /* exit if we failed to retrieve a buffer */
+               if (!skb) {
+                       rx_ring->rx_stats.alloc_buff_failed++;
+                       rx_buffer->pagecnt_bias++;
                        break;
+               }
 
                i40e_put_rx_buffer(rx_ring, rx_buffer);
                cleaned_count++;

drivers/net/ethernet/intel/i40evf/i40e_txrx.c

Lines changed: 65 additions & 73 deletions
@@ -1045,61 +1045,23 @@ static bool i40e_can_reuse_rx_page(struct i40e_rx_buffer *rx_buffer)
  * @size: packet length from rx_desc
  *
  * This function will add the data contained in rx_buffer->page to the skb.
- * This is done either through a direct copy if the data in the buffer is
- * less than the skb header size, otherwise it will just attach the page as
- * a frag to the skb.
+ * It will just attach the page as a frag to the skb.
  *
- * The function will then update the page offset if necessary and return
- * true if the buffer can be reused by the adapter.
+ * The function will then update the page offset.
  **/
 static void i40e_add_rx_frag(struct i40e_ring *rx_ring,
                              struct i40e_rx_buffer *rx_buffer,
                              struct sk_buff *skb,
                              unsigned int size)
 {
-       struct page *page = rx_buffer->page;
-       unsigned char *va = page_address(page) + rx_buffer->page_offset;
 #if (PAGE_SIZE < 8192)
        unsigned int truesize = I40E_RXBUFFER_2048;
 #else
-       unsigned int truesize = ALIGN(size, L1_CACHE_BYTES);
+       unsigned int truesize = SKB_DATA_ALIGN(size);
 #endif
-       unsigned int pull_len;
-
-       if (unlikely(skb_is_nonlinear(skb)))
-               goto add_tail_frag;
-
-       /* will the data fit in the skb we allocated? if so, just
-        * copy it as it is pretty small anyway
-        */
-       if (size <= I40E_RX_HDR_SIZE) {
-               memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
-
-               /* page is to be freed, increase pagecnt_bias instead of
-                * decreasing page count.
-                */
-               rx_buffer->pagecnt_bias++;
-               return;
-       }
-
-       /* we need the header to contain the greater of either
-        * ETH_HLEN or 60 bytes if the skb->len is less than
-        * 60 for skb_pad.
-        */
-       pull_len = eth_get_headlen(va, I40E_RX_HDR_SIZE);
-
-       /* align pull length to size of long to optimize
-        * memcpy performance
-        */
-       memcpy(__skb_put(skb, pull_len), va, ALIGN(pull_len, sizeof(long)));
-
-       /* update all of the pointers */
-       va += pull_len;
-       size -= pull_len;
 
-add_tail_frag:
-       skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page,
-                       (unsigned long)va & ~PAGE_MASK, size, truesize);
+       skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page,
+                       rx_buffer->page_offset, size, truesize);
 
        /* page is being used so we must update the page offset */
 #if (PAGE_SIZE < 8192)
@@ -1139,45 +1101,66 @@ static struct i40e_rx_buffer *i40e_get_rx_buffer(struct i40e_ring *rx_ring,
 }
 
 /**
- * i40evf_fetch_rx_buffer - Allocate skb and populate it
+ * i40e_construct_skb - Allocate skb and populate it
  * @rx_ring: rx descriptor ring to transact packets on
  * @rx_buffer: rx buffer to pull data from
  * @size: size of buffer to add to skb
  *
- * This function allocates an skb on the fly, and populates it with the page
- * data from the current receive descriptor, taking care to set up the skb
- * correctly, as well as handling calling the page recycle function if
- * necessary.
+ * This function allocates an skb. It then populates it with the page
+ * data from the current receive descriptor, taking care to set up the
+ * skb correctly.
  */
-static inline
-struct sk_buff *i40evf_fetch_rx_buffer(struct i40e_ring *rx_ring,
-                                       struct i40e_rx_buffer *rx_buffer,
-                                       struct sk_buff *skb,
-                                       unsigned int size)
+static struct sk_buff *i40e_construct_skb(struct i40e_ring *rx_ring,
+                                          struct i40e_rx_buffer *rx_buffer,
+                                          unsigned int size)
 {
-       if (likely(!skb)) {
-               void *page_addr = page_address(rx_buffer->page) +
-                                 rx_buffer->page_offset;
+       void *va = page_address(rx_buffer->page) + rx_buffer->page_offset;
+#if (PAGE_SIZE < 8192)
+       unsigned int truesize = I40E_RXBUFFER_2048;
+#else
+       unsigned int truesize = SKB_DATA_ALIGN(size);
+#endif
+       unsigned int headlen;
+       struct sk_buff *skb;
 
-               /* prefetch first cache line of first page */
-               prefetch(page_addr);
+       /* prefetch first cache line of first page */
+       prefetch(va);
 #if L1_CACHE_BYTES < 128
-               prefetch(page_addr + L1_CACHE_BYTES);
+       prefetch(va + L1_CACHE_BYTES);
 #endif
 
-               /* allocate a skb to store the frags */
-               skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
-                                      I40E_RX_HDR_SIZE,
-                                      GFP_ATOMIC | __GFP_NOWARN);
-               if (unlikely(!skb)) {
-                       rx_ring->rx_stats.alloc_buff_failed++;
-                       rx_buffer->pagecnt_bias++;
-                       return NULL;
-               }
-       }
+       /* allocate a skb to store the frags */
+       skb = __napi_alloc_skb(&rx_ring->q_vector->napi,
+                              I40E_RX_HDR_SIZE,
+                              GFP_ATOMIC | __GFP_NOWARN);
+       if (unlikely(!skb))
+               return NULL;
+
+       /* Determine available headroom for copy */
+       headlen = size;
+       if (headlen > I40E_RX_HDR_SIZE)
+               headlen = eth_get_headlen(va, I40E_RX_HDR_SIZE);
 
-       /* pull page into skb */
-       i40e_add_rx_frag(rx_ring, rx_buffer, skb, size);
+       /* align pull length to size of long to optimize memcpy performance */
+       memcpy(__skb_put(skb, headlen), va, ALIGN(headlen, sizeof(long)));
+
+       /* update all of the pointers */
+       size -= headlen;
+       if (size) {
+               skb_add_rx_frag(skb, 0, rx_buffer->page,
+                               rx_buffer->page_offset + headlen,
+                               size, truesize);
+
+               /* buffer is used by skb, update page_offset */
+#if (PAGE_SIZE < 8192)
+               rx_buffer->page_offset ^= truesize;
+#else
+               rx_buffer->page_offset += truesize;
+#endif
+       } else {
+               /* buffer is unused, reset bias back to rx_buffer */
+               rx_buffer->pagecnt_bias++;
+       }
 
        return skb;
 }
@@ -1297,9 +1280,18 @@ static int i40e_clean_rx_irq(struct i40e_ring *rx_ring, int budget)
 
                rx_buffer = i40e_get_rx_buffer(rx_ring, size);
 
-               skb = i40evf_fetch_rx_buffer(rx_ring, rx_buffer, skb, size);
-               if (!skb)
+               /* retrieve a buffer from the ring */
+               if (skb)
+                       i40e_add_rx_frag(rx_ring, rx_buffer, skb, size);
+               else
+                       skb = i40e_construct_skb(rx_ring, rx_buffer, size);
+
+               /* exit if we failed to retrieve a buffer */
+               if (!skb) {
+                       rx_ring->rx_stats.alloc_buff_failed++;
+                       rx_buffer->pagecnt_bias++;
                        break;
+               }
 
                i40e_put_rx_buffer(rx_ring, rx_buffer);
                cleaned_count++;
