Skip to content

Commit 73852e8

Browse files
smagnani authored and davem330 committed
NET_DMA: free skbs periodically
Under NET_DMA, data transfer can grind to a halt when userland issues a large read on a socket with a high RCVLOWAT (i.e., 512 KB for both). This appears to be because the NET_DMA design queues up lots of memcpy operations, but doesn't issue or wait for them (and thus free the associated skbs) until it is time for tcp_recvmesg() to return. The socket hangs when its TCP window goes to zero before enough data is available to satisfy the read. Periodically issue asynchronous memcpy operations, and free skbs for ones that have completed, to prevent sockets from going into zero-window mode. Signed-off-by: Steven J. Magnani <[email protected]> Signed-off-by: David S. Miller <[email protected]>
1 parent f5d410f commit 73852e8

File tree

1 file changed

+43
-20
lines changed

1 file changed

+43
-20
lines changed

net/ipv4/tcp.c

Lines changed: 43 additions & 20 deletions
Original file line numberDiff line numberDiff line change
@@ -1254,6 +1254,39 @@ static void tcp_prequeue_process(struct sock *sk)
12541254
tp->ucopy.memory = 0;
12551255
}
12561256

1257+
#ifdef CONFIG_NET_DMA
1258+
static void tcp_service_net_dma(struct sock *sk, bool wait)
1259+
{
1260+
dma_cookie_t done, used;
1261+
dma_cookie_t last_issued;
1262+
struct tcp_sock *tp = tcp_sk(sk);
1263+
1264+
if (!tp->ucopy.dma_chan)
1265+
return;
1266+
1267+
last_issued = tp->ucopy.dma_cookie;
1268+
dma_async_memcpy_issue_pending(tp->ucopy.dma_chan);
1269+
1270+
do {
1271+
if (dma_async_memcpy_complete(tp->ucopy.dma_chan,
1272+
last_issued, &done,
1273+
&used) == DMA_SUCCESS) {
1274+
/* Safe to free early-copied skbs now */
1275+
__skb_queue_purge(&sk->sk_async_wait_queue);
1276+
break;
1277+
} else {
1278+
struct sk_buff *skb;
1279+
while ((skb = skb_peek(&sk->sk_async_wait_queue)) &&
1280+
(dma_async_is_complete(skb->dma_cookie, done,
1281+
used) == DMA_SUCCESS)) {
1282+
__skb_dequeue(&sk->sk_async_wait_queue);
1283+
kfree_skb(skb);
1284+
}
1285+
}
1286+
} while (wait);
1287+
}
1288+
#endif
1289+
12571290
static inline struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off)
12581291
{
12591292
struct sk_buff *skb;
@@ -1546,6 +1579,10 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
15461579
/* __ Set realtime policy in scheduler __ */
15471580
}
15481581

1582+
#ifdef CONFIG_NET_DMA
1583+
if (tp->ucopy.dma_chan)
1584+
dma_async_memcpy_issue_pending(tp->ucopy.dma_chan);
1585+
#endif
15491586
if (copied >= target) {
15501587
/* Do not sleep, just process backlog. */
15511588
release_sock(sk);
@@ -1554,6 +1591,7 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
15541591
sk_wait_data(sk, &timeo);
15551592

15561593
#ifdef CONFIG_NET_DMA
1594+
tcp_service_net_dma(sk, false); /* Don't block */
15571595
tp->ucopy.wakeup = 0;
15581596
#endif
15591597

@@ -1633,6 +1671,9 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
16331671
copied = -EFAULT;
16341672
break;
16351673
}
1674+
1675+
dma_async_memcpy_issue_pending(tp->ucopy.dma_chan);
1676+
16361677
if ((offset + used) == skb->len)
16371678
copied_early = 1;
16381679

@@ -1702,27 +1743,9 @@ int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
17021743
}
17031744

17041745
#ifdef CONFIG_NET_DMA
1705-
if (tp->ucopy.dma_chan) {
1706-
dma_cookie_t done, used;
1707-
1708-
dma_async_memcpy_issue_pending(tp->ucopy.dma_chan);
1709-
1710-
while (dma_async_memcpy_complete(tp->ucopy.dma_chan,
1711-
tp->ucopy.dma_cookie, &done,
1712-
&used) == DMA_IN_PROGRESS) {
1713-
/* do partial cleanup of sk_async_wait_queue */
1714-
while ((skb = skb_peek(&sk->sk_async_wait_queue)) &&
1715-
(dma_async_is_complete(skb->dma_cookie, done,
1716-
used) == DMA_SUCCESS)) {
1717-
__skb_dequeue(&sk->sk_async_wait_queue);
1718-
kfree_skb(skb);
1719-
}
1720-
}
1746+
tcp_service_net_dma(sk, true); /* Wait for queue to drain */
1747+
tp->ucopy.dma_chan = NULL;
17211748

1722-
/* Safe to free early-copied skbs now */
1723-
__skb_queue_purge(&sk->sk_async_wait_queue);
1724-
tp->ucopy.dma_chan = NULL;
1725-
}
17261749
if (tp->ucopy.pinned_list) {
17271750
dma_unpin_iovec_pages(tp->ucopy.pinned_list);
17281751
tp->ucopy.pinned_list = NULL;

0 commit comments

Comments
 (0)