From aecff16840bc2d875b6087dfbc736b13e44300fb Mon Sep 17 00:00:00 2001
From: Paul Sokolovsky
Date: Fri, 28 Apr 2017 22:05:29 +0300
Subject: [PATCH] WIP net: tcp: Explicitly manage TCP receive window.

Challenge the existing assumption that "if the application buffers
data, that's the application's problem". It is actually the stack's
problem, because the stack does not let the application control the
receive window. Without that control, any finite buffer will
eventually overflow, the peer's packets will be dropped, the peer will
not receive ACKs for them and will apply exponential backoff, and the
connection will crawl to a halt.

This patch adds a net_context_tcp_recved() function which an
application must explicitly call when it *processes* data, to advance
the receive window.

Jira: ZEP-1999

Change-Id: Id7255df3d4898e289a2d20e7a02fd5f3f8f05291
Signed-off-by: Paul Sokolovsky
---
 subsys/net/ip/net_context.c | 16 +++++++++++++++-
 subsys/net/ip/tcp.c         |  4 +++-
 subsys/net/ip/tcp.h         |  2 ++
 3 files changed, 20 insertions(+), 2 deletions(-)

diff --git a/subsys/net/ip/net_context.c b/subsys/net/ip/net_context.c
index b0974308f9281..51afdc2c2eb8a 100644
--- a/subsys/net/ip/net_context.c
+++ b/subsys/net/ip/net_context.c
@@ -825,10 +825,19 @@ NET_CONN_CB(tcp_established)
 	}
 
 	set_appdata_values(pkt, IPPROTO_TCP);
-	context->tcp->send_ack += net_pkt_appdatalen(pkt);
+
+	uint16_t data_len = net_pkt_appdatalen(pkt);
+	if (data_len > get_recv_wnd(context->tcp)) {
+		NET_ERR("Context %p: overflow of recv window (%d vs %d), pkt dropped",
+			context, get_recv_wnd(context->tcp), data_len);
+		return NET_DROP;
+	}
+	context->tcp->recv_wnd -= data_len;
 
 	ret = packet_received(conn, pkt, context->tcp->recv_user_data);
 
+	context->tcp->send_ack += data_len;
+
 	if (tcp_flags & NET_TCP_FIN) {
 		/* Sending an ACK in the CLOSE_WAIT state will transition to
 		 * LAST_ACK state
@@ -2043,3 +2052,8 @@ void net_context_init(void)
 
 	k_sem_give(&contexts_lock);
 }
+
+void net_context_tcp_recved(struct net_context *context, unsigned int len)
+{
+	context->tcp->recv_wnd += len;
+}
diff --git a/subsys/net/ip/tcp.c b/subsys/net/ip/tcp.c
index 0447b81c07a68..94e1f0d2bb641 100644
--- a/subsys/net/ip/tcp.c
+++ b/subsys/net/ip/tcp.c
@@ -191,6 +191,7 @@ struct net_tcp *net_tcp_alloc(struct net_context *context)
 
 	tcp_context[i].send_seq = init_isn();
 	tcp_context[i].recv_max_ack = tcp_context[i].send_seq + 1u;
+	tcp_context[i].recv_wnd = min(NET_TCP_MAX_WIN, NET_TCP_BUF_MAX_LEN);
 
 	tcp_context[i].accept_cb = NULL;
 
@@ -356,8 +357,9 @@ static struct net_pkt *prepare_segment(struct net_tcp *tcp,
 	return pkt;
 }
 
-static inline u32_t get_recv_wnd(struct net_tcp *tcp)
+u32_t get_recv_wnd(struct net_tcp *tcp)
 {
+	return tcp->recv_wnd;
 	ARG_UNUSED(tcp);
 
 	/* We don't queue received data inside the stack, we hand off
diff --git a/subsys/net/ip/tcp.h b/subsys/net/ip/tcp.h
index 844c9f2031e36..32e3b700e4428 100644
--- a/subsys/net/ip/tcp.h
+++ b/subsys/net/ip/tcp.h
@@ -151,6 +151,8 @@ struct net_tcp {
 	 * Semaphore to signal TCP connection completion
 	 */
 	struct k_sem connect_wait;
+
+	uint16_t recv_wnd;
 };
 
 static inline bool net_tcp_is_used(struct net_tcp *tcp)
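
Usage note (not part of the patch): below is a minimal sketch of how an
application would be expected to drive the new call from its receive
callback. It assumes the net_context receive callback signature of this
Zephyr version, that net_context_tcp_recved() gets an exported prototype
in net_context.h, and that app_consume() stands in for whatever the
application actually does with the payload; both names are hypothetical.

#include <net/net_context.h>
#include <net/net_pkt.h>

/* Hypothetical application-side handler: parse or copy the payload. */
static void app_consume(struct net_pkt *pkt)
{
	/* ... work on net_pkt_appdata(pkt) / net_pkt_appdatalen(pkt) ... */
}

/* Receive callback registered with net_context_recv(). */
static void tcp_recv_cb(struct net_context *context, struct net_pkt *pkt,
			int status, void *user_data)
{
	unsigned int len;

	if (!pkt) {
		/* Connection closed or error reported via status. */
		return;
	}

	len = net_pkt_appdatalen(pkt);

	app_consume(pkt);
	net_pkt_unref(pkt);

	/* The data has been processed, so give the consumed bytes back
	 * to the receive window; otherwise the advertised window only
	 * shrinks and the peer eventually stalls.
	 */
	net_context_tcp_recved(context, len);
}

The callback itself would be registered in the usual way, e.g. with
net_context_recv(context, tcp_recv_cb, K_NO_WAIT, NULL). If the
application queues the packet instead of consuming it on the spot, the
net_context_tcp_recved() call simply moves to the point where the
queued data is finally processed.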