3939
4040#define ALLOC_TIMEOUT K_MSEC(500)
4141
42+ static int net_tcp_queue_pkt (struct net_context * context , struct net_pkt * pkt );
43+
4244/*
4345 * Each TCP connection needs to be tracked by net_context, so
4446 * we need to allocate equal number of control structures here.
@@ -205,7 +207,7 @@ static void tcp_retry_expired(struct k_work *work)
205207 struct net_tcp * tcp = CONTAINER_OF (work , struct net_tcp , retry_timer );
206208 struct net_pkt * pkt ;
207209
208- /* Double the retry period for exponential backoff and resent
210+ /* Double the retry period for exponential backoff and resend
209211 * the first (only the first!) unack'd packet.
210212 */
211213 if (!sys_slist_is_empty (& tcp -> sent_list )) {
@@ -826,6 +828,14 @@ int net_tcp_queue_data(struct net_context *context, struct net_pkt *pkt)
826828
827829 net_stats_update_tcp_sent (net_pkt_iface (pkt ), data_len );
828830
831+ return net_tcp_queue_pkt (context , pkt );
832+ }
833+
834+ /* This function is the sole point of *adding* packets to tcp->sent_list,
835+ * and should remain such.
836+ */
837+ static int net_tcp_queue_pkt (struct net_context * context , struct net_pkt * pkt )
838+ {
829839 sys_slist_append (& context -> tcp -> sent_list , & pkt -> sent_list );
830840
831841 /* We need to restart retry_timer if it is stopped. */
@@ -1005,7 +1015,6 @@ bool net_tcp_ack_received(struct net_context *ctx, u32_t ack)
10051015 sys_slist_t * list = & ctx -> tcp -> sent_list ;
10061016 sys_snode_t * head ;
10071017 struct net_pkt * pkt ;
1008- u32_t seq ;
10091018 bool valid_ack = false;
10101019
10111020 if (net_tcp_seq_greater (ack , ctx -> tcp -> send_seq )) {
@@ -1020,6 +1029,8 @@ bool net_tcp_ack_received(struct net_context *ctx, u32_t ack)
10201029
10211030 while (!sys_slist_is_empty (list )) {
10221031 struct net_tcp_hdr hdr , * tcp_hdr ;
1032+ u32_t last_seq ;
1033+ u32_t seq_len ;
10231034
10241035 head = sys_slist_peek_head (list );
10251036 pkt = CONTAINER_OF (head , struct net_pkt , sent_list );
@@ -1035,9 +1046,26 @@ bool net_tcp_ack_received(struct net_context *ctx, u32_t ack)
10351046 continue ;
10361047 }
10371048
1038- seq = sys_get_be32 (tcp_hdr -> seq ) + net_pkt_appdatalen (pkt ) - 1 ;
1049+ seq_len = net_pkt_appdatalen (pkt );
1050+
1051+ /* Each of SYN and FIN flags are counted
1052+ * as one sequence number.
1053+ */
1054+ if (tcp_hdr -> flags & NET_TCP_SYN ) {
1055+ seq_len += 1 ;
1056+ }
1057+ if (tcp_hdr -> flags & NET_TCP_FIN ) {
1058+ seq_len += 1 ;
1059+ }
1060+
1061+ /* Last sequence number in this packet. */
1062+ last_seq = sys_get_be32 (tcp_hdr -> seq ) + seq_len - 1 ;
10391063
1040- if (!net_tcp_seq_greater (ack , seq )) {
1064+ /* Ack number should be strictly greater than acknowledged numbers
1065+ * below it. For example, ack no. 10 acknowledges all numbers up
1066+ * to and including 9.
1067+ */
1068+ if (!net_tcp_seq_greater (ack , last_seq )) {
10411069 break ;
10421070 }
10431071
@@ -1056,13 +1084,13 @@ bool net_tcp_ack_received(struct net_context *ctx, u32_t ack)
10561084 valid_ack = true;
10571085 }
10581086
1059- /* Restart the timer on a valid inbound ACK. This isn't quite the
1060- * same behavior as per-packet retry timers, but is close in practice
1061- * (it starts retries one timer period after the connection
1087+ /* Restart the timer (if needed) on a valid inbound ACK. This isn't
1088+ * quite the same behavior as per-packet retry timers, but is close in
1089+ * practice (it starts retries one timer period after the connection
10621090 * "got stuck") and avoids the need to track per-packet timers or
10631091 * sent times.
10641092 */
1065- if (valid_ack && net_tcp_get_state ( tcp ) == NET_TCP_ESTABLISHED ) {
1093+ if (valid_ack ) {
10661094 restart_timer (ctx -> tcp );
10671095 }
10681096
@@ -1452,6 +1480,8 @@ static void queue_fin(struct net_context *ctx)
14521480 return ;
14531481 }
14541482
1483+ net_tcp_queue_pkt (ctx , pkt );
1484+
14551485 ret = net_tcp_send_pkt (pkt );
14561486 if (ret < 0 ) {
14571487 net_pkt_unref (pkt );
0 commit comments