@@ -798,16 +798,17 @@ struct sctp_flush_ctx {
 	struct sctp_transport *transport;
 	/* These transports have chunks to send. */
 	struct list_head transport_list;
+	struct sctp_association *asoc;
+	/* Packet on the current transport above */
+	struct sctp_packet *packet;
 	gfp_t gfp;
 };
 
 /* transport: current transport */
-static bool sctp_outq_select_transport(struct sctp_flush_ctx *ctx,
+static void sctp_outq_select_transport(struct sctp_flush_ctx *ctx,
 				       struct sctp_chunk *chunk)
 {
 	struct sctp_transport *new_transport = chunk->transport;
-	struct sctp_association *asoc = ctx->q->asoc;
-	bool changed = false;
 
 	if (!new_transport) {
 		if (!sctp_chunk_is_data(chunk)) {
@@ -825,15 +826,15 @@ static bool sctp_outq_select_transport(struct sctp_flush_ctx *ctx,
 							&ctx->transport->ipaddr))
 				new_transport = ctx->transport;
 			else
-				new_transport = sctp_assoc_lookup_paddr(asoc,
+				new_transport = sctp_assoc_lookup_paddr(ctx->asoc,
 								&chunk->dest);
 		}
 
 		/* if we still don't have a new transport, then
 		 * use the current active path.
 		 */
 		if (!new_transport)
-			new_transport = asoc->peer.active_path;
+			new_transport = ctx->asoc->peer.active_path;
 	} else {
 		__u8 type;
 
@@ -858,7 +859,7 @@ static bool sctp_outq_select_transport(struct sctp_flush_ctx *ctx,
 			if (type != SCTP_CID_HEARTBEAT &&
 			    type != SCTP_CID_HEARTBEAT_ACK &&
 			    type != SCTP_CID_ASCONF_ACK)
-				new_transport = asoc->peer.active_path;
+				new_transport = ctx->asoc->peer.active_path;
 			break;
 		default:
 			break;
@@ -867,27 +868,25 @@ static bool sctp_outq_select_transport(struct sctp_flush_ctx *ctx,
 
 	/* Are we switching transports? Take care of transport locks. */
 	if (new_transport != ctx->transport) {
-		changed = true;
 		ctx->transport = new_transport;
+		ctx->packet = &ctx->transport->packet;
+
 		if (list_empty(&ctx->transport->send_ready))
 			list_add_tail(&ctx->transport->send_ready,
 				      &ctx->transport_list);
 
-		sctp_packet_config(&ctx->transport->packet, asoc->peer.i.init_tag,
-				   asoc->peer.ecn_capable);
+		sctp_packet_config(ctx->packet,
+				   ctx->asoc->peer.i.init_tag,
+				   ctx->asoc->peer.ecn_capable);
 		/* We've switched transports, so apply the
 		 * Burst limit to the new transport.
 		 */
 		sctp_transport_burst_limited(ctx->transport);
 	}
-
-	return changed;
 }
 
 static void sctp_outq_flush_ctrl(struct sctp_flush_ctx *ctx)
 {
-	struct sctp_association *asoc = ctx->q->asoc;
-	struct sctp_packet *packet = NULL;
 	struct sctp_chunk *chunk, *tmp;
 	enum sctp_xmit status;
 	int one_packet, error;
@@ -901,7 +900,7 @@ static void sctp_outq_flush_ctrl(struct sctp_flush_ctx *ctx)
 		 * NOT use the new IP address as a source for ANY SCTP
 		 * packet except on carrying an ASCONF Chunk.
 		 */
-		if (asoc->src_out_of_asoc_ok &&
+		if (ctx->asoc->src_out_of_asoc_ok &&
 		    chunk->chunk_hdr->type != SCTP_CID_ASCONF)
 			continue;
 
@@ -910,8 +909,7 @@ static void sctp_outq_flush_ctrl(struct sctp_flush_ctx *ctx)
 		/* Pick the right transport to use. Should always be true for
 		 * the first chunk as we don't have a transport by then.
 		 */
-		if (sctp_outq_select_transport(ctx, chunk))
-			packet = &ctx->transport->packet;
+		sctp_outq_select_transport(ctx, chunk);
 
 		switch (chunk->chunk_hdr->type) {
 		/*
@@ -926,14 +924,14 @@ static void sctp_outq_flush_ctrl(struct sctp_flush_ctx *ctx)
 			error = sctp_packet_singleton(ctx->transport, chunk,
 						      ctx->gfp);
 			if (error < 0) {
-				asoc->base.sk->sk_err = -error;
+				ctx->asoc->base.sk->sk_err = -error;
 				return;
 			}
 			break;
 
 		case SCTP_CID_ABORT:
 			if (sctp_test_T_bit(chunk))
-				packet->vtag = asoc->c.my_vtag;
+				ctx->packet->vtag = ctx->asoc->c.my_vtag;
 			/* fallthru */
 
 		/* The following chunks are "response" chunks, i.e.
@@ -959,15 +957,15 @@ static void sctp_outq_flush_ctrl(struct sctp_flush_ctx *ctx)
 		case SCTP_CID_FWD_TSN:
 		case SCTP_CID_I_FWD_TSN:
 		case SCTP_CID_RECONF:
-			status = sctp_packet_transmit_chunk(packet, chunk,
+			status = sctp_packet_transmit_chunk(ctx->packet, chunk,
 							    one_packet, ctx->gfp);
 			if (status != SCTP_XMIT_OK) {
 				/* put the chunk back */
 				list_add(&chunk->list, &ctx->q->control_chunk_list);
 				break;
 			}
 
-			asoc->stats.octrlchunks++;
+			ctx->asoc->stats.octrlchunks++;
 			/* PR-SCTP C5) If a FORWARD TSN is sent, the
 			 * sender MUST assure that at least one T3-rtx
 			 * timer is running.
@@ -978,7 +976,7 @@ static void sctp_outq_flush_ctrl(struct sctp_flush_ctx *ctx)
 				ctx->transport->last_time_sent = jiffies;
 			}
 
-			if (chunk == asoc->strreset_chunk)
+			if (chunk == ctx->asoc->strreset_chunk)
 				sctp_transport_reset_reconf_timer(ctx->transport);
 
 			break;
@@ -994,31 +992,28 @@ static void sctp_outq_flush_ctrl(struct sctp_flush_ctx *ctx)
 static bool sctp_outq_flush_rtx(struct sctp_flush_ctx *ctx,
 				int rtx_timeout)
 {
-	struct sctp_packet *packet = ctx->transport ? &ctx->transport->packet :
-				     NULL;
-	struct sctp_association *asoc = ctx->q->asoc;
 	int error, start_timer = 0;
 
-	if (asoc->peer.retran_path->state == SCTP_UNCONFIRMED)
+	if (ctx->asoc->peer.retran_path->state == SCTP_UNCONFIRMED)
 		return false;
 
-	if (ctx->transport != asoc->peer.retran_path) {
+	if (ctx->transport != ctx->asoc->peer.retran_path) {
 		/* Switch transports & prepare the packet. */
-		ctx->transport = asoc->peer.retran_path;
+		ctx->transport = ctx->asoc->peer.retran_path;
+		ctx->packet = &ctx->transport->packet;
 
 		if (list_empty(&ctx->transport->send_ready))
 			list_add_tail(&ctx->transport->send_ready,
 				      &ctx->transport_list);
 
-		packet = &ctx->transport->packet;
-		sctp_packet_config(packet, asoc->peer.i.init_tag,
-				   asoc->peer.ecn_capable);
+		sctp_packet_config(ctx->packet, ctx->asoc->peer.i.init_tag,
+				   ctx->asoc->peer.ecn_capable);
 	}
 
-	error = __sctp_outq_flush_rtx(ctx->q, packet, rtx_timeout, &start_timer,
-				      ctx->gfp);
+	error = __sctp_outq_flush_rtx(ctx->q, ctx->packet, rtx_timeout,
				      &start_timer, ctx->gfp);
 	if (error < 0)
-		asoc->base.sk->sk_err = -error;
+		ctx->asoc->base.sk->sk_err = -error;
 
 	if (start_timer) {
 		sctp_transport_reset_t3_rtx(ctx->transport);
@@ -1028,7 +1023,7 @@ static bool sctp_outq_flush_rtx(struct sctp_flush_ctx *ctx,
 	/* This can happen on COOKIE-ECHO resend. Only
 	 * one chunk can get bundled with a COOKIE-ECHO.
 	 */
-	if (packet->has_cookie_echo)
+	if (ctx->packet->has_cookie_echo)
 		return false;
 
 	/* Don't send new data if there is still data
@@ -1043,19 +1038,16 @@ static bool sctp_outq_flush_rtx(struct sctp_flush_ctx *ctx,
 static void sctp_outq_flush_data(struct sctp_flush_ctx *ctx,
 				 int rtx_timeout)
 {
-	struct sctp_packet *packet = ctx->transport ? &ctx->transport->packet :
-				     NULL;
-	struct sctp_association *asoc = ctx->q->asoc;
 	struct sctp_chunk *chunk;
 	enum sctp_xmit status;
 
 	/* Is it OK to send data chunks? */
-	switch (asoc->state) {
+	switch (ctx->asoc->state) {
 	case SCTP_STATE_COOKIE_ECHOED:
 		/* Only allow bundling when this packet has a COOKIE-ECHO
 		 * chunk.
 		 */
-		if (!packet || !packet->has_cookie_echo)
+		if (!ctx->packet || !ctx->packet->has_cookie_echo)
 			return;
 
 		/* fallthru */
@@ -1078,12 +1070,9 @@ static void sctp_outq_flush_data(struct sctp_flush_ctx *ctx,
 	 * are marked for retransmission (limited by the
 	 * current cwnd).
 	 */
-	if (!list_empty(&ctx->q->retransmit)) {
-		if (!sctp_outq_flush_rtx(ctx, rtx_timeout))
-			return;
-		/* We may have switched current transport */
-		packet = &ctx->transport->packet;
-	}
+	if (!list_empty(&ctx->q->retransmit) &&
+	    !sctp_outq_flush_rtx(ctx, rtx_timeout))
+		return;
 
 	/* Apply Max.Burst limitation to the current transport in
 	 * case it will be used for new data. We are going to
@@ -1105,13 +1094,12 @@ static void sctp_outq_flush_data(struct sctp_flush_ctx *ctx,
 			continue;
 		}
 
-		if (asoc->stream.out[sid].state == SCTP_STREAM_CLOSED) {
+		if (ctx->asoc->stream.out[sid].state == SCTP_STREAM_CLOSED) {
 			sctp_outq_head_data(ctx->q, chunk);
 			break;
 		}
 
-		if (sctp_outq_select_transport(ctx, chunk))
-			packet = &ctx->transport->packet;
+		sctp_outq_select_transport(ctx, chunk);
 
 		pr_debug("%s: outq:%p, chunk:%p[%s], tx-tsn:0x%x skb->head:%p "
 			 "skb->users:%d\n",
@@ -1122,7 +1110,8 @@ static void sctp_outq_flush_data(struct sctp_flush_ctx *ctx,
 			 refcount_read(&chunk->skb->users) : -1);
 
 		/* Add the chunk to the packet. */
-		status = sctp_packet_transmit_chunk(packet, chunk, 0, ctx->gfp);
+		status = sctp_packet_transmit_chunk(ctx->packet, chunk, 0,
+						    ctx->gfp);
 		if (status != SCTP_XMIT_OK) {
 			/* We could not append this chunk, so put
 			 * the chunk back on the output queue.
@@ -1139,12 +1128,12 @@ static void sctp_outq_flush_data(struct sctp_flush_ctx *ctx,
 		 * The sender MAY set the I-bit in the DATA
 		 * chunk header.
 		 */
-		if (asoc->state == SCTP_STATE_SHUTDOWN_PENDING)
+		if (ctx->asoc->state == SCTP_STATE_SHUTDOWN_PENDING)
 			chunk->chunk_hdr->flags |= SCTP_DATA_SACK_IMM;
 		if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
-			asoc->stats.ouodchunks++;
+			ctx->asoc->stats.ouodchunks++;
 		else
-			asoc->stats.oodchunks++;
+			ctx->asoc->stats.oodchunks++;
 
 		/* Only now it's safe to consider this
 		 * chunk as sent, sched-wise.
@@ -1160,7 +1149,7 @@ static void sctp_outq_flush_data(struct sctp_flush_ctx *ctx,
 		/* Only let one DATA chunk get bundled with a
 		 * COOKIE-ECHO chunk.
 		 */
-		if (packet->has_cookie_echo)
+		if (ctx->packet->has_cookie_echo)
 			break;
 	}
 }
@@ -1202,6 +1191,8 @@ static void sctp_outq_flush(struct sctp_outq *q, int rtx_timeout, gfp_t gfp)
 		.q = q,
 		.transport = NULL,
 		.transport_list = LIST_HEAD_INIT(ctx.transport_list),
+		.asoc = q->asoc,
+		.packet = NULL,
 		.gfp = gfp,
 	};
 
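What this change does: struct sctp_flush_ctx now carries the association (asoc) and the packet of the current transport (packet), so sctp_outq_select_transport() keeps ctx->packet up to date itself instead of returning a "changed" flag that every caller had to turn into "packet = &ctx->transport->packet;". In this diff, ctx->packet is written in exactly three places: sctp_outq_select_transport(), the retransmit-path switch in sctp_outq_flush_rtx(), and the initializer in sctp_outq_flush().

Below is a minimal, self-contained C sketch of the same pattern outside the kernel tree. The names (struct transport, struct flush_ctx, pick_transport) are hypothetical stand-ins for illustration, not kernel APIs:

#include <stdio.h>

/* Stand-in for struct sctp_transport: each transport owns its packet. */
struct transport {
	int packet;
};

/* Stand-in for struct sctp_flush_ctx after this commit: the context
 * caches a pointer to the current transport's packet.
 */
struct flush_ctx {
	struct transport *transport;
	int *packet;
};

/* Like the new sctp_outq_select_transport(): returns void and keeps
 * ctx->packet in sync whenever the transport switches, so callers
 * cannot forget to refresh a local packet pointer.
 */
static void pick_transport(struct flush_ctx *ctx, struct transport *t)
{
	if (t != ctx->transport) {
		ctx->transport = t;
		ctx->packet = &t->packet;
	}
}

int main(void)
{
	struct transport a = { .packet = 1 }, b = { .packet = 2 };
	struct flush_ctx ctx = { .transport = NULL, .packet = NULL };

	pick_transport(&ctx, &a);
	pick_transport(&ctx, &b);

	/* ctx.packet already follows the switch to b; no caller-side
	 * "packet = &ctx->transport->packet;" is needed.
	 */
	printf("current packet: %d\n", *ctx.packet);
	return 0;
}

The payoff is visible in sctp_outq_flush_ctrl() and sctp_outq_flush_data() above: the local packet variables and their re-derivation after every transport switch disappear, and the retransmit check in sctp_outq_flush_data() collapses from a block into a single condition.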