
Commit 1fb6f15

Octavian Purdila authored and davem330 committed
tcp: add tcp_conn_request
Create tcp_conn_request and remove most of the code from
tcp_v4_conn_request and tcp_v6_conn_request.

Signed-off-by: Octavian Purdila <[email protected]>
Signed-off-by: David S. Miller <[email protected]>
1 parent 695da14 commit 1fb6f15
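
The consolidation works by having each address-family conn_request handler hand its request_sock_ops and tcp_request_sock_ops tables to the shared tcp_conn_request(). The tcp_v4_conn_request hunk below shows exactly that for IPv4. The tcp_ipv6.c hunk is not part of this excerpt, so the following is only an illustrative sketch of the equivalent IPv6 delegation; the tcp6_request_sock_ops and tcp_request_sock_ipv6_ops names are assumptions here, not lines taken from this diff.

/* Sketch only: IPv6 counterpart of the IPv4 delegation shown below.
 * The ops table names are assumed; the actual tcp_ipv6.c change is
 * not included in this excerpt.
 */
static int tcp_v6_conn_request(struct sock *sk, struct sk_buff *skb)
{
        if (skb->protocol == htons(ETH_P_IP))
                return tcp_v4_conn_request(sk, skb);

        if (!ipv6_unicast_destination(skb))
                goto drop;

        return tcp_conn_request(&tcp6_request_sock_ops,
                                &tcp_request_sock_ipv6_ops, sk, skb);

drop:
        NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
        return 0; /* don't send reset */
}

Either way, the protocol-independent SYN handling lives in one place and the per-family handlers keep only their truly address-specific checks (broadcast/multicast for IPv4, unicast destination for IPv6).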

File tree

4 files changed: +155 -244 lines changed

include/net/tcp.h

Lines changed: 3 additions & 0 deletions
@@ -1574,6 +1574,9 @@ void tcp4_proc_exit(void);
 #endif
 
 int tcp_rtx_synack(struct sock *sk, struct request_sock *req);
+int tcp_conn_request(struct request_sock_ops *rsk_ops,
+                     const struct tcp_request_sock_ops *af_ops,
+                     struct sock *sk, struct sk_buff *skb);
 
 /* TCP af-specific functions */
 struct tcp_sock_af_ops {

net/ipv4/tcp_input.c

Lines changed: 148 additions & 0 deletions
@@ -5877,3 +5877,151 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
         return 0;
 }
 EXPORT_SYMBOL(tcp_rcv_state_process);
+
+static inline void pr_drop_req(struct request_sock *req, __u16 port, int family)
+{
+        struct inet_request_sock *ireq = inet_rsk(req);
+
+        if (family == AF_INET)
+                LIMIT_NETDEBUG(KERN_DEBUG pr_fmt("drop open request from %pI4/%u\n"),
+                               &ireq->ir_rmt_addr, port);
+        else
+                LIMIT_NETDEBUG(KERN_DEBUG pr_fmt("drop open request from %pI6/%u\n"),
+                               &ireq->ir_v6_rmt_addr, port);
+}
+
+int tcp_conn_request(struct request_sock_ops *rsk_ops,
+                     const struct tcp_request_sock_ops *af_ops,
+                     struct sock *sk, struct sk_buff *skb)
+{
+        struct tcp_options_received tmp_opt;
+        struct request_sock *req;
+        struct tcp_sock *tp = tcp_sk(sk);
+        struct dst_entry *dst = NULL;
+        __u32 isn = TCP_SKB_CB(skb)->when;
+        bool want_cookie = false, fastopen;
+        struct flowi fl;
+        struct tcp_fastopen_cookie foc = { .len = -1 };
+        int err;
+
+
+        /* TW buckets are converted to open requests without
+         * limitations, they conserve resources and peer is
+         * evidently real one.
+         */
+        if ((sysctl_tcp_syncookies == 2 ||
+             inet_csk_reqsk_queue_is_full(sk)) && !isn) {
+                want_cookie = tcp_syn_flood_action(sk, skb, rsk_ops->slab_name);
+                if (!want_cookie)
+                        goto drop;
+        }
+
+
+        /* Accept backlog is full. If we have already queued enough
+         * of warm entries in syn queue, drop request. It is better than
+         * clogging syn queue with openreqs with exponentially increasing
+         * timeout.
+         */
+        if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1) {
+                NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
+                goto drop;
+        }
+
+        req = inet_reqsk_alloc(rsk_ops);
+        if (!req)
+                goto drop;
+
+        tcp_rsk(req)->af_specific = af_ops;
+
+        tcp_clear_options(&tmp_opt);
+        tmp_opt.mss_clamp = af_ops->mss_clamp;
+        tmp_opt.user_mss = tp->rx_opt.user_mss;
+        tcp_parse_options(skb, &tmp_opt, 0, want_cookie ? NULL : &foc);
+
+        if (want_cookie && !tmp_opt.saw_tstamp)
+                tcp_clear_options(&tmp_opt);
+
+        tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
+        tcp_openreq_init(req, &tmp_opt, skb, sk);
+
+        af_ops->init_req(req, sk, skb);
+
+        if (security_inet_conn_request(sk, skb, req))
+                goto drop_and_free;
+
+        if (!want_cookie || tmp_opt.tstamp_ok)
+                TCP_ECN_create_request(req, skb, sock_net(sk));
+
+        if (want_cookie) {
+                isn = cookie_init_sequence(af_ops, sk, skb, &req->mss);
+                req->cookie_ts = tmp_opt.tstamp_ok;
+        } else if (!isn) {
+                /* VJ's idea. We save last timestamp seen
+                 * from the destination in peer table, when entering
+                 * state TIME-WAIT, and check against it before
+                 * accepting new connection request.
+                 *
+                 * If "isn" is not zero, this request hit alive
+                 * timewait bucket, so that all the necessary checks
+                 * are made in the function processing timewait state.
+                 */
+                if (tmp_opt.saw_tstamp && tcp_death_row.sysctl_tw_recycle) {
+                        bool strict;
+
+                        dst = af_ops->route_req(sk, &fl, req, &strict);
+                        if (dst && strict &&
+                            !tcp_peer_is_proven(req, dst, true)) {
+                                NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED);
+                                goto drop_and_release;
+                        }
+                }
+                /* Kill the following clause, if you dislike this way. */
+                else if (!sysctl_tcp_syncookies &&
+                         (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
+                          (sysctl_max_syn_backlog >> 2)) &&
+                         !tcp_peer_is_proven(req, dst, false)) {
+                        /* Without syncookies last quarter of
+                         * backlog is filled with destinations,
+                         * proven to be alive.
+                         * It means that we continue to communicate
+                         * to destinations, already remembered
+                         * to the moment of synflood.
+                         */
+                        pr_drop_req(req, ntohs(tcp_hdr(skb)->source),
+                                    rsk_ops->family);
+                        goto drop_and_release;
+                }
+
+                isn = af_ops->init_seq(skb);
+        }
+        if (!dst) {
+                dst = af_ops->route_req(sk, &fl, req, NULL);
+                if (!dst)
+                        goto drop_and_free;
+        }
+
+        tcp_rsk(req)->snt_isn = isn;
+        tcp_openreq_init_rwin(req, sk, dst);
+        fastopen = !want_cookie &&
+                   tcp_try_fastopen(sk, skb, req, &foc, dst);
+        err = af_ops->send_synack(sk, dst, &fl, req,
+                                  skb_get_queue_mapping(skb), &foc);
+        if (!fastopen) {
+                if (err || want_cookie)
+                        goto drop_and_free;
+
+                tcp_rsk(req)->listener = NULL;
+                af_ops->queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
+        }
+
+        return 0;
+
+drop_and_release:
+        dst_release(dst);
+drop_and_free:
+        reqsk_free(req);
+drop:
+        NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
+        return 0;
+}
+EXPORT_SYMBOL(tcp_conn_request);
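
tcp_conn_request() reaches the address-family code only through the callbacks invoked above: af_ops->init_req, af_ops->route_req, af_ops->init_seq, af_ops->send_synack and af_ops->queue_hash_add, plus af_ops->mss_clamp and the rsk_ops->slab_name / rsk_ops->family fields (cookie_init_sequence() also consumes af_ops, presumably via a syncookie hook). The struct definition itself is not part of this diff, so the following is only a rough sketch of the callback signatures implied by those call sites; field order and unrelated members (e.g. MD5 hooks) are omitted or assumed.

/* Approximate shape of the per-family ops consumed by tcp_conn_request(),
 * reconstructed from the call sites above; not the authoritative
 * definition from include/net/tcp.h.
 */
struct tcp_request_sock_ops {
        u16 mss_clamp;
        void (*init_req)(struct request_sock *req, struct sock *sk,
                         struct sk_buff *skb);
        struct dst_entry *(*route_req)(struct sock *sk, struct flowi *fl,
                                       const struct request_sock *req,
                                       bool *strict);
        __u32 (*init_seq)(const struct sk_buff *skb);
        int (*send_synack)(struct sock *sk, struct dst_entry *dst,
                           struct flowi *fl, struct request_sock *req,
                           u16 queue_mapping, struct tcp_fastopen_cookie *foc);
        void (*queue_hash_add)(struct sock *sk, struct request_sock *req,
                               const unsigned long timeout);
};

Note how the shared code keeps the flow key as a generic struct flowi and simply passes its address to route_req and send_synack; each family interprets it as its own flowi4/flowi6, which is what lets the IPv4-only "(struct flowi *)&fl4" cast below disappear.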

net/ipv4/tcp_ipv4.c

Lines changed: 2 additions & 126 deletions
@@ -1282,137 +1282,13 @@ static const struct tcp_request_sock_ops tcp_request_sock_ipv4_ops = {
 
 int tcp_v4_conn_request(struct sock *sk, struct sk_buff *skb)
 {
-        struct tcp_options_received tmp_opt;
-        struct request_sock *req;
-        struct tcp_sock *tp = tcp_sk(sk);
-        struct dst_entry *dst = NULL;
-        __be32 saddr = ip_hdr(skb)->saddr;
-        __u32 isn = TCP_SKB_CB(skb)->when;
-        bool want_cookie = false, fastopen;
-        struct flowi4 fl4;
-        struct tcp_fastopen_cookie foc = { .len = -1 };
-        const struct tcp_request_sock_ops *af_ops;
-        int err;
-
         /* Never answer to SYNs send to broadcast or multicast */
         if (skb_rtable(skb)->rt_flags & (RTCF_BROADCAST | RTCF_MULTICAST))
                 goto drop;
 
-        /* TW buckets are converted to open requests without
-         * limitations, they conserve resources and peer is
-         * evidently real one.
-         */
-        if ((sysctl_tcp_syncookies == 2 ||
-             inet_csk_reqsk_queue_is_full(sk)) && !isn) {
-                want_cookie = tcp_syn_flood_action(sk, skb, "TCP");
-                if (!want_cookie)
-                        goto drop;
-        }
-
-        /* Accept backlog is full. If we have already queued enough
-         * of warm entries in syn queue, drop request. It is better than
-         * clogging syn queue with openreqs with exponentially increasing
-         * timeout.
-         */
-        if (sk_acceptq_is_full(sk) && inet_csk_reqsk_queue_young(sk) > 1) {
-                NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENOVERFLOWS);
-                goto drop;
-        }
-
-        req = inet_reqsk_alloc(&tcp_request_sock_ops);
-        if (!req)
-                goto drop;
-
-        af_ops = tcp_rsk(req)->af_specific = &tcp_request_sock_ipv4_ops;
-
-        tcp_clear_options(&tmp_opt);
-        tmp_opt.mss_clamp = af_ops->mss_clamp;
-        tmp_opt.user_mss = tp->rx_opt.user_mss;
-        tcp_parse_options(skb, &tmp_opt, 0, want_cookie ? NULL : &foc);
-
-        if (want_cookie && !tmp_opt.saw_tstamp)
-                tcp_clear_options(&tmp_opt);
-
-        tmp_opt.tstamp_ok = tmp_opt.saw_tstamp;
-        tcp_openreq_init(req, &tmp_opt, skb, sk);
-
-        af_ops->init_req(req, sk, skb);
-
-        if (security_inet_conn_request(sk, skb, req))
-                goto drop_and_free;
+        return tcp_conn_request(&tcp_request_sock_ops,
+                                &tcp_request_sock_ipv4_ops, sk, skb);
 
-        if (!want_cookie || tmp_opt.tstamp_ok)
-                TCP_ECN_create_request(req, skb, sock_net(sk));
-
-        if (want_cookie) {
-                isn = cookie_init_sequence(af_ops, sk, skb, &req->mss);
-                req->cookie_ts = tmp_opt.tstamp_ok;
-        } else if (!isn) {
-                /* VJ's idea. We save last timestamp seen
-                 * from the destination in peer table, when entering
-                 * state TIME-WAIT, and check against it before
-                 * accepting new connection request.
-                 *
-                 * If "isn" is not zero, this request hit alive
-                 * timewait bucket, so that all the necessary checks
-                 * are made in the function processing timewait state.
-                 */
-                if (tmp_opt.saw_tstamp && tcp_death_row.sysctl_tw_recycle) {
-                        bool strict;
-
-                        dst = af_ops->route_req(sk, (struct flowi *)&fl4, req,
-                                                &strict);
-                        if (dst && strict &&
-                            !tcp_peer_is_proven(req, dst, true)) {
-                                NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_PAWSPASSIVEREJECTED);
-                                goto drop_and_release;
-                        }
-                }
-                /* Kill the following clause, if you dislike this way. */
-                else if (!sysctl_tcp_syncookies &&
-                         (sysctl_max_syn_backlog - inet_csk_reqsk_queue_len(sk) <
-                          (sysctl_max_syn_backlog >> 2)) &&
-                         !tcp_peer_is_proven(req, dst, false)) {
-                        /* Without syncookies last quarter of
-                         * backlog is filled with destinations,
-                         * proven to be alive.
-                         * It means that we continue to communicate
-                         * to destinations, already remembered
-                         * to the moment of synflood.
-                         */
-                        LIMIT_NETDEBUG(KERN_DEBUG pr_fmt("drop open request from %pI4/%u\n"),
-                                       &saddr, ntohs(tcp_hdr(skb)->source));
-                        goto drop_and_release;
-                }
-
-                isn = af_ops->init_seq(skb);
-        }
-        if (!dst) {
-                dst = af_ops->route_req(sk, (struct flowi *)&fl4, req, NULL);
-                if (!dst)
-                        goto drop_and_free;
-        }
-
-        tcp_rsk(req)->snt_isn = isn;
-        tcp_openreq_init_rwin(req, sk, dst);
-        fastopen = !want_cookie &&
-                   tcp_try_fastopen(sk, skb, req, &foc, dst);
-        err = af_ops->send_synack(sk, dst, NULL, req,
-                                  skb_get_queue_mapping(skb), &foc);
-        if (!fastopen) {
-                if (err || want_cookie)
-                        goto drop_and_free;
-
-                tcp_rsk(req)->listener = NULL;
-                af_ops->queue_hash_add(sk, req, TCP_TIMEOUT_INIT);
-        }
-
-        return 0;
-
-drop_and_release:
-        dst_release(dst);
-drop_and_free:
-        reqsk_free(req);
 drop:
         NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_LISTENDROPS);
         return 0;
