/*
 * NET		Generic infrastructure for Network protocols.
 *
 * Authors:	Arnaldo Carvalho de Melo <acme@conectiva.com.br>
 *
 *		From code originally in include/net/tcp.h
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/tcp.h>
#include <linux/vmalloc.h>

#include <net/request_sock.h>

/*
 * Maximum number of SYN_RECV sockets in queue per LISTEN socket.
 * One SYN_RECV socket costs about 80 bytes on a 32-bit machine.
 * It would be better to replace it with a global counter for all sockets,
 * but then some measure against one socket starving all other sockets
 * would be needed.
 *
 * The minimum value of it is 128. Experiments with real servers show that
 * it is absolutely not enough even at 100 conn/sec. 256 cures most
 * of the problems.
 * This value is adjusted to 128 for low-memory machines,
 * and it will increase in proportion to the memory of the machine.
 * Note: don't forget somaxconn, which may limit the backlog too.
 */
int sysctl_max_syn_backlog = 256;
EXPORT_SYMBOL(sysctl_max_syn_backlog);

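/*
 * Worked example (illustrative only; the numbers follow from the clamping in
 * reqsk_queue_alloc() below): with the default of 256, a listen() backlog of
 * 1024 (assuming sysctl_somaxconn has been raised at least that high, since
 * the listen() system call caps the backlog to it first) is clamped to 256
 * here, and the SYN hash table is then sized to
 * roundup_pow_of_two(256 + 1) = 512 slots.
 */
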
int reqsk_queue_alloc(struct request_sock_queue *queue,
		      unsigned int nr_table_entries)
{
	size_t lopt_size = sizeof(struct listen_sock);
	struct listen_sock *lopt;

	nr_table_entries = min_t(u32, nr_table_entries, sysctl_max_syn_backlog);
	nr_table_entries = max_t(u32, nr_table_entries, 8);
	nr_table_entries = roundup_pow_of_two(nr_table_entries + 1);
	lopt_size += nr_table_entries * sizeof(struct request_sock *);
	if (lopt_size > PAGE_SIZE)
		lopt = vzalloc(lopt_size);
	else
		lopt = kzalloc(lopt_size, GFP_KERNEL);
	if (lopt == NULL)
		return -ENOMEM;

	for (lopt->max_qlen_log = 3;
	     (1 << lopt->max_qlen_log) < nr_table_entries;
	     lopt->max_qlen_log++);
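	/*
	 * After this loop 2^max_qlen_log >= nr_table_entries; the log lets
	 * reqsk_queue_is_full() (include/net/request_sock.h) test the
	 * pending-request count with a single right shift.
	 */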
	get_random_bytes(&lopt->hash_rnd, sizeof(lopt->hash_rnd));
	rwlock_init(&queue->syn_wait_lock);
	queue->rskq_accept_head = NULL;
	lopt->nr_table_entries = nr_table_entries;

	write_lock_bh(&queue->syn_wait_lock);
	queue->listen_opt = lopt;
	write_unlock_bh(&queue->syn_wait_lock);

	return 0;
}

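/*
 * Usage sketch (illustrative only; example_listen_init() is hypothetical and
 * not part of this file): a stream listener allocates its SYN queue with the
 * backlog it was given by listen(). In the real TCP path this call is made
 * from inet_csk_listen_start().
 */
#if 0
static int example_listen_init(struct request_sock_queue *queue, int backlog)
{
	/* listen() has normally already capped backlog to sysctl_somaxconn;
	 * reqsk_queue_alloc() further clamps it to sysctl_max_syn_backlog.
	 */
	return reqsk_queue_alloc(queue, backlog);
}
#endif
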
void __reqsk_queue_destroy(struct request_sock_queue *queue)
{
	struct listen_sock *lopt;
	size_t lopt_size;

	/*
	 * This is an error recovery path only;
	 * no locking is needed and lopt is never NULL here.
	 */
	lopt = queue->listen_opt;
	lopt_size = sizeof(struct listen_sock) +
		lopt->nr_table_entries * sizeof(struct request_sock *);

	if (lopt_size > PAGE_SIZE)
		vfree(lopt);
	else
		kfree(lopt);
}

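/*
 * Detach listen_opt from the queue under the syn_wait_lock write lock so that
 * concurrent readers, which take syn_wait_lock for reading, see either the old
 * table or NULL and never a half-torn-down one. The caller becomes the sole
 * owner of the returned listen_sock.
 */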
static inline struct listen_sock *reqsk_queue_yank_listen_sk(
		struct request_sock_queue *queue)
{
	struct listen_sock *lopt;

	write_lock_bh(&queue->syn_wait_lock);
	lopt = queue->listen_opt;
	queue->listen_opt = NULL;
	write_unlock_bh(&queue->syn_wait_lock);

	return lopt;
}

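/*
 * reqsk_queue_destroy() frees the listen_sock together with any request_socks
 * still sitting in the SYN table, i.e. connections that never completed the
 * 3WHS. Requests already moved to the accept queue (rskq_accept_head) are not
 * touched here; they are reaped separately, e.g. by inet_csk_listen_stop().
 */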
void reqsk_queue_destroy(struct request_sock_queue *queue)
{
	/* Make all the listen_opt local to us. */
	struct listen_sock *lopt = reqsk_queue_yank_listen_sk(queue);
	size_t lopt_size = sizeof(struct listen_sock) +
		lopt->nr_table_entries * sizeof(struct request_sock *);

	if (lopt->qlen != 0) {
		unsigned int i;

		for (i = 0; i < lopt->nr_table_entries; i++) {
			struct request_sock *req;

			while ((req = lopt->syn_table[i]) != NULL) {
				lopt->syn_table[i] = req->dl_next;
				lopt->qlen--;
				reqsk_free(req);
			}
		}
	}

	WARN_ON(lopt->qlen != 0);
	if (lopt_size > PAGE_SIZE)
		vfree(lopt);
	else
		kfree(lopt);
}

/*
 * This function is called to set a Fast Open socket's "fastopen_rsk" field
 * to NULL when a TFO socket no longer needs to access the request_sock.
 * This happens only after the 3WHS has been either completed or aborted
 * (e.g., an RST is received).
 *
 * Before TFO, a child socket is created only after the 3WHS is completed,
 * hence it never needs to access the request_sock. Things get a lot more
 * complex with TFO. A child socket, accepted or not, has to access its
 * request_sock for 3WHS processing, e.g., to retransmit SYN-ACK pkts,
 * until the 3WHS is either completed or aborted. Afterwards the req will
 * stay until either the child socket is accepted, or in the rare case when
 * the listener is closed before the child is accepted.
 *
 * In short, a request socket is only freed after BOTH the 3WHS has completed
 * (or aborted) and the child socket has been accepted (or the listener
 * closed). When a child socket is accepted, its corresponding req->sk is set
 * to NULL since it's no longer needed. More importantly, "req->sk == NULL"
 * is used by the code below to determine whether a child socket has been
 * accepted or not, and the check is protected by the fastopenq->lock
 * described below.
 *
 * Note that fastopen_rsk is only accessed from the child socket's context
 * with its socket lock held. But a request_sock (req) can be accessed by
 * both its child socket through fastopen_rsk, and a listener socket through
 * icsk_accept_queue.rskq_accept_head. To protect the access, a simple spin
 * lock per listener, "icsk->icsk_accept_queue.fastopenq->lock", is used.
 * Only in the rare case when both the listener's and the child's locks are
 * held, e.g., in inet_csk_listen_stop(), do we not need to acquire the lock.
 * The lock also protects other fields such as fastopenq->qlen, which is
 * decremented by this function when fastopen_rsk is no longer needed.
 *
 * Note that another solution was to simply use the existing socket lock
 * from the listener. But the socket lock is difficult to use: it is not
 * a simple spin lock - one must consider sock_owned_by_user() and arrange
 * to use the sk_add_backlog() machinery. What really makes it infeasible,
 * though, is the locking-hierarchy violation: e.g., inet_csk_listen_stop()
 * may try to acquire a child's lock while holding the listener's socket
 * lock. A corner case might also exist in tcp_v4_hnd_req() that would
 * trigger this locking order.
 *
 * When a TFO req is created, it needs to sock_hold its listener to prevent
 * the latter data structure from going away.
 *
 * This function also sets "treq->listener" to NULL and unreferences the
 * listener socket. treq->listener is used by the listener, so it is
 * protected by the fastopenq->lock in this function.
 */
void reqsk_fastopen_remove(struct sock *sk, struct request_sock *req,
			   bool reset)
{
	struct sock *lsk = tcp_rsk(req)->listener;
	struct fastopen_queue *fastopenq =
	    inet_csk(lsk)->icsk_accept_queue.fastopenq;

	BUG_ON(!spin_is_locked(&sk->sk_lock.slock) && !sock_owned_by_user(sk));

	tcp_sk(sk)->fastopen_rsk = NULL;
	spin_lock_bh(&fastopenq->lock);
	fastopenq->qlen--;
	tcp_rsk(req)->listener = NULL;
	if (req->sk)	/* the child socket hasn't been accepted yet */
		goto out;

	if (!reset || lsk->sk_state != TCP_LISTEN) {
		/* If the listener has been closed, don't bother with the
		 * special RST handling below.
		 */
		spin_unlock_bh(&fastopenq->lock);
		sock_put(lsk);
		reqsk_free(req);
		return;
	}
	/* Wait for 60 seconds before removing a req that has triggered RST.
	 * This is a simple defense against a TFO spoofing attack - by
	 * counting the req against fastopen.max_qlen, and disabling
	 * TFO when the qlen exceeds max_qlen.
	 *
	 * For more details see the CoNEXT'11 "TCP Fast Open" paper.
	 */
	req->expires = jiffies + 60*HZ;
	if (fastopenq->rskq_rst_head == NULL)
		fastopenq->rskq_rst_head = req;
	else
		fastopenq->rskq_rst_tail->dl_next = req;

	req->dl_next = NULL;
	fastopenq->rskq_rst_tail = req;
	fastopenq->qlen++;
out:
	spin_unlock_bh(&fastopenq->lock);
	sock_put(lsk);
}

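/*
 * Calling sketch (illustrative only; example_child_done() is hypothetical and
 * not part of this file): the child socket's code calls
 * reqsk_fastopen_remove() with the child's lock held once the outcome of the
 * 3WHS is known, passing reset == true only when the handshake was aborted by
 * an incoming RST.
 */
#if 0
static void example_child_done(struct sock *child, bool aborted_by_rst)
{
	struct request_sock *req = tcp_sk(child)->fastopen_rsk;

	if (req != NULL)	/* still a TFO child tied to its request_sock */
		reqsk_fastopen_remove(child, req, aborted_by_rst);
}
#endif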