/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system. INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Support for INET connection oriented protocols.
 *
 * Authors:	See the TCP sources
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/jhash.h>

#include <net/inet_connection_sock.h>
#include <net/inet_hashtables.h>
#include <net/inet_timewait_sock.h>
#include <net/ip.h>
#include <net/route.h>
#include <net/tcp_states.h>
#include <net/xfrm.h>

#ifdef INET_CSK_DEBUG
const char inet_csk_timer_bug_msg[] = "inet_csk BUG: unknown timer value\n";
EXPORT_SYMBOL(inet_csk_timer_bug_msg);
#endif

/*
 * This array holds the first and last local port number.
 */
int sysctl_local_port_range[2] = { 32768, 61000 };

int inet_csk_bind_conflict(const struct sock *sk,
			   const struct inet_bind_bucket *tb)
{
	const __be32 sk_rcv_saddr = inet_rcv_saddr(sk);
	struct sock *sk2;
	struct hlist_node *node;
	int reuse = sk->sk_reuse;

	sk_for_each_bound(sk2, node, &tb->owners) {
		if (sk != sk2 &&
		    !inet_v6_ipv6only(sk2) &&
		    (!sk->sk_bound_dev_if ||
		     !sk2->sk_bound_dev_if ||
		     sk->sk_bound_dev_if == sk2->sk_bound_dev_if)) {
			if (!reuse || !sk2->sk_reuse ||
			    sk2->sk_state == TCP_LISTEN) {
				const __be32 sk2_rcv_saddr = inet_rcv_saddr(sk2);
				if (!sk2_rcv_saddr || !sk_rcv_saddr ||
				    sk2_rcv_saddr == sk_rcv_saddr)
					break;
			}
		}
	}
	return node != NULL;
}

EXPORT_SYMBOL_GPL(inet_csk_bind_conflict);

/* Obtain a reference to a local port for the given sock,
 * if snum is zero it means select any available local port.
 */
int inet_csk_get_port(struct inet_hashinfo *hashinfo,
		      struct sock *sk, unsigned short snum,
		      int (*bind_conflict)(const struct sock *sk,
					   const struct inet_bind_bucket *tb))
{
	struct inet_bind_hashbucket *head;
	struct hlist_node *node;
	struct inet_bind_bucket *tb;
	int ret;

	local_bh_disable();
	if (!snum) {
		int low = sysctl_local_port_range[0];
		int high = sysctl_local_port_range[1];
		int remaining = (high - low) + 1;
		int rover = net_random() % (high - low) + low;

		do {
			head = &hashinfo->bhash[inet_bhashfn(rover, hashinfo->bhash_size)];
			spin_lock(&head->lock);
			inet_bind_bucket_for_each(tb, node, &head->chain)
				if (tb->port == rover)
					goto next;
			break;
		next:
			spin_unlock(&head->lock);
			if (++rover > high)
				rover = low;
		} while (--remaining > 0);

		/* Exhausted local port range during search? It is not
		 * possible for us to be holding one of the bind hash
		 * locks if this test triggers, because if 'remaining'
		 * drops to zero, we broke out of the do/while loop at
		 * the top level, not from the 'break;' statement.
		 */
		ret = 1;
		if (remaining <= 0)
			goto fail;

		/* OK, here is the one we will use. HEAD is
		 * non-NULL and we hold its mutex.
		 */
		snum = rover;
	} else {
		head = &hashinfo->bhash[inet_bhashfn(snum, hashinfo->bhash_size)];
		spin_lock(&head->lock);
		inet_bind_bucket_for_each(tb, node, &head->chain)
			if (tb->port == snum)
				goto tb_found;
	}
	tb = NULL;
	goto tb_not_found;
tb_found:
	if (!hlist_empty(&tb->owners)) {
		if (sk->sk_reuse > 1)
			goto success;
		if (tb->fastreuse > 0 &&
		    sk->sk_reuse && sk->sk_state != TCP_LISTEN) {
			goto success;
		} else {
			ret = 1;
			if (bind_conflict(sk, tb))
				goto fail_unlock;
		}
	}
tb_not_found:
	ret = 1;
	if (!tb && (tb = inet_bind_bucket_create(hashinfo->bind_bucket_cachep, head, snum)) == NULL)
		goto fail_unlock;
	if (hlist_empty(&tb->owners)) {
		if (sk->sk_reuse && sk->sk_state != TCP_LISTEN)
			tb->fastreuse = 1;
		else
			tb->fastreuse = 0;
	} else if (tb->fastreuse &&
		   (!sk->sk_reuse || sk->sk_state == TCP_LISTEN))
		tb->fastreuse = 0;
success:
	if (!inet_csk(sk)->icsk_bind_hash)
		inet_bind_hash(sk, tb, snum);
	BUG_TRAP(inet_csk(sk)->icsk_bind_hash == tb);
	ret = 0;

fail_unlock:
	spin_unlock(&head->lock);
fail:
	local_bh_enable();
	return ret;
}

EXPORT_SYMBOL_GPL(inet_csk_get_port);

/*
 * Wait for an incoming connection, avoid race conditions. This must be called
 * with the socket locked.
 */
static int inet_csk_wait_for_connect(struct sock *sk, long timeo)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	DEFINE_WAIT(wait);
	int err;

	/*
	 * True wake-one mechanism for incoming connections: only
	 * one process gets woken up, not the 'whole herd'.
	 * Since we do not 'race & poll' for established sockets
	 * anymore, the common case will execute the loop only once.
	 *
	 * Subtle issue: "add_wait_queue_exclusive()" will be added
	 * after any current non-exclusive waiters, and we know that
	 * it will always _stay_ after any new non-exclusive waiters
	 * because all non-exclusive waiters are added at the
	 * beginning of the wait-queue. As such, it's ok to "drop"
	 * our exclusiveness temporarily when we get woken up without
	 * having to remove and re-insert us on the wait queue.
	 */
	for (;;) {
		prepare_to_wait_exclusive(sk->sk_sleep, &wait,
					  TASK_INTERRUPTIBLE);
		release_sock(sk);
		if (reqsk_queue_empty(&icsk->icsk_accept_queue))
			timeo = schedule_timeout(timeo);
		lock_sock(sk);
		err = 0;
		if (!reqsk_queue_empty(&icsk->icsk_accept_queue))
			break;
		err = -EINVAL;
		if (sk->sk_state != TCP_LISTEN)
			break;
		err = sock_intr_errno(timeo);
		if (signal_pending(current))
			break;
		err = -EAGAIN;
		if (!timeo)
			break;
	}
	finish_wait(sk->sk_sleep, &wait);
	return err;
}

/*
 * This will accept the next outstanding connection.
 */
struct sock *inet_csk_accept(struct sock *sk, int flags, int *err)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct sock *newsk;
	int error;

	lock_sock(sk);

	/* We need to make sure that this socket is listening,
	 * and that it has something pending.
	 */
	error = -EINVAL;
	if (sk->sk_state != TCP_LISTEN)
		goto out_err;

	/* Find already established connection */
	if (reqsk_queue_empty(&icsk->icsk_accept_queue)) {
		long timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);

		/* If this is a non blocking socket don't sleep */
		error = -EAGAIN;
		if (!timeo)
			goto out_err;

		error = inet_csk_wait_for_connect(sk, timeo);
		if (error)
			goto out_err;
	}

	newsk = reqsk_queue_get_child(&icsk->icsk_accept_queue, sk);
	BUG_TRAP(newsk->sk_state != TCP_SYN_RECV);
out:
	release_sock(sk);
	return newsk;
out_err:
	newsk = NULL;
	*err = error;
	goto out;
}

EXPORT_SYMBOL(inet_csk_accept);

/*
 * Using different timers for retransmit, delayed acks and probes
 * We may wish to use just one timer maintaining a list of expire jiffies
 * to optimize.
 */
void inet_csk_init_xmit_timers(struct sock *sk,
			       void (*retransmit_handler)(unsigned long),
			       void (*delack_handler)(unsigned long),
			       void (*keepalive_handler)(unsigned long))
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	init_timer(&icsk->icsk_retransmit_timer);
	init_timer(&icsk->icsk_delack_timer);
	init_timer(&sk->sk_timer);

	icsk->icsk_retransmit_timer.function = retransmit_handler;
	icsk->icsk_delack_timer.function = delack_handler;
	sk->sk_timer.function = keepalive_handler;

	icsk->icsk_retransmit_timer.data =
		icsk->icsk_delack_timer.data =
			sk->sk_timer.data = (unsigned long)sk;

	icsk->icsk_pending = icsk->icsk_ack.pending = 0;
}

EXPORT_SYMBOL(inet_csk_init_xmit_timers);

void inet_csk_clear_xmit_timers(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);

	icsk->icsk_pending = icsk->icsk_ack.pending = icsk->icsk_ack.blocked = 0;

	sk_stop_timer(sk, &icsk->icsk_retransmit_timer);
	sk_stop_timer(sk, &icsk->icsk_delack_timer);
	sk_stop_timer(sk, &sk->sk_timer);
}

EXPORT_SYMBOL(inet_csk_clear_xmit_timers);

void inet_csk_delete_keepalive_timer(struct sock *sk)
{
	sk_stop_timer(sk, &sk->sk_timer);
}

EXPORT_SYMBOL(inet_csk_delete_keepalive_timer);

void inet_csk_reset_keepalive_timer(struct sock *sk, unsigned long len)
{
	sk_reset_timer(sk, &sk->sk_timer, jiffies + len);
}

EXPORT_SYMBOL(inet_csk_reset_keepalive_timer);

struct dst_entry *inet_csk_route_req(struct sock *sk,
				     const struct request_sock *req)
{
	struct rtable *rt;
	const struct inet_request_sock *ireq = inet_rsk(req);
	struct ip_options *opt = inet_rsk(req)->opt;
	struct flowi fl = { .oif = sk->sk_bound_dev_if,
			    .nl_u = { .ip4_u =
				      { .daddr = ((opt && opt->srr) ?
						  opt->faddr :
						  ireq->rmt_addr),
					.saddr = ireq->loc_addr,
					.tos = RT_CONN_FLAGS(sk) } },
			    .proto = sk->sk_protocol,
			    .uli_u = { .ports =
				       { .sport = inet_sk(sk)->sport,
					 .dport = ireq->rmt_port } } };

	security_req_classify_flow(req, &fl);
	if (ip_route_output_flow(&rt, &fl, sk, 0)) {
		IP_INC_STATS_BH(IPSTATS_MIB_OUTNOROUTES);
		return NULL;
	}
	if (opt && opt->is_strictroute && rt->rt_dst != rt->rt_gateway) {
		ip_rt_put(rt);
		IP_INC_STATS_BH(IPSTATS_MIB_OUTNOROUTES);
		return NULL;
	}
	return &rt->u.dst;
}

EXPORT_SYMBOL_GPL(inet_csk_route_req);

static inline u32 inet_synq_hash(const __be32 raddr, const __be16 rport,
				 const u32 rnd, const u32 synq_hsize)
{
	return jhash_2words((__force u32)raddr, (__force u32)rport, rnd) & (synq_hsize - 1);
}

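/*
 * Note that masking with (synq_hsize - 1) assumes the syn-queue hash table
 * size is a power of two: e.g. with synq_hsize == 512 the mask is 0x1ff,
 * so any 32-bit jhash value maps onto one of the 512 buckets.
 */
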
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
#define AF_INET_FAMILY(fam) ((fam) == AF_INET)
#else
#define AF_INET_FAMILY(fam) 1
#endif

struct request_sock *inet_csk_search_req(const struct sock *sk,
					 struct request_sock ***prevp,
					 const __be16 rport, const __be32 raddr,
					 const __be32 laddr)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);
	struct listen_sock *lopt = icsk->icsk_accept_queue.listen_opt;
	struct request_sock *req, **prev;

	for (prev = &lopt->syn_table[inet_synq_hash(raddr, rport, lopt->hash_rnd,
						    lopt->nr_table_entries)];
	     (req = *prev) != NULL;
	     prev = &req->dl_next) {
		const struct inet_request_sock *ireq = inet_rsk(req);

		if (ireq->rmt_port == rport &&
		    ireq->rmt_addr == raddr &&
		    ireq->loc_addr == laddr &&
		    AF_INET_FAMILY(req->rsk_ops->family)) {
			BUG_TRAP(!req->sk);
			*prevp = prev;
			break;
		}
	}

	return req;
}

EXPORT_SYMBOL_GPL(inet_csk_search_req);

void inet_csk_reqsk_queue_hash_add(struct sock *sk, struct request_sock *req,
				   unsigned long timeout)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct listen_sock *lopt = icsk->icsk_accept_queue.listen_opt;
	const u32 h = inet_synq_hash(inet_rsk(req)->rmt_addr, inet_rsk(req)->rmt_port,
				     lopt->hash_rnd, lopt->nr_table_entries);

	reqsk_queue_hash_req(&icsk->icsk_accept_queue, h, req, timeout);
	inet_csk_reqsk_queue_added(sk, timeout);
}

/* Only thing we need from tcp.h */
extern int sysctl_tcp_synack_retries;

EXPORT_SYMBOL_GPL(inet_csk_reqsk_queue_hash_add);

void inet_csk_reqsk_queue_prune(struct sock *parent,
				const unsigned long interval,
				const unsigned long timeout,
				const unsigned long max_rto)
{
	struct inet_connection_sock *icsk = inet_csk(parent);
	struct request_sock_queue *queue = &icsk->icsk_accept_queue;
	struct listen_sock *lopt = queue->listen_opt;
	int max_retries = icsk->icsk_syn_retries ? : sysctl_tcp_synack_retries;
	int thresh = max_retries;
	unsigned long now = jiffies;
	struct request_sock **reqp, *req;
	int i, budget;

	if (lopt == NULL || lopt->qlen == 0)
		return;

	/* Normally all the openreqs are young and become mature
	 * (i.e. converted to established socket) for first timeout.
	 * If synack was not acknowledged for 3 seconds, it means
	 * one of the following things: synack was lost, ack was lost,
	 * rtt is high or nobody planned to ack (i.e. synflood).
	 * When server is a bit loaded, queue is populated with old
	 * open requests, reducing effective size of queue.
	 * When server is well loaded, queue size reduces to zero
	 * after several minutes of work. It is not synflood,
	 * it is normal operation. The solution is pruning
	 * too old entries overriding normal timeout, when
	 * situation becomes dangerous.
	 *
	 * Essentially, we reserve half of room for young
	 * embryos; and abort old ones without pity, if old
	 * ones are about to clog our table.
	 */
	if (lopt->qlen >> (lopt->max_qlen_log - 1)) {
		int young = (lopt->qlen_young << 1);

		while (thresh > 2) {
			if (lopt->qlen < young)
				break;
			thresh--;
			young <<= 1;
		}
	}

	if (queue->rskq_defer_accept)
		max_retries = queue->rskq_defer_accept;

	budget = 2 * (lopt->nr_table_entries / (timeout / interval));
	i = lopt->clock_hand;

	do {
		reqp = &lopt->syn_table[i];
		while ((req = *reqp) != NULL) {
			if (time_after_eq(now, req->expires)) {
				if ((req->retrans < thresh ||
				     (inet_rsk(req)->acked && req->retrans < max_retries))
				    && !req->rsk_ops->rtx_syn_ack(parent, req, NULL)) {
					unsigned long timeo;

					if (req->retrans++ == 0)
						lopt->qlen_young--;
					timeo = min((timeout << req->retrans), max_rto);
					req->expires = now + timeo;
					reqp = &req->dl_next;
					continue;
				}

				/* Drop this request */
				inet_csk_reqsk_queue_unlink(parent, req, reqp);
				reqsk_queue_removed(queue, req);
				reqsk_free(req);
				continue;
			}
			reqp = &req->dl_next;
		}

		i = (i + 1) & (lopt->nr_table_entries - 1);

	} while (--budget > 0);

	lopt->clock_hand = i;

	if (lopt->qlen)
		inet_csk_reset_keepalive_timer(parent, interval);
}

EXPORT_SYMBOL_GPL(inet_csk_reqsk_queue_prune);

struct sock *inet_csk_clone(struct sock *sk, const struct request_sock *req,
			    const gfp_t priority)
{
	struct sock *newsk = sk_clone(sk, priority);

	if (newsk != NULL) {
		struct inet_connection_sock *newicsk = inet_csk(newsk);

		newsk->sk_state = TCP_SYN_RECV;
		newicsk->icsk_bind_hash = NULL;

		inet_sk(newsk)->dport = inet_rsk(req)->rmt_port;
		newsk->sk_write_space = sk_stream_write_space;

		newicsk->icsk_retransmits = 0;
		newicsk->icsk_backoff = 0;
		newicsk->icsk_probes_out = 0;

		/* Deinitialize accept_queue to trap illegal accesses. */
		memset(&newicsk->icsk_accept_queue, 0, sizeof(newicsk->icsk_accept_queue));

		security_inet_csk_clone(newsk, req);
	}
	return newsk;
}

EXPORT_SYMBOL_GPL(inet_csk_clone);

/*
 * At this point, there should be no process reference to this
 * socket, and thus no user references at all. Therefore we
 * can assume the socket waitqueue is inactive and nobody will
 * try to jump onto it.
 */
void inet_csk_destroy_sock(struct sock *sk)
{
	BUG_TRAP(sk->sk_state == TCP_CLOSE);
	BUG_TRAP(sock_flag(sk, SOCK_DEAD));

	/* It cannot be in hash table! */
	BUG_TRAP(sk_unhashed(sk));

	/* If inet_sk(sk)->num is non-zero, it must be bound */
	BUG_TRAP(!inet_sk(sk)->num || inet_csk(sk)->icsk_bind_hash);

	sk->sk_prot->destroy(sk);

	sk_stream_kill_queues(sk);

	xfrm_sk_free_policy(sk);

	sk_refcnt_debug_release(sk);

	atomic_dec(sk->sk_prot->orphan_count);
	sock_put(sk);
}

EXPORT_SYMBOL(inet_csk_destroy_sock);

int inet_csk_listen_start(struct sock *sk, const int nr_table_entries)
{
	struct inet_sock *inet = inet_sk(sk);
	struct inet_connection_sock *icsk = inet_csk(sk);
	int rc = reqsk_queue_alloc(&icsk->icsk_accept_queue, nr_table_entries);

	if (rc != 0)
		return rc;

	sk->sk_max_ack_backlog = 0;
	sk->sk_ack_backlog = 0;
	inet_csk_delack_init(sk);

	/* There is a race window here: we announce ourselves listening,
	 * but this transition is still not validated by get_port().
	 * It is OK, because this socket enters the hash table only
	 * after validation is complete.
	 */
	sk->sk_state = TCP_LISTEN;
	if (!sk->sk_prot->get_port(sk, inet->num)) {
		inet->sport = htons(inet->num);

		sk_dst_reset(sk);
		sk->sk_prot->hash(sk);

		return 0;
	}

	sk->sk_state = TCP_CLOSE;
	__reqsk_queue_destroy(&icsk->icsk_accept_queue);
	return -EADDRINUSE;
}

EXPORT_SYMBOL_GPL(inet_csk_listen_start);

/*
 * This routine closes sockets which have been at least partially
 * opened, but not yet accepted.
 */
void inet_csk_listen_stop(struct sock *sk)
{
	struct inet_connection_sock *icsk = inet_csk(sk);
	struct request_sock *acc_req;
	struct request_sock *req;

	inet_csk_delete_keepalive_timer(sk);

	/* make all the listen_opt local to us */
	acc_req = reqsk_queue_yank_acceptq(&icsk->icsk_accept_queue);

	/* Following specs, it would be better either to send FIN
	 * (and enter FIN-WAIT-1, it is normal close)
	 * or to send active reset (abort).
	 * Certainly, it is pretty dangerous during a synflood, but it is
	 * a bad justification for our negligence 8)
	 * To be honest, we are not able to make either
	 * of the variants now.			--ANK
	 */
	reqsk_queue_destroy(&icsk->icsk_accept_queue);

	while ((req = acc_req) != NULL) {
		struct sock *child = req->sk;

		acc_req = req->dl_next;

		local_bh_disable();
		bh_lock_sock(child);
		BUG_TRAP(!sock_owned_by_user(child));
		sock_hold(child);

		sk->sk_prot->disconnect(child, O_NONBLOCK);

		sock_orphan(child);

		atomic_inc(sk->sk_prot->orphan_count);

		inet_csk_destroy_sock(child);

		bh_unlock_sock(child);
		local_bh_enable();
		sock_put(child);

		sk_acceptq_removed(sk);
		__reqsk_free(req);
	}
	BUG_TRAP(!sk->sk_ack_backlog);
}

EXPORT_SYMBOL_GPL(inet_csk_listen_stop);

void inet_csk_addr2sockaddr(struct sock *sk, struct sockaddr *uaddr)
{
	struct sockaddr_in *sin = (struct sockaddr_in *)uaddr;
	const struct inet_sock *inet = inet_sk(sk);

	sin->sin_family = AF_INET;
	sin->sin_addr.s_addr = inet->daddr;
	sin->sin_port = inet->dport;
}

EXPORT_SYMBOL_GPL(inet_csk_addr2sockaddr);

int inet_csk_ctl_sock_create(struct socket **sock, unsigned short family,
			     unsigned short type, unsigned char protocol)
{
	int rc = sock_create_kern(family, type, protocol, sock);

	if (rc == 0) {
		(*sock)->sk->sk_allocation = GFP_ATOMIC;
		inet_sk((*sock)->sk)->uc_ttl = -1;
		/*
		 * Unhash it so that IP input processing does not even see it,
		 * we do not wish this socket to see incoming packets.
		 */
		(*sock)->sk->sk_prot->unhash((*sock)->sk);
	}
	return rc;
}

EXPORT_SYMBOL_GPL(inet_csk_ctl_sock_create);

#ifdef CONFIG_COMPAT
int inet_csk_compat_getsockopt(struct sock *sk, int level, int optname,
			       char __user *optval, int __user *optlen)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_af_ops->compat_getsockopt != NULL)
		return icsk->icsk_af_ops->compat_getsockopt(sk, level, optname,
							    optval, optlen);
	return icsk->icsk_af_ops->getsockopt(sk, level, optname,
					     optval, optlen);
}

EXPORT_SYMBOL_GPL(inet_csk_compat_getsockopt);

int inet_csk_compat_setsockopt(struct sock *sk, int level, int optname,
			       char __user *optval, int optlen)
{
	const struct inet_connection_sock *icsk = inet_csk(sk);

	if (icsk->icsk_af_ops->compat_setsockopt != NULL)
		return icsk->icsk_af_ops->compat_setsockopt(sk, level, optname,
							    optval, optlen);
	return icsk->icsk_af_ops->setsockopt(sk, level, optname,
					     optval, optlen);
}

EXPORT_SYMBOL_GPL(inet_csk_compat_setsockopt);
#endif