MOXA linux-2.6.x / linux-2.6.9-uc0 from sdlinux-moxaart.tgz
[linux-2.6.9-moxart.git] / net / ipv4 / tcp.c
blob462cbda0277f017155b23722cea23d3ced1c6c06
1 /*
2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
6 * Implementation of the Transmission Control Protocol(TCP).
8 * Version: $Id: tcp.c,v 1.216 2002/02/01 22:01:04 davem Exp $
10 * Authors: Ross Biro, <bir7@leland.Stanford.Edu>
11 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
12 * Mark Evans, <evansmp@uhura.aston.ac.uk>
13 * Corey Minyard <wf-rch!minyard@relay.EU.net>
14 * Florian La Roche, <flla@stud.uni-sb.de>
15 * Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
16 * Linus Torvalds, <torvalds@cs.helsinki.fi>
17 * Alan Cox, <gw4pts@gw4pts.ampr.org>
18 * Matthew Dillon, <dillon@apollo.west.oic.com>
19 * Arnt Gulbrandsen, <agulbra@nvg.unit.no>
20 * Jorge Cwik, <jorge@laser.satlink.net>
22 * Fixes:
23 * Alan Cox : Numerous verify_area() calls
24 * Alan Cox : Set the ACK bit on a reset
25 * Alan Cox : Stopped it crashing if it closed while
26 * sk->inuse=1 and was trying to connect
27 * (tcp_err()).
28 * Alan Cox : All icmp error handling was broken
29 * pointers passed where wrong and the
30 * socket was looked up backwards. Nobody
31 * tested any icmp error code obviously.
32 * Alan Cox : tcp_err() now handled properly. It
33 * wakes people on errors. poll
34 * behaves and the icmp error race
35 * has gone by moving it into sock.c
36 * Alan Cox : tcp_send_reset() fixed to work for
37 * everything not just packets for
38 * unknown sockets.
39 * Alan Cox : tcp option processing.
40 * Alan Cox : Reset tweaked (still not 100%) [Had
41 * syn rule wrong]
42 * Herp Rosmanith : More reset fixes
43 * Alan Cox : No longer acks invalid rst frames.
44 * Acking any kind of RST is right out.
45 * Alan Cox : Sets an ignore me flag on an rst
46 * receive otherwise odd bits of prattle
47 * escape still
48 * Alan Cox : Fixed another acking RST frame bug.
49 * Should stop LAN workplace lockups.
50 * Alan Cox : Some tidyups using the new skb list
51 * facilities
52 * Alan Cox : sk->keepopen now seems to work
53 * Alan Cox : Pulls options out correctly on accepts
54 * Alan Cox : Fixed assorted sk->rqueue->next errors
55 * Alan Cox : PSH doesn't end a TCP read. Switched a
56 * bit to skb ops.
57 * Alan Cox : Tidied tcp_data to avoid a potential
58 * nasty.
59 * Alan Cox : Added some better commenting, as the
60 * tcp is hard to follow
61 * Alan Cox : Removed incorrect check for 20 * psh
62 * Michael O'Reilly : ack < copied bug fix.
63 * Johannes Stille : Misc tcp fixes (not all in yet).
64 * Alan Cox : FIN with no memory -> CRASH
65 * Alan Cox : Added socket option proto entries.
66 * Also added awareness of them to accept.
67 * Alan Cox : Added TCP options (SOL_TCP)
68 * Alan Cox : Switched wakeup calls to callbacks,
69 * so the kernel can layer network
70 * sockets.
71 * Alan Cox : Use ip_tos/ip_ttl settings.
72 * Alan Cox : Handle FIN (more) properly (we hope).
73 * Alan Cox : RST frames sent on unsynchronised
74 * state ack error.
75 * Alan Cox : Put in missing check for SYN bit.
76 * Alan Cox : Added tcp_select_window() aka NET2E
77 * window non shrink trick.
78 * Alan Cox : Added a couple of small NET2E timer
79 * fixes
80 * Charles Hedrick : TCP fixes
81 * Toomas Tamm : TCP window fixes
82 * Alan Cox : Small URG fix to rlogin ^C ack fight
83 * Charles Hedrick : Rewrote most of it to actually work
84 * Linus : Rewrote tcp_read() and URG handling
85 * completely
86 * Gerhard Koerting: Fixed some missing timer handling
87 * Matthew Dillon : Reworked TCP machine states as per RFC
88 * Gerhard Koerting: PC/TCP workarounds
89 * Adam Caldwell : Assorted timer/timing errors
90 * Matthew Dillon : Fixed another RST bug
91 * Alan Cox : Move to kernel side addressing changes.
92 * Alan Cox : Beginning work on TCP fastpathing
93 * (not yet usable)
94 * Arnt Gulbrandsen: Turbocharged tcp_check() routine.
95 * Alan Cox : TCP fast path debugging
96 * Alan Cox : Window clamping
97 * Michael Riepe : Bug in tcp_check()
98 * Matt Dillon : More TCP improvements and RST bug fixes
99 * Matt Dillon : Yet more small nasties remove from the
100 * TCP code (Be very nice to this man if
101 * tcp finally works 100%) 8)
102 * Alan Cox : BSD accept semantics.
103 * Alan Cox : Reset on closedown bug.
104 * Peter De Schrijver : ENOTCONN check missing in tcp_sendto().
105 * Michael Pall : Handle poll() after URG properly in
106 * all cases.
107 * Michael Pall : Undo the last fix in tcp_read_urg()
108 * (multi URG PUSH broke rlogin).
109 * Michael Pall : Fix the multi URG PUSH problem in
110 * tcp_readable(), poll() after URG
111 * works now.
112 * Michael Pall : recv(...,MSG_OOB) never blocks in the
113 * BSD api.
114 * Alan Cox : Changed the semantics of sk->socket to
115 * fix a race and a signal problem with
116 * accept() and async I/O.
117 * Alan Cox : Relaxed the rules on tcp_sendto().
118 * Yury Shevchuk : Really fixed accept() blocking problem.
119 * Craig I. Hagan : Allow for BSD compatible TIME_WAIT for
120 * clients/servers which listen in on
121 * fixed ports.
122 * Alan Cox : Cleaned the above up and shrank it to
123 * a sensible code size.
124 * Alan Cox : Self connect lockup fix.
125 * Alan Cox : No connect to multicast.
126 * Ross Biro : Close unaccepted children on master
127 * socket close.
128 * Alan Cox : Reset tracing code.
129 * Alan Cox : Spurious resets on shutdown.
130 * Alan Cox : Giant 15 minute/60 second timer error
131 * Alan Cox : Small whoops in polling before an
132 * accept.
133 * Alan Cox : Kept the state trace facility since
134 * it's handy for debugging.
135 * Alan Cox : More reset handler fixes.
136 * Alan Cox : Started rewriting the code based on
137 * the RFC's for other useful protocol
138 * references see: Comer, KA9Q NOS, and
139 * for a reference on the difference
140 * between specifications and how BSD
141 * works see the 4.4lite source.
142 * A.N.Kuznetsov : Don't time wait on completion of tidy
143 * close.
144 * Linus Torvalds : Fin/Shutdown & copied_seq changes.
145 * Linus Torvalds : Fixed BSD port reuse to work first syn
146 * Alan Cox : Reimplemented timers as per the RFC
147 * and using multiple timers for sanity.
148 * Alan Cox : Small bug fixes, and a lot of new
149 * comments.
150 * Alan Cox : Fixed dual reader crash by locking
151 * the buffers (much like datagram.c)
152 * Alan Cox : Fixed stuck sockets in probe. A probe
153 * now gets fed up of retrying without
154 * (even a no space) answer.
155 * Alan Cox : Extracted closing code better
156 * Alan Cox : Fixed the closing state machine to
157 * resemble the RFC.
158 * Alan Cox : More 'per spec' fixes.
159 * Jorge Cwik : Even faster checksumming.
160 * Alan Cox : tcp_data() doesn't ack illegal PSH
161 * only frames. At least one pc tcp stack
162 * generates them.
163 * Alan Cox : Cache last socket.
164 * Alan Cox : Per route irtt.
165 * Matt Day : poll()->select() match BSD precisely on error
166 * Alan Cox : New buffers
167 * Marc Tamsky : Various sk->prot->retransmits and
168 * sk->retransmits misupdating fixed.
169 * Fixed tcp_write_timeout: stuck close,
170 * and TCP syn retries gets used now.
171 * Mark Yarvis : In tcp_read_wakeup(), don't send an
172 * ack if state is TCP_CLOSED.
173 * Alan Cox : Look up device on a retransmit - routes may
174 * change. Doesn't yet cope with MSS shrink right
175 * but it's a start!
176 * Marc Tamsky : Closing in closing fixes.
177 * Mike Shaver : RFC1122 verifications.
178 * Alan Cox : rcv_saddr errors.
179 * Alan Cox : Block double connect().
180 * Alan Cox : Small hooks for enSKIP.
181 * Alexey Kuznetsov: Path MTU discovery.
182 * Alan Cox : Support soft errors.
183 * Alan Cox : Fix MTU discovery pathological case
184 * when the remote claims no mtu!
185 * Marc Tamsky : TCP_CLOSE fix.
186 * Colin (G3TNE) : Send a reset on syn ack replies in
187 * window but wrong (fixes NT lpd problems)
188 * Pedro Roque : Better TCP window handling, delayed ack.
189 * Joerg Reuter : No modification of locked buffers in
190 * tcp_do_retransmit()
191 * Eric Schenk : Changed receiver side silly window
192 * avoidance algorithm to BSD style
193 * algorithm. This doubles throughput
194 * against machines running Solaris,
195 * and seems to result in general
196 * improvement.
197 * Stefan Magdalinski : adjusted tcp_readable() to fix FIONREAD
198 * Willy Konynenberg : Transparent proxying support.
199 * Mike McLagan : Routing by source
200 * Keith Owens : Do proper merging with partial SKB's in
201 * tcp_do_sendmsg to avoid burstiness.
202 * Eric Schenk : Fix fast close down bug with
203 * shutdown() followed by close().
204 * Andi Kleen : Make poll agree with SIGIO
205 * Salvatore Sanfilippo : Support SO_LINGER with linger == 1 and
206 * lingertime == 0 (RFC 793 ABORT Call)
207 * Hirokazu Takahashi : Use copy_from_user() instead of
208 * csum_and_copy_from_user() if possible.
210 * This program is free software; you can redistribute it and/or
211 * modify it under the terms of the GNU General Public License
212 * as published by the Free Software Foundation; either version
213 * 2 of the License, or(at your option) any later version.
215 * Description of States:
217 * TCP_SYN_SENT sent a connection request, waiting for ack
219 * TCP_SYN_RECV received a connection request, sent ack,
220 * waiting for final ack in three-way handshake.
222 * TCP_ESTABLISHED connection established
224 * TCP_FIN_WAIT1 our side has shutdown, waiting to complete
225 * transmission of remaining buffered data
227 * TCP_FIN_WAIT2 all buffered data sent, waiting for remote
228 * to shutdown
230 * TCP_CLOSING both sides have shutdown but we still have
231 * data we have to finish sending
233 * TCP_TIME_WAIT timeout to catch resent junk before entering
234 * closed, can only be entered from FIN_WAIT2
235 * or CLOSING. Required because the other end
236 * may not have gotten our last ACK causing it
237 * to retransmit the data packet (which we ignore)
239 * TCP_CLOSE_WAIT remote side has shutdown and is waiting for
240 * us to finish writing our data and to shutdown
241 * (we have to close() to move on to LAST_ACK)
243 * TCP_LAST_ACK out side has shutdown after remote has
244 * shutdown. There may still be data in our
245 * buffer that we have to finish sending
247 * TCP_CLOSE socket is finished
250 #include <linux/config.h>
251 #include <linux/module.h>
252 #include <linux/types.h>
253 #include <linux/fcntl.h>
254 #include <linux/poll.h>
255 #include <linux/init.h>
256 #include <linux/smp_lock.h>
257 #include <linux/fs.h>
258 #include <linux/random.h>
260 #include <net/icmp.h>
261 #include <net/tcp.h>
262 #include <net/xfrm.h>
263 #include <net/ip.h>
266 #include <asm/uaccess.h>
267 #include <asm/ioctls.h>
269 int sysctl_tcp_fin_timeout = TCP_FIN_TIMEOUT;
271 DEFINE_SNMP_STAT(struct tcp_mib, tcp_statistics);
273 kmem_cache_t *tcp_openreq_cachep;
274 kmem_cache_t *tcp_bucket_cachep;
275 kmem_cache_t *tcp_timewait_cachep;
277 atomic_t tcp_orphan_count = ATOMIC_INIT(0);
279 int sysctl_tcp_mem[3];
280 int sysctl_tcp_wmem[3] = { 4 * 1024, 16 * 1024, 128 * 1024 };
281 int sysctl_tcp_rmem[3] = { 4 * 1024, 87380, 87380 * 2 };
283 EXPORT_SYMBOL(sysctl_tcp_mem);
284 EXPORT_SYMBOL(sysctl_tcp_rmem);
285 EXPORT_SYMBOL(sysctl_tcp_wmem);
287 atomic_t tcp_memory_allocated; /* Current allocated memory. */
288 atomic_t tcp_sockets_allocated; /* Current number of TCP sockets. */
290 EXPORT_SYMBOL(tcp_memory_allocated);
291 EXPORT_SYMBOL(tcp_sockets_allocated);
294 * Pressure flag: try to collapse.
295 * Technical note: it is used by multiple contexts non atomically.
296 * All the sk_stream_mem_schedule() is of this nature: accounting
297 * is strict, actions are advisory and have some latency.
299 int tcp_memory_pressure;
301 EXPORT_SYMBOL(tcp_memory_pressure);
303 void tcp_enter_memory_pressure(void)
305 if (!tcp_memory_pressure) {
306 NET_INC_STATS(LINUX_MIB_TCPMEMORYPRESSURES);
307 tcp_memory_pressure = 1;
311 EXPORT_SYMBOL(tcp_enter_memory_pressure);
314 * LISTEN is a special case for poll..
316 static __inline__ unsigned int tcp_listen_poll(struct sock *sk,
317 poll_table *wait)
319 return tcp_sk(sk)->accept_queue ? (POLLIN | POLLRDNORM) : 0;
323 * Wait for a TCP event.
325 * Note that we don't need to lock the socket, as the upper poll layers
326 * take care of normal races (between the test and the event) and we don't
327 * go look at any of the socket buffers directly.
329 unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
331 unsigned int mask;
332 struct sock *sk = sock->sk;
333 struct tcp_opt *tp = tcp_sk(sk);
335 poll_wait(file, sk->sk_sleep, wait);
336 if (sk->sk_state == TCP_LISTEN)
337 return tcp_listen_poll(sk, wait);
339 /* Socket is not locked. We are protected from async events
340 by poll logic and correct handling of state changes
341 made by another threads is impossible in any case.
344 mask = 0;
345 if (sk->sk_err)
346 mask = POLLERR;
349 * POLLHUP is certainly not done right. But poll() doesn't
350 * have a notion of HUP in just one direction, and for a
351 * socket the read side is more interesting.
353 * Some poll() documentation says that POLLHUP is incompatible
354 * with the POLLOUT/POLLWR flags, so somebody should check this
355 * all. But careful, it tends to be safer to return too many
356 * bits than too few, and you can easily break real applications
357 * if you don't tell them that something has hung up!
359 * Check-me.
361 * Check number 1. POLLHUP is _UNMASKABLE_ event (see UNIX98 and
362 * our fs/select.c). It means that after we received EOF,
363 * poll always returns immediately, making impossible poll() on write()
364 * in state CLOSE_WAIT. One solution is evident --- to set POLLHUP
365 * if and only if shutdown has been made in both directions.
366 * Actually, it is interesting to look how Solaris and DUX
367 * solve this dilemma. I would prefer, if PULLHUP were maskable,
368 * then we could set it on SND_SHUTDOWN. BTW examples given
369 * in Stevens' books assume exactly this behaviour, it explains
370 * why PULLHUP is incompatible with POLLOUT. --ANK
372 * NOTE. Check for TCP_CLOSE is added. The goal is to prevent
373 * blocking on fresh not-connected or disconnected socket. --ANK
375 if (sk->sk_shutdown == SHUTDOWN_MASK || sk->sk_state == TCP_CLOSE)
376 mask |= POLLHUP;
377 if (sk->sk_shutdown & RCV_SHUTDOWN)
378 mask |= POLLIN | POLLRDNORM;
380 /* Connected? */
381 if ((1 << sk->sk_state) & ~(TCPF_SYN_SENT | TCPF_SYN_RECV)) {
382 /* Potential race condition. If read of tp below will
383 * escape above sk->sk_state, we can be illegally awaken
384 * in SYN_* states. */
385 if ((tp->rcv_nxt != tp->copied_seq) &&
386 (tp->urg_seq != tp->copied_seq ||
387 tp->rcv_nxt != tp->copied_seq + 1 ||
388 sock_flag(sk, SOCK_URGINLINE) || !tp->urg_data))
389 mask |= POLLIN | POLLRDNORM;
391 if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
392 if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk)) {
393 mask |= POLLOUT | POLLWRNORM;
394 } else { /* send SIGIO later */
395 set_bit(SOCK_ASYNC_NOSPACE,
396 &sk->sk_socket->flags);
397 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
399 /* Race breaker. If space is freed after
400 * wspace test but before the flags are set,
401 * IO signal will be lost.
403 if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk))
404 mask |= POLLOUT | POLLWRNORM;
408 if (tp->urg_data & TCP_URG_VALID)
409 mask |= POLLPRI;
411 return mask;
414 int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
416 struct tcp_opt *tp = tcp_sk(sk);
417 int answ;
419 switch (cmd) {
420 case SIOCINQ:
421 if (sk->sk_state == TCP_LISTEN)
422 return -EINVAL;
424 lock_sock(sk);
425 if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
426 answ = 0;
427 else if (sock_flag(sk, SOCK_URGINLINE) ||
428 !tp->urg_data ||
429 before(tp->urg_seq, tp->copied_seq) ||
430 !before(tp->urg_seq, tp->rcv_nxt)) {
431 answ = tp->rcv_nxt - tp->copied_seq;
433 /* Subtract 1, if FIN is in queue. */
434 if (answ && !skb_queue_empty(&sk->sk_receive_queue))
435 answ -=
436 ((struct sk_buff *)sk->sk_receive_queue.prev)->h.th->fin;
437 } else
438 answ = tp->urg_seq - tp->copied_seq;
439 release_sock(sk);
440 break;
441 case SIOCATMARK:
442 answ = tp->urg_data && tp->urg_seq == tp->copied_seq;
443 break;
444 case SIOCOUTQ:
445 if (sk->sk_state == TCP_LISTEN)
446 return -EINVAL;
448 if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
449 answ = 0;
450 else
451 answ = tp->write_seq - tp->snd_una;
452 break;
453 default:
454 return -ENOIOCTLCMD;
457 return put_user(answ, (int __user *)arg);
461 int tcp_listen_start(struct sock *sk)
463 struct inet_opt *inet = inet_sk(sk);
464 struct tcp_opt *tp = tcp_sk(sk);
465 struct tcp_listen_opt *lopt;
467 sk->sk_max_ack_backlog = 0;
468 sk->sk_ack_backlog = 0;
469 tp->accept_queue = tp->accept_queue_tail = NULL;
470 tp->syn_wait_lock = RW_LOCK_UNLOCKED;
471 tcp_delack_init(tp);
473 lopt = kmalloc(sizeof(struct tcp_listen_opt), GFP_KERNEL);
474 if (!lopt)
475 return -ENOMEM;
477 memset(lopt, 0, sizeof(struct tcp_listen_opt));
478 for (lopt->max_qlen_log = 6; ; lopt->max_qlen_log++)
479 if ((1 << lopt->max_qlen_log) >= sysctl_max_syn_backlog)
480 break;
481 get_random_bytes(&lopt->hash_rnd, 4);
483 write_lock_bh(&tp->syn_wait_lock);
484 tp->listen_opt = lopt;
485 write_unlock_bh(&tp->syn_wait_lock);
487 /* There is race window here: we announce ourselves listening,
488 * but this transition is still not validated by get_port().
489 * It is OK, because this socket enters to hash table only
490 * after validation is complete.
492 sk->sk_state = TCP_LISTEN;
493 if (!sk->sk_prot->get_port(sk, inet->num)) {
494 inet->sport = htons(inet->num);
496 sk_dst_reset(sk);
497 sk->sk_prot->hash(sk);
499 return 0;
502 sk->sk_state = TCP_CLOSE;
503 write_lock_bh(&tp->syn_wait_lock);
504 tp->listen_opt = NULL;
505 write_unlock_bh(&tp->syn_wait_lock);
506 kfree(lopt);
507 return -EADDRINUSE;
511 * This routine closes sockets which have been at least partially
512 * opened, but not yet accepted.
515 static void tcp_listen_stop (struct sock *sk)
517 struct tcp_opt *tp = tcp_sk(sk);
518 struct tcp_listen_opt *lopt = tp->listen_opt;
519 struct open_request *acc_req = tp->accept_queue;
520 struct open_request *req;
521 int i;
523 tcp_delete_keepalive_timer(sk);
525 /* make all the listen_opt local to us */
526 write_lock_bh(&tp->syn_wait_lock);
527 tp->listen_opt = NULL;
528 write_unlock_bh(&tp->syn_wait_lock);
529 tp->accept_queue = tp->accept_queue_tail = NULL;
531 if (lopt->qlen) {
532 for (i = 0; i < TCP_SYNQ_HSIZE; i++) {
533 while ((req = lopt->syn_table[i]) != NULL) {
534 lopt->syn_table[i] = req->dl_next;
535 lopt->qlen--;
536 tcp_openreq_free(req);
538 /* Following specs, it would be better either to send FIN
539 * (and enter FIN-WAIT-1, it is normal close)
540 * or to send active reset (abort).
541 * Certainly, it is pretty dangerous while synflood, but it is
542 * bad justification for our negligence 8)
543 * To be honest, we are not able to make either
544 * of the variants now. --ANK
549 BUG_TRAP(!lopt->qlen);
551 kfree(lopt);
553 while ((req = acc_req) != NULL) {
554 struct sock *child = req->sk;
556 acc_req = req->dl_next;
558 local_bh_disable();
559 bh_lock_sock(child);
560 BUG_TRAP(!sock_owned_by_user(child));
561 sock_hold(child);
563 tcp_disconnect(child, O_NONBLOCK);
565 sock_orphan(child);
567 atomic_inc(&tcp_orphan_count);
569 tcp_destroy_sock(child);
571 bh_unlock_sock(child);
572 local_bh_enable();
573 sock_put(child);
575 sk_acceptq_removed(sk);
576 tcp_openreq_fastfree(req);
578 BUG_TRAP(!sk->sk_ack_backlog);
581 static inline void tcp_mark_push(struct tcp_opt *tp, struct sk_buff *skb)
583 TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_PSH;
584 tp->pushed_seq = tp->write_seq;
587 static inline int forced_push(struct tcp_opt *tp)
589 return after(tp->write_seq, tp->pushed_seq + (tp->max_window >> 1));
592 static inline void skb_entail(struct sock *sk, struct tcp_opt *tp,
593 struct sk_buff *skb)
595 skb->csum = 0;
596 TCP_SKB_CB(skb)->seq = tp->write_seq;
597 TCP_SKB_CB(skb)->end_seq = tp->write_seq;
598 TCP_SKB_CB(skb)->flags = TCPCB_FLAG_ACK;
599 TCP_SKB_CB(skb)->sacked = 0;
600 __skb_queue_tail(&sk->sk_write_queue, skb);
601 sk_charge_skb(sk, skb);
602 if (!sk->sk_send_head)
603 sk->sk_send_head = skb;
604 else if (tp->nonagle&TCP_NAGLE_PUSH)
605 tp->nonagle &= ~TCP_NAGLE_PUSH;
608 static inline void tcp_mark_urg(struct tcp_opt *tp, int flags,
609 struct sk_buff *skb)
611 if (flags & MSG_OOB) {
612 tp->urg_mode = 1;
613 tp->snd_up = tp->write_seq;
614 TCP_SKB_CB(skb)->sacked |= TCPCB_URG;
618 static inline void tcp_push(struct sock *sk, struct tcp_opt *tp, int flags,
619 int mss_now, int nonagle)
621 if (sk->sk_send_head) {
622 struct sk_buff *skb = sk->sk_write_queue.prev;
623 if (!(flags & MSG_MORE) || forced_push(tp))
624 tcp_mark_push(tp, skb);
625 tcp_mark_urg(tp, flags, skb);
626 __tcp_push_pending_frames(sk, tp, mss_now,
627 (flags & MSG_MORE) ? TCP_NAGLE_CORK : nonagle);
631 static ssize_t do_tcp_sendpages(struct sock *sk, struct page **pages, int poffset,
632 size_t psize, int flags)
634 struct tcp_opt *tp = tcp_sk(sk);
635 int mss_now;
636 int err;
637 ssize_t copied;
638 long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
640 /* Wait for a connection to finish. */
641 if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT))
642 if ((err = sk_stream_wait_connect(sk, &timeo)) != 0)
643 goto out_err;
645 clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
647 mss_now = tcp_current_mss(sk, !(flags&MSG_OOB));
648 copied = 0;
650 err = -EPIPE;
651 if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
652 goto do_error;
654 while (psize > 0) {
655 struct sk_buff *skb = sk->sk_write_queue.prev;
656 struct page *page = pages[poffset / PAGE_SIZE];
657 int copy, i;
658 int offset = poffset % PAGE_SIZE;
659 int size = min_t(size_t, psize, PAGE_SIZE - offset);
661 if (!sk->sk_send_head || (copy = mss_now - skb->len) <= 0) {
662 new_segment:
663 if (!sk_stream_memory_free(sk))
664 goto wait_for_sndbuf;
666 skb = sk_stream_alloc_pskb(sk, 0, tp->mss_cache,
667 sk->sk_allocation);
668 if (!skb)
669 goto wait_for_memory;
671 skb_entail(sk, tp, skb);
672 copy = mss_now;
675 if (copy > size)
676 copy = size;
678 i = skb_shinfo(skb)->nr_frags;
679 if (skb_can_coalesce(skb, i, page, offset)) {
680 skb_shinfo(skb)->frags[i - 1].size += copy;
681 } else if (i < MAX_SKB_FRAGS) {
682 get_page(page);
683 skb_fill_page_desc(skb, i, page, offset, copy);
684 } else {
685 tcp_mark_push(tp, skb);
686 goto new_segment;
689 skb->len += copy;
690 skb->data_len += copy;
691 skb->ip_summed = CHECKSUM_HW;
692 tp->write_seq += copy;
693 TCP_SKB_CB(skb)->end_seq += copy;
694 skb_shinfo(skb)->tso_segs = 0;
696 if (!copied)
697 TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_PSH;
699 copied += copy;
700 poffset += copy;
701 if (!(psize -= copy))
702 goto out;
704 if (skb->len != mss_now || (flags & MSG_OOB))
705 continue;
707 if (forced_push(tp)) {
708 tcp_mark_push(tp, skb);
709 __tcp_push_pending_frames(sk, tp, mss_now, TCP_NAGLE_PUSH);
710 } else if (skb == sk->sk_send_head)
711 tcp_push_one(sk, mss_now);
712 continue;
714 wait_for_sndbuf:
715 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
716 wait_for_memory:
717 if (copied)
718 tcp_push(sk, tp, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH);
720 if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
721 goto do_error;
723 mss_now = tcp_current_mss(sk, !(flags&MSG_OOB));
726 out:
727 if (copied)
728 tcp_push(sk, tp, flags, mss_now, tp->nonagle);
729 return copied;
731 do_error:
732 if (copied)
733 goto out;
734 out_err:
735 return sk_stream_error(sk, flags, err);
738 ssize_t tcp_sendpage(struct socket *sock, struct page *page, int offset,
739 size_t size, int flags)
741 ssize_t res;
742 struct sock *sk = sock->sk;
744 #define TCP_ZC_CSUM_FLAGS (NETIF_F_IP_CSUM | NETIF_F_NO_CSUM | NETIF_F_HW_CSUM)
746 if (!(sk->sk_route_caps & NETIF_F_SG) ||
747 !(sk->sk_route_caps & TCP_ZC_CSUM_FLAGS))
748 return sock_no_sendpage(sock, page, offset, size, flags);
750 #undef TCP_ZC_CSUM_FLAGS
752 lock_sock(sk);
753 TCP_CHECK_TIMER(sk);
754 res = do_tcp_sendpages(sk, &page, offset, size, flags);
755 TCP_CHECK_TIMER(sk);
756 release_sock(sk);
757 return res;
760 #define TCP_PAGE(sk) (sk->sk_sndmsg_page)
761 #define TCP_OFF(sk) (sk->sk_sndmsg_off)
763 static inline int select_size(struct sock *sk, struct tcp_opt *tp)
765 int tmp = tp->mss_cache_std;
767 if (sk->sk_route_caps & NETIF_F_SG) {
768 int pgbreak = SKB_MAX_HEAD(MAX_TCP_HEADER);
770 if (tmp >= pgbreak &&
771 tmp <= pgbreak + (MAX_SKB_FRAGS - 1) * PAGE_SIZE)
772 tmp = pgbreak;
774 return tmp;
777 int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
778 size_t size)
780 struct iovec *iov;
781 struct tcp_opt *tp = tcp_sk(sk);
782 struct sk_buff *skb;
783 int iovlen, flags;
784 int mss_now;
785 int err, copied;
786 long timeo;
788 lock_sock(sk);
789 TCP_CHECK_TIMER(sk);
791 flags = msg->msg_flags;
792 timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
794 /* Wait for a connection to finish. */
795 if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT))
796 if ((err = sk_stream_wait_connect(sk, &timeo)) != 0)
797 goto out_err;
799 /* This should be in poll */
800 clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
802 mss_now = tcp_current_mss(sk, !(flags&MSG_OOB));
804 /* Ok commence sending. */
805 iovlen = msg->msg_iovlen;
806 iov = msg->msg_iov;
807 copied = 0;
809 err = -EPIPE;
810 if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
811 goto do_error;
813 while (--iovlen >= 0) {
814 int seglen = iov->iov_len;
815 unsigned char __user *from = iov->iov_base;
817 iov++;
819 while (seglen > 0) {
820 int copy;
822 skb = sk->sk_write_queue.prev;
824 if (!sk->sk_send_head ||
825 (copy = mss_now - skb->len) <= 0) {
827 new_segment:
828 /* Allocate new segment. If the interface is SG,
829 * allocate skb fitting to single page.
831 if (!sk_stream_memory_free(sk))
832 goto wait_for_sndbuf;
834 skb = sk_stream_alloc_pskb(sk, select_size(sk, tp),
835 0, sk->sk_allocation);
836 if (!skb)
837 goto wait_for_memory;
840 * Check whether we can use HW checksum.
842 if (sk->sk_route_caps &
843 (NETIF_F_IP_CSUM | NETIF_F_NO_CSUM |
844 NETIF_F_HW_CSUM))
845 skb->ip_summed = CHECKSUM_HW;
847 skb_entail(sk, tp, skb);
848 copy = mss_now;
851 /* Try to append data to the end of skb. */
852 if (copy > seglen)
853 copy = seglen;
855 /* Where to copy to? */
856 if (skb_tailroom(skb) > 0) {
857 /* We have some space in skb head. Superb! */
858 if (copy > skb_tailroom(skb))
859 copy = skb_tailroom(skb);
860 if ((err = skb_add_data(skb, from, copy)) != 0)
861 goto do_fault;
862 } else {
863 int merge = 0;
864 int i = skb_shinfo(skb)->nr_frags;
865 struct page *page = TCP_PAGE(sk);
866 int off = TCP_OFF(sk);
868 if (skb_can_coalesce(skb, i, page, off) &&
869 off != PAGE_SIZE) {
870 /* We can extend the last page
871 * fragment. */
872 merge = 1;
873 } else if (i == MAX_SKB_FRAGS ||
874 (!i &&
875 !(sk->sk_route_caps & NETIF_F_SG))) {
876 /* Need to add new fragment and cannot
877 * do this because interface is non-SG,
878 * or because all the page slots are
879 * busy. */
880 tcp_mark_push(tp, skb);
881 goto new_segment;
882 } else if (page) {
883 /* If page is cached, align
884 * offset to L1 cache boundary
886 off = (off + L1_CACHE_BYTES - 1) &
887 ~(L1_CACHE_BYTES - 1);
888 if (off == PAGE_SIZE) {
889 put_page(page);
890 TCP_PAGE(sk) = page = NULL;
894 if (!page) {
895 /* Allocate new cache page. */
896 if (!(page = sk_stream_alloc_page(sk)))
897 goto wait_for_memory;
898 off = 0;
901 if (copy > PAGE_SIZE - off)
902 copy = PAGE_SIZE - off;
904 /* Time to copy data. We are close to
905 * the end! */
906 err = skb_copy_to_page(sk, from, skb, page,
907 off, copy);
908 if (err) {
909 /* If this page was new, give it to the
910 * socket so it does not get leaked.
912 if (!TCP_PAGE(sk)) {
913 TCP_PAGE(sk) = page;
914 TCP_OFF(sk) = 0;
916 goto do_error;
919 /* Update the skb. */
920 if (merge) {
921 skb_shinfo(skb)->frags[i - 1].size +=
922 copy;
923 } else {
924 skb_fill_page_desc(skb, i, page, off, copy);
925 if (TCP_PAGE(sk)) {
926 get_page(page);
927 } else if (off + copy < PAGE_SIZE) {
928 get_page(page);
929 TCP_PAGE(sk) = page;
933 TCP_OFF(sk) = off + copy;
936 if (!copied)
937 TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_PSH;
939 tp->write_seq += copy;
940 TCP_SKB_CB(skb)->end_seq += copy;
941 skb_shinfo(skb)->tso_segs = 0;
943 from += copy;
944 copied += copy;
945 if ((seglen -= copy) == 0 && iovlen == 0)
946 goto out;
948 if (skb->len != mss_now || (flags & MSG_OOB))
949 continue;
951 if (forced_push(tp)) {
952 tcp_mark_push(tp, skb);
953 __tcp_push_pending_frames(sk, tp, mss_now, TCP_NAGLE_PUSH);
954 } else if (skb == sk->sk_send_head)
955 tcp_push_one(sk, mss_now);
956 continue;
958 wait_for_sndbuf:
959 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
960 wait_for_memory:
961 if (copied)
962 tcp_push(sk, tp, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH);
964 if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
965 goto do_error;
967 mss_now = tcp_current_mss(sk, !(flags&MSG_OOB));
971 out:
972 if (copied)
973 tcp_push(sk, tp, flags, mss_now, tp->nonagle);
974 TCP_CHECK_TIMER(sk);
975 release_sock(sk);
976 return copied;
978 do_fault:
979 if (!skb->len) {
980 if (sk->sk_send_head == skb)
981 sk->sk_send_head = NULL;
982 __skb_unlink(skb, skb->list);
983 sk_stream_free_skb(sk, skb);
986 do_error:
987 if (copied)
988 goto out;
989 out_err:
990 err = sk_stream_error(sk, flags, err);
991 TCP_CHECK_TIMER(sk);
992 release_sock(sk);
993 return err;
997 * Handle reading urgent data. BSD has very simple semantics for
998 * this, no blocking and very strange errors 8)
1001 static int tcp_recv_urg(struct sock *sk, long timeo,
1002 struct msghdr *msg, int len, int flags,
1003 int *addr_len)
1005 struct tcp_opt *tp = tcp_sk(sk);
1007 /* No URG data to read. */
1008 if (sock_flag(sk, SOCK_URGINLINE) || !tp->urg_data ||
1009 tp->urg_data == TCP_URG_READ)
1010 return -EINVAL; /* Yes this is right ! */
1012 if (sk->sk_state == TCP_CLOSE && !sock_flag(sk, SOCK_DONE))
1013 return -ENOTCONN;
1015 if (tp->urg_data & TCP_URG_VALID) {
1016 int err = 0;
1017 char c = tp->urg_data;
1019 if (!(flags & MSG_PEEK))
1020 tp->urg_data = TCP_URG_READ;
1022 /* Read urgent data. */
1023 msg->msg_flags |= MSG_OOB;
1025 if (len > 0) {
1026 if (!(flags & MSG_TRUNC))
1027 err = memcpy_toiovec(msg->msg_iov, &c, 1);
1028 len = 1;
1029 } else
1030 msg->msg_flags |= MSG_TRUNC;
1032 return err ? -EFAULT : len;
1035 if (sk->sk_state == TCP_CLOSE || (sk->sk_shutdown & RCV_SHUTDOWN))
1036 return 0;
1038 /* Fixed the recv(..., MSG_OOB) behaviour. BSD docs and
1039 * the available implementations agree in this case:
1040 * this call should never block, independent of the
1041 * blocking state of the socket.
1042 * Mike <pall@rz.uni-karlsruhe.de>
1044 return -EAGAIN;
1047 /* Clean up the receive buffer for full frames taken by the user,
1048 * then send an ACK if necessary. COPIED is the number of bytes
1049 * tcp_recvmsg has given to the user so far, it speeds up the
1050 * calculation of whether or not we must ACK for the sake of
1051 * a window update.
1053 static void cleanup_rbuf(struct sock *sk, int copied)
1055 struct tcp_opt *tp = tcp_sk(sk);
1056 int time_to_ack = 0;
1058 #if TCP_DEBUG
1059 struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);
1061 BUG_TRAP(!skb || before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq));
1062 #endif
1064 if (tcp_ack_scheduled(tp)) {
1065 /* Delayed ACKs frequently hit locked sockets during bulk
1066 * receive. */
1067 if (tp->ack.blocked ||
1068 /* Once-per-two-segments ACK was not sent by tcp_input.c */
1069 tp->rcv_nxt - tp->rcv_wup > tp->ack.rcv_mss ||
1071 * If this read emptied read buffer, we send ACK, if
1072 * connection is not bidirectional, user drained
1073 * receive buffer and there was a small segment
1074 * in queue.
1076 (copied > 0 && (tp->ack.pending & TCP_ACK_PUSHED) &&
1077 !tp->ack.pingpong && !atomic_read(&sk->sk_rmem_alloc)))
1078 time_to_ack = 1;
1081 /* We send an ACK if we can now advertise a non-zero window
1082 * which has been raised "significantly".
1084 * Even if window raised up to infinity, do not send window open ACK
1085 * in states, where we will not receive more. It is useless.
1087 if (copied > 0 && !time_to_ack && !(sk->sk_shutdown & RCV_SHUTDOWN)) {
1088 __u32 rcv_window_now = tcp_receive_window(tp);
1090 /* Optimize, __tcp_select_window() is not cheap. */
1091 if (2*rcv_window_now <= tp->window_clamp) {
1092 __u32 new_window = __tcp_select_window(sk);
1094 /* Send ACK now, if this read freed lots of space
1095 * in our buffer. Certainly, new_window is new window.
1096 * We can advertise it now, if it is not less than current one.
1097 * "Lots" means "at least twice" here.
1099 if (new_window && new_window >= 2 * rcv_window_now)
1100 time_to_ack = 1;
1103 if (time_to_ack)
1104 tcp_send_ack(sk);
1107 static void tcp_prequeue_process(struct sock *sk)
1109 struct sk_buff *skb;
1110 struct tcp_opt *tp = tcp_sk(sk);
1112 NET_ADD_STATS_USER(LINUX_MIB_TCPPREQUEUED, skb_queue_len(&tp->ucopy.prequeue));
1114 /* RX process wants to run with disabled BHs, though it is not
1115 * necessary */
1116 local_bh_disable();
1117 while ((skb = __skb_dequeue(&tp->ucopy.prequeue)) != NULL)
1118 sk->sk_backlog_rcv(sk, skb);
1119 local_bh_enable();
1121 /* Clear memory counter. */
1122 tp->ucopy.memory = 0;
1125 static inline struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off)
1127 struct sk_buff *skb;
1128 u32 offset;
1130 skb_queue_walk(&sk->sk_receive_queue, skb) {
1131 offset = seq - TCP_SKB_CB(skb)->seq;
1132 if (skb->h.th->syn)
1133 offset--;
1134 if (offset < skb->len || skb->h.th->fin) {
1135 *off = offset;
1136 return skb;
1139 return NULL;
1143 * This routine provides an alternative to tcp_recvmsg() for routines
1144 * that would like to handle copying from skbuffs directly in 'sendfile'
1145 * fashion.
1146 * Note:
1147 * - It is assumed that the socket was locked by the caller.
1148 * - The routine does not block.
1149 * - At present, there is no support for reading OOB data
1150 * or for 'peeking' the socket using this routine
1151 * (although both would be easy to implement).
1153 int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
1154 sk_read_actor_t recv_actor)
1156 struct sk_buff *skb;
1157 struct tcp_opt *tp = tcp_sk(sk);
1158 u32 seq = tp->copied_seq;
1159 u32 offset;
1160 int copied = 0;
1162 if (sk->sk_state == TCP_LISTEN)
1163 return -ENOTCONN;
1164 while ((skb = tcp_recv_skb(sk, seq, &offset)) != NULL) {
1165 if (offset < skb->len) {
1166 size_t used, len;
1168 len = skb->len - offset;
1169 /* Stop reading if we hit a patch of urgent data */
1170 if (tp->urg_data) {
1171 u32 urg_offset = tp->urg_seq - seq;
1172 if (urg_offset < len)
1173 len = urg_offset;
1174 if (!len)
1175 break;
1177 used = recv_actor(desc, skb, offset, len);
1178 if (used <= len) {
1179 seq += used;
1180 copied += used;
1181 offset += used;
1183 if (offset != skb->len)
1184 break;
1186 if (skb->h.th->fin) {
1187 sk_eat_skb(sk, skb);
1188 ++seq;
1189 break;
1191 sk_eat_skb(sk, skb);
1192 if (!desc->count)
1193 break;
1195 tp->copied_seq = seq;
1197 tcp_rcv_space_adjust(sk);
1199 /* Clean up data we have read: This will do ACK frames. */
1200 if (copied)
1201 cleanup_rbuf(sk, copied);
1202 return copied;
1206 * This routine copies from a sock struct into the user buffer.
1208 * Technical note: in 2.3 we work on _locked_ socket, so that
1209 * tricks with *seq access order and skb->users are not required.
1210 * Probably, code can be easily improved even more.
1213 int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
1214 size_t len, int nonblock, int flags, int *addr_len)
1216 struct tcp_opt *tp = tcp_sk(sk);
1217 int copied = 0;
1218 u32 peek_seq;
1219 u32 *seq;
1220 unsigned long used;
1221 int err;
1222 int target; /* Read at least this many bytes */
1223 long timeo;
1224 struct task_struct *user_recv = NULL;
1226 lock_sock(sk);
1228 TCP_CHECK_TIMER(sk);
1230 err = -ENOTCONN;
1231 if (sk->sk_state == TCP_LISTEN)
1232 goto out;
1234 timeo = sock_rcvtimeo(sk, nonblock);
1236 /* Urgent data needs to be handled specially. */
1237 if (flags & MSG_OOB)
1238 goto recv_urg;
1240 seq = &tp->copied_seq;
1241 if (flags & MSG_PEEK) {
1242 peek_seq = tp->copied_seq;
1243 seq = &peek_seq;
1246 target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
1248 do {
1249 struct sk_buff *skb;
1250 u32 offset;
1252 /* Are we at urgent data? Stop if we have read anything or have SIGURG pending. */
1253 if (tp->urg_data && tp->urg_seq == *seq) {
1254 if (copied)
1255 break;
1256 if (signal_pending(current)) {
1257 copied = timeo ? sock_intr_errno(timeo) : -EAGAIN;
1258 break;
1262 /* Next get a buffer. */
1264 skb = skb_peek(&sk->sk_receive_queue);
1265 do {
1266 if (!skb)
1267 break;
1269 /* Now that we have two receive queues this
1270 * shouldn't happen.
1272 if (before(*seq, TCP_SKB_CB(skb)->seq)) {
1273 printk(KERN_INFO "recvmsg bug: copied %X "
1274 "seq %X\n", *seq, TCP_SKB_CB(skb)->seq);
1275 break;
1277 offset = *seq - TCP_SKB_CB(skb)->seq;
1278 if (skb->h.th->syn)
1279 offset--;
1280 if (offset < skb->len)
1281 goto found_ok_skb;
1282 if (skb->h.th->fin)
1283 goto found_fin_ok;
1284 BUG_TRAP(flags & MSG_PEEK);
1285 skb = skb->next;
1286 } while (skb != (struct sk_buff *)&sk->sk_receive_queue);
1288 /* Well, if we have backlog, try to process it now yet. */
1290 if (copied >= target && !sk->sk_backlog.tail)
1291 break;
1293 if (copied) {
1294 if (sk->sk_err ||
1295 sk->sk_state == TCP_CLOSE ||
1296 (sk->sk_shutdown & RCV_SHUTDOWN) ||
1297 !timeo ||
1298 signal_pending(current) ||
1299 (flags & MSG_PEEK))
1300 break;
1301 } else {
1302 if (sock_flag(sk, SOCK_DONE))
1303 break;
1305 if (sk->sk_err) {
1306 copied = sock_error(sk);
1307 break;
1310 if (sk->sk_shutdown & RCV_SHUTDOWN)
1311 break;
1313 if (sk->sk_state == TCP_CLOSE) {
1314 if (!sock_flag(sk, SOCK_DONE)) {
1315 /* This occurs when user tries to read
1316 * from never connected socket.
1318 copied = -ENOTCONN;
1319 break;
1321 break;
1324 if (!timeo) {
1325 copied = -EAGAIN;
1326 break;
1329 if (signal_pending(current)) {
1330 copied = sock_intr_errno(timeo);
1331 break;
1335 cleanup_rbuf(sk, copied);
1337 if (tp->ucopy.task == user_recv) {
1338 /* Install new reader */
1339 if (!user_recv && !(flags & (MSG_TRUNC | MSG_PEEK))) {
1340 user_recv = current;
1341 tp->ucopy.task = user_recv;
1342 tp->ucopy.iov = msg->msg_iov;
1345 tp->ucopy.len = len;
1347 BUG_TRAP(tp->copied_seq == tp->rcv_nxt ||
1348 (flags & (MSG_PEEK | MSG_TRUNC)));
1350 /* Ugly... If prequeue is not empty, we have to
1351 * process it before releasing socket, otherwise
1352 * order will be broken at second iteration.
1353 * More elegant solution is required!!!
1355 * Look: we have the following (pseudo)queues:
1357 * 1. packets in flight
1358 * 2. backlog
1359 * 3. prequeue
1360 * 4. receive_queue
1362 * Each queue can be processed only if the next ones
1363 * are empty. At this point we have empty receive_queue.
1364 * But prequeue _can_ be not empty after 2nd iteration,
1365 * when we jumped to start of loop because backlog
1366 * processing added something to receive_queue.
1367 * We cannot release_sock(), because backlog contains
1368 * packets arrived _after_ prequeued ones.
1370 * Shortly, algorithm is clear --- to process all
1371 * the queues in order. We could make it more directly,
1372 * requeueing packets from backlog to prequeue, if
1373 * is not empty. It is more elegant, but eats cycles,
1374 * unfortunately.
1376 if (skb_queue_len(&tp->ucopy.prequeue))
1377 goto do_prequeue;
1379 /* __ Set realtime policy in scheduler __ */
1382 if (copied >= target) {
1383 /* Do not sleep, just process backlog. */
1384 release_sock(sk);
1385 lock_sock(sk);
1386 } else
1387 sk_wait_data(sk, &timeo);
1389 if (user_recv) {
1390 int chunk;
1392 /* __ Restore normal policy in scheduler __ */
1394 if ((chunk = len - tp->ucopy.len) != 0) {
1395 NET_ADD_STATS_USER(LINUX_MIB_TCPDIRECTCOPYFROMBACKLOG, chunk);
1396 len -= chunk;
1397 copied += chunk;
1400 if (tp->rcv_nxt == tp->copied_seq &&
1401 skb_queue_len(&tp->ucopy.prequeue)) {
1402 do_prequeue:
1403 tcp_prequeue_process(sk);
1405 if ((chunk = len - tp->ucopy.len) != 0) {
1406 NET_ADD_STATS_USER(LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk);
1407 len -= chunk;
1408 copied += chunk;
1412 if ((flags & MSG_PEEK) && peek_seq != tp->copied_seq) {
1413 if (net_ratelimit())
1414 printk(KERN_DEBUG "TCP(%s:%d): Application bug, race in MSG_PEEK.\n",
1415 current->comm, current->pid);
1416 peek_seq = tp->copied_seq;
1418 continue;
1420 found_ok_skb:
1421 /* Ok so how much can we use? */
1422 used = skb->len - offset;
1423 if (len < used)
1424 used = len;
1426 /* Do we have urgent data here? */
1427 if (tp->urg_data) {
1428 u32 urg_offset = tp->urg_seq - *seq;
1429 if (urg_offset < used) {
1430 if (!urg_offset) {
1431 if (!sock_flag(sk, SOCK_URGINLINE)) {
1432 ++*seq;
1433 offset++;
1434 used--;
1435 if (!used)
1436 goto skip_copy;
1438 } else
1439 used = urg_offset;
1443 if (!(flags & MSG_TRUNC)) {
1444 err = skb_copy_datagram_iovec(skb, offset,
1445 msg->msg_iov, used);
1446 if (err) {
1447 /* Exception. Bailout! */
1448 if (!copied)
1449 copied = -EFAULT;
1450 break;
1454 *seq += used;
1455 copied += used;
1456 len -= used;
1458 tcp_rcv_space_adjust(sk);
1460 skip_copy:
1461 if (tp->urg_data && after(tp->copied_seq, tp->urg_seq)) {
1462 tp->urg_data = 0;
1463 tcp_fast_path_check(sk, tp);
1465 if (used + offset < skb->len)
1466 continue;
1468 if (skb->h.th->fin)
1469 goto found_fin_ok;
1470 if (!(flags & MSG_PEEK))
1471 sk_eat_skb(sk, skb);
1472 continue;
1474 found_fin_ok:
1475 /* Process the FIN. */
1476 ++*seq;
1477 if (!(flags & MSG_PEEK))
1478 sk_eat_skb(sk, skb);
1479 break;
1480 } while (len > 0);
1482 if (user_recv) {
1483 if (skb_queue_len(&tp->ucopy.prequeue)) {
1484 int chunk;
1486 tp->ucopy.len = copied > 0 ? len : 0;
1488 tcp_prequeue_process(sk);
1490 if (copied > 0 && (chunk = len - tp->ucopy.len) != 0) {
1491 NET_ADD_STATS_USER(LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk);
1492 len -= chunk;
1493 copied += chunk;
1497 tp->ucopy.task = NULL;
1498 tp->ucopy.len = 0;
1501 /* According to UNIX98, msg_name/msg_namelen are ignored
1502 * on connected socket. I was just happy when found this 8) --ANK
1505 /* Clean up data we have read: This will do ACK frames. */
1506 cleanup_rbuf(sk, copied);
1508 TCP_CHECK_TIMER(sk);
1509 release_sock(sk);
1510 return copied;
1512 out:
1513 TCP_CHECK_TIMER(sk);
1514 release_sock(sk);
1515 return err;
1517 recv_urg:
1518 err = tcp_recv_urg(sk, timeo, msg, len, flags, addr_len);
1519 goto out;
1523 * State processing on a close. This implements the state shift for
1524 * sending our FIN frame. Note that we only send a FIN for some
1525 * states. A shutdown() may have already sent the FIN, or we may be
1526 * closed.
1529 static unsigned char new_state[16] = {
1530 /* current state: new state: action: */
1531 /* (Invalid) */ TCP_CLOSE,
1532 /* TCP_ESTABLISHED */ TCP_FIN_WAIT1 | TCP_ACTION_FIN,
1533 /* TCP_SYN_SENT */ TCP_CLOSE,
1534 /* TCP_SYN_RECV */ TCP_FIN_WAIT1 | TCP_ACTION_FIN,
1535 /* TCP_FIN_WAIT1 */ TCP_FIN_WAIT1,
1536 /* TCP_FIN_WAIT2 */ TCP_FIN_WAIT2,
1537 /* TCP_TIME_WAIT */ TCP_CLOSE,
1538 /* TCP_CLOSE */ TCP_CLOSE,
1539 /* TCP_CLOSE_WAIT */ TCP_LAST_ACK | TCP_ACTION_FIN,
1540 /* TCP_LAST_ACK */ TCP_LAST_ACK,
1541 /* TCP_LISTEN */ TCP_CLOSE,
1542 /* TCP_CLOSING */ TCP_CLOSING,
1545 static int tcp_close_state(struct sock *sk)
1547 int next = (int)new_state[sk->sk_state];
1548 int ns = next & TCP_STATE_MASK;
1550 tcp_set_state(sk, ns);
1552 return next & TCP_ACTION_FIN;
1556 * Shutdown the sending side of a connection. Much like close except
1557 * that we don't receive shut down or set_sock_flag(sk, SOCK_DEAD).
1560 void tcp_shutdown(struct sock *sk, int how)
1562 /* We need to grab some memory, and put together a FIN,
1563 * and then put it into the queue to be sent.
1564 * Tim MacKenzie(tym@dibbler.cs.monash.edu.au) 4 Dec '92.
1566 if (!(how & SEND_SHUTDOWN))
1567 return;
1569 /* If we've already sent a FIN, or it's a closed state, skip this. */
1570 if ((1 << sk->sk_state) &
1571 (TCPF_ESTABLISHED | TCPF_SYN_SENT |
1572 TCPF_SYN_RECV | TCPF_CLOSE_WAIT)) {
1573 /* Clear out any half completed packets. FIN if needed. */
1574 if (tcp_close_state(sk))
1575 tcp_send_fin(sk);
1580 * At this point, there should be no process reference to this
1581 * socket, and thus no user references at all. Therefore we
1582 * can assume the socket waitqueue is inactive and nobody will
1583 * try to jump onto it.
1585 void tcp_destroy_sock(struct sock *sk)
1587 BUG_TRAP(sk->sk_state == TCP_CLOSE);
1588 BUG_TRAP(sock_flag(sk, SOCK_DEAD));
1590 /* It cannot be in hash table! */
1591 BUG_TRAP(sk_unhashed(sk));
1593 /* If it has not 0 inet_sk(sk)->num, it must be bound */
1594 BUG_TRAP(!inet_sk(sk)->num || tcp_sk(sk)->bind_hash);
1596 sk->sk_prot->destroy(sk);
1598 sk_stream_kill_queues(sk);
1600 xfrm_sk_free_policy(sk);
1602 #ifdef INET_REFCNT_DEBUG
1603 if (atomic_read(&sk->sk_refcnt) != 1) {
1604 printk(KERN_DEBUG "Destruction TCP %p delayed, c=%d\n",
1605 sk, atomic_read(&sk->sk_refcnt));
1607 #endif
1609 atomic_dec(&tcp_orphan_count);
1610 sock_put(sk);
1613 void tcp_close(struct sock *sk, long timeout)
1615 struct sk_buff *skb;
1616 int data_was_unread = 0;
1618 lock_sock(sk);
1619 sk->sk_shutdown = SHUTDOWN_MASK;
1621 if (sk->sk_state == TCP_LISTEN) {
1622 tcp_set_state(sk, TCP_CLOSE);
1624 /* Special case. */
1625 tcp_listen_stop(sk);
1627 goto adjudge_to_death;
1630 /* We need to flush the recv. buffs. We do this only on the
1631 * descriptor close, not protocol-sourced closes, because the
1632 * reader process may not have drained the data yet!
1634 while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
1635 u32 len = TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq -
1636 skb->h.th->fin;
1637 data_was_unread += len;
1638 __kfree_skb(skb);
1641 sk_stream_mem_reclaim(sk);
1643 /* As outlined in draft-ietf-tcpimpl-prob-03.txt, section
1644 * 3.10, we send a RST here because data was lost. To
1645 * witness the awful effects of the old behavior of always
1646 * doing a FIN, run an older 2.1.x kernel or 2.0.x, start
1647 * a bulk GET in an FTP client, suspend the process, wait
1648 * for the client to advertise a zero window, then kill -9
1649 * the FTP client, wheee... Note: timeout is always zero
1650 * in such a case.
1652 if (data_was_unread) {
1653 /* Unread data was tossed, zap the connection. */
1654 NET_INC_STATS_USER(LINUX_MIB_TCPABORTONCLOSE);
1655 tcp_set_state(sk, TCP_CLOSE);
1656 tcp_send_active_reset(sk, GFP_KERNEL);
1657 } else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) {
1658 /* Check zero linger _after_ checking for unread data. */
1659 sk->sk_prot->disconnect(sk, 0);
1660 NET_INC_STATS_USER(LINUX_MIB_TCPABORTONDATA);
1661 } else if (tcp_close_state(sk)) {
1662 /* We FIN if the application ate all the data before
1663 * zapping the connection.
1666 /* RED-PEN. Formally speaking, we have broken TCP state
1667 * machine. State transitions:
1669 * TCP_ESTABLISHED -> TCP_FIN_WAIT1
1670 * TCP_SYN_RECV -> TCP_FIN_WAIT1 (forget it, it's impossible)
1671 * TCP_CLOSE_WAIT -> TCP_LAST_ACK
1673 * are legal only when FIN has been sent (i.e. in window),
1674 * rather than queued out of window. Purists blame.
1676 * F.e. "RFC state" is ESTABLISHED,
1677 * if Linux state is FIN-WAIT-1, but FIN is still not sent.
1679 * The visible declinations are that sometimes
1680 * we enter time-wait state, when it is not required really
1681 * (harmless), do not send active resets, when they are
1682 * required by specs (TCP_ESTABLISHED, TCP_CLOSE_WAIT, when
1683 * they look as CLOSING or LAST_ACK for Linux)
1684 * Probably, I missed some more holelets.
1685 * --ANK
1687 tcp_send_fin(sk);
1690 sk_stream_wait_close(sk, timeout);
1692 adjudge_to_death:
1693 /* It is the last release_sock in its life. It will remove backlog. */
1694 release_sock(sk);
1697 /* Now socket is owned by kernel and we acquire BH lock
1698 to finish close. No need to check for user refs.
1700 local_bh_disable();
1701 bh_lock_sock(sk);
1702 BUG_TRAP(!sock_owned_by_user(sk));
1704 sock_hold(sk);
1705 sock_orphan(sk);
1707 /* This is a (useful) BSD violating of the RFC. There is a
1708 * problem with TCP as specified in that the other end could
1709 * keep a socket open forever with no application left this end.
1710 * We use a 3 minute timeout (about the same as BSD) then kill
1711 * our end. If they send after that then tough - BUT: long enough
1712 * that we won't make the old 4*rto = almost no time - whoops
1713 * reset mistake.
1715 * Nope, it was not mistake. It is really desired behaviour
1716 * f.e. on http servers, when such sockets are useless, but
1717 * consume significant resources. Let's do it with special
1718 * linger2 option. --ANK
1721 if (sk->sk_state == TCP_FIN_WAIT2) {
1722 struct tcp_opt *tp = tcp_sk(sk);
1723 if (tp->linger2 < 0) {
1724 tcp_set_state(sk, TCP_CLOSE);
1725 tcp_send_active_reset(sk, GFP_ATOMIC);
1726 NET_INC_STATS_BH(LINUX_MIB_TCPABORTONLINGER);
1727 } else {
1728 int tmo = tcp_fin_time(tp);
1730 if (tmo > TCP_TIMEWAIT_LEN) {
1731 tcp_reset_keepalive_timer(sk, tcp_fin_time(tp));
1732 } else {
1733 atomic_inc(&tcp_orphan_count);
1734 tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
1735 goto out;
1739 if (sk->sk_state != TCP_CLOSE) {
1740 sk_stream_mem_reclaim(sk);
1741 if (atomic_read(&tcp_orphan_count) > sysctl_tcp_max_orphans ||
1742 (sk->sk_wmem_queued > SOCK_MIN_SNDBUF &&
1743 atomic_read(&tcp_memory_allocated) > sysctl_tcp_mem[2])) {
1744 if (net_ratelimit())
1745 printk(KERN_INFO "TCP: too many of orphaned "
1746 "sockets\n");
1747 tcp_set_state(sk, TCP_CLOSE);
1748 tcp_send_active_reset(sk, GFP_ATOMIC);
1749 NET_INC_STATS_BH(LINUX_MIB_TCPABORTONMEMORY);
1752 atomic_inc(&tcp_orphan_count);
1754 if (sk->sk_state == TCP_CLOSE)
1755 tcp_destroy_sock(sk);
1756 /* Otherwise, socket is reprieved until protocol close. */
1758 out:
1759 bh_unlock_sock(sk);
1760 local_bh_enable();
1761 sock_put(sk);
1764 /* These states need RST on ABORT according to RFC793 */
1766 static inline int tcp_need_reset(int state)
1768 return (1 << state) &
1769 (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT | TCPF_FIN_WAIT1 |
1770 TCPF_FIN_WAIT2 | TCPF_SYN_RECV);
1773 int tcp_disconnect(struct sock *sk, int flags)
1775 struct inet_opt *inet = inet_sk(sk);
1776 struct tcp_opt *tp = tcp_sk(sk);
1777 int err = 0;
1778 int old_state = sk->sk_state;
1780 if (old_state != TCP_CLOSE)
1781 tcp_set_state(sk, TCP_CLOSE);
1783 /* ABORT function of RFC793 */
1784 if (old_state == TCP_LISTEN) {
1785 tcp_listen_stop(sk);
1786 } else if (tcp_need_reset(old_state) ||
1787 (tp->snd_nxt != tp->write_seq &&
1788 (1 << old_state) & (TCPF_CLOSING | TCPF_LAST_ACK))) {
1789 /* The last check adjusts for discrepance of Linux wrt. RFC
1790 * states
1792 tcp_send_active_reset(sk, gfp_any());
1793 sk->sk_err = ECONNRESET;
1794 } else if (old_state == TCP_SYN_SENT)
1795 sk->sk_err = ECONNRESET;
1797 tcp_clear_xmit_timers(sk);
1798 __skb_queue_purge(&sk->sk_receive_queue);
1799 sk_stream_writequeue_purge(sk);
1800 __skb_queue_purge(&tp->out_of_order_queue);
1802 inet->dport = 0;
1804 if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
1805 inet_reset_saddr(sk);
1807 sk->sk_shutdown = 0;
1808 sock_reset_flag(sk, SOCK_DONE);
1809 tp->srtt = 0;
1810 if ((tp->write_seq += tp->max_window + 2) == 0)
1811 tp->write_seq = 1;
1812 tp->backoff = 0;
1813 tp->snd_cwnd = 2;
1814 tp->probes_out = 0;
1815 tcp_set_pcount(&tp->packets_out, 0);
1816 tp->snd_ssthresh = 0x7fffffff;
1817 tp->snd_cwnd_cnt = 0;
1818 tcp_set_ca_state(tp, TCP_CA_Open);
1819 tcp_clear_retrans(tp);
1820 tcp_delack_init(tp);
1821 sk->sk_send_head = NULL;
1822 tp->saw_tstamp = 0;
1823 tcp_sack_reset(tp);
1824 __sk_dst_reset(sk);
1826 BUG_TRAP(!inet->num || tp->bind_hash);
1828 sk->sk_error_report(sk);
1829 return err;
1833 * Wait for an incoming connection, avoid race
1834 * conditions. This must be called with the socket locked.
1836 static int wait_for_connect(struct sock *sk, long timeo)
1838 struct tcp_opt *tp = tcp_sk(sk);
1839 DEFINE_WAIT(wait);
1840 int err;
1843 * True wake-one mechanism for incoming connections: only
1844 * one process gets woken up, not the 'whole herd'.
1845 * Since we do not 'race & poll' for established sockets
1846 * anymore, the common case will execute the loop only once.
1848 * Subtle issue: "add_wait_queue_exclusive()" will be added
1849 * after any current non-exclusive waiters, and we know that
1850 * it will always _stay_ after any new non-exclusive waiters
1851 * because all non-exclusive waiters are added at the
1852 * beginning of the wait-queue. As such, it's ok to "drop"
1853 * our exclusiveness temporarily when we get woken up without
1854 * having to remove and re-insert us on the wait queue.
1856 for (;;) {
1857 prepare_to_wait_exclusive(sk->sk_sleep, &wait,
1858 TASK_INTERRUPTIBLE);
1859 release_sock(sk);
1860 if (!tp->accept_queue)
1861 timeo = schedule_timeout(timeo);
1862 lock_sock(sk);
1863 err = 0;
1864 if (tp->accept_queue)
1865 break;
1866 err = -EINVAL;
1867 if (sk->sk_state != TCP_LISTEN)
1868 break;
1869 err = sock_intr_errno(timeo);
1870 if (signal_pending(current))
1871 break;
1872 err = -EAGAIN;
1873 if (!timeo)
1874 break;
1876 finish_wait(sk->sk_sleep, &wait);
1877 return err;
1881 * This will accept the next outstanding connection.
1884 struct sock *tcp_accept(struct sock *sk, int flags, int *err)
1886 struct tcp_opt *tp = tcp_sk(sk);
1887 struct open_request *req;
1888 struct sock *newsk;
1889 int error;
1891 lock_sock(sk);
1893 /* We need to make sure that this socket is listening,
1894 * and that it has something pending.
1896 error = -EINVAL;
1897 if (sk->sk_state != TCP_LISTEN)
1898 goto out;
1900 /* Find already established connection */
1901 if (!tp->accept_queue) {
1902 long timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
1904 /* If this is a non blocking socket don't sleep */
1905 error = -EAGAIN;
1906 if (!timeo)
1907 goto out;
1909 error = wait_for_connect(sk, timeo);
1910 if (error)
1911 goto out;
1914 req = tp->accept_queue;
1915 if ((tp->accept_queue = req->dl_next) == NULL)
1916 tp->accept_queue_tail = NULL;
1918 newsk = req->sk;
1919 sk_acceptq_removed(sk);
1920 tcp_openreq_fastfree(req);
1921 BUG_TRAP(newsk->sk_state != TCP_SYN_RECV);
1922 release_sock(sk);
1923 return newsk;
1925 out:
1926 release_sock(sk);
1927 *err = error;
1928 return NULL;
1932 * Socket option code for TCP.
1934 int tcp_setsockopt(struct sock *sk, int level, int optname, char __user *optval,
1935 int optlen)
1937 struct tcp_opt *tp = tcp_sk(sk);
1938 int val;
1939 int err = 0;
1941 if (level != SOL_TCP)
1942 return tp->af_specific->setsockopt(sk, level, optname,
1943 optval, optlen);
1945 if (optlen < sizeof(int))
1946 return -EINVAL;
1948 if (get_user(val, (int __user *)optval))
1949 return -EFAULT;
1951 lock_sock(sk);
1953 switch (optname) {
1954 case TCP_MAXSEG:
1955 /* Values greater than interface MTU won't take effect. However
1956 * at the point when this call is done we typically don't yet
1957 * know which interface is going to be used */
1958 if (val < 8 || val > MAX_TCP_WINDOW) {
1959 err = -EINVAL;
1960 break;
1962 tp->user_mss = val;
1963 break;
1965 case TCP_NODELAY:
1966 if (val) {
1967 /* TCP_NODELAY is weaker than TCP_CORK, so that
1968 * this option on corked socket is remembered, but
1969 * it is not activated until cork is cleared.
1971 * However, when TCP_NODELAY is set we make
1972 * an explicit push, which overrides even TCP_CORK
1973 * for currently queued segments.
1975 tp->nonagle |= TCP_NAGLE_OFF|TCP_NAGLE_PUSH;
1976 tcp_push_pending_frames(sk, tp);
1977 } else {
1978 tp->nonagle &= ~TCP_NAGLE_OFF;
1980 break;
1982 case TCP_CORK:
1983 /* When set indicates to always queue non-full frames.
1984 * Later the user clears this option and we transmit
1985 * any pending partial frames in the queue. This is
1986 * meant to be used alongside sendfile() to get properly
1987 * filled frames when the user (for example) must write
1988 * out headers with a write() call first and then use
1989 * sendfile to send out the data parts.
1991 * TCP_CORK can be set together with TCP_NODELAY and it is
1992 * stronger than TCP_NODELAY.
1994 if (val) {
1995 tp->nonagle |= TCP_NAGLE_CORK;
1996 } else {
1997 tp->nonagle &= ~TCP_NAGLE_CORK;
1998 if (tp->nonagle&TCP_NAGLE_OFF)
1999 tp->nonagle |= TCP_NAGLE_PUSH;
2000 tcp_push_pending_frames(sk, tp);
2002 break;
2004 case TCP_KEEPIDLE:
2005 if (val < 1 || val > MAX_TCP_KEEPIDLE)
2006 err = -EINVAL;
2007 else {
2008 tp->keepalive_time = val * HZ;
2009 if (sock_flag(sk, SOCK_KEEPOPEN) &&
2010 !((1 << sk->sk_state) &
2011 (TCPF_CLOSE | TCPF_LISTEN))) {
2012 __u32 elapsed = tcp_time_stamp - tp->rcv_tstamp;
2013 if (tp->keepalive_time > elapsed)
2014 elapsed = tp->keepalive_time - elapsed;
2015 else
2016 elapsed = 0;
2017 tcp_reset_keepalive_timer(sk, elapsed);
2020 break;
2021 case TCP_KEEPINTVL:
2022 if (val < 1 || val > MAX_TCP_KEEPINTVL)
2023 err = -EINVAL;
2024 else
2025 tp->keepalive_intvl = val * HZ;
2026 break;
2027 case TCP_KEEPCNT:
2028 if (val < 1 || val > MAX_TCP_KEEPCNT)
2029 err = -EINVAL;
2030 else
2031 tp->keepalive_probes = val;
2032 break;
2033 case TCP_SYNCNT:
2034 if (val < 1 || val > MAX_TCP_SYNCNT)
2035 err = -EINVAL;
2036 else
2037 tp->syn_retries = val;
2038 break;
2040 case TCP_LINGER2:
2041 if (val < 0)
2042 tp->linger2 = -1;
2043 else if (val > sysctl_tcp_fin_timeout / HZ)
2044 tp->linger2 = 0;
2045 else
2046 tp->linger2 = val * HZ;
2047 break;
2049 case TCP_DEFER_ACCEPT:
2050 tp->defer_accept = 0;
2051 if (val > 0) {
2052 /* Translate value in seconds to number of
2053 * retransmits */
2054 while (tp->defer_accept < 32 &&
2055 val > ((TCP_TIMEOUT_INIT / HZ) <<
2056 tp->defer_accept))
2057 tp->defer_accept++;
2058 tp->defer_accept++;
2060 break;
2062 case TCP_WINDOW_CLAMP:
2063 if (!val) {
2064 if (sk->sk_state != TCP_CLOSE) {
2065 err = -EINVAL;
2066 break;
2068 tp->window_clamp = 0;
2069 } else
2070 tp->window_clamp = val < SOCK_MIN_RCVBUF / 2 ?
2071 SOCK_MIN_RCVBUF / 2 : val;
2072 break;
2074 case TCP_QUICKACK:
2075 if (!val) {
2076 tp->ack.pingpong = 1;
2077 } else {
2078 tp->ack.pingpong = 0;
2079 if ((1 << sk->sk_state) &
2080 (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT) &&
2081 tcp_ack_scheduled(tp)) {
2082 tp->ack.pending |= TCP_ACK_PUSHED;
2083 cleanup_rbuf(sk, 1);
2084 if (!(val & 1))
2085 tp->ack.pingpong = 1;
2088 break;
2090 default:
2091 err = -ENOPROTOOPT;
2092 break;
2094 release_sock(sk);
2095 return err;
2098 int tcp_getsockopt(struct sock *sk, int level, int optname, char __user *optval,
2099 int __user *optlen)
2101 struct tcp_opt *tp = tcp_sk(sk);
2102 int val, len;
2104 if (level != SOL_TCP)
2105 return tp->af_specific->getsockopt(sk, level, optname,
2106 optval, optlen);
2108 if (get_user(len, optlen))
2109 return -EFAULT;
2111 len = min_t(unsigned int, len, sizeof(int));
2113 if (len < 0)
2114 return -EINVAL;
2116 switch (optname) {
2117 case TCP_MAXSEG:
2118 val = tp->mss_cache_std;
2119 if (!val && ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)))
2120 val = tp->user_mss;
2121 break;
2122 case TCP_NODELAY:
2123 val = !!(tp->nonagle&TCP_NAGLE_OFF);
2124 break;
2125 case TCP_CORK:
2126 val = !!(tp->nonagle&TCP_NAGLE_CORK);
2127 break;
2128 case TCP_KEEPIDLE:
2129 val = (tp->keepalive_time ? : sysctl_tcp_keepalive_time) / HZ;
2130 break;
2131 case TCP_KEEPINTVL:
2132 val = (tp->keepalive_intvl ? : sysctl_tcp_keepalive_intvl) / HZ;
2133 break;
2134 case TCP_KEEPCNT:
2135 val = tp->keepalive_probes ? : sysctl_tcp_keepalive_probes;
2136 break;
2137 case TCP_SYNCNT:
2138 val = tp->syn_retries ? : sysctl_tcp_syn_retries;
2139 break;
2140 case TCP_LINGER2:
2141 val = tp->linger2;
2142 if (val >= 0)
2143 val = (val ? : sysctl_tcp_fin_timeout) / HZ;
2144 break;
2145 case TCP_DEFER_ACCEPT:
2146 val = !tp->defer_accept ? 0 : ((TCP_TIMEOUT_INIT / HZ) <<
2147 (tp->defer_accept - 1));
2148 break;
2149 case TCP_WINDOW_CLAMP:
2150 val = tp->window_clamp;
2151 break;
2152 case TCP_INFO: {
2153 struct tcp_info info;
2155 if (get_user(len, optlen))
2156 return -EFAULT;
2158 tcp_get_info(sk, &info);
2160 len = min_t(unsigned int, len, sizeof(info));
2161 if (put_user(len, optlen))
2162 return -EFAULT;
2163 if (copy_to_user(optval, &info, len))
2164 return -EFAULT;
2165 return 0;
2167 case TCP_QUICKACK:
2168 val = !tp->ack.pingpong;
2169 break;
2170 default:
2171 return -ENOPROTOOPT;
2174 if (put_user(len, optlen))
2175 return -EFAULT;
2176 if (copy_to_user(optval, &val, len))
2177 return -EFAULT;
2178 return 0;
2182 extern void __skb_cb_too_small_for_tcp(int, int);
2183 extern void tcpdiag_init(void);
2185 static __initdata unsigned long thash_entries;
2186 static int __init set_thash_entries(char *str)
2188 if (!str)
2189 return 0;
2190 thash_entries = simple_strtoul(str, &str, 0);
2191 return 1;
2193 __setup("thash_entries=", set_thash_entries);
2195 void __init tcp_init(void)
2197 struct sk_buff *skb = NULL;
2198 unsigned long goal;
2199 int order, i;
2201 if (sizeof(struct tcp_skb_cb) > sizeof(skb->cb))
2202 __skb_cb_too_small_for_tcp(sizeof(struct tcp_skb_cb),
2203 sizeof(skb->cb));
2205 tcp_openreq_cachep = kmem_cache_create("tcp_open_request",
2206 sizeof(struct open_request),
2207 0, SLAB_HWCACHE_ALIGN,
2208 NULL, NULL);
2209 if (!tcp_openreq_cachep)
2210 panic("tcp_init: Cannot alloc open_request cache.");
2212 tcp_bucket_cachep = kmem_cache_create("tcp_bind_bucket",
2213 sizeof(struct tcp_bind_bucket),
2214 0, SLAB_HWCACHE_ALIGN,
2215 NULL, NULL);
2216 if (!tcp_bucket_cachep)
2217 panic("tcp_init: Cannot alloc tcp_bind_bucket cache.");
2219 tcp_timewait_cachep = kmem_cache_create("tcp_tw_bucket",
2220 sizeof(struct tcp_tw_bucket),
2221 0, SLAB_HWCACHE_ALIGN,
2222 NULL, NULL);
2223 if (!tcp_timewait_cachep)
2224 panic("tcp_init: Cannot alloc tcp_tw_bucket cache.");
2226 /* Size and allocate the main established and bind bucket
2227 * hash tables.
2229 * The methodology is similar to that of the buffer cache.
2231 if (num_physpages >= (128 * 1024))
2232 goal = num_physpages >> (21 - PAGE_SHIFT);
2233 else
2234 goal = num_physpages >> (23 - PAGE_SHIFT);
2236 if (thash_entries)
2237 goal = (thash_entries * sizeof(struct tcp_ehash_bucket)) >> PAGE_SHIFT;
2238 for (order = 0; (1UL << order) < goal; order++)
2240 do {
2241 tcp_ehash_size = (1UL << order) * PAGE_SIZE /
2242 sizeof(struct tcp_ehash_bucket);
2243 tcp_ehash_size >>= 1;
2244 while (tcp_ehash_size & (tcp_ehash_size - 1))
2245 tcp_ehash_size--;
2246 tcp_ehash = (struct tcp_ehash_bucket *)
2247 __get_free_pages(GFP_ATOMIC, order);
2248 } while (!tcp_ehash && --order > 0);
2250 if (!tcp_ehash)
2251 panic("Failed to allocate TCP established hash table\n");
2252 for (i = 0; i < (tcp_ehash_size << 1); i++) {
2253 tcp_ehash[i].lock = RW_LOCK_UNLOCKED;
2254 INIT_HLIST_HEAD(&tcp_ehash[i].chain);
2257 do {
2258 tcp_bhash_size = (1UL << order) * PAGE_SIZE /
2259 sizeof(struct tcp_bind_hashbucket);
2260 if ((tcp_bhash_size > (64 * 1024)) && order > 0)
2261 continue;
2262 tcp_bhash = (struct tcp_bind_hashbucket *)
2263 __get_free_pages(GFP_ATOMIC, order);
2264 } while (!tcp_bhash && --order >= 0);
2266 if (!tcp_bhash)
2267 panic("Failed to allocate TCP bind hash table\n");
2268 for (i = 0; i < tcp_bhash_size; i++) {
2269 tcp_bhash[i].lock = SPIN_LOCK_UNLOCKED;
2270 INIT_HLIST_HEAD(&tcp_bhash[i].chain);
2273 /* Try to be a bit smarter and adjust defaults depending
2274 * on available memory.
2276 if (order > 4) {
2277 sysctl_local_port_range[0] = 32768;
2278 sysctl_local_port_range[1] = 61000;
2279 sysctl_tcp_max_tw_buckets = 180000;
2280 sysctl_tcp_max_orphans = 4096 << (order - 4);
2281 sysctl_max_syn_backlog = 1024;
2282 } else if (order < 3) {
2283 sysctl_local_port_range[0] = 1024 * (3 - order);
2284 sysctl_tcp_max_tw_buckets >>= (3 - order);
2285 sysctl_tcp_max_orphans >>= (3 - order);
2286 sysctl_max_syn_backlog = 128;
2288 tcp_port_rover = sysctl_local_port_range[0] - 1;
2290 sysctl_tcp_mem[0] = 768 << order;
2291 sysctl_tcp_mem[1] = 1024 << order;
2292 sysctl_tcp_mem[2] = 1536 << order;
2294 if (order < 3) {
2295 sysctl_tcp_wmem[2] = 64 * 1024;
2296 sysctl_tcp_rmem[0] = PAGE_SIZE;
2297 sysctl_tcp_rmem[1] = 43689;
2298 sysctl_tcp_rmem[2] = 2 * 43689;
2301 printk(KERN_INFO "TCP: Hash tables configured "
2302 "(established %d bind %d)\n",
2303 tcp_ehash_size << 1, tcp_bhash_size);
2305 tcpdiag_init();
2308 EXPORT_SYMBOL(tcp_accept);
2309 EXPORT_SYMBOL(tcp_close);
2310 EXPORT_SYMBOL(tcp_close_state);
2311 EXPORT_SYMBOL(tcp_destroy_sock);
2312 EXPORT_SYMBOL(tcp_disconnect);
2313 EXPORT_SYMBOL(tcp_getsockopt);
2314 EXPORT_SYMBOL(tcp_ioctl);
2315 EXPORT_SYMBOL(tcp_openreq_cachep);
2316 EXPORT_SYMBOL(tcp_poll);
2317 EXPORT_SYMBOL(tcp_read_sock);
2318 EXPORT_SYMBOL(tcp_recvmsg);
2319 EXPORT_SYMBOL(tcp_sendmsg);
2320 EXPORT_SYMBOL(tcp_sendpage);
2321 EXPORT_SYMBOL(tcp_setsockopt);
2322 EXPORT_SYMBOL(tcp_shutdown);
2323 EXPORT_SYMBOL(tcp_statistics);
2324 EXPORT_SYMBOL(tcp_timewait_cachep);