[TCP]: Update references in two old comments
net/ipv4/tcp.c
1 /*
2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
6 * Implementation of the Transmission Control Protocol(TCP).
8 * Version: $Id: tcp.c,v 1.216 2002/02/01 22:01:04 davem Exp $
10 * Authors: Ross Biro
11 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
12 * Mark Evans, <evansmp@uhura.aston.ac.uk>
13 * Corey Minyard <wf-rch!minyard@relay.EU.net>
14 * Florian La Roche, <flla@stud.uni-sb.de>
15 * Charles Hedrick, <hedrick@klinzhai.rutgers.edu>
16 * Linus Torvalds, <torvalds@cs.helsinki.fi>
17 * Alan Cox, <gw4pts@gw4pts.ampr.org>
18 * Matthew Dillon, <dillon@apollo.west.oic.com>
19 * Arnt Gulbrandsen, <agulbra@nvg.unit.no>
20 * Jorge Cwik, <jorge@laser.satlink.net>
22 * Fixes:
23 * Alan Cox : Numerous verify_area() calls
24 * Alan Cox : Set the ACK bit on a reset
25 * Alan Cox : Stopped it crashing if it closed while
26 * sk->inuse=1 and was trying to connect
27 * (tcp_err()).
28 * Alan Cox : All icmp error handling was broken
29 * pointers passed were wrong and the
30 * socket was looked up backwards. Nobody
31 * tested any icmp error code obviously.
32 * Alan Cox : tcp_err() now handled properly. It
33 * wakes people on errors. poll
34 * behaves and the icmp error race
35 * has gone by moving it into sock.c
36 * Alan Cox : tcp_send_reset() fixed to work for
37 * everything not just packets for
38 * unknown sockets.
39 * Alan Cox : tcp option processing.
40 * Alan Cox : Reset tweaked (still not 100%) [Had
41 * syn rule wrong]
42 * Herp Rosmanith : More reset fixes
43 * Alan Cox : No longer acks invalid rst frames.
44 * Acking any kind of RST is right out.
45 * Alan Cox : Sets an ignore me flag on an rst
46 * receive otherwise odd bits of prattle
47 * escape still
48 * Alan Cox : Fixed another acking RST frame bug.
49 * Should stop LAN workplace lockups.
50 * Alan Cox : Some tidyups using the new skb list
51 * facilities
52 * Alan Cox : sk->keepopen now seems to work
53 * Alan Cox : Pulls options out correctly on accepts
54 * Alan Cox : Fixed assorted sk->rqueue->next errors
55 * Alan Cox : PSH doesn't end a TCP read. Switched a
56 * bit to skb ops.
57 * Alan Cox : Tidied tcp_data to avoid a potential
58 * nasty.
59 * Alan Cox : Added some better commenting, as the
60 * tcp is hard to follow
61 * Alan Cox : Removed incorrect check for 20 * psh
62 * Michael O'Reilly : ack < copied bug fix.
63 * Johannes Stille : Misc tcp fixes (not all in yet).
64 * Alan Cox : FIN with no memory -> CRASH
65 * Alan Cox : Added socket option proto entries.
66 * Also added awareness of them to accept.
67 * Alan Cox : Added TCP options (SOL_TCP)
68 * Alan Cox : Switched wakeup calls to callbacks,
69 * so the kernel can layer network
70 * sockets.
71 * Alan Cox : Use ip_tos/ip_ttl settings.
72 * Alan Cox : Handle FIN (more) properly (we hope).
73 * Alan Cox : RST frames sent on unsynchronised
74 * state ack error.
75 * Alan Cox : Put in missing check for SYN bit.
76 * Alan Cox : Added tcp_select_window() aka NET2E
77 * window non shrink trick.
78 * Alan Cox : Added a couple of small NET2E timer
79 * fixes
80 * Charles Hedrick : TCP fixes
81 * Toomas Tamm : TCP window fixes
82 * Alan Cox : Small URG fix to rlogin ^C ack fight
83 * Charles Hedrick : Rewrote most of it to actually work
84 * Linus : Rewrote tcp_read() and URG handling
85 * completely
86 * Gerhard Koerting: Fixed some missing timer handling
87 * Matthew Dillon : Reworked TCP machine states as per RFC
88 * Gerhard Koerting: PC/TCP workarounds
89 * Adam Caldwell : Assorted timer/timing errors
90 * Matthew Dillon : Fixed another RST bug
91 * Alan Cox : Move to kernel side addressing changes.
92 * Alan Cox : Beginning work on TCP fastpathing
93 * (not yet usable)
94 * Arnt Gulbrandsen: Turbocharged tcp_check() routine.
95 * Alan Cox : TCP fast path debugging
96 * Alan Cox : Window clamping
97 * Michael Riepe : Bug in tcp_check()
98 * Matt Dillon : More TCP improvements and RST bug fixes
99 * Matt Dillon : Yet more small nasties removed from the
100 * TCP code (Be very nice to this man if
101 * tcp finally works 100%) 8)
102 * Alan Cox : BSD accept semantics.
103 * Alan Cox : Reset on closedown bug.
104 * Peter De Schrijver : ENOTCONN check missing in tcp_sendto().
105 * Michael Pall : Handle poll() after URG properly in
106 * all cases.
107 * Michael Pall : Undo the last fix in tcp_read_urg()
108 * (multi URG PUSH broke rlogin).
109 * Michael Pall : Fix the multi URG PUSH problem in
110 * tcp_readable(), poll() after URG
111 * works now.
112 * Michael Pall : recv(...,MSG_OOB) never blocks in the
113 * BSD api.
114 * Alan Cox : Changed the semantics of sk->socket to
115 * fix a race and a signal problem with
116 * accept() and async I/O.
117 * Alan Cox : Relaxed the rules on tcp_sendto().
118 * Yury Shevchuk : Really fixed accept() blocking problem.
119 * Craig I. Hagan : Allow for BSD compatible TIME_WAIT for
120 * clients/servers which listen in on
121 * fixed ports.
122 * Alan Cox : Cleaned the above up and shrank it to
123 * a sensible code size.
124 * Alan Cox : Self connect lockup fix.
125 * Alan Cox : No connect to multicast.
126 * Ross Biro : Close unaccepted children on master
127 * socket close.
128 * Alan Cox : Reset tracing code.
129 * Alan Cox : Spurious resets on shutdown.
130 * Alan Cox : Giant 15 minute/60 second timer error
131 * Alan Cox : Small whoops in polling before an
132 * accept.
133 * Alan Cox : Kept the state trace facility since
134 * it's handy for debugging.
135 * Alan Cox : More reset handler fixes.
136 * Alan Cox : Started rewriting the code based on
137 * the RFC's for other useful protocol
138 * references see: Comer, KA9Q NOS, and
139 * for a reference on the difference
140 * between specifications and how BSD
141 * works see the 4.4lite source.
142 * A.N.Kuznetsov : Don't time wait on completion of tidy
143 * close.
144 * Linus Torvalds : Fin/Shutdown & copied_seq changes.
145 * Linus Torvalds : Fixed BSD port reuse to work first syn
146 * Alan Cox : Reimplemented timers as per the RFC
147 * and using multiple timers for sanity.
148 * Alan Cox : Small bug fixes, and a lot of new
149 * comments.
150 * Alan Cox : Fixed dual reader crash by locking
151 * the buffers (much like datagram.c)
152 * Alan Cox : Fixed stuck sockets in probe. A probe
153 * now gets fed up of retrying without
154 * (even a no space) answer.
155 * Alan Cox : Extracted closing code better
156 * Alan Cox : Fixed the closing state machine to
157 * resemble the RFC.
158 * Alan Cox : More 'per spec' fixes.
159 * Jorge Cwik : Even faster checksumming.
160 * Alan Cox : tcp_data() doesn't ack illegal PSH
161 * only frames. At least one pc tcp stack
162 * generates them.
163 * Alan Cox : Cache last socket.
164 * Alan Cox : Per route irtt.
165 * Matt Day : poll()->select() match BSD precisely on error
166 * Alan Cox : New buffers
167 * Marc Tamsky : Various sk->prot->retransmits and
168 * sk->retransmits misupdating fixed.
169 * Fixed tcp_write_timeout: stuck close,
170 * and TCP syn retries gets used now.
171 * Mark Yarvis : In tcp_read_wakeup(), don't send an
172 * ack if state is TCP_CLOSED.
173 * Alan Cox : Look up device on a retransmit - routes may
174 * change. Doesn't yet cope with MSS shrink right
175 * but it's a start!
176 * Marc Tamsky : Closing in closing fixes.
177 * Mike Shaver : RFC1122 verifications.
178 * Alan Cox : rcv_saddr errors.
179 * Alan Cox : Block double connect().
180 * Alan Cox : Small hooks for enSKIP.
181 * Alexey Kuznetsov: Path MTU discovery.
182 * Alan Cox : Support soft errors.
183 * Alan Cox : Fix MTU discovery pathological case
184 * when the remote claims no mtu!
185 * Marc Tamsky : TCP_CLOSE fix.
186 * Colin (G3TNE) : Send a reset on syn ack replies in
187 * window but wrong (fixes NT lpd problems)
188 * Pedro Roque : Better TCP window handling, delayed ack.
189 * Joerg Reuter : No modification of locked buffers in
190 * tcp_do_retransmit()
191 * Eric Schenk : Changed receiver side silly window
192 * avoidance algorithm to BSD style
193 * algorithm. This doubles throughput
194 * against machines running Solaris,
195 * and seems to result in general
196 * improvement.
197 * Stefan Magdalinski : adjusted tcp_readable() to fix FIONREAD
198 * Willy Konynenberg : Transparent proxying support.
199 * Mike McLagan : Routing by source
200 * Keith Owens : Do proper merging with partial SKB's in
201 * tcp_do_sendmsg to avoid burstiness.
202 * Eric Schenk : Fix fast close down bug with
203 * shutdown() followed by close().
204 * Andi Kleen : Make poll agree with SIGIO
205 * Salvatore Sanfilippo : Support SO_LINGER with linger == 1 and
206 * lingertime == 0 (RFC 793 ABORT Call)
207 * Hirokazu Takahashi : Use copy_from_user() instead of
208 * csum_and_copy_from_user() if possible.
210 * This program is free software; you can redistribute it and/or
211 * modify it under the terms of the GNU General Public License
212 * as published by the Free Software Foundation; either version
213 * 2 of the License, or(at your option) any later version.
215 * Description of States:
217 * TCP_SYN_SENT sent a connection request, waiting for ack
219 * TCP_SYN_RECV received a connection request, sent ack,
220 * waiting for final ack in three-way handshake.
222 * TCP_ESTABLISHED connection established
224 * TCP_FIN_WAIT1 our side has shutdown, waiting to complete
225 * transmission of remaining buffered data
227 * TCP_FIN_WAIT2 all buffered data sent, waiting for remote
228 * to shutdown
230 * TCP_CLOSING both sides have shutdown but we still have
231 * data we have to finish sending
233 * TCP_TIME_WAIT timeout to catch resent junk before entering
234 * closed, can only be entered from FIN_WAIT2
235 * or CLOSING. Required because the other end
236 * may not have gotten our last ACK causing it
237 * to retransmit the data packet (which we ignore)
239 * TCP_CLOSE_WAIT remote side has shutdown and is waiting for
240 * us to finish writing our data and to shutdown
241 * (we have to close() to move on to LAST_ACK)
243 * TCP_LAST_ACK our side has shutdown after remote has
244 * shutdown. There may still be data in our
245 * buffer that we have to finish sending
247 * TCP_CLOSE socket is finished
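 *
 * For orientation: an active close normally walks ESTABLISHED ->
 * FIN_WAIT1 -> FIN_WAIT2 -> TIME_WAIT -> CLOSE, while a passive
 * close walks ESTABLISHED -> CLOSE_WAIT -> LAST_ACK -> CLOSE.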
250 #include <linux/module.h>
251 #include <linux/types.h>
252 #include <linux/fcntl.h>
253 #include <linux/poll.h>
254 #include <linux/init.h>
255 #include <linux/smp_lock.h>
256 #include <linux/fs.h>
257 #include <linux/random.h>
258 #include <linux/bootmem.h>
259 #include <linux/cache.h>
260 #include <linux/err.h>
261 #include <linux/crypto.h>
263 #include <net/icmp.h>
264 #include <net/tcp.h>
265 #include <net/xfrm.h>
266 #include <net/ip.h>
267 #include <net/netdma.h>
269 #include <asm/uaccess.h>
270 #include <asm/ioctls.h>
272 int sysctl_tcp_fin_timeout __read_mostly = TCP_FIN_TIMEOUT;
274 DEFINE_SNMP_STAT(struct tcp_mib, tcp_statistics) __read_mostly;
276 atomic_t tcp_orphan_count = ATOMIC_INIT(0);
278 EXPORT_SYMBOL_GPL(tcp_orphan_count);
280 int sysctl_tcp_mem[3] __read_mostly;
281 int sysctl_tcp_wmem[3] __read_mostly;
282 int sysctl_tcp_rmem[3] __read_mostly;
284 EXPORT_SYMBOL(sysctl_tcp_mem);
285 EXPORT_SYMBOL(sysctl_tcp_rmem);
286 EXPORT_SYMBOL(sysctl_tcp_wmem);
288 atomic_t tcp_memory_allocated; /* Current allocated memory. */
289 atomic_t tcp_sockets_allocated; /* Current number of TCP sockets. */
291 EXPORT_SYMBOL(tcp_memory_allocated);
292 EXPORT_SYMBOL(tcp_sockets_allocated);
295 * Pressure flag: try to collapse.
296 * Technical note: it is used by multiple contexts non atomically.
297 * All the sk_stream_mem_schedule() is of this nature: accounting
298 * is strict, actions are advisory and have some latency.
300 int tcp_memory_pressure __read_mostly;
302 EXPORT_SYMBOL(tcp_memory_pressure);
304 void tcp_enter_memory_pressure(void)
306 if (!tcp_memory_pressure) {
307 NET_INC_STATS(LINUX_MIB_TCPMEMORYPRESSURES);
308 tcp_memory_pressure = 1;
312 EXPORT_SYMBOL(tcp_enter_memory_pressure);
315 * Wait for a TCP event.
317 * Note that we don't need to lock the socket, as the upper poll layers
318 * take care of normal races (between the test and the event) and we don't
319 * go look at any of the socket buffers directly.
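 *
 * tcp_poll() is reached through the socket layer's poll hook
 * (sock->ops->poll), so poll()/select()/epoll on a TCP socket end up
 * here and get back a mask built from the standard POLL* bits.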
321 unsigned int tcp_poll(struct file *file, struct socket *sock, poll_table *wait)
323 unsigned int mask;
324 struct sock *sk = sock->sk;
325 struct tcp_sock *tp = tcp_sk(sk);
327 poll_wait(file, sk->sk_sleep, wait);
328 if (sk->sk_state == TCP_LISTEN)
329 return inet_csk_listen_poll(sk);
331 /* Socket is not locked. We are protected from async events
332 by poll logic and correct handling of state changes
333 made by other threads is impossible in any case.
336 mask = 0;
337 if (sk->sk_err)
338 mask = POLLERR;
341 * POLLHUP is certainly not done right. But poll() doesn't
342 * have a notion of HUP in just one direction, and for a
343 * socket the read side is more interesting.
345 * Some poll() documentation says that POLLHUP is incompatible
346 * with the POLLOUT/POLLWR flags, so somebody should check this
347 * all. But careful, it tends to be safer to return too many
348 * bits than too few, and you can easily break real applications
349 * if you don't tell them that something has hung up!
351 * Check-me.
353 * Check number 1. POLLHUP is _UNMASKABLE_ event (see UNIX98 and
354 * our fs/select.c). It means that after we received EOF,
355 * poll always returns immediately, making poll() on write() in state
356 * CLOSE_WAIT impossible. One solution is evident --- to set POLLHUP
357 * if and only if shutdown has been made in both directions.
358 * Actually, it is interesting to look how Solaris and DUX
359 * solve this dilemma. I would prefer, if POLLHUP were maskable,
360 * then we could set it on SND_SHUTDOWN. BTW examples given
361 * in Stevens' books assume exactly this behaviour, it explains
362 * why POLLHUP is incompatible with POLLOUT. --ANK
364 * NOTE. Check for TCP_CLOSE is added. The goal is to prevent
365 * blocking on fresh not-connected or disconnected socket. --ANK
367 if (sk->sk_shutdown == SHUTDOWN_MASK || sk->sk_state == TCP_CLOSE)
368 mask |= POLLHUP;
369 if (sk->sk_shutdown & RCV_SHUTDOWN)
370 mask |= POLLIN | POLLRDNORM | POLLRDHUP;
372 /* Connected? */
373 if ((1 << sk->sk_state) & ~(TCPF_SYN_SENT | TCPF_SYN_RECV)) {
374 /* Potential race condition. If read of tp below will
375 * escape above sk->sk_state, we can be illegally awaken
376 * in SYN_* states. */
377 if ((tp->rcv_nxt != tp->copied_seq) &&
378 (tp->urg_seq != tp->copied_seq ||
379 tp->rcv_nxt != tp->copied_seq + 1 ||
380 sock_flag(sk, SOCK_URGINLINE) || !tp->urg_data))
381 mask |= POLLIN | POLLRDNORM;
383 if (!(sk->sk_shutdown & SEND_SHUTDOWN)) {
384 if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk)) {
385 mask |= POLLOUT | POLLWRNORM;
386 } else { /* send SIGIO later */
387 set_bit(SOCK_ASYNC_NOSPACE,
388 &sk->sk_socket->flags);
389 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
391 /* Race breaker. If space is freed after
392 * wspace test but before the flags are set,
393 * IO signal will be lost.
395 if (sk_stream_wspace(sk) >= sk_stream_min_wspace(sk))
396 mask |= POLLOUT | POLLWRNORM;
400 if (tp->urg_data & TCP_URG_VALID)
401 mask |= POLLPRI;
403 return mask;
406 int tcp_ioctl(struct sock *sk, int cmd, unsigned long arg)
408 struct tcp_sock *tp = tcp_sk(sk);
409 int answ;
411 switch (cmd) {
412 case SIOCINQ:
413 if (sk->sk_state == TCP_LISTEN)
414 return -EINVAL;
416 lock_sock(sk);
417 if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
418 answ = 0;
419 else if (sock_flag(sk, SOCK_URGINLINE) ||
420 !tp->urg_data ||
421 before(tp->urg_seq, tp->copied_seq) ||
422 !before(tp->urg_seq, tp->rcv_nxt)) {
423 answ = tp->rcv_nxt - tp->copied_seq;
425 /* Subtract 1, if FIN is in queue. */
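/* (The FIN occupies one sequence number but carries no
 * user-readable data, so SIOCINQ/FIONREAD must not count it.)
 */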
426 if (answ && !skb_queue_empty(&sk->sk_receive_queue))
427 answ -=
428 tcp_hdr((struct sk_buff *)sk->sk_receive_queue.prev)->fin;
429 } else
430 answ = tp->urg_seq - tp->copied_seq;
431 release_sock(sk);
432 break;
433 case SIOCATMARK:
434 answ = tp->urg_data && tp->urg_seq == tp->copied_seq;
435 break;
436 case SIOCOUTQ:
437 if (sk->sk_state == TCP_LISTEN)
438 return -EINVAL;
440 if ((1 << sk->sk_state) & (TCPF_SYN_SENT | TCPF_SYN_RECV))
441 answ = 0;
442 else
443 answ = tp->write_seq - tp->snd_una;
444 break;
445 default:
446 return -ENOIOCTLCMD;
449 return put_user(answ, (int __user *)arg);
452 static inline void tcp_mark_push(struct tcp_sock *tp, struct sk_buff *skb)
454 TCP_SKB_CB(skb)->flags |= TCPCB_FLAG_PSH;
455 tp->pushed_seq = tp->write_seq;
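/* forced_push() below decides whether enough unpushed data has built up
 * to warrant setting PSH regardless of MSG_MORE: roughly once more than
 * half of the peer's largest advertised window has been queued since
 * the last pushed sequence.
 */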
458 static inline int forced_push(struct tcp_sock *tp)
460 return after(tp->write_seq, tp->pushed_seq + (tp->max_window >> 1));
463 static inline void skb_entail(struct sock *sk, struct sk_buff *skb)
465 struct tcp_sock *tp = tcp_sk(sk);
466 struct tcp_skb_cb *tcb = TCP_SKB_CB(skb);
468 skb->csum = 0;
469 tcb->seq = tcb->end_seq = tp->write_seq;
470 tcb->flags = TCPCB_FLAG_ACK;
471 tcb->sacked = 0;
472 skb_header_release(skb);
473 tcp_add_write_queue_tail(sk, skb);
474 sk_charge_skb(sk, skb);
475 if (tp->nonagle & TCP_NAGLE_PUSH)
476 tp->nonagle &= ~TCP_NAGLE_PUSH;
479 static inline void tcp_mark_urg(struct tcp_sock *tp, int flags,
480 struct sk_buff *skb)
482 if (flags & MSG_OOB) {
483 tp->urg_mode = 1;
484 tp->snd_up = tp->write_seq;
485 TCP_SKB_CB(skb)->sacked |= TCPCB_URG;
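/* tcp_push() transmits whatever is queued beyond the send head.
 * MSG_MORE is treated as a temporary cork (TCP_NAGLE_CORK), so partial
 * frames are held back until the caller indicates it has finished.
 */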
489 static inline void tcp_push(struct sock *sk, int flags, int mss_now,
490 int nonagle)
492 struct tcp_sock *tp = tcp_sk(sk);
494 if (tcp_send_head(sk)) {
495 struct sk_buff *skb = tcp_write_queue_tail(sk);
496 if (!(flags & MSG_MORE) || forced_push(tp))
497 tcp_mark_push(tp, skb);
498 tcp_mark_urg(tp, flags, skb);
499 __tcp_push_pending_frames(sk, mss_now,
500 (flags & MSG_MORE) ? TCP_NAGLE_CORK : nonagle);
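/* do_tcp_sendpages() is the zero-copy transmit path behind
 * tcp_sendpage()/sendfile(): page references are attached to skb page
 * frags instead of copying payload into the skb head, which is why the
 * caller must already have verified scatter-gather and checksum-offload
 * support (see tcp_sendpage() below).
 */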
504 static ssize_t do_tcp_sendpages(struct sock *sk, struct page **pages, int poffset,
505 size_t psize, int flags)
507 struct tcp_sock *tp = tcp_sk(sk);
508 int mss_now, size_goal;
509 int err;
510 ssize_t copied;
511 long timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
513 /* Wait for a connection to finish. */
514 if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT))
515 if ((err = sk_stream_wait_connect(sk, &timeo)) != 0)
516 goto out_err;
518 clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
520 mss_now = tcp_current_mss(sk, !(flags&MSG_OOB));
521 size_goal = tp->xmit_size_goal;
522 copied = 0;
524 err = -EPIPE;
525 if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
526 goto do_error;
528 while (psize > 0) {
529 struct sk_buff *skb = tcp_write_queue_tail(sk);
530 struct page *page = pages[poffset / PAGE_SIZE];
531 int copy, i, can_coalesce;
532 int offset = poffset % PAGE_SIZE;
533 int size = min_t(size_t, psize, PAGE_SIZE - offset);
535 if (!tcp_send_head(sk) || (copy = size_goal - skb->len) <= 0) {
536 new_segment:
537 if (!sk_stream_memory_free(sk))
538 goto wait_for_sndbuf;
540 skb = sk_stream_alloc_pskb(sk, 0, 0,
541 sk->sk_allocation);
542 if (!skb)
543 goto wait_for_memory;
545 skb_entail(sk, skb);
546 copy = size_goal;
549 if (copy > size)
550 copy = size;
552 i = skb_shinfo(skb)->nr_frags;
553 can_coalesce = skb_can_coalesce(skb, i, page, offset);
554 if (!can_coalesce && i >= MAX_SKB_FRAGS) {
555 tcp_mark_push(tp, skb);
556 goto new_segment;
558 if (!sk_stream_wmem_schedule(sk, copy))
559 goto wait_for_memory;
561 if (can_coalesce) {
562 skb_shinfo(skb)->frags[i - 1].size += copy;
563 } else {
564 get_page(page);
565 skb_fill_page_desc(skb, i, page, offset, copy);
568 skb->len += copy;
569 skb->data_len += copy;
570 skb->truesize += copy;
571 sk->sk_wmem_queued += copy;
572 sk->sk_forward_alloc -= copy;
573 skb->ip_summed = CHECKSUM_PARTIAL;
574 tp->write_seq += copy;
575 TCP_SKB_CB(skb)->end_seq += copy;
576 skb_shinfo(skb)->gso_segs = 0;
578 if (!copied)
579 TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_PSH;
581 copied += copy;
582 poffset += copy;
583 if (!(psize -= copy))
584 goto out;
586 if (skb->len < mss_now || (flags & MSG_OOB))
587 continue;
589 if (forced_push(tp)) {
590 tcp_mark_push(tp, skb);
591 __tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_PUSH);
592 } else if (skb == tcp_send_head(sk))
593 tcp_push_one(sk, mss_now);
594 continue;
596 wait_for_sndbuf:
597 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
598 wait_for_memory:
599 if (copied)
600 tcp_push(sk, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH);
602 if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
603 goto do_error;
605 mss_now = tcp_current_mss(sk, !(flags&MSG_OOB));
606 size_goal = tp->xmit_size_goal;
609 out:
610 if (copied)
611 tcp_push(sk, flags, mss_now, tp->nonagle);
612 return copied;
614 do_error:
615 if (copied)
616 goto out;
617 out_err:
618 return sk_stream_error(sk, flags, err);
621 ssize_t tcp_sendpage(struct socket *sock, struct page *page, int offset,
622 size_t size, int flags)
624 ssize_t res;
625 struct sock *sk = sock->sk;
627 if (!(sk->sk_route_caps & NETIF_F_SG) ||
628 !(sk->sk_route_caps & NETIF_F_ALL_CSUM))
629 return sock_no_sendpage(sock, page, offset, size, flags);
631 lock_sock(sk);
632 TCP_CHECK_TIMER(sk);
633 res = do_tcp_sendpages(sk, &page, offset, size, flags);
634 TCP_CHECK_TIMER(sk);
635 release_sock(sk);
636 return res;
639 #define TCP_PAGE(sk) (sk->sk_sndmsg_page)
640 #define TCP_OFF(sk) (sk->sk_sndmsg_off)
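/* TCP_PAGE/TCP_OFF cache a partially filled page (and the offset of its
 * first free byte) per socket, so consecutive small sendmsg() calls can
 * keep appending to the same page instead of allocating a fresh one
 * every time.
 */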
642 static inline int select_size(struct sock *sk)
644 struct tcp_sock *tp = tcp_sk(sk);
645 int tmp = tp->mss_cache;
647 if (sk->sk_route_caps & NETIF_F_SG) {
648 if (sk_can_gso(sk))
649 tmp = 0;
650 else {
651 int pgbreak = SKB_MAX_HEAD(MAX_TCP_HEADER);
653 if (tmp >= pgbreak &&
654 tmp <= pgbreak + (MAX_SKB_FRAGS - 1) * PAGE_SIZE)
655 tmp = pgbreak;
659 return tmp;
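/* tcp_sendmsg() copies user data from the iovec onto the write queue:
 * it first tops up the skb at the tail (head room, then page frags),
 * allocates a new segment once the current one reaches size_goal, and
 * pushes according to the Nagle/cork state once at least an MSS worth
 * of data is queued.
 */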
662 int tcp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
663 size_t size)
665 struct iovec *iov;
666 struct tcp_sock *tp = tcp_sk(sk);
667 struct sk_buff *skb;
668 int iovlen, flags;
669 int mss_now, size_goal;
670 int err, copied;
671 long timeo;
673 lock_sock(sk);
674 TCP_CHECK_TIMER(sk);
676 flags = msg->msg_flags;
677 timeo = sock_sndtimeo(sk, flags & MSG_DONTWAIT);
679 /* Wait for a connection to finish. */
680 if ((1 << sk->sk_state) & ~(TCPF_ESTABLISHED | TCPF_CLOSE_WAIT))
681 if ((err = sk_stream_wait_connect(sk, &timeo)) != 0)
682 goto out_err;
684 /* This should be in poll */
685 clear_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags);
687 mss_now = tcp_current_mss(sk, !(flags&MSG_OOB));
688 size_goal = tp->xmit_size_goal;
690 /* Ok commence sending. */
691 iovlen = msg->msg_iovlen;
692 iov = msg->msg_iov;
693 copied = 0;
695 err = -EPIPE;
696 if (sk->sk_err || (sk->sk_shutdown & SEND_SHUTDOWN))
697 goto do_error;
699 while (--iovlen >= 0) {
700 int seglen = iov->iov_len;
701 unsigned char __user *from = iov->iov_base;
703 iov++;
705 while (seglen > 0) {
706 int copy;
708 skb = tcp_write_queue_tail(sk);
710 if (!tcp_send_head(sk) ||
711 (copy = size_goal - skb->len) <= 0) {
713 new_segment:
714 /* Allocate new segment. If the interface is SG,
715 * allocate skb fitting to single page.
717 if (!sk_stream_memory_free(sk))
718 goto wait_for_sndbuf;
720 skb = sk_stream_alloc_pskb(sk, select_size(sk),
721 0, sk->sk_allocation);
722 if (!skb)
723 goto wait_for_memory;
726 * Check whether we can use HW checksum.
728 if (sk->sk_route_caps & NETIF_F_ALL_CSUM)
729 skb->ip_summed = CHECKSUM_PARTIAL;
731 skb_entail(sk, skb);
732 copy = size_goal;
735 /* Try to append data to the end of skb. */
736 if (copy > seglen)
737 copy = seglen;
739 /* Where to copy to? */
740 if (skb_tailroom(skb) > 0) {
741 /* We have some space in skb head. Superb! */
742 if (copy > skb_tailroom(skb))
743 copy = skb_tailroom(skb);
744 if ((err = skb_add_data(skb, from, copy)) != 0)
745 goto do_fault;
746 } else {
747 int merge = 0;
748 int i = skb_shinfo(skb)->nr_frags;
749 struct page *page = TCP_PAGE(sk);
750 int off = TCP_OFF(sk);
752 if (skb_can_coalesce(skb, i, page, off) &&
753 off != PAGE_SIZE) {
754 /* We can extend the last page
755 * fragment. */
756 merge = 1;
757 } else if (i == MAX_SKB_FRAGS ||
758 (!i &&
759 !(sk->sk_route_caps & NETIF_F_SG))) {
760 /* Need to add new fragment and cannot
761 * do this because interface is non-SG,
762 * or because all the page slots are
763 * busy. */
764 tcp_mark_push(tp, skb);
765 goto new_segment;
766 } else if (page) {
767 if (off == PAGE_SIZE) {
768 put_page(page);
769 TCP_PAGE(sk) = page = NULL;
770 off = 0;
772 } else
773 off = 0;
775 if (copy > PAGE_SIZE - off)
776 copy = PAGE_SIZE - off;
778 if (!sk_stream_wmem_schedule(sk, copy))
779 goto wait_for_memory;
781 if (!page) {
782 /* Allocate new cache page. */
783 if (!(page = sk_stream_alloc_page(sk)))
784 goto wait_for_memory;
787 /* Time to copy data. We are close to
788 * the end! */
789 err = skb_copy_to_page(sk, from, skb, page,
790 off, copy);
791 if (err) {
792 /* If this page was new, give it to the
793 * socket so it does not get leaked.
795 if (!TCP_PAGE(sk)) {
796 TCP_PAGE(sk) = page;
797 TCP_OFF(sk) = 0;
799 goto do_error;
802 /* Update the skb. */
803 if (merge) {
804 skb_shinfo(skb)->frags[i - 1].size +=
805 copy;
806 } else {
807 skb_fill_page_desc(skb, i, page, off, copy);
808 if (TCP_PAGE(sk)) {
809 get_page(page);
810 } else if (off + copy < PAGE_SIZE) {
811 get_page(page);
812 TCP_PAGE(sk) = page;
816 TCP_OFF(sk) = off + copy;
819 if (!copied)
820 TCP_SKB_CB(skb)->flags &= ~TCPCB_FLAG_PSH;
822 tp->write_seq += copy;
823 TCP_SKB_CB(skb)->end_seq += copy;
824 skb_shinfo(skb)->gso_segs = 0;
826 from += copy;
827 copied += copy;
828 if ((seglen -= copy) == 0 && iovlen == 0)
829 goto out;
831 if (skb->len < mss_now || (flags & MSG_OOB))
832 continue;
834 if (forced_push(tp)) {
835 tcp_mark_push(tp, skb);
836 __tcp_push_pending_frames(sk, mss_now, TCP_NAGLE_PUSH);
837 } else if (skb == tcp_send_head(sk))
838 tcp_push_one(sk, mss_now);
839 continue;
841 wait_for_sndbuf:
842 set_bit(SOCK_NOSPACE, &sk->sk_socket->flags);
843 wait_for_memory:
844 if (copied)
845 tcp_push(sk, flags & ~MSG_MORE, mss_now, TCP_NAGLE_PUSH);
847 if ((err = sk_stream_wait_memory(sk, &timeo)) != 0)
848 goto do_error;
850 mss_now = tcp_current_mss(sk, !(flags&MSG_OOB));
851 size_goal = tp->xmit_size_goal;
855 out:
856 if (copied)
857 tcp_push(sk, flags, mss_now, tp->nonagle);
858 TCP_CHECK_TIMER(sk);
859 release_sock(sk);
860 return copied;
862 do_fault:
863 if (!skb->len) {
864 tcp_unlink_write_queue(skb, sk);
865 /* It is the one place in all of TCP, except connection
866 * reset, where we can be unlinking the send_head.
868 tcp_check_send_head(sk, skb);
869 sk_stream_free_skb(sk, skb);
872 do_error:
873 if (copied)
874 goto out;
875 out_err:
876 err = sk_stream_error(sk, flags, err);
877 TCP_CHECK_TIMER(sk);
878 release_sock(sk);
879 return err;
883 * Handle reading urgent data. BSD has very simple semantics for
884 * this, no blocking and very strange errors 8)
887 static int tcp_recv_urg(struct sock *sk, long timeo,
888 struct msghdr *msg, int len, int flags,
889 int *addr_len)
891 struct tcp_sock *tp = tcp_sk(sk);
893 /* No URG data to read. */
894 if (sock_flag(sk, SOCK_URGINLINE) || !tp->urg_data ||
895 tp->urg_data == TCP_URG_READ)
896 return -EINVAL; /* Yes this is right ! */
898 if (sk->sk_state == TCP_CLOSE && !sock_flag(sk, SOCK_DONE))
899 return -ENOTCONN;
901 if (tp->urg_data & TCP_URG_VALID) {
902 int err = 0;
903 char c = tp->urg_data;
905 if (!(flags & MSG_PEEK))
906 tp->urg_data = TCP_URG_READ;
908 /* Read urgent data. */
909 msg->msg_flags |= MSG_OOB;
911 if (len > 0) {
912 if (!(flags & MSG_TRUNC))
913 err = memcpy_toiovec(msg->msg_iov, &c, 1);
914 len = 1;
915 } else
916 msg->msg_flags |= MSG_TRUNC;
918 return err ? -EFAULT : len;
921 if (sk->sk_state == TCP_CLOSE || (sk->sk_shutdown & RCV_SHUTDOWN))
922 return 0;
924 /* Fixed the recv(..., MSG_OOB) behaviour. BSD docs and
925 * the available implementations agree in this case:
926 * this call should never block, independent of the
927 * blocking state of the socket.
928 * Mike <pall@rz.uni-karlsruhe.de>
930 return -EAGAIN;
933 /* Clean up the receive buffer for full frames taken by the user,
934 * then send an ACK if necessary. COPIED is the number of bytes
935 * tcp_recvmsg has given to the user so far, it speeds up the
936 * calculation of whether or not we must ACK for the sake of
937 * a window update.
939 void tcp_cleanup_rbuf(struct sock *sk, int copied)
941 struct tcp_sock *tp = tcp_sk(sk);
942 int time_to_ack = 0;
944 #if TCP_DEBUG
945 struct sk_buff *skb = skb_peek(&sk->sk_receive_queue);
947 BUG_TRAP(!skb || before(tp->copied_seq, TCP_SKB_CB(skb)->end_seq));
948 #endif
950 if (inet_csk_ack_scheduled(sk)) {
951 const struct inet_connection_sock *icsk = inet_csk(sk);
952 /* Delayed ACKs frequently hit locked sockets during bulk
953 * receive. */
954 if (icsk->icsk_ack.blocked ||
955 /* Once-per-two-segments ACK was not sent by tcp_input.c */
956 tp->rcv_nxt - tp->rcv_wup > icsk->icsk_ack.rcv_mss ||
958 * If this read emptied the read buffer, we send an ACK when the
959 * connection is not bidirectional, the user drained the
960 * receive buffer, and there was a small segment
961 * in the queue.
963 (copied > 0 &&
964 ((icsk->icsk_ack.pending & ICSK_ACK_PUSHED2) ||
965 ((icsk->icsk_ack.pending & ICSK_ACK_PUSHED) &&
966 !icsk->icsk_ack.pingpong)) &&
967 !atomic_read(&sk->sk_rmem_alloc)))
968 time_to_ack = 1;
971 /* We send an ACK if we can now advertise a non-zero window
972 * which has been raised "significantly".
974 * Even if the window is raised up to infinity, do not send a window open ACK
975 * in states where we will not receive more. It is useless.
977 if (copied > 0 && !time_to_ack && !(sk->sk_shutdown & RCV_SHUTDOWN)) {
978 __u32 rcv_window_now = tcp_receive_window(tp);
980 /* Optimize, __tcp_select_window() is not cheap. */
981 if (2*rcv_window_now <= tp->window_clamp) {
982 __u32 new_window = __tcp_select_window(sk);
984 /* Send ACK now, if this read freed lots of space
985 * in our buffer. Certainly, new_window is new window.
986 * We can advertise it now, if it is not less than current one.
987 * "Lots" means "at least twice" here.
989 if (new_window && new_window >= 2 * rcv_window_now)
990 time_to_ack = 1;
993 if (time_to_ack)
994 tcp_send_ack(sk);
997 static void tcp_prequeue_process(struct sock *sk)
999 struct sk_buff *skb;
1000 struct tcp_sock *tp = tcp_sk(sk);
1002 NET_INC_STATS_USER(LINUX_MIB_TCPPREQUEUED);
1004 /* RX process wants to run with disabled BHs, though it is not
1005 * necessary */
1006 local_bh_disable();
1007 while ((skb = __skb_dequeue(&tp->ucopy.prequeue)) != NULL)
1008 sk->sk_backlog_rcv(sk, skb);
1009 local_bh_enable();
1011 /* Clear memory counter. */
1012 tp->ucopy.memory = 0;
1015 static inline struct sk_buff *tcp_recv_skb(struct sock *sk, u32 seq, u32 *off)
1017 struct sk_buff *skb;
1018 u32 offset;
1020 skb_queue_walk(&sk->sk_receive_queue, skb) {
1021 offset = seq - TCP_SKB_CB(skb)->seq;
1022 if (tcp_hdr(skb)->syn)
1023 offset--;
1024 if (offset < skb->len || tcp_hdr(skb)->fin) {
1025 *off = offset;
1026 return skb;
1029 return NULL;
1033 * This routine provides an alternative to tcp_recvmsg() for routines
1034 * that would like to handle copying from skbuffs directly in 'sendfile'
1035 * fashion.
1036 * Note:
1037 * - It is assumed that the socket was locked by the caller.
1038 * - The routine does not block.
1039 * - At present, there is no support for reading OOB data
1040 * or for 'peeking' the socket using this routine
1041 * (although both would be easy to implement).
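 *
 * recv_actor is handed each skb region in turn and returns how many
 * bytes it actually consumed; returning less than it was offered (or
 * running desc->count down to zero) stops the walk early.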
1043 int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
1044 sk_read_actor_t recv_actor)
1046 struct sk_buff *skb;
1047 struct tcp_sock *tp = tcp_sk(sk);
1048 u32 seq = tp->copied_seq;
1049 u32 offset;
1050 int copied = 0;
1052 if (sk->sk_state == TCP_LISTEN)
1053 return -ENOTCONN;
1054 while ((skb = tcp_recv_skb(sk, seq, &offset)) != NULL) {
1055 if (offset < skb->len) {
1056 size_t used, len;
1058 len = skb->len - offset;
1059 /* Stop reading if we hit a patch of urgent data */
1060 if (tp->urg_data) {
1061 u32 urg_offset = tp->urg_seq - seq;
1062 if (urg_offset < len)
1063 len = urg_offset;
1064 if (!len)
1065 break;
1067 used = recv_actor(desc, skb, offset, len);
1068 if (used <= len) {
1069 seq += used;
1070 copied += used;
1071 offset += used;
1073 if (offset != skb->len)
1074 break;
1076 if (tcp_hdr(skb)->fin) {
1077 sk_eat_skb(sk, skb, 0);
1078 ++seq;
1079 break;
1081 sk_eat_skb(sk, skb, 0);
1082 if (!desc->count)
1083 break;
1085 tp->copied_seq = seq;
1087 tcp_rcv_space_adjust(sk);
1089 /* Clean up data we have read: This will do ACK frames. */
1090 if (copied)
1091 tcp_cleanup_rbuf(sk, copied);
1092 return copied;
1096 * This routine copies from a sock struct into the user buffer.
1098 * Technical note: in 2.3 we work on _locked_ socket, so that
1099 * tricks with *seq access order and skb->users are not required.
1100 * Probably, code can be easily improved even more.
1103 int tcp_recvmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
1104 size_t len, int nonblock, int flags, int *addr_len)
1106 struct tcp_sock *tp = tcp_sk(sk);
1107 int copied = 0;
1108 u32 peek_seq;
1109 u32 *seq;
1110 unsigned long used;
1111 int err;
1112 int target; /* Read at least this many bytes */
1113 long timeo;
1114 struct task_struct *user_recv = NULL;
1115 int copied_early = 0;
1117 lock_sock(sk);
1119 TCP_CHECK_TIMER(sk);
1121 err = -ENOTCONN;
1122 if (sk->sk_state == TCP_LISTEN)
1123 goto out;
1125 timeo = sock_rcvtimeo(sk, nonblock);
1127 /* Urgent data needs to be handled specially. */
1128 if (flags & MSG_OOB)
1129 goto recv_urg;
1131 seq = &tp->copied_seq;
1132 if (flags & MSG_PEEK) {
1133 peek_seq = tp->copied_seq;
1134 seq = &peek_seq;
1137 target = sock_rcvlowat(sk, flags & MSG_WAITALL, len);
1139 #ifdef CONFIG_NET_DMA
1140 tp->ucopy.dma_chan = NULL;
1141 preempt_disable();
1142 if ((len > sysctl_tcp_dma_copybreak) && !(flags & MSG_PEEK) &&
1143 !sysctl_tcp_low_latency && __get_cpu_var(softnet_data).net_dma) {
1144 preempt_enable_no_resched();
1145 tp->ucopy.pinned_list = dma_pin_iovec_pages(msg->msg_iov, len);
1146 } else
1147 preempt_enable_no_resched();
1148 #endif
1150 do {
1151 struct sk_buff *skb;
1152 u32 offset;
1154 /* Are we at urgent data? Stop if we have read anything or have SIGURG pending. */
1155 if (tp->urg_data && tp->urg_seq == *seq) {
1156 if (copied)
1157 break;
1158 if (signal_pending(current)) {
1159 copied = timeo ? sock_intr_errno(timeo) : -EAGAIN;
1160 break;
1164 /* Next get a buffer. */
1166 skb = skb_peek(&sk->sk_receive_queue);
1167 do {
1168 if (!skb)
1169 break;
1171 /* Now that we have two receive queues this
1172 * shouldn't happen.
1174 if (before(*seq, TCP_SKB_CB(skb)->seq)) {
1175 printk(KERN_INFO "recvmsg bug: copied %X "
1176 "seq %X\n", *seq, TCP_SKB_CB(skb)->seq);
1177 break;
1179 offset = *seq - TCP_SKB_CB(skb)->seq;
1180 if (tcp_hdr(skb)->syn)
1181 offset--;
1182 if (offset < skb->len)
1183 goto found_ok_skb;
1184 if (tcp_hdr(skb)->fin)
1185 goto found_fin_ok;
1186 BUG_TRAP(flags & MSG_PEEK);
1187 skb = skb->next;
1188 } while (skb != (struct sk_buff *)&sk->sk_receive_queue);
1190 /* Well, if we have backlog, try to process it now. */
1192 if (copied >= target && !sk->sk_backlog.tail)
1193 break;
1195 if (copied) {
1196 if (sk->sk_err ||
1197 sk->sk_state == TCP_CLOSE ||
1198 (sk->sk_shutdown & RCV_SHUTDOWN) ||
1199 !timeo ||
1200 signal_pending(current) ||
1201 (flags & MSG_PEEK))
1202 break;
1203 } else {
1204 if (sock_flag(sk, SOCK_DONE))
1205 break;
1207 if (sk->sk_err) {
1208 copied = sock_error(sk);
1209 break;
1212 if (sk->sk_shutdown & RCV_SHUTDOWN)
1213 break;
1215 if (sk->sk_state == TCP_CLOSE) {
1216 if (!sock_flag(sk, SOCK_DONE)) {
1217 /* This occurs when user tries to read
1218 * from never connected socket.
1220 copied = -ENOTCONN;
1221 break;
1223 break;
1226 if (!timeo) {
1227 copied = -EAGAIN;
1228 break;
1231 if (signal_pending(current)) {
1232 copied = sock_intr_errno(timeo);
1233 break;
1237 tcp_cleanup_rbuf(sk, copied);
1239 if (!sysctl_tcp_low_latency && tp->ucopy.task == user_recv) {
1240 /* Install new reader */
1241 if (!user_recv && !(flags & (MSG_TRUNC | MSG_PEEK))) {
1242 user_recv = current;
1243 tp->ucopy.task = user_recv;
1244 tp->ucopy.iov = msg->msg_iov;
1247 tp->ucopy.len = len;
1249 BUG_TRAP(tp->copied_seq == tp->rcv_nxt ||
1250 (flags & (MSG_PEEK | MSG_TRUNC)));
1252 /* Ugly... If prequeue is not empty, we have to
1253 * process it before releasing socket, otherwise
1254 * order will be broken at second iteration.
1255 * More elegant solution is required!!!
1257 * Look: we have the following (pseudo)queues:
1259 * 1. packets in flight
1260 * 2. backlog
1261 * 3. prequeue
1262 * 4. receive_queue
1264 * Each queue can be processed only if the next ones
1265 * are empty. At this point we have empty receive_queue.
1266 * But prequeue _can_ be not empty after 2nd iteration,
1267 * when we jumped to start of loop because backlog
1268 * processing added something to receive_queue.
1269 * We cannot release_sock(), because backlog contains
1270 * packets arrived _after_ prequeued ones.
1272 * Shortly, algorithm is clear --- to process all
1273 * the queues in order. We could make it more directly,
1274 * requeueing packets from backlog to prequeue, if
1275 * is not empty. It is more elegant, but eats cycles,
1276 * unfortunately.
1278 if (!skb_queue_empty(&tp->ucopy.prequeue))
1279 goto do_prequeue;
1281 /* __ Set realtime policy in scheduler __ */
1284 if (copied >= target) {
1285 /* Do not sleep, just process backlog. */
1286 release_sock(sk);
1287 lock_sock(sk);
1288 } else
1289 sk_wait_data(sk, &timeo);
1291 #ifdef CONFIG_NET_DMA
1292 tp->ucopy.wakeup = 0;
1293 #endif
1295 if (user_recv) {
1296 int chunk;
1298 /* __ Restore normal policy in scheduler __ */
1300 if ((chunk = len - tp->ucopy.len) != 0) {
1301 NET_ADD_STATS_USER(LINUX_MIB_TCPDIRECTCOPYFROMBACKLOG, chunk);
1302 len -= chunk;
1303 copied += chunk;
1306 if (tp->rcv_nxt == tp->copied_seq &&
1307 !skb_queue_empty(&tp->ucopy.prequeue)) {
1308 do_prequeue:
1309 tcp_prequeue_process(sk);
1311 if ((chunk = len - tp->ucopy.len) != 0) {
1312 NET_ADD_STATS_USER(LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk);
1313 len -= chunk;
1314 copied += chunk;
1318 if ((flags & MSG_PEEK) && peek_seq != tp->copied_seq) {
1319 if (net_ratelimit())
1320 printk(KERN_DEBUG "TCP(%s:%d): Application bug, race in MSG_PEEK.\n",
1321 current->comm, current->pid);
1322 peek_seq = tp->copied_seq;
1324 continue;
1326 found_ok_skb:
1327 /* Ok so how much can we use? */
1328 used = skb->len - offset;
1329 if (len < used)
1330 used = len;
1332 /* Do we have urgent data here? */
1333 if (tp->urg_data) {
1334 u32 urg_offset = tp->urg_seq - *seq;
1335 if (urg_offset < used) {
1336 if (!urg_offset) {
1337 if (!sock_flag(sk, SOCK_URGINLINE)) {
1338 ++*seq;
1339 offset++;
1340 used--;
1341 if (!used)
1342 goto skip_copy;
1344 } else
1345 used = urg_offset;
1349 if (!(flags & MSG_TRUNC)) {
1350 #ifdef CONFIG_NET_DMA
1351 if (!tp->ucopy.dma_chan && tp->ucopy.pinned_list)
1352 tp->ucopy.dma_chan = get_softnet_dma();
1354 if (tp->ucopy.dma_chan) {
1355 tp->ucopy.dma_cookie = dma_skb_copy_datagram_iovec(
1356 tp->ucopy.dma_chan, skb, offset,
1357 msg->msg_iov, used,
1358 tp->ucopy.pinned_list);
1360 if (tp->ucopy.dma_cookie < 0) {
1362 printk(KERN_ALERT "dma_cookie < 0\n");
1364 /* Exception. Bailout! */
1365 if (!copied)
1366 copied = -EFAULT;
1367 break;
1369 if ((offset + used) == skb->len)
1370 copied_early = 1;
1372 } else
1373 #endif
1375 err = skb_copy_datagram_iovec(skb, offset,
1376 msg->msg_iov, used);
1377 if (err) {
1378 /* Exception. Bailout! */
1379 if (!copied)
1380 copied = -EFAULT;
1381 break;
1386 *seq += used;
1387 copied += used;
1388 len -= used;
1390 tcp_rcv_space_adjust(sk);
1392 skip_copy:
1393 if (tp->urg_data && after(tp->copied_seq, tp->urg_seq)) {
1394 tp->urg_data = 0;
1395 tcp_fast_path_check(sk);
1397 if (used + offset < skb->len)
1398 continue;
1400 if (tcp_hdr(skb)->fin)
1401 goto found_fin_ok;
1402 if (!(flags & MSG_PEEK)) {
1403 sk_eat_skb(sk, skb, copied_early);
1404 copied_early = 0;
1406 continue;
1408 found_fin_ok:
1409 /* Process the FIN. */
1410 ++*seq;
1411 if (!(flags & MSG_PEEK)) {
1412 sk_eat_skb(sk, skb, copied_early);
1413 copied_early = 0;
1415 break;
1416 } while (len > 0);
1418 if (user_recv) {
1419 if (!skb_queue_empty(&tp->ucopy.prequeue)) {
1420 int chunk;
1422 tp->ucopy.len = copied > 0 ? len : 0;
1424 tcp_prequeue_process(sk);
1426 if (copied > 0 && (chunk = len - tp->ucopy.len) != 0) {
1427 NET_ADD_STATS_USER(LINUX_MIB_TCPDIRECTCOPYFROMPREQUEUE, chunk);
1428 len -= chunk;
1429 copied += chunk;
1433 tp->ucopy.task = NULL;
1434 tp->ucopy.len = 0;
1437 #ifdef CONFIG_NET_DMA
1438 if (tp->ucopy.dma_chan) {
1439 struct sk_buff *skb;
1440 dma_cookie_t done, used;
1442 dma_async_memcpy_issue_pending(tp->ucopy.dma_chan);
1444 while (dma_async_memcpy_complete(tp->ucopy.dma_chan,
1445 tp->ucopy.dma_cookie, &done,
1446 &used) == DMA_IN_PROGRESS) {
1447 /* do partial cleanup of sk_async_wait_queue */
1448 while ((skb = skb_peek(&sk->sk_async_wait_queue)) &&
1449 (dma_async_is_complete(skb->dma_cookie, done,
1450 used) == DMA_SUCCESS)) {
1451 __skb_dequeue(&sk->sk_async_wait_queue);
1452 kfree_skb(skb);
1456 /* Safe to free early-copied skbs now */
1457 __skb_queue_purge(&sk->sk_async_wait_queue);
1458 dma_chan_put(tp->ucopy.dma_chan);
1459 tp->ucopy.dma_chan = NULL;
1461 if (tp->ucopy.pinned_list) {
1462 dma_unpin_iovec_pages(tp->ucopy.pinned_list);
1463 tp->ucopy.pinned_list = NULL;
1465 #endif
1467 /* According to UNIX98, msg_name/msg_namelen are ignored
1468 * on connected socket. I was just happy when found this 8) --ANK
1471 /* Clean up data we have read: This will do ACK frames. */
1472 tcp_cleanup_rbuf(sk, copied);
1474 TCP_CHECK_TIMER(sk);
1475 release_sock(sk);
1476 return copied;
1478 out:
1479 TCP_CHECK_TIMER(sk);
1480 release_sock(sk);
1481 return err;
1483 recv_urg:
1484 err = tcp_recv_urg(sk, timeo, msg, len, flags, addr_len);
1485 goto out;
1489 * State processing on a close. This implements the state shift for
1490 * sending our FIN frame. Note that we only send a FIN for some
1491 * states. A shutdown() may have already sent the FIN, or we may be
1492 * closed.
1495 static const unsigned char new_state[16] = {
1496 /* current state: new state: action: */
1497 /* (Invalid) */ TCP_CLOSE,
1498 /* TCP_ESTABLISHED */ TCP_FIN_WAIT1 | TCP_ACTION_FIN,
1499 /* TCP_SYN_SENT */ TCP_CLOSE,
1500 /* TCP_SYN_RECV */ TCP_FIN_WAIT1 | TCP_ACTION_FIN,
1501 /* TCP_FIN_WAIT1 */ TCP_FIN_WAIT1,
1502 /* TCP_FIN_WAIT2 */ TCP_FIN_WAIT2,
1503 /* TCP_TIME_WAIT */ TCP_CLOSE,
1504 /* TCP_CLOSE */ TCP_CLOSE,
1505 /* TCP_CLOSE_WAIT */ TCP_LAST_ACK | TCP_ACTION_FIN,
1506 /* TCP_LAST_ACK */ TCP_LAST_ACK,
1507 /* TCP_LISTEN */ TCP_CLOSE,
1508 /* TCP_CLOSING */ TCP_CLOSING,
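/* Each entry packs the next state in TCP_STATE_MASK plus an optional
 * TCP_ACTION_FIN flag: e.g. closing from TCP_ESTABLISHED moves the
 * socket to TCP_FIN_WAIT1 and tells the caller to emit a FIN, while
 * closing from TCP_FIN_WAIT2 stays put and sends nothing.
 */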
1511 static int tcp_close_state(struct sock *sk)
1513 int next = (int)new_state[sk->sk_state];
1514 int ns = next & TCP_STATE_MASK;
1516 tcp_set_state(sk, ns);
1518 return next & TCP_ACTION_FIN;
1522 * Shutdown the sending side of a connection. Much like close except
1523 * that we don't receive shut down or set_sock_flag(sk, SOCK_DEAD).
1526 void tcp_shutdown(struct sock *sk, int how)
1528 /* We need to grab some memory, and put together a FIN,
1529 * and then put it into the queue to be sent.
1530 * Tim MacKenzie(tym@dibbler.cs.monash.edu.au) 4 Dec '92.
1532 if (!(how & SEND_SHUTDOWN))
1533 return;
1535 /* If we've already sent a FIN, or it's a closed state, skip this. */
1536 if ((1 << sk->sk_state) &
1537 (TCPF_ESTABLISHED | TCPF_SYN_SENT |
1538 TCPF_SYN_RECV | TCPF_CLOSE_WAIT)) {
1539 /* Clear out any half completed packets. FIN if needed. */
1540 if (tcp_close_state(sk))
1541 tcp_send_fin(sk);
1545 void tcp_close(struct sock *sk, long timeout)
1547 struct sk_buff *skb;
1548 int data_was_unread = 0;
1549 int state;
1551 lock_sock(sk);
1552 sk->sk_shutdown = SHUTDOWN_MASK;
1554 if (sk->sk_state == TCP_LISTEN) {
1555 tcp_set_state(sk, TCP_CLOSE);
1557 /* Special case. */
1558 inet_csk_listen_stop(sk);
1560 goto adjudge_to_death;
1563 /* We need to flush the recv. buffs. We do this only on the
1564 * descriptor close, not protocol-sourced closes, because the
1565 * reader process may not have drained the data yet!
1567 while ((skb = __skb_dequeue(&sk->sk_receive_queue)) != NULL) {
1568 u32 len = TCP_SKB_CB(skb)->end_seq - TCP_SKB_CB(skb)->seq -
1569 tcp_hdr(skb)->fin;
1570 data_was_unread += len;
1571 __kfree_skb(skb);
1574 sk_stream_mem_reclaim(sk);
1576 /* As outlined in RFC 2525, section 2.17, we send a RST here because
1577 * data was lost. To witness the awful effects of the old behavior of
1578 * always doing a FIN, run an older 2.1.x kernel or 2.0.x, start a bulk
1579 * GET in an FTP client, suspend the process, wait for the client to
1580 * advertise a zero window, then kill -9 the FTP client, wheee...
1581 * Note: timeout is always zero in such a case.
1583 if (data_was_unread) {
1584 /* Unread data was tossed, zap the connection. */
1585 NET_INC_STATS_USER(LINUX_MIB_TCPABORTONCLOSE);
1586 tcp_set_state(sk, TCP_CLOSE);
1587 tcp_send_active_reset(sk, GFP_KERNEL);
1588 } else if (sock_flag(sk, SOCK_LINGER) && !sk->sk_lingertime) {
1589 /* Check zero linger _after_ checking for unread data. */
1590 sk->sk_prot->disconnect(sk, 0);
1591 NET_INC_STATS_USER(LINUX_MIB_TCPABORTONDATA);
1592 } else if (tcp_close_state(sk)) {
1593 /* We FIN if the application ate all the data before
1594 * zapping the connection.
1597 /* RED-PEN. Formally speaking, we have broken TCP state
1598 * machine. State transitions:
1600 * TCP_ESTABLISHED -> TCP_FIN_WAIT1
1601 * TCP_SYN_RECV -> TCP_FIN_WAIT1 (forget it, it's impossible)
1602 * TCP_CLOSE_WAIT -> TCP_LAST_ACK
1604 * are legal only when FIN has been sent (i.e. in window),
1605 * rather than queued out of window. Purists blame.
1607 * F.e. "RFC state" is ESTABLISHED,
1608 * if Linux state is FIN-WAIT-1, but FIN is still not sent.
1610 * The visible deviations are that sometimes
1611 * we enter time-wait state, when it is not required really
1612 * (harmless), do not send active resets, when they are
1613 * required by specs (TCP_ESTABLISHED, TCP_CLOSE_WAIT, when
1614 * they look as CLOSING or LAST_ACK for Linux)
1615 * Probably, I missed some more holelets.
1616 * --ANK
1618 tcp_send_fin(sk);
1621 sk_stream_wait_close(sk, timeout);
1623 adjudge_to_death:
1624 state = sk->sk_state;
1625 sock_hold(sk);
1626 sock_orphan(sk);
1627 atomic_inc(sk->sk_prot->orphan_count);
1629 /* It is the last release_sock in its life. It will remove backlog. */
1630 release_sock(sk);
1633 /* Now socket is owned by kernel and we acquire BH lock
1634 to finish close. No need to check for user refs.
1636 local_bh_disable();
1637 bh_lock_sock(sk);
1638 BUG_TRAP(!sock_owned_by_user(sk));
1640 /* Have we already been destroyed by a softirq or backlog? */
1641 if (state != TCP_CLOSE && sk->sk_state == TCP_CLOSE)
1642 goto out;
1644 /* This is a (useful) BSD violation of the RFC. There is a
1645 * problem with TCP as specified in that the other end could
1646 * keep a socket open forever with no application left this end.
1647 * We use a 3 minute timeout (about the same as BSD) then kill
1648 * our end. If they send after that then tough - BUT: long enough
1649 * that we won't make the old 4*rto = almost no time - whoops
1650 * reset mistake.
1652 * Nope, it was not mistake. It is really desired behaviour
1653 * f.e. on http servers, when such sockets are useless, but
1654 * consume significant resources. Let's do it with special
1655 * linger2 option. --ANK
1658 if (sk->sk_state == TCP_FIN_WAIT2) {
1659 struct tcp_sock *tp = tcp_sk(sk);
1660 if (tp->linger2 < 0) {
1661 tcp_set_state(sk, TCP_CLOSE);
1662 tcp_send_active_reset(sk, GFP_ATOMIC);
1663 NET_INC_STATS_BH(LINUX_MIB_TCPABORTONLINGER);
1664 } else {
1665 const int tmo = tcp_fin_time(sk);
1667 if (tmo > TCP_TIMEWAIT_LEN) {
1668 inet_csk_reset_keepalive_timer(sk,
1669 tmo - TCP_TIMEWAIT_LEN);
1670 } else {
1671 tcp_time_wait(sk, TCP_FIN_WAIT2, tmo);
1672 goto out;
1676 if (sk->sk_state != TCP_CLOSE) {
1677 sk_stream_mem_reclaim(sk);
1678 if (atomic_read(sk->sk_prot->orphan_count) > sysctl_tcp_max_orphans ||
1679 (sk->sk_wmem_queued > SOCK_MIN_SNDBUF &&
1680 atomic_read(&tcp_memory_allocated) > sysctl_tcp_mem[2])) {
1681 if (net_ratelimit())
1682 printk(KERN_INFO "TCP: too many orphaned "
1683 "sockets\n");
1684 tcp_set_state(sk, TCP_CLOSE);
1685 tcp_send_active_reset(sk, GFP_ATOMIC);
1686 NET_INC_STATS_BH(LINUX_MIB_TCPABORTONMEMORY);
1690 if (sk->sk_state == TCP_CLOSE)
1691 inet_csk_destroy_sock(sk);
1692 /* Otherwise, socket is reprieved until protocol close. */
1694 out:
1695 bh_unlock_sock(sk);
1696 local_bh_enable();
1697 sock_put(sk);
1700 /* These states need RST on ABORT according to RFC793 */
1702 static inline int tcp_need_reset(int state)
1704 return (1 << state) &
1705 (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT | TCPF_FIN_WAIT1 |
1706 TCPF_FIN_WAIT2 | TCPF_SYN_RECV);
1709 int tcp_disconnect(struct sock *sk, int flags)
1711 struct inet_sock *inet = inet_sk(sk);
1712 struct inet_connection_sock *icsk = inet_csk(sk);
1713 struct tcp_sock *tp = tcp_sk(sk);
1714 int err = 0;
1715 int old_state = sk->sk_state;
1717 if (old_state != TCP_CLOSE)
1718 tcp_set_state(sk, TCP_CLOSE);
1720 /* ABORT function of RFC793 */
1721 if (old_state == TCP_LISTEN) {
1722 inet_csk_listen_stop(sk);
1723 } else if (tcp_need_reset(old_state) ||
1724 (tp->snd_nxt != tp->write_seq &&
1725 (1 << old_state) & (TCPF_CLOSING | TCPF_LAST_ACK))) {
1726 /* The last check adjusts for discrepancy of Linux wrt. RFC
1727 * states
1729 tcp_send_active_reset(sk, gfp_any());
1730 sk->sk_err = ECONNRESET;
1731 } else if (old_state == TCP_SYN_SENT)
1732 sk->sk_err = ECONNRESET;
1734 tcp_clear_xmit_timers(sk);
1735 __skb_queue_purge(&sk->sk_receive_queue);
1736 tcp_write_queue_purge(sk);
1737 __skb_queue_purge(&tp->out_of_order_queue);
1738 #ifdef CONFIG_NET_DMA
1739 __skb_queue_purge(&sk->sk_async_wait_queue);
1740 #endif
1742 inet->dport = 0;
1744 if (!(sk->sk_userlocks & SOCK_BINDADDR_LOCK))
1745 inet_reset_saddr(sk);
1747 sk->sk_shutdown = 0;
1748 sock_reset_flag(sk, SOCK_DONE);
1749 tp->srtt = 0;
1750 if ((tp->write_seq += tp->max_window + 2) == 0)
1751 tp->write_seq = 1;
1752 icsk->icsk_backoff = 0;
1753 tp->snd_cwnd = 2;
1754 icsk->icsk_probes_out = 0;
1755 tp->packets_out = 0;
1756 tp->snd_ssthresh = 0x7fffffff;
1757 tp->snd_cwnd_cnt = 0;
1758 tp->bytes_acked = 0;
1759 tcp_set_ca_state(sk, TCP_CA_Open);
1760 tcp_clear_retrans(tp);
1761 inet_csk_delack_init(sk);
1762 tcp_init_send_head(sk);
1763 tp->rx_opt.saw_tstamp = 0;
1764 tcp_sack_reset(&tp->rx_opt);
1765 __sk_dst_reset(sk);
1767 BUG_TRAP(!inet->num || icsk->icsk_bind_hash);
1769 sk->sk_error_report(sk);
1770 return err;
1774 * Socket option code for TCP.
1776 static int do_tcp_setsockopt(struct sock *sk, int level,
1777 int optname, char __user *optval, int optlen)
1779 struct tcp_sock *tp = tcp_sk(sk);
1780 struct inet_connection_sock *icsk = inet_csk(sk);
1781 int val;
1782 int err = 0;
1784 /* This is a string value all the others are int's */
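/* A userspace sketch (assuming the requested algorithm, e.g. the
 * built-in "reno", is available):
 *
 *   setsockopt(fd, IPPROTO_TCP, TCP_CONGESTION, "reno", strlen("reno"));
 */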
1785 if (optname == TCP_CONGESTION) {
1786 char name[TCP_CA_NAME_MAX];
1788 if (optlen < 1)
1789 return -EINVAL;
1791 val = strncpy_from_user(name, optval,
1792 min(TCP_CA_NAME_MAX-1, optlen));
1793 if (val < 0)
1794 return -EFAULT;
1795 name[val] = 0;
1797 lock_sock(sk);
1798 err = tcp_set_congestion_control(sk, name);
1799 release_sock(sk);
1800 return err;
1803 if (optlen < sizeof(int))
1804 return -EINVAL;
1806 if (get_user(val, (int __user *)optval))
1807 return -EFAULT;
1809 lock_sock(sk);
1811 switch (optname) {
1812 case TCP_MAXSEG:
1813 /* Values greater than interface MTU won't take effect. However
1814 * at the point when this call is done we typically don't yet
1815 * know which interface is going to be used */
1816 if (val < 8 || val > MAX_TCP_WINDOW) {
1817 err = -EINVAL;
1818 break;
1820 tp->rx_opt.user_mss = val;
1821 break;
1823 case TCP_NODELAY:
1824 if (val) {
1825 /* TCP_NODELAY is weaker than TCP_CORK, so that
1826 * this option on corked socket is remembered, but
1827 * it is not activated until cork is cleared.
1829 * However, when TCP_NODELAY is set we make
1830 * an explicit push, which overrides even TCP_CORK
1831 * for currently queued segments.
1833 tp->nonagle |= TCP_NAGLE_OFF|TCP_NAGLE_PUSH;
1834 tcp_push_pending_frames(sk);
1835 } else {
1836 tp->nonagle &= ~TCP_NAGLE_OFF;
1838 break;
1840 case TCP_CORK:
1841 /* When set indicates to always queue non-full frames.
1842 * Later the user clears this option and we transmit
1843 * any pending partial frames in the queue. This is
1844 * meant to be used alongside sendfile() to get properly
1845 * filled frames when the user (for example) must write
1846 * out headers with a write() call first and then use
1847 * sendfile to send out the data parts.
1849 * TCP_CORK can be set together with TCP_NODELAY and it is
1850 * stronger than TCP_NODELAY.
1852 if (val) {
1853 tp->nonagle |= TCP_NAGLE_CORK;
1854 } else {
1855 tp->nonagle &= ~TCP_NAGLE_CORK;
1856 if (tp->nonagle&TCP_NAGLE_OFF)
1857 tp->nonagle |= TCP_NAGLE_PUSH;
1858 tcp_push_pending_frames(sk);
1860 break;
1862 case TCP_KEEPIDLE:
1863 if (val < 1 || val > MAX_TCP_KEEPIDLE)
1864 err = -EINVAL;
1865 else {
1866 tp->keepalive_time = val * HZ;
1867 if (sock_flag(sk, SOCK_KEEPOPEN) &&
1868 !((1 << sk->sk_state) &
1869 (TCPF_CLOSE | TCPF_LISTEN))) {
1870 __u32 elapsed = tcp_time_stamp - tp->rcv_tstamp;
1871 if (tp->keepalive_time > elapsed)
1872 elapsed = tp->keepalive_time - elapsed;
1873 else
1874 elapsed = 0;
1875 inet_csk_reset_keepalive_timer(sk, elapsed);
1878 break;
1879 case TCP_KEEPINTVL:
1880 if (val < 1 || val > MAX_TCP_KEEPINTVL)
1881 err = -EINVAL;
1882 else
1883 tp->keepalive_intvl = val * HZ;
1884 break;
1885 case TCP_KEEPCNT:
1886 if (val < 1 || val > MAX_TCP_KEEPCNT)
1887 err = -EINVAL;
1888 else
1889 tp->keepalive_probes = val;
1890 break;
1891 case TCP_SYNCNT:
1892 if (val < 1 || val > MAX_TCP_SYNCNT)
1893 err = -EINVAL;
1894 else
1895 icsk->icsk_syn_retries = val;
1896 break;
1898 case TCP_LINGER2:
1899 if (val < 0)
1900 tp->linger2 = -1;
1901 else if (val > sysctl_tcp_fin_timeout / HZ)
1902 tp->linger2 = 0;
1903 else
1904 tp->linger2 = val * HZ;
1905 break;
1907 case TCP_DEFER_ACCEPT:
1908 icsk->icsk_accept_queue.rskq_defer_accept = 0;
1909 if (val > 0) {
1910 /* Translate value in seconds to number of
1911 * retransmits */
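/* For example, assuming TCP_TIMEOUT_INIT/HZ == 3, a value of 10
 * seconds rounds up through 3, 6, 12 and leaves
 * rskq_defer_accept == 3.
 */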
1912 while (icsk->icsk_accept_queue.rskq_defer_accept < 32 &&
1913 val > ((TCP_TIMEOUT_INIT / HZ) <<
1914 icsk->icsk_accept_queue.rskq_defer_accept))
1915 icsk->icsk_accept_queue.rskq_defer_accept++;
1916 icsk->icsk_accept_queue.rskq_defer_accept++;
1918 break;
1920 case TCP_WINDOW_CLAMP:
1921 if (!val) {
1922 if (sk->sk_state != TCP_CLOSE) {
1923 err = -EINVAL;
1924 break;
1926 tp->window_clamp = 0;
1927 } else
1928 tp->window_clamp = val < SOCK_MIN_RCVBUF / 2 ?
1929 SOCK_MIN_RCVBUF / 2 : val;
1930 break;
1932 case TCP_QUICKACK:
1933 if (!val) {
1934 icsk->icsk_ack.pingpong = 1;
1935 } else {
1936 icsk->icsk_ack.pingpong = 0;
1937 if ((1 << sk->sk_state) &
1938 (TCPF_ESTABLISHED | TCPF_CLOSE_WAIT) &&
1939 inet_csk_ack_scheduled(sk)) {
1940 icsk->icsk_ack.pending |= ICSK_ACK_PUSHED;
1941 tcp_cleanup_rbuf(sk, 1);
1942 if (!(val & 1))
1943 icsk->icsk_ack.pingpong = 1;
1946 break;
1948 #ifdef CONFIG_TCP_MD5SIG
1949 case TCP_MD5SIG:
1950 /* Read the IP->Key mappings from userspace */
1951 err = tp->af_specific->md5_parse(sk, optval, optlen);
1952 break;
1953 #endif
1955 default:
1956 err = -ENOPROTOOPT;
1957 break;
1960 release_sock(sk);
1961 return err;
1964 int tcp_setsockopt(struct sock *sk, int level, int optname, char __user *optval,
1965 int optlen)
1967 struct inet_connection_sock *icsk = inet_csk(sk);
1969 if (level != SOL_TCP)
1970 return icsk->icsk_af_ops->setsockopt(sk, level, optname,
1971 optval, optlen);
1972 return do_tcp_setsockopt(sk, level, optname, optval, optlen);
1975 #ifdef CONFIG_COMPAT
1976 int compat_tcp_setsockopt(struct sock *sk, int level, int optname,
1977 char __user *optval, int optlen)
1979 if (level != SOL_TCP)
1980 return inet_csk_compat_setsockopt(sk, level, optname,
1981 optval, optlen);
1982 return do_tcp_setsockopt(sk, level, optname, optval, optlen);
1985 EXPORT_SYMBOL(compat_tcp_setsockopt);
1986 #endif
1988 /* Return information about state of tcp endpoint in API format. */
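/* This backs getsockopt(fd, IPPROTO_TCP, TCP_INFO, ...) (see
 * do_tcp_getsockopt() below) and is also exported for in-kernel users,
 * hence the EXPORT_SYMBOL_GPL; RTT values are reported in microseconds
 * per struct tcp_info.
 */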
1989 void tcp_get_info(struct sock *sk, struct tcp_info *info)
1991 struct tcp_sock *tp = tcp_sk(sk);
1992 const struct inet_connection_sock *icsk = inet_csk(sk);
1993 u32 now = tcp_time_stamp;
1995 memset(info, 0, sizeof(*info));
1997 info->tcpi_state = sk->sk_state;
1998 info->tcpi_ca_state = icsk->icsk_ca_state;
1999 info->tcpi_retransmits = icsk->icsk_retransmits;
2000 info->tcpi_probes = icsk->icsk_probes_out;
2001 info->tcpi_backoff = icsk->icsk_backoff;
2003 if (tp->rx_opt.tstamp_ok)
2004 info->tcpi_options |= TCPI_OPT_TIMESTAMPS;
2005 if (tp->rx_opt.sack_ok)
2006 info->tcpi_options |= TCPI_OPT_SACK;
2007 if (tp->rx_opt.wscale_ok) {
2008 info->tcpi_options |= TCPI_OPT_WSCALE;
2009 info->tcpi_snd_wscale = tp->rx_opt.snd_wscale;
2010 info->tcpi_rcv_wscale = tp->rx_opt.rcv_wscale;
2011 }
2013 if (tp->ecn_flags&TCP_ECN_OK)
2014 info->tcpi_options |= TCPI_OPT_ECN;
2016 info->tcpi_rto = jiffies_to_usecs(icsk->icsk_rto);
2017 info->tcpi_ato = jiffies_to_usecs(icsk->icsk_ack.ato);
2018 info->tcpi_snd_mss = tp->mss_cache;
2019 info->tcpi_rcv_mss = icsk->icsk_ack.rcv_mss;
2021 info->tcpi_unacked = tp->packets_out;
2022 info->tcpi_sacked = tp->sacked_out;
2023 info->tcpi_lost = tp->lost_out;
2024 info->tcpi_retrans = tp->retrans_out;
2025 info->tcpi_fackets = tp->fackets_out;
2027 info->tcpi_last_data_sent = jiffies_to_msecs(now - tp->lsndtime);
2028 info->tcpi_last_data_recv = jiffies_to_msecs(now - icsk->icsk_ack.lrcvtime);
2029 info->tcpi_last_ack_recv = jiffies_to_msecs(now - tp->rcv_tstamp);
2031 info->tcpi_pmtu = icsk->icsk_pmtu_cookie;
2032 info->tcpi_rcv_ssthresh = tp->rcv_ssthresh;
2033 info->tcpi_rtt = jiffies_to_usecs(tp->srtt)>>3;
2034 info->tcpi_rttvar = jiffies_to_usecs(tp->mdev)>>2;
2035 info->tcpi_snd_ssthresh = tp->snd_ssthresh;
2036 info->tcpi_snd_cwnd = tp->snd_cwnd;
2037 info->tcpi_advmss = tp->advmss;
2038 info->tcpi_reordering = tp->reordering;
2040 info->tcpi_rcv_rtt = jiffies_to_usecs(tp->rcv_rtt_est.rtt)>>3;
2041 info->tcpi_rcv_space = tp->rcvq_space.space;
2043 info->tcpi_total_retrans = tp->total_retrans;
2044 }
2046 EXPORT_SYMBOL_GPL(tcp_get_info);
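/* Read back a TCP-level option.  Most options return a single int;
 * TCP_INFO and TCP_CONGESTION copy out their own structure/string and
 * return directly. */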
2048 static int do_tcp_getsockopt(struct sock *sk, int level,
2049 int optname, char __user *optval, int __user *optlen)
2050 {
2051 struct inet_connection_sock *icsk = inet_csk(sk);
2052 struct tcp_sock *tp = tcp_sk(sk);
2053 int val, len;
2055 if (get_user(len, optlen))
2056 return -EFAULT;
2058 len = min_t(unsigned int, len, sizeof(int));
2060 if (len < 0)
2061 return -EINVAL;
2063 switch (optname) {
2064 case TCP_MAXSEG:
2065 val = tp->mss_cache;
2066 if (!val && ((1 << sk->sk_state) & (TCPF_CLOSE | TCPF_LISTEN)))
2067 val = tp->rx_opt.user_mss;
2068 break;
2069 case TCP_NODELAY:
2070 val = !!(tp->nonagle&TCP_NAGLE_OFF);
2071 break;
2072 case TCP_CORK:
2073 val = !!(tp->nonagle&TCP_NAGLE_CORK);
2074 break;
2075 case TCP_KEEPIDLE:
2076 val = (tp->keepalive_time ? : sysctl_tcp_keepalive_time) / HZ;
2077 break;
2078 case TCP_KEEPINTVL:
2079 val = (tp->keepalive_intvl ? : sysctl_tcp_keepalive_intvl) / HZ;
2080 break;
2081 case TCP_KEEPCNT:
2082 val = tp->keepalive_probes ? : sysctl_tcp_keepalive_probes;
2083 break;
2084 case TCP_SYNCNT:
2085 val = icsk->icsk_syn_retries ? : sysctl_tcp_syn_retries;
2086 break;
2087 case TCP_LINGER2:
2088 val = tp->linger2;
2089 if (val >= 0)
2090 val = (val ? : sysctl_tcp_fin_timeout) / HZ;
2091 break;
2092 case TCP_DEFER_ACCEPT:
2093 val = !icsk->icsk_accept_queue.rskq_defer_accept ? 0 :
2094 ((TCP_TIMEOUT_INIT / HZ) << (icsk->icsk_accept_queue.rskq_defer_accept - 1));
2095 break;
2096 case TCP_WINDOW_CLAMP:
2097 val = tp->window_clamp;
2098 break;
2099 case TCP_INFO: {
2100 struct tcp_info info;
2102 if (get_user(len, optlen))
2103 return -EFAULT;
2105 tcp_get_info(sk, &info);
2107 len = min_t(unsigned int, len, sizeof(info));
2108 if (put_user(len, optlen))
2109 return -EFAULT;
2110 if (copy_to_user(optval, &info, len))
2111 return -EFAULT;
2112 return 0;
2113 }
2114 case TCP_QUICKACK:
2115 val = !icsk->icsk_ack.pingpong;
2116 break;
2118 case TCP_CONGESTION:
2119 if (get_user(len, optlen))
2120 return -EFAULT;
2121 len = min_t(unsigned int, len, TCP_CA_NAME_MAX);
2122 if (put_user(len, optlen))
2123 return -EFAULT;
2124 if (copy_to_user(optval, icsk->icsk_ca_ops->name, len))
2125 return -EFAULT;
2126 return 0;
2127 default:
2128 return -ENOPROTOOPT;
2129 }
2131 if (put_user(len, optlen))
2132 return -EFAULT;
2133 if (copy_to_user(optval, &val, len))
2134 return -EFAULT;
2135 return 0;
2136 }
2138 int tcp_getsockopt(struct sock *sk, int level, int optname, char __user *optval,
2139 int __user *optlen)
2140 {
2141 struct inet_connection_sock *icsk = inet_csk(sk);
2143 if (level != SOL_TCP)
2144 return icsk->icsk_af_ops->getsockopt(sk, level, optname,
2145 optval, optlen);
2146 return do_tcp_getsockopt(sk, level, optname, optval, optlen);
2147 }
2149 #ifdef CONFIG_COMPAT
2150 int compat_tcp_getsockopt(struct sock *sk, int level, int optname,
2151 char __user *optval, int __user *optlen)
2152 {
2153 if (level != SOL_TCP)
2154 return inet_csk_compat_getsockopt(sk, level, optname,
2155 optval, optlen);
2156 return do_tcp_getsockopt(sk, level, optname, optval, optlen);
2157 }
2159 EXPORT_SYMBOL(compat_tcp_getsockopt);
2160 #endif
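/* Software GSO for TCP: validate the gso_type, then split an oversized
 * skb into gso_size-sized segments and fix up the sequence number,
 * FIN/PSH/CWR bits and checksum of each resulting segment.  When
 * skb_gso_ok() says the device can take the packet as-is, only
 * gso_segs is recomputed and no segmentation is done here. */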
2162 struct sk_buff *tcp_tso_segment(struct sk_buff *skb, int features)
2163 {
2164 struct sk_buff *segs = ERR_PTR(-EINVAL);
2165 struct tcphdr *th;
2166 unsigned thlen;
2167 unsigned int seq;
2168 __be32 delta;
2169 unsigned int oldlen;
2170 unsigned int len;
2172 if (!pskb_may_pull(skb, sizeof(*th)))
2173 goto out;
2175 th = tcp_hdr(skb);
2176 thlen = th->doff * 4;
2177 if (thlen < sizeof(*th))
2178 goto out;
2180 if (!pskb_may_pull(skb, thlen))
2181 goto out;
2183 oldlen = (u16)~skb->len;
2184 __skb_pull(skb, thlen);
2186 if (skb_gso_ok(skb, features | NETIF_F_GSO_ROBUST)) {
2187 /* Packet is from an untrusted source, reset gso_segs. */
2188 int type = skb_shinfo(skb)->gso_type;
2189 int mss;
2191 if (unlikely(type &
2192 ~(SKB_GSO_TCPV4 |
2193 SKB_GSO_DODGY |
2194 SKB_GSO_TCP_ECN |
2195 SKB_GSO_TCPV6 |
2196 0) ||
2197 !(type & (SKB_GSO_TCPV4 | SKB_GSO_TCPV6))))
2198 goto out;
2200 mss = skb_shinfo(skb)->gso_size;
2201 skb_shinfo(skb)->gso_segs = (skb->len + mss - 1) / mss;
2203 segs = NULL;
2204 goto out;
2205 }
2207 segs = skb_segment(skb, features);
2208 if (IS_ERR(segs))
2209 goto out;
2211 len = skb_shinfo(skb)->gso_size;
2212 delta = htonl(oldlen + (thlen + len));
2214 skb = segs;
2215 th = tcp_hdr(skb);
2216 seq = ntohl(th->seq);
2218 do {
2219 th->fin = th->psh = 0;
2221 th->check = ~csum_fold((__force __wsum)((__force u32)th->check +
2222 (__force u32)delta));
2223 if (skb->ip_summed != CHECKSUM_PARTIAL)
2224 th->check =
2225 csum_fold(csum_partial(skb_transport_header(skb),
2226 thlen, skb->csum));
2228 seq += len;
2229 skb = skb->next;
2230 th = tcp_hdr(skb);
2232 th->seq = htonl(seq);
2233 th->cwr = 0;
2234 } while (skb->next);
2236 delta = htonl(oldlen + (skb->tail - skb->transport_header) +
2237 skb->data_len);
2238 th->check = ~csum_fold((__force __wsum)((__force u32)th->check +
2239 (__force u32)delta));
2240 if (skb->ip_summed != CHECKSUM_PARTIAL)
2241 th->check = csum_fold(csum_partial(skb_transport_header(skb),
2242 thlen, skb->csum));
2244 out:
2245 return segs;
2246 }
2247 EXPORT_SYMBOL(tcp_tso_segment);
2249 #ifdef CONFIG_TCP_MD5SIG
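/* Per-cpu pool of MD5 crypto contexts used to sign segments for the
 * TCP MD5 signature option (RFC 2385).  The pool is allocated lazily
 * and reference counted via tcp_md5sig_users under
 * tcp_md5sig_pool_lock. */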
2250 static unsigned long tcp_md5sig_users;
2251 static struct tcp_md5sig_pool **tcp_md5sig_pool;
2252 static DEFINE_SPINLOCK(tcp_md5sig_pool_lock);
2254 static void __tcp_free_md5sig_pool(struct tcp_md5sig_pool **pool)
2255 {
2256 int cpu;
2257 for_each_possible_cpu(cpu) {
2258 struct tcp_md5sig_pool *p = *per_cpu_ptr(pool, cpu);
2259 if (p) {
2260 if (p->md5_desc.tfm)
2261 crypto_free_hash(p->md5_desc.tfm);
2262 kfree(p);
2263 p = NULL;
2264 }
2265 }
2266 free_percpu(pool);
2267 }
2269 void tcp_free_md5sig_pool(void)
2270 {
2271 struct tcp_md5sig_pool **pool = NULL;
2273 spin_lock_bh(&tcp_md5sig_pool_lock);
2274 if (--tcp_md5sig_users == 0) {
2275 pool = tcp_md5sig_pool;
2276 tcp_md5sig_pool = NULL;
2277 }
2278 spin_unlock_bh(&tcp_md5sig_pool_lock);
2279 if (pool)
2280 __tcp_free_md5sig_pool(pool);
2281 }
2283 EXPORT_SYMBOL(tcp_free_md5sig_pool);
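/* Allocate one tcp_md5sig_pool (with its own md5 transform) for every
 * possible CPU; on any failure, free whatever was already set up. */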
2285 static struct tcp_md5sig_pool **__tcp_alloc_md5sig_pool(void)
2286 {
2287 int cpu;
2288 struct tcp_md5sig_pool **pool;
2290 pool = alloc_percpu(struct tcp_md5sig_pool *);
2291 if (!pool)
2292 return NULL;
2294 for_each_possible_cpu(cpu) {
2295 struct tcp_md5sig_pool *p;
2296 struct crypto_hash *hash;
2298 p = kzalloc(sizeof(*p), GFP_KERNEL);
2299 if (!p)
2300 goto out_free;
2301 *per_cpu_ptr(pool, cpu) = p;
2303 hash = crypto_alloc_hash("md5", 0, CRYPTO_ALG_ASYNC);
2304 if (!hash || IS_ERR(hash))
2305 goto out_free;
2307 p->md5_desc.tfm = hash;
2308 }
2309 return pool;
2310 out_free:
2311 __tcp_free_md5sig_pool(pool);
2312 return NULL;
2313 }
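/* Take a reference on the shared pool, allocating it on first use.  The
 * allocation happens outside the spinlock because it may sleep; racing
 * first users spin in the retry loop until the pool becomes visible. */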
2315 struct tcp_md5sig_pool **tcp_alloc_md5sig_pool(void)
2316 {
2317 struct tcp_md5sig_pool **pool;
2318 int alloc = 0;
2320 retry:
2321 spin_lock_bh(&tcp_md5sig_pool_lock);
2322 pool = tcp_md5sig_pool;
2323 if (tcp_md5sig_users++ == 0) {
2324 alloc = 1;
2325 spin_unlock_bh(&tcp_md5sig_pool_lock);
2326 } else if (!pool) {
2327 tcp_md5sig_users--;
2328 spin_unlock_bh(&tcp_md5sig_pool_lock);
2329 cpu_relax();
2330 goto retry;
2331 } else
2332 spin_unlock_bh(&tcp_md5sig_pool_lock);
2334 if (alloc) {
2335 /* we cannot hold spinlock here because this may sleep. */
2336 struct tcp_md5sig_pool **p = __tcp_alloc_md5sig_pool();
2337 spin_lock_bh(&tcp_md5sig_pool_lock);
2338 if (!p) {
2339 tcp_md5sig_users--;
2340 spin_unlock_bh(&tcp_md5sig_pool_lock);
2341 return NULL;
2342 }
2343 pool = tcp_md5sig_pool;
2344 if (pool) {
2345 /* oops, it has already been assigned. */
2346 spin_unlock_bh(&tcp_md5sig_pool_lock);
2347 __tcp_free_md5sig_pool(p);
2348 } else {
2349 tcp_md5sig_pool = pool = p;
2350 spin_unlock_bh(&tcp_md5sig_pool_lock);
2351 }
2352 }
2353 return pool;
2354 }
2356 EXPORT_SYMBOL(tcp_alloc_md5sig_pool);
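/* Return the pool entry for @cpu and bump the user count; the caller
 * releases the reference with __tcp_put_md5sig_pool(). */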
2358 struct tcp_md5sig_pool *__tcp_get_md5sig_pool(int cpu)
2359 {
2360 struct tcp_md5sig_pool **p;
2361 spin_lock_bh(&tcp_md5sig_pool_lock);
2362 p = tcp_md5sig_pool;
2363 if (p)
2364 tcp_md5sig_users++;
2365 spin_unlock_bh(&tcp_md5sig_pool_lock);
2366 return (p ? *per_cpu_ptr(p, cpu) : NULL);
2367 }
2369 EXPORT_SYMBOL(__tcp_get_md5sig_pool);
2371 void __tcp_put_md5sig_pool(void)
2372 {
2373 tcp_free_md5sig_pool();
2374 }
2376 EXPORT_SYMBOL(__tcp_put_md5sig_pool);
2377 #endif
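/* Final transition to TCP_CLOSE: stop the transmit timers, mark the
 * socket shut down and either wake its owner or, if it is already
 * orphaned, destroy it right away. */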
2379 void tcp_done(struct sock *sk)
2380 {
2381 if (sk->sk_state == TCP_SYN_SENT || sk->sk_state == TCP_SYN_RECV)
2382 TCP_INC_STATS_BH(TCP_MIB_ATTEMPTFAILS);
2384 tcp_set_state(sk, TCP_CLOSE);
2385 tcp_clear_xmit_timers(sk);
2387 sk->sk_shutdown = SHUTDOWN_MASK;
2389 if (!sock_flag(sk, SOCK_DEAD))
2390 sk->sk_state_change(sk);
2391 else
2392 inet_csk_destroy_sock(sk);
2393 }
2394 EXPORT_SYMBOL_GPL(tcp_done);
2396 extern void __skb_cb_too_small_for_tcp(int, int);
2397 extern struct tcp_congestion_ops tcp_reno;
2399 static __initdata unsigned long thash_entries;
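/* "thash_entries=N" boot parameter: overrides the automatic sizing of
 * the established-connections hash table in tcp_init(). */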
2400 static int __init set_thash_entries(char *str)
2401 {
2402 if (!str)
2403 return 0;
2404 thash_entries = simple_strtoul(str, &str, 0);
2405 return 1;
2406 }
2407 __setup("thash_entries=", set_thash_entries);
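/* Boot-time initialisation: create the bind-bucket cache, size and
 * allocate the established and bind hash tables, scale the TCP memory
 * sysctls to available RAM and register Reno congestion control. */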
2409 void __init tcp_init(void)
2410 {
2411 struct sk_buff *skb = NULL;
2412 unsigned long limit;
2413 int order, i, max_share;
2415 if (sizeof(struct tcp_skb_cb) > sizeof(skb->cb))
2416 __skb_cb_too_small_for_tcp(sizeof(struct tcp_skb_cb),
2417 sizeof(skb->cb));
2419 tcp_hashinfo.bind_bucket_cachep =
2420 kmem_cache_create("tcp_bind_bucket",
2421 sizeof(struct inet_bind_bucket), 0,
2422 SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL, NULL);
2424 /* Size and allocate the main established and bind bucket
2425 * hash tables.
2426 *
2427 * The methodology is similar to that of the buffer cache.
2428 */
2429 tcp_hashinfo.ehash =
2430 alloc_large_system_hash("TCP established",
2431 sizeof(struct inet_ehash_bucket),
2432 thash_entries,
2433 (num_physpages >= 128 * 1024) ?
2434 13 : 15,
2436 &tcp_hashinfo.ehash_size,
2437 NULL,
2439 tcp_hashinfo.ehash_size = 1 << tcp_hashinfo.ehash_size;
2440 for (i = 0; i < tcp_hashinfo.ehash_size; i++) {
2441 rwlock_init(&tcp_hashinfo.ehash[i].lock);
2442 INIT_HLIST_HEAD(&tcp_hashinfo.ehash[i].chain);
2443 INIT_HLIST_HEAD(&tcp_hashinfo.ehash[i].twchain);
2444 }
2446 tcp_hashinfo.bhash =
2447 alloc_large_system_hash("TCP bind",
2448 sizeof(struct inet_bind_hashbucket),
2449 tcp_hashinfo.ehash_size,
2450 (num_physpages >= 128 * 1024) ?
2451 13 : 15,
2453 &tcp_hashinfo.bhash_size,
2454 NULL,
2455 64 * 1024);
2456 tcp_hashinfo.bhash_size = 1 << tcp_hashinfo.bhash_size;
2457 for (i = 0; i < tcp_hashinfo.bhash_size; i++) {
2458 spin_lock_init(&tcp_hashinfo.bhash[i].lock);
2459 INIT_HLIST_HEAD(&tcp_hashinfo.bhash[i].chain);
2460 }
2462 /* Try to be a bit smarter and adjust defaults depending
2463 * on available memory.
2465 for (order = 0; ((1 << order) << PAGE_SHIFT) <
2466 (tcp_hashinfo.bhash_size * sizeof(struct inet_bind_hashbucket));
2467 order++)
2468 ;
2469 if (order >= 4) {
2470 sysctl_local_port_range[0] = 32768;
2471 sysctl_local_port_range[1] = 61000;
2472 tcp_death_row.sysctl_max_tw_buckets = 180000;
2473 sysctl_tcp_max_orphans = 4096 << (order - 4);
2474 sysctl_max_syn_backlog = 1024;
2475 } else if (order < 3) {
2476 sysctl_local_port_range[0] = 1024 * (3 - order);
2477 tcp_death_row.sysctl_max_tw_buckets >>= (3 - order);
2478 sysctl_tcp_max_orphans >>= (3 - order);
2479 sysctl_max_syn_backlog = 128;
2480 }
2482 /* Set the pressure threshold to be a fraction of global memory that
2483 * is up to 1/2 at 256 MB, decreasing toward zero with the amount of
2484 * memory, with a floor of 128 pages.
2485 */
2486 limit = min(nr_all_pages, 1UL<<(28-PAGE_SHIFT)) >> (20-PAGE_SHIFT);
2487 limit = (limit * (nr_all_pages >> (20-PAGE_SHIFT))) >> (PAGE_SHIFT-11);
2488 limit = max(limit, 128UL);
2489 sysctl_tcp_mem[0] = limit / 4 * 3;
2490 sysctl_tcp_mem[1] = limit;
2491 sysctl_tcp_mem[2] = sysctl_tcp_mem[0] * 2;
2493 /* Set per-socket limits to no more than 1/128 the pressure threshold */
2494 limit = ((unsigned long)sysctl_tcp_mem[1]) << (PAGE_SHIFT - 7);
2495 max_share = min(4UL*1024*1024, limit);
2497 sysctl_tcp_wmem[0] = SK_STREAM_MEM_QUANTUM;
2498 sysctl_tcp_wmem[1] = 16*1024;
2499 sysctl_tcp_wmem[2] = max(64*1024, max_share);
2501 sysctl_tcp_rmem[0] = SK_STREAM_MEM_QUANTUM;
2502 sysctl_tcp_rmem[1] = 87380;
2503 sysctl_tcp_rmem[2] = max(87380, max_share);
2505 printk(KERN_INFO "TCP: Hash tables configured "
2506 "(established %d bind %d)\n",
2507 tcp_hashinfo.ehash_size, tcp_hashinfo.bhash_size);
2509 tcp_register_congestion_control(&tcp_reno);
2510 }
2512 EXPORT_SYMBOL(tcp_close);
2513 EXPORT_SYMBOL(tcp_disconnect);
2514 EXPORT_SYMBOL(tcp_getsockopt);
2515 EXPORT_SYMBOL(tcp_ioctl);
2516 EXPORT_SYMBOL(tcp_poll);
2517 EXPORT_SYMBOL(tcp_read_sock);
2518 EXPORT_SYMBOL(tcp_recvmsg);
2519 EXPORT_SYMBOL(tcp_sendmsg);
2520 EXPORT_SYMBOL(tcp_sendpage);
2521 EXPORT_SYMBOL(tcp_setsockopt);
2522 EXPORT_SYMBOL(tcp_shutdown);
2523 EXPORT_SYMBOL(tcp_statistics);