/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		Definitions for the TCP module.
 *
 * Version:	@(#)tcp.h	1.0.5	05/23/93
 *
 * Authors:	Ross Biro, <bir7@leland.Stanford.Edu>
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 */
#ifndef _TCP_H
#define _TCP_H
#define TCP_DEBUG 1
#define FASTRETRANS_DEBUG 1

/* Be paranoid about data immediately beyond right edge of window. */
#undef TCP_FORMAL_WINDOW

/* Cancel timers, when they are not required. */
#undef TCP_CLEAR_TIMERS

#include <linux/config.h>
#include <linux/tcp.h>
#include <linux/slab.h>
#include <net/checksum.h>
#include <net/sock.h>
/* This is for all connections with a full identity, no wildcards.
 * New scheme, half the table is for TIME_WAIT, the other half is
 * for the rest.  I'll experiment with dynamic table growth later.
 */
struct tcp_ehash_bucket {
	rwlock_t	lock;
	struct sock	*chain;
} __attribute__((__aligned__(8)));

/* This is for listening sockets, thus all sockets which possess wildcards. */
#define TCP_LHTABLE_SIZE	32	/* Yes, really, this is all you need. */
/* There are a few simple rules, which allow for local port reuse by
 * an application.  In essence:
 *
 *	1) Sockets bound to different interfaces may share a local port.
 *	   Failing that, goto test 2.
 *	2) If all sockets have sk->reuse set, and none of them are in
 *	   TCP_LISTEN state, the port may be shared.
 *	   Failing that, goto test 3.
 *	3) If all sockets are bound to a specific sk->rcv_saddr local
 *	   address, and none of them are the same, the port may be
 *	   shared.
 *	   Failing this, the port cannot be shared.
 *
 * The interesting point is test #2.  This is what an FTP server does
 * all day.  To optimize this case we use a specific flag bit defined
 * below.  As we add sockets to a bind bucket list, we perform a
 * check of: (newsk->reuse && (newsk->state != TCP_LISTEN))
 * As long as all sockets added to a bind bucket pass this test,
 * the flag bit will be set.
 * The resulting situation is that tcp_v[46]_verify_bind() can just check
 * for this flag bit, if it is set and the socket trying to bind has
 * sk->reuse set, we don't even have to walk the owners list at all,
 * we return that it is ok to bind this socket to the requested local port.
 *
 * Sounds like a lot of work, but it is worth it.  In a more naive
 * implementation (ie. current FreeBSD etc.) the entire list of ports
 * must be walked for each data port opened by an ftp server.  Needless
 * to say, this does not scale at all.  With a couple thousand FTP
 * users logged onto your box, isn't it nice to know that new data
 * ports are created in O(1) time?  I thought so. ;-)	-DaveM
 */
struct tcp_bind_bucket {
	unsigned short		port;
	unsigned short		fastreuse;
	struct tcp_bind_bucket	*next;
	struct sock		*owners;
	struct tcp_bind_bucket	**pprev;
};
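/* Illustrative sketch (editor's addition, not part of the original header):
 * roughly how a bind-time fast path can use the fastreuse bit described
 * above.  The surrounding code and the names tb and snum are assumptions
 * used for illustration only.
 *
 *	struct tcp_bind_bucket *tb = ...;	// bucket found for local port snum
 *	if (tb->fastreuse && sk->reuse && sk->state != TCP_LISTEN)
 *		goto success;			// O(1): no need to walk tb->owners
 *	// otherwise fall back to walking tb->owners, applying rules 1-3 above
 */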
struct tcp_bind_hashbucket {
	spinlock_t		lock;
	struct tcp_bind_bucket	*chain;
};
extern struct tcp_hashinfo {
	/* This is for sockets with full identity only.  Sockets here will
	 * always be without wildcards and will have the following invariant:
	 *
	 *          TCP_ESTABLISHED <= sk->state < TCP_CLOSE
	 *
	 * First half of the table is for sockets not in TIME_WAIT, second half
	 * is for TIME_WAIT sockets only.
	 */
	struct tcp_ehash_bucket *__tcp_ehash;

	/* Ok, let's try this, I give up, we do need a local binding
	 * TCP hash as well as the others for fast bind/connect.
	 */
	struct tcp_bind_hashbucket *__tcp_bhash;

	int __tcp_bhash_size;
	int __tcp_ehash_size;

	/* All sockets in TCP_LISTEN state will be in here.  This is the only
	 * table where wildcard'd TCP sockets can exist.  Hash function here
	 * is just local port number.
	 */
	struct sock *__tcp_listening_hash[TCP_LHTABLE_SIZE];

	/* All the above members are written once at bootup and
	 * never written again _or_ are predominantly read-access.
	 *
	 * Now align to a new cache line as all the following members
	 * are often dirty.
	 */
	rwlock_t __tcp_lhash_lock
		__attribute__((__aligned__(SMP_CACHE_BYTES)));
	atomic_t __tcp_lhash_users;
	wait_queue_head_t __tcp_lhash_wait;
	spinlock_t __tcp_portalloc_lock;
} tcp_hashinfo;
#define tcp_ehash	(tcp_hashinfo.__tcp_ehash)
#define tcp_bhash	(tcp_hashinfo.__tcp_bhash)
#define tcp_ehash_size	(tcp_hashinfo.__tcp_ehash_size)
#define tcp_bhash_size	(tcp_hashinfo.__tcp_bhash_size)
#define tcp_listening_hash (tcp_hashinfo.__tcp_listening_hash)
#define tcp_lhash_lock	(tcp_hashinfo.__tcp_lhash_lock)
#define tcp_lhash_users	(tcp_hashinfo.__tcp_lhash_users)
#define tcp_lhash_wait	(tcp_hashinfo.__tcp_lhash_wait)
#define tcp_portalloc_lock (tcp_hashinfo.__tcp_portalloc_lock)

extern kmem_cache_t *tcp_bucket_cachep;
extern struct tcp_bind_bucket *tcp_bucket_create(struct tcp_bind_hashbucket *head,
						 unsigned short snum);
extern void tcp_bucket_unlock(struct sock *sk);
extern int tcp_port_rover;
extern struct sock *tcp_v4_lookup_listener(u32 addr, unsigned short hnum, int dif);
/* These are AF independent. */
static __inline__ int tcp_bhashfn(__u16 lport)
{
	return (lport & (tcp_bhash_size - 1));
}
/* This is a TIME_WAIT bucket.  It works around the memory consumption
 * problems of sockets in such a state on heavily loaded servers, but
 * without violating the protocol specification.
 */
struct tcp_tw_bucket {
	/* These _must_ match the beginning of struct sock precisely.
	 * XXX Yes I know this is gross, but I'd have to edit every single
	 * XXX networking file if I created a "struct sock_header". -DaveM
	 */
	__u32			daddr;
	__u32			rcv_saddr;
	__u16			dport;
	unsigned short		num;
	int			bound_dev_if;
	struct sock		*next;
	struct sock		**pprev;
	struct sock		*bind_next;
	struct sock		**bind_pprev;
	unsigned char		state,
				substate; /* "zapped" is replaced with "substate" */
	__u16			sport;
	unsigned short		family;
	unsigned char		reuse,
				rcv_wscale; /* It is also TW bucket specific */
	atomic_t		refcnt;

	/* And these are ours. */
	int			hashent;
	int			timeout;
	__u32			rcv_nxt;
	__u32			snd_nxt;
	__u32			rcv_wnd;
	__u32			syn_seq;
	__u32			ts_recent;
	long			ts_recent_stamp;
	unsigned long		ttd;
	struct tcp_bind_bucket	*tb;
	struct tcp_tw_bucket	*next_death;
	struct tcp_tw_bucket	**pprev_death;

#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
	struct in6_addr		v6_daddr;
	struct in6_addr		v6_rcv_saddr;
#endif
};
extern kmem_cache_t *tcp_timewait_cachep;

static inline void tcp_tw_put(struct tcp_tw_bucket *tw)
{
	if (atomic_dec_and_test(&tw->refcnt)) {
#ifdef INET_REFCNT_DEBUG
		printk(KERN_DEBUG "tw_bucket %p released\n", tw);
#endif
		kmem_cache_free(tcp_timewait_cachep, tw);
	}
}

extern atomic_t tcp_orphan_count;
extern int tcp_tw_count;
extern void tcp_time_wait(struct sock *sk, int state, int timeo);
extern void tcp_timewait_kill(struct tcp_tw_bucket *tw);
extern void tcp_tw_schedule(struct tcp_tw_bucket *tw, int timeo);
extern void tcp_tw_deschedule(struct tcp_tw_bucket *tw);
/* Socket demux engine toys. */
#ifdef __BIG_ENDIAN
#define TCP_COMBINED_PORTS(__sport, __dport) \
	(((__u32)(__sport)<<16) | (__u32)(__dport))
#else /* __LITTLE_ENDIAN */
#define TCP_COMBINED_PORTS(__sport, __dport) \
	(((__u32)(__dport)<<16) | (__u32)(__sport))
#endif

#if (BITS_PER_LONG == 64)
#ifdef __BIG_ENDIAN
#define TCP_V4_ADDR_COOKIE(__name, __saddr, __daddr) \
	__u64 __name = (((__u64)(__saddr))<<32)|((__u64)(__daddr));
#else /* __LITTLE_ENDIAN */
#define TCP_V4_ADDR_COOKIE(__name, __saddr, __daddr) \
	__u64 __name = (((__u64)(__daddr))<<32)|((__u64)(__saddr));
#endif /* __BIG_ENDIAN */
#define TCP_IPV4_MATCH(__sk, __cookie, __saddr, __daddr, __ports, __dif)	\
	(((*((__u64 *)&((__sk)->daddr))) == (__cookie))	&&			\
	 ((*((__u32 *)&((__sk)->dport))) == (__ports))	&&			\
	 (!((__sk)->bound_dev_if) || ((__sk)->bound_dev_if == (__dif))))
#else /* 32-bit arch */
#define TCP_V4_ADDR_COOKIE(__name, __saddr, __daddr)
#define TCP_IPV4_MATCH(__sk, __cookie, __saddr, __daddr, __ports, __dif)	\
	(((__sk)->daddr			== (__saddr))	&&			\
	 ((__sk)->rcv_saddr		== (__daddr))	&&			\
	 ((*((__u32 *)&((__sk)->dport))) == (__ports))	&&			\
	 (!((__sk)->bound_dev_if) || ((__sk)->bound_dev_if == (__dif))))
#endif /* 64-bit arch */

#define TCP_IPV6_MATCH(__sk, __saddr, __daddr, __ports, __dif)		   \
	(((*((__u32 *)&((__sk)->dport))) == (__ports))			&& \
	 ((__sk)->family		 == AF_INET6)			&& \
	 !ipv6_addr_cmp(&(__sk)->net_pinfo.af_inet6.daddr, (__saddr))	&& \
	 !ipv6_addr_cmp(&(__sk)->net_pinfo.af_inet6.rcv_saddr, (__daddr)) && \
	 (!((__sk)->bound_dev_if) || ((__sk)->bound_dev_if == (__dif))))
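/* Illustrative sketch (editor's addition): how __tcp_v4_lookup_established()-
 * style code typically drives these macros.  The exact names used here are
 * assumptions for illustration only.
 *
 *	__u32 ports = TCP_COMBINED_PORTS(sport, hnum);
 *	TCP_V4_ADDR_COOKIE(acookie, saddr, daddr)
 *	for (sk = head->chain; sk; sk = sk->next)
 *		if (TCP_IPV4_MATCH(sk, acookie, saddr, daddr, ports, dif))
 *			goto hit;	// one 64-bit plus one 32-bit compare on
 *					// 64-bit hosts instead of four compares
 */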
/* These can have wildcards, don't try too hard. */
static __inline__ int tcp_lhashfn(unsigned short num)
{
	return num & (TCP_LHTABLE_SIZE - 1);
}

static __inline__ int tcp_sk_listen_hashfn(struct sock *sk)
{
	return tcp_lhashfn(sk->num);
}
#define MAX_TCP_HEADER	(128 + MAX_HEADER)

/*
 * Never offer a window over 32767 without using window scaling. Some
 * poor stacks do signed 16bit maths!
 */
#define MAX_TCP_WINDOW		32767

/* Minimal accepted MSS. It is (60+60+8) - (20+20). */
#define TCP_MIN_MSS		88

/* Minimal RCV_MSS. */
#define TCP_MIN_RCVMSS		536

/* After receiving this amount of duplicate ACKs fast retransmit starts. */
#define TCP_FASTRETRANS_THRESH	3

/* Maximal reordering. */
#define TCP_MAX_REORDERING	127

/* Maximal number of ACKs sent quickly to accelerate slow-start. */
#define TCP_MAX_QUICKACKS	16

/* urg_data states */
#define TCP_URG_VALID	0x0100
#define TCP_URG_NOTYET	0x0200
#define TCP_URG_READ	0x0400
#define TCP_RETR1	3	/*
				 * This is how many retries it does before it
				 * tries to figure out if the gateway is
				 * down. Minimal RFC value is 3; it corresponds
				 * to ~3sec-8min depending on RTO.
				 */

#define TCP_RETR2	15	/*
				 * This should take at least
				 * 90 minutes to time out.
				 * RFC1122 says that the limit is 100 sec.
				 * 15 is ~13-30min depending on RTO.
				 */

#define TCP_SYN_RETRIES	5	/* number of times to retry active opening a
				 * connection: ~180sec is RFC minimum	*/

#define TCP_SYNACK_RETRIES 5	/* number of times to retry passive opening a
				 * connection: ~180sec is RFC minimum	*/

#define TCP_ORPHAN_RETRIES 7	/* number of times to retry on an orphaned
				 * socket. 7 is ~50sec-16min.
				 */

#define TCP_TIMEWAIT_LEN (60*HZ) /* how long to wait to destroy TIME-WAIT
				  * state, about 60 seconds	*/
#define TCP_FIN_TIMEOUT	TCP_TIMEWAIT_LEN
				 /* BSD style FIN_WAIT2 deadlock breaker.
				  * It used to be 3min, new value is 60sec,
				  * to combine FIN-WAIT-2 timeout with
				  * TIME-WAIT timer.
				  */
#define TCP_DELACK_MAX	(HZ/5)	/* maximal time to delay before sending an ACK */
#if HZ >= 100
#define TCP_DELACK_MIN	(HZ/25)	/* minimal time to delay before sending an ACK */
#define TCP_ATO_MIN	(HZ/25)
#else
#define TCP_DELACK_MIN	4
#define TCP_ATO_MIN	4
#endif
#define TCP_RTO_MAX	(120*HZ)
#define TCP_RTO_MIN	(HZ/5)
#define TCP_TIMEOUT_INIT (3*HZ)	/* RFC 1122 initial RTO value	*/

#define TCP_RESOURCE_PROBE_INTERVAL (HZ/2)	/* Maximal interval between probes
						 * for local resources.
						 */

#define TCP_KEEPALIVE_TIME	(120*60*HZ)	/* two hours */
#define TCP_KEEPALIVE_PROBES	9		/* Max of 9 keepalive probes	*/
#define TCP_KEEPALIVE_INTVL	(75*HZ)

#define MAX_TCP_KEEPIDLE	32767
#define MAX_TCP_KEEPINTVL	32767
#define MAX_TCP_KEEPCNT		127
#define MAX_TCP_SYNCNT		127
/* TIME_WAIT reaping mechanism. */
#define TCP_TWKILL_SLOTS	8	/* Please keep this a power of 2. */
#define TCP_TWKILL_PERIOD	(TCP_TIMEWAIT_LEN/TCP_TWKILL_SLOTS)

#define TCP_SYNQ_INTERVAL	(HZ/5)	/* Period of SYNACK timer */
#define TCP_SYNQ_HSIZE		64	/* Size of SYNACK hash table */

#define TCP_PAWS_24DAYS	(60 * 60 * 24 * 24)
#define TCP_PAWS_MSL	60		/* Per-host timestamps are invalidated
					 * after this time. It should be equal
					 * (or greater than) TCP_TIMEWAIT_LEN
					 * to provide reliability equal to one
					 * provided by timewait state.
					 */
#define TCP_PAWS_WINDOW	1		/* Replay window for per-host
					 * timestamps. It must be less than
					 * minimal timewait lifetime.
					 */

#define TCP_TW_RECYCLE_SLOTS_LOG	5
#define TCP_TW_RECYCLE_SLOTS		(1<<TCP_TW_RECYCLE_SLOTS_LOG)
/* If time > 4sec, it is "slow" path, no recycling is required,
   so that we select tick to get range about 4 seconds.
 */

#if HZ <= 16 || HZ > 4096
# error Unsupported: HZ <= 16 or HZ > 4096
#elif HZ <= 32
# define TCP_TW_RECYCLE_TICK (5+2-TCP_TW_RECYCLE_SLOTS_LOG)
#elif HZ <= 64
# define TCP_TW_RECYCLE_TICK (6+2-TCP_TW_RECYCLE_SLOTS_LOG)
#elif HZ <= 128
# define TCP_TW_RECYCLE_TICK (7+2-TCP_TW_RECYCLE_SLOTS_LOG)
#elif HZ <= 256
# define TCP_TW_RECYCLE_TICK (8+2-TCP_TW_RECYCLE_SLOTS_LOG)
#elif HZ <= 512
# define TCP_TW_RECYCLE_TICK (9+2-TCP_TW_RECYCLE_SLOTS_LOG)
#elif HZ <= 1024
# define TCP_TW_RECYCLE_TICK (10+2-TCP_TW_RECYCLE_SLOTS_LOG)
#elif HZ <= 2048
# define TCP_TW_RECYCLE_TICK (11+2-TCP_TW_RECYCLE_SLOTS_LOG)
#else
# define TCP_TW_RECYCLE_TICK (12+2-TCP_TW_RECYCLE_SLOTS_LOG)
#endif
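/* Worked example (editor's addition): with HZ = 100 the HZ <= 128 branch
 * applies, so TCP_TW_RECYCLE_TICK = 7 + 2 - 5 = 4.  Each recycle slot then
 * covers 2^4 = 16 jiffies, and the TCP_TW_RECYCLE_SLOTS = 32 slots together
 * span 512 jiffies, roughly 4-5 seconds depending on where HZ falls in the
 * 65..128 bracket, which is the "about 4 seconds" range mentioned above.
 */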
/*
 *	TCP option
 */

#define TCPOPT_NOP		1	/* Padding */
#define TCPOPT_EOL		0	/* End of options */
#define TCPOPT_MSS		2	/* Segment size negotiating */
#define TCPOPT_WINDOW		3	/* Window scaling */
#define TCPOPT_SACK_PERM	4	/* SACK Permitted */
#define TCPOPT_SACK		5	/* SACK Block */
#define TCPOPT_TIMESTAMP	8	/* Better RTT estimations/PAWS */

/*
 *	TCP option lengths
 */

#define TCPOLEN_MSS		4
#define TCPOLEN_WINDOW		3
#define TCPOLEN_SACK_PERM	2
#define TCPOLEN_TIMESTAMP	10

/* But this is what stacks really send out. */
#define TCPOLEN_TSTAMP_ALIGNED		12
#define TCPOLEN_WSCALE_ALIGNED		4
#define TCPOLEN_SACKPERM_ALIGNED	4
#define TCPOLEN_SACK_BASE		2
#define TCPOLEN_SACK_BASE_ALIGNED	4
#define TCPOLEN_SACK_PERBLOCK		8

#define TCP_TIME_RETRANS	1	/* Retransmit timer */
#define TCP_TIME_DACK		2	/* Delayed ack timer */
#define TCP_TIME_PROBE0		3	/* Zero window probe timer */
#define TCP_TIME_KEEPOPEN	4	/* Keepalive timer */
/* sysctl variables for tcp */
extern int sysctl_max_syn_backlog;
extern int sysctl_tcp_timestamps;
extern int sysctl_tcp_window_scaling;
extern int sysctl_tcp_sack;
extern int sysctl_tcp_fin_timeout;
extern int sysctl_tcp_tw_recycle;
extern int sysctl_tcp_keepalive_time;
extern int sysctl_tcp_keepalive_probes;
extern int sysctl_tcp_keepalive_intvl;
extern int sysctl_tcp_syn_retries;
extern int sysctl_tcp_synack_retries;
extern int sysctl_tcp_retries1;
extern int sysctl_tcp_retries2;
extern int sysctl_tcp_orphan_retries;
extern int sysctl_tcp_syncookies;
extern int sysctl_tcp_retrans_collapse;
extern int sysctl_tcp_stdurg;
extern int sysctl_tcp_rfc1337;
extern int sysctl_tcp_tw_recycle;
extern int sysctl_tcp_abort_on_overflow;
extern int sysctl_tcp_max_orphans;
extern int sysctl_tcp_max_tw_buckets;
extern int sysctl_tcp_fack;
extern int sysctl_tcp_reordering;
extern int sysctl_tcp_ecn;
extern int sysctl_tcp_dsack;
extern int sysctl_tcp_mem[3];
extern int sysctl_tcp_wmem[3];
extern int sysctl_tcp_rmem[3];
extern int sysctl_tcp_app_win;
extern int sysctl_tcp_adv_win_scale;

extern atomic_t tcp_memory_allocated;
extern atomic_t tcp_sockets_allocated;
extern int tcp_memory_pressure;
struct open_request;

struct or_calltable {
	int  family;
	int  (*rtx_syn_ack)	(struct sock *sk, struct open_request *req, struct dst_entry*);
	void (*send_ack)	(struct sk_buff *skb, struct open_request *req);
	void (*destructor)	(struct open_request *req);
	void (*send_reset)	(struct sk_buff *skb);
};

struct tcp_v4_open_req {
	__u32			loc_addr;
	__u32			rmt_addr;
	struct ip_options	*opt;
};

#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
struct tcp_v6_open_req {
	struct in6_addr		loc_addr;
	struct in6_addr		rmt_addr;
	struct sk_buff		*pktopts;
	int			iif;
};
#endif
/* this structure is too big */
struct open_request {
	struct open_request	*dl_next; /* Must be first member! */
	__u32			rcv_isn;
	__u32			snt_isn;
	__u16			rmt_port;
	__u16			mss;
	__u8			retrans;
	__u8			index;
	__u16	snd_wscale : 4,
		rcv_wscale : 4,
		tstamp_ok : 1,
		sack_ok : 1,
		wscale_ok : 1,
		ecn_ok : 1,
		acked : 1;
	/* The following two fields can be easily recomputed I think -AK */
	__u32			window_clamp;	/* window clamp at creation time */
	__u32			rcv_wnd;	/* rcv_wnd offered first time */
	__u32			ts_recent;
	unsigned long		expires;
	struct or_calltable	*class;
	struct sock		*sk;
	union {
		struct tcp_v4_open_req v4_req;
#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
		struct tcp_v6_open_req v6_req;
#endif
	} af;
};

/* SLAB cache for open requests. */
extern kmem_cache_t *tcp_openreq_cachep;

#define tcp_openreq_alloc()		kmem_cache_alloc(tcp_openreq_cachep, SLAB_ATOMIC)
#define tcp_openreq_fastfree(req)	kmem_cache_free(tcp_openreq_cachep, req)

static inline void tcp_openreq_free(struct open_request *req)
{
	req->class->destructor(req);
	tcp_openreq_fastfree(req);
}

#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
#define TCP_INET_FAMILY(fam) ((fam) == AF_INET)
#else
#define TCP_INET_FAMILY(fam) 1
#endif
/*
 *	Pointers to address related TCP functions
 *	(i.e. things that depend on the address family)
 *
 *	BUGGG_FUTURE: all the idea behind this struct is wrong.
 *	It mixes socket frontend with transport function.
 *	With port sharing between IPv6/v4 it gives the only advantage,
 *	only poor IPv6 needs to permanently recheck, that it
 *	is still IPv6 8)8) It must be cleaned up as soon as possible.
 *						--ANK (980802)
 */

struct tcp_func {
	int			(*queue_xmit)		(struct sk_buff *skb);

	void			(*send_check)		(struct sock *sk,
							 struct tcphdr *th,
							 int len,
							 struct sk_buff *skb);

	int			(*rebuild_header)	(struct sock *sk);

	int			(*conn_request)		(struct sock *sk,
							 struct sk_buff *skb);

	struct sock *		(*syn_recv_sock)	(struct sock *sk,
							 struct sk_buff *skb,
							 struct open_request *req,
							 struct dst_entry *dst);

	int			(*hash_connecting)	(struct sock *sk);

	int			(*remember_stamp)	(struct sock *sk);

	__u16			net_header_len;

	int			(*setsockopt)		(struct sock *sk,
							 int level,
							 int optname,
							 char *optval,
							 int optlen);

	int			(*getsockopt)		(struct sock *sk,
							 int level,
							 int optname,
							 char *optval,
							 int *optlen);

	void			(*addr2sockaddr)	(struct sock *sk,
							 struct sockaddr *);

	int sockaddr_len;
};
/*
 * The next routines deal with comparing 32 bit unsigned ints
 * and worry about wraparound (automatic with unsigned arithmetic).
 */

extern __inline int before(__u32 seq1, __u32 seq2)
{
	return (__s32)(seq1-seq2) < 0;
}

extern __inline int after(__u32 seq1, __u32 seq2)
{
	return (__s32)(seq2-seq1) < 0;
}

/* is s2<=s1<=s3 ? */
extern __inline int between(__u32 seq1, __u32 seq2, __u32 seq3)
{
	return seq3 - seq2 >= seq1 - seq2;
}
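/* Worked example (editor's addition): the signed-difference trick handles
 * sequence-number wraparound.  With seq1 = 0xfffffff0 and seq2 = 0x00000010
 * (seq2 is 0x20 bytes later in sequence space but has wrapped),
 * seq1 - seq2 = 0xffffffe0, which as __s32 is -32, so before(seq1, seq2) is
 * true, as intended.  Likewise between(5, 0xfffffff0, 10) is true, because
 * both unsigned differences are taken relative to seq2.
 */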
extern struct proto tcp_prot;

extern struct tcp_mib tcp_statistics[NR_CPUS*2];
#define TCP_INC_STATS(field)		SNMP_INC_STATS(tcp_statistics, field)
#define TCP_INC_STATS_BH(field)		SNMP_INC_STATS_BH(tcp_statistics, field)
#define TCP_INC_STATS_USER(field)	SNMP_INC_STATS_USER(tcp_statistics, field)

extern void			tcp_put_port(struct sock *sk);
extern void			__tcp_put_port(struct sock *sk);
extern void			tcp_inherit_port(struct sock *sk, struct sock *child);

extern void			tcp_v4_err(struct sk_buff *skb,
					   unsigned char *, int);

extern void			tcp_shutdown (struct sock *sk, int how);

extern int			tcp_v4_rcv(struct sk_buff *skb,
					   unsigned short len);

extern int			tcp_v4_remember_stamp(struct sock *sk);

extern int			tcp_v4_tw_remember_stamp(struct tcp_tw_bucket *tw);

extern int			tcp_sendmsg(struct sock *sk, struct msghdr *msg, int size);

extern int			tcp_ioctl(struct sock *sk,
					  int cmd,
					  unsigned long arg);

extern int			tcp_rcv_state_process(struct sock *sk,
						      struct sk_buff *skb,
						      struct tcphdr *th,
						      unsigned len);

extern int			tcp_rcv_established(struct sock *sk,
						    struct sk_buff *skb,
						    struct tcphdr *th,
						    unsigned len);
enum tcp_ack_state_t
{
	TCP_ACK_SCHED = 1,
	TCP_ACK_TIMER = 2,
	TCP_ACK_PUSHED = 4
};

static inline void tcp_schedule_ack(struct tcp_opt *tp)
{
	tp->ack.pending |= TCP_ACK_SCHED;
}

static inline int tcp_ack_scheduled(struct tcp_opt *tp)
{
	return tp->ack.pending&TCP_ACK_SCHED;
}

static __inline__ void tcp_dec_quickack_mode(struct tcp_opt *tp)
{
	if (tp->ack.quick && --tp->ack.quick == 0) {
		/* Leaving quickack mode we deflate ATO. */
		tp->ack.ato = TCP_ATO_MIN;
	}
}

extern void tcp_enter_quickack_mode(struct tcp_opt *tp);

static __inline__ void tcp_delack_init(struct tcp_opt *tp)
{
	memset(&tp->ack, 0, sizeof(tp->ack));
}

static inline void tcp_clear_options(struct tcp_opt *tp)
{
	tp->tstamp_ok = tp->sack_ok = tp->wscale_ok = tp->snd_wscale = 0;
}
enum tcp_tw_status
{
	TCP_TW_SUCCESS = 0,
	TCP_TW_RST = 1,
	TCP_TW_ACK = 2,
	TCP_TW_SYN = 3
};

extern enum tcp_tw_status	tcp_timewait_state_process(struct tcp_tw_bucket *tw,
							   struct sk_buff *skb,
							   struct tcphdr *th,
							   unsigned len);

extern struct sock *		tcp_check_req(struct sock *sk,struct sk_buff *skb,
					      struct open_request *req,
					      struct open_request **prev);
extern int			tcp_child_process(struct sock *parent,
						  struct sock *child,
						  struct sk_buff *skb);
extern void			tcp_enter_loss(struct sock *sk, int how);
extern void			tcp_clear_retrans(struct tcp_opt *tp);
extern void			tcp_update_metrics(struct sock *sk);

extern void			tcp_close(struct sock *sk,
					  long timeout);
extern struct sock *		tcp_accept(struct sock *sk, int flags, int *err);
extern unsigned int		tcp_poll(struct file * file, struct socket *sock, struct poll_table_struct *wait);
extern void			tcp_write_space(struct sock *sk);

extern int			tcp_getsockopt(struct sock *sk, int level,
					       int optname, char *optval,
					       int *optlen);
extern int			tcp_setsockopt(struct sock *sk, int level,
					       int optname, char *optval,
					       int optlen);
extern void			tcp_set_keepalive(struct sock *sk, int val);
extern int			tcp_recvmsg(struct sock *sk,
					    struct msghdr *msg,
					    int len, int nonblock,
					    int flags, int *addr_len);

extern int			tcp_listen_start(struct sock *sk);

extern void			tcp_parse_options(struct sk_buff *skb,
						  struct tcp_opt *tp,
						  int estab);
/*
 *	TCP v4 functions exported for the inet6 API
 */

extern int			tcp_v4_rebuild_header(struct sock *sk);

extern int			tcp_v4_build_header(struct sock *sk,
						    struct sk_buff *skb);

extern void			tcp_v4_send_check(struct sock *sk,
						  struct tcphdr *th, int len,
						  struct sk_buff *skb);

extern int			tcp_v4_conn_request(struct sock *sk,
						    struct sk_buff *skb);

extern struct sock *		tcp_create_openreq_child(struct sock *sk,
							 struct open_request *req,
							 struct sk_buff *skb);

extern struct sock *		tcp_v4_syn_recv_sock(struct sock *sk,
						     struct sk_buff *skb,
						     struct open_request *req,
						     struct dst_entry *dst);

extern int			tcp_v4_do_rcv(struct sock *sk,
					      struct sk_buff *skb);

extern int			tcp_v4_connect(struct sock *sk,
					       struct sockaddr *uaddr,
					       int addr_len);

extern int			tcp_connect(struct sock *sk,
					    struct sk_buff *skb);

extern struct sk_buff *		tcp_make_synack(struct sock *sk,
						struct dst_entry *dst,
						struct open_request *req);

extern int			tcp_disconnect(struct sock *sk, int flags);

extern void			tcp_unhash(struct sock *sk);

extern int			tcp_v4_hash_connecting(struct sock *sk);

/* From syncookies.c */
extern struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
				    struct ip_options *opt);
extern __u32 cookie_v4_init_sequence(struct sock *sk, struct sk_buff *skb,
				     __u16 *mss);
/* tcp_output.c */

extern int  tcp_write_xmit(struct sock *);
extern int  tcp_retransmit_skb(struct sock *, struct sk_buff *);
extern void tcp_xmit_retransmit_queue(struct sock *);
extern void tcp_simple_retransmit(struct sock *);

extern void tcp_send_probe0(struct sock *);
extern void tcp_send_partial(struct sock *);
extern int  tcp_write_wakeup(struct sock *);
extern void tcp_send_fin(struct sock *sk);
extern void tcp_send_active_reset(struct sock *sk, int priority);
extern int  tcp_send_synack(struct sock *);
extern int  tcp_transmit_skb(struct sock *, struct sk_buff *);
extern void tcp_send_skb(struct sock *, struct sk_buff *, int force_queue, unsigned mss_now);
extern void tcp_send_ack(struct sock *sk);
extern void tcp_send_delayed_ack(struct sock *sk);

/* tcp_timer.c */
extern void tcp_init_xmit_timers(struct sock *);
extern void tcp_clear_xmit_timers(struct sock *);

extern void tcp_delete_keepalive_timer (struct sock *);
extern void tcp_reset_keepalive_timer (struct sock *, unsigned long);
extern int  tcp_sync_mss(struct sock *sk, u32 pmtu);

extern const char timer_bug_msg[];
static inline void tcp_clear_xmit_timer(struct sock *sk, int what)
{
	struct tcp_opt *tp = &sk->tp_pinfo.af_tcp;

	switch (what) {
	case TCP_TIME_RETRANS:
	case TCP_TIME_PROBE0:
		tp->pending = 0;

#ifdef TCP_CLEAR_TIMERS
		if (timer_pending(&tp->retransmit_timer) &&
		    del_timer(&tp->retransmit_timer))
			__sock_put(sk);
#endif
		break;
	case TCP_TIME_DACK:
		tp->ack.blocked = 0;
		tp->ack.pending = 0;

#ifdef TCP_CLEAR_TIMERS
		if (timer_pending(&tp->delack_timer) &&
		    del_timer(&tp->delack_timer))
			__sock_put(sk);
#endif
		break;
	default:
		printk(timer_bug_msg);
		return;
	}
}

/*
 *	Reset the retransmission timer
 */
static inline void tcp_reset_xmit_timer(struct sock *sk, int what, unsigned long when)
{
	struct tcp_opt *tp = &sk->tp_pinfo.af_tcp;

	if (when > TCP_RTO_MAX) {
#ifdef TCP_DEBUG
		printk(KERN_DEBUG "reset_xmit_timer sk=%p %d when=0x%lx, caller=%p\n", sk, what, when, current_text_addr());
#endif
		when = TCP_RTO_MAX;
	}

	switch (what) {
	case TCP_TIME_RETRANS:
	case TCP_TIME_PROBE0:
		tp->pending = what;
		tp->timeout = jiffies+when;
		if (!mod_timer(&tp->retransmit_timer, tp->timeout))
			sock_hold(sk);
		break;

	case TCP_TIME_DACK:
		tp->ack.pending |= TCP_ACK_TIMER;
		tp->ack.timeout = jiffies+when;
		if (!mod_timer(&tp->delack_timer, tp->ack.timeout))
			sock_hold(sk);
		break;

	default:
		printk(KERN_DEBUG "bug: unknown timer value\n");
	}
}
/* Compute the current effective MSS, taking SACKs and IP options,
 * and even PMTU discovery events into account.
 */
static __inline__ unsigned int tcp_current_mss(struct sock *sk)
{
	struct tcp_opt *tp = &sk->tp_pinfo.af_tcp;
	struct dst_entry *dst = __sk_dst_get(sk);
	int mss_now = tp->mss_cache;

	if (dst && dst->pmtu != tp->pmtu_cookie)
		mss_now = tcp_sync_mss(sk, dst->pmtu);

	if (tp->eff_sacks)
		mss_now -= (TCPOLEN_SACK_BASE_ALIGNED +
			    (tp->eff_sacks * TCPOLEN_SACK_PERBLOCK));
	return mss_now;
}
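/* Worked example (editor's addition): with mss_cache = 1460 and two SACK
 * blocks pending (tp->eff_sacks == 2), the SACK option costs
 * TCPOLEN_SACK_BASE_ALIGNED + 2 * TCPOLEN_SACK_PERBLOCK = 4 + 16 = 20 bytes,
 * so tcp_current_mss() returns 1440 and outgoing segments shrink accordingly.
 */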
/* Initialize RCV_MSS value.
 * RCV_MSS is our guess about the MSS used by the peer.
 * We haven't any direct information about the MSS.
 * It's better to underestimate the RCV_MSS rather than overestimate.
 * Overestimations make us ACK less frequently than needed.
 * Underestimations are easier to detect and fix by tcp_measure_rcv_mss().
 */
static inline void tcp_initialize_rcv_mss(struct sock *sk)
{
	struct tcp_opt *tp = &sk->tp_pinfo.af_tcp;
	int hint = min(tp->advmss, tp->mss_cache);

	hint = min(hint, tp->rcv_wnd/2);

	tp->ack.rcv_mss = max(min(hint, TCP_MIN_RCVMSS), TCP_MIN_MSS);
}

static __inline__ void __tcp_fast_path_on(struct tcp_opt *tp, u32 snd_wnd)
{
	tp->pred_flags = htonl((tp->tcp_header_len << 26) |
			       ntohl(TCP_FLAG_ACK) |
			       snd_wnd);
}

static __inline__ void tcp_fast_path_on(struct tcp_opt *tp)
{
	__tcp_fast_path_on(tp, tp->snd_wnd>>tp->snd_wscale);
}
/* Compute the actual receive window we are currently advertising.
 * Rcv_nxt can be after the window if our peer pushes more data
 * than the offered window.
 */
static __inline__ u32 tcp_receive_window(struct tcp_opt *tp)
{
	s32 win = tp->rcv_wup + tp->rcv_wnd - tp->rcv_nxt;

	if (win < 0)
		win = 0;
	return (u32) win;
}

/* Choose a new window, without checks for shrinking, and without
 * scaling applied to the result.  The caller does these things
 * if necessary.  This is a "raw" window selection.
 */
extern u32 __tcp_select_window(struct sock *sk);
/* TCP timestamps are only 32-bits, this causes a slight
 * complication on 64-bit systems since we store a snapshot
 * of jiffies in the buffer control blocks below.  We deliberately
 * use only the low 32 bits of jiffies and hide the ugly
 * casts with the following macro.
 */
#define tcp_time_stamp		((__u32)(jiffies))
/* This is what the send packet queueing engine uses to pass
 * TCP per-packet control information to the transmission
 * code.  We also store the host-order sequence numbers in
 * here too.  This is 36 bytes on 32-bit architectures,
 * 40 bytes on 64-bit machines, if this grows please adjust
 * skbuff.h:skbuff->cb[xxx] size appropriately.
 */
struct tcp_skb_cb {
	union {
		struct inet_skb_parm	h4;
#if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
		struct inet6_skb_parm	h6;
#endif
	} header;	/* For incoming frames		*/
	__u32		seq;		/* Starting sequence number	*/
	__u32		end_seq;	/* SEQ + FIN + SYN + datalen	*/
	__u32		when;		/* used to compute rtt's	*/
	__u8		flags;		/* TCP header flags.		*/

	/* NOTE: These must match up to the flags byte in a
	 *       real TCP header.
	 */
#define TCPCB_FLAG_FIN		0x01
#define TCPCB_FLAG_SYN		0x02
#define TCPCB_FLAG_RST		0x04
#define TCPCB_FLAG_PSH		0x08
#define TCPCB_FLAG_ACK		0x10
#define TCPCB_FLAG_URG		0x20
#define TCPCB_FLAG_ECE		0x40
#define TCPCB_FLAG_CWR		0x80

	__u8		sacked;		/* State flags for SACK/FACK.	*/
#define TCPCB_SACKED_ACKED	0x01	/* SKB ACK'd by a SACK block	*/
#define TCPCB_SACKED_RETRANS	0x02	/* SKB retransmitted		*/
#define TCPCB_LOST		0x04	/* SKB is lost			*/
#define TCPCB_TAGBITS		0x07	/* All tag bits			*/

#define TCPCB_EVER_RETRANS	0x80	/* Ever retransmitted frame	*/
#define TCPCB_RETRANS		(TCPCB_SACKED_RETRANS|TCPCB_EVER_RETRANS)

#define TCPCB_URG		0x20	/* Urgent pointer advanced here	*/

#define TCPCB_AT_TAIL		(TCPCB_URG)

	__u16		urg_ptr;	/* Valid if the URG flag is set. */
	__u32		ack_seq;	/* Sequence number ACK'd	*/
};

#define TCP_SKB_CB(__skb)	((struct tcp_skb_cb *)&((__skb)->cb[0]))
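/* Illustrative sketch (editor's addition): the send path stamps each queued
 * skb through this macro before it reaches tcp_transmit_skb(), roughly as
 * below.  This is an assumption-level sketch, not a verbatim excerpt.
 *
 *	TCP_SKB_CB(skb)->seq     = tp->write_seq;
 *	TCP_SKB_CB(skb)->end_seq = tp->write_seq + skb->len;
 *	TCP_SKB_CB(skb)->flags   = TCPCB_FLAG_ACK;
 *	TCP_SKB_CB(skb)->when    = tcp_time_stamp;	// later used for RTT
 */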
#define for_retrans_queue(skb, sk, tp) \
		for (skb = (sk)->write_queue.next;			\
		     (skb != (tp)->send_head) &&			\
		     (skb != (struct sk_buff *)&(sk)->write_queue);	\
		     skb=skb->next)

#include <net/tcp_ecn.h>

/*
 *	Compute minimal free write space needed to queue new packets.
 */
static inline int tcp_min_write_space(struct sock *sk)
{
	return sk->wmem_queued/2;
}

static inline int tcp_wspace(struct sock *sk)
{
	return sk->sndbuf - sk->wmem_queued;
}
/* This determines how many packets are "in the network" to the best
 * of our knowledge.  In many cases it is conservative, but where
 * detailed information is available from the receiver (via SACK
 * blocks etc.) we can make more aggressive calculations.
 *
 * Use this for decisions involving congestion control, use just
 * tp->packets_out to determine if the send queue is empty or not.
 *
 * Read this equation as:
 *
 *	"Packets sent once on transmission queue" MINUS
 *	"Packets left network, but not honestly ACKed yet" PLUS
 *	"Packets fast retransmitted"
 */
static __inline__ int tcp_packets_in_flight(struct tcp_opt *tp)
{
	return tp->packets_out - tp->left_out + tp->retrans_out;
}
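/* Worked example (editor's addition): suppose 10 segments have been sent
 * (packets_out = 10), SACK information shows 3 of them have left the network
 * but are not yet cumulatively ACKed (left_out = 3), and 1 has been fast
 * retransmitted (retrans_out = 1).  Then 10 - 3 + 1 = 8 segments are
 * estimated to be in flight, and that is the figure compared against
 * snd_cwnd when deciding whether another segment may be sent.
 */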
/* Recalculate snd_ssthresh, we want to set it to:
 *
 *	one half the current congestion window, but no
 *	less than two segments
 */
static inline __u32 tcp_recalc_ssthresh(struct tcp_opt *tp)
{
	return max(tp->snd_cwnd>>1, 2);
}

/* If cwnd > ssthresh, we may raise ssthresh to be half-way to cwnd.
 * The exception is rate halving phase, when cwnd is decreasing towards
 * ssthresh.
 */
static inline __u32 tcp_current_ssthresh(struct tcp_opt *tp)
{
	if ((1<<tp->ca_state)&(TCPF_CA_CWR|TCPF_CA_Recovery))
		return tp->snd_ssthresh;
	else
		return max(tp->snd_ssthresh, (tp->snd_cwnd>>1)+(tp->snd_cwnd>>2));
}

extern void tcp_cwnd_application_limited(struct sock *sk);
/* Congestion window validation. (RFC2861) */

static inline void tcp_cwnd_validate(struct sock *sk, struct tcp_opt *tp)
{
	if (tp->packets_out >= tp->snd_cwnd) {
		/* Network is fed fully. */
		tp->snd_cwnd_used = 0;
		tp->snd_cwnd_stamp = tcp_time_stamp;
	} else {
		/* Network starves. */
		if (tp->packets_out > tp->snd_cwnd_used)
			tp->snd_cwnd_used = tp->packets_out;

		if ((s32)(tcp_time_stamp - tp->snd_cwnd_stamp) >= tp->rto)
			tcp_cwnd_application_limited(sk);
	}
}

/* Set slow start threshold and cwnd, not falling into slow start */
static inline void __tcp_enter_cwr(struct tcp_opt *tp)
{
	tp->undo_marker = 0;
	tp->snd_ssthresh = tcp_recalc_ssthresh(tp);
	tp->snd_cwnd = min(tp->snd_cwnd, tcp_packets_in_flight(tp)+1);
	tp->snd_cwnd_cnt = 0;
	tp->high_seq = tp->snd_nxt;
	tp->snd_cwnd_stamp = tcp_time_stamp;
	TCP_ECN_queue_cwr(tp);
}

static inline void tcp_enter_cwr(struct tcp_opt *tp)
{
	tp->prior_ssthresh = 0;
	if (tp->ca_state < TCP_CA_CWR) {
		__tcp_enter_cwr(tp);
		tp->ca_state = TCP_CA_CWR;
	}
}

extern __u32 tcp_init_cwnd(struct tcp_opt *tp);
/* Slow start with delack produces 3 packets of burst, so that
 * it is safe "de facto".
 */
static __inline__ __u32 tcp_max_burst(struct tcp_opt *tp)
{
	return 3;
}

static __inline__ int tcp_minshall_check(struct tcp_opt *tp)
{
	return after(tp->snd_sml,tp->snd_una) &&
		!after(tp->snd_sml, tp->snd_nxt);
}

static __inline__ void tcp_minshall_update(struct tcp_opt *tp, int mss, struct sk_buff *skb)
{
	if (skb->len < mss)
		tp->snd_sml = TCP_SKB_CB(skb)->end_seq;
}

/* Return 0, if packet can be sent now without violating Nagle's rules:
   1. It is full sized.
   2. Or it contains FIN.
   3. Or TCP_NODELAY was set.
   4. Or TCP_CORK is not set, and all sent packets are ACKed.
      With Minshall's modification: all sent small packets are ACKed.
 */

static __inline__ int
tcp_nagle_check(struct tcp_opt *tp, struct sk_buff *skb, unsigned mss_now, int nonagle)
{
	return (skb->len < mss_now &&
		!(TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN) &&
		(nonagle == 2 ||
		 (!nonagle &&
		  tp->packets_out &&
		  tcp_minshall_check(tp))));
}
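/* Illustrative walk-through (editor's addition): nonagle is 0 when Nagle is
 * enabled (the default), 1 when TCP_NODELAY is set and 2 when TCP_CORK is
 * set.  With the default of 0, a 100-byte segment (less than mss_now)
 * carrying no FIN, while earlier packets are still unacknowledged and a
 * previously sent small segment is still outstanding (tcp_minshall_check()
 * true), makes this return nonzero, so tcp_snd_test() below keeps the
 * segment queued until the outstanding data is ACKed.  A full-sized segment
 * returns 0 and may go out immediately.
 */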
/* This checks if the data bearing packet SKB (usually tp->send_head)
 * should be put on the wire right now.
 */
static __inline__ int tcp_snd_test(struct tcp_opt *tp, struct sk_buff *skb,
				   unsigned cur_mss, int nonagle)
{
	/*	RFC 1122 - section 4.2.3.4
	 *
	 *	We must queue if
	 *
	 *	a) The right edge of this frame exceeds the window
	 *	b) There are packets in flight and we have a small segment
	 *	   [SWS avoidance and Nagle algorithm]
	 *	   (part of SWS is done on packetization)
	 *	   Minshall version sounds: there are no _small_
	 *	   segments in flight. (tcp_nagle_check)
	 *	c) We have too many packets 'in flight'
	 *
	 *	Don't use the nagle rule for urgent data (or
	 *	for the final FIN -DaveM).
	 *
	 *	Also, Nagle rule does not apply to frames, which
	 *	sit in the middle of queue (they have no chances
	 *	to get new data) and if room at tail of skb is
	 *	not enough to save something seriously (<32 for now).
	 */

	/* Don't be strict about the congestion window for the
	 * final FIN frame.  -DaveM
	 */
	return ((nonagle==1 || tp->urg_mode
		 || !tcp_nagle_check(tp, skb, cur_mss, nonagle)) &&
		((tcp_packets_in_flight(tp) < tp->snd_cwnd) ||
		 (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN)) &&
		!after(TCP_SKB_CB(skb)->end_seq, tp->snd_una + tp->snd_wnd));
}
static __inline__ void tcp_check_probe_timer(struct sock *sk, struct tcp_opt *tp)
{
	if (!tp->packets_out && !tp->pending)
		tcp_reset_xmit_timer(sk, TCP_TIME_PROBE0, tp->rto);
}

static __inline__ int tcp_skb_is_last(struct sock *sk, struct sk_buff *skb)
{
	return (skb->next == (struct sk_buff*)&sk->write_queue);
}

/* Push out any pending frames which were held back due to
 * TCP_CORK or attempt at coalescing tiny packets.
 * The socket must be locked by the caller.
 */
static __inline__ void __tcp_push_pending_frames(struct sock *sk,
						 struct tcp_opt *tp,
						 unsigned cur_mss,
						 int nonagle)
{
	struct sk_buff *skb = tp->send_head;

	if (skb) {
		if (!tcp_skb_is_last(sk, skb))
			nonagle = 1;
		if (!tcp_snd_test(tp, skb, cur_mss, nonagle) ||
		    tcp_write_xmit(sk))
			tcp_check_probe_timer(sk, tp);
	}
	tcp_cwnd_validate(sk, tp);
}

static __inline__ void tcp_push_pending_frames(struct sock *sk,
					       struct tcp_opt *tp)
{
	__tcp_push_pending_frames(sk, tp, tcp_current_mss(sk), tp->nonagle);
}

static __inline__ int tcp_may_send_now(struct sock *sk, struct tcp_opt *tp)
{
	struct sk_buff *skb = tp->send_head;

	return (skb &&
		tcp_snd_test(tp, skb, tcp_current_mss(sk),
			     tcp_skb_is_last(sk, skb) ? 1 : tp->nonagle));
}

static __inline__ void tcp_init_wl(struct tcp_opt *tp, u32 ack, u32 seq)
{
	tp->snd_wl1 = seq;
}

static __inline__ void tcp_update_wl(struct tcp_opt *tp, u32 ack, u32 seq)
{
	tp->snd_wl1 = seq;
}
extern void tcp_destroy_sock(struct sock *sk);

/*
 * Calculate(/check) TCP checksum
 */
static __inline__ u16 tcp_v4_check(struct tcphdr *th, int len,
				   unsigned long saddr, unsigned long daddr,
				   unsigned long base)
{
	return csum_tcpudp_magic(saddr,daddr,len,IPPROTO_TCP,base);
}

static __inline__ int __tcp_checksum_complete(struct sk_buff *skb)
{
	return (unsigned short)csum_fold(csum_partial(skb->h.raw, skb->len, skb->csum));
}

static __inline__ int tcp_checksum_complete(struct sk_buff *skb)
{
	return skb->ip_summed != CHECKSUM_UNNECESSARY &&
		__tcp_checksum_complete(skb);
}

/* Prequeue for VJ style copy to user, combined with checksumming. */

static __inline__ void tcp_prequeue_init(struct tcp_opt *tp)
{
	tp->ucopy.task = NULL;
	tp->ucopy.len = 0;
	tp->ucopy.memory = 0;
	skb_queue_head_init(&tp->ucopy.prequeue);
}
/* Packet is added to VJ-style prequeue for processing in process
 * context, if a reader task is waiting. Apparently, this exciting
 * idea (VJ's mail "Re: query about TCP header on tcp-ip" of 07 Sep 93)
 * failed somewhere. Latency? Burstiness? Well, at least now we will
 * see why it failed. 8)8)				--ANK
 */
static __inline__ int tcp_prequeue(struct sock *sk, struct sk_buff *skb)
{
	struct tcp_opt *tp = &sk->tp_pinfo.af_tcp;

	if (tp->ucopy.task) {
		if ((tp->ucopy.memory += skb->truesize) <= (sk->rcvbuf<<1)) {
			__skb_queue_tail(&tp->ucopy.prequeue, skb);
			if (skb_queue_len(&tp->ucopy.prequeue) == 1) {
				wake_up_interruptible(sk->sleep);
				if (!tcp_ack_scheduled(tp))
					tcp_reset_xmit_timer(sk, TCP_TIME_DACK, (3*TCP_RTO_MIN)/4);
			}
		} else {
			NET_INC_STATS_BH(TCPPrequeueDropped);
			tp->ucopy.memory -= skb->truesize;
			__kfree_skb(skb);
		}
		return 1;
	}
	return 0;
}
#undef STATE_TRACE

#ifdef STATE_TRACE
static char *statename[]={
	"Unused","Established","Syn Sent","Syn Recv",
	"Fin Wait 1","Fin Wait 2","Time Wait", "Close",
	"Close Wait","Last ACK","Listen","Closing"
};
#endif

static __inline__ void tcp_set_state(struct sock *sk, int state)
{
	int oldstate = sk->state;

	switch (state) {
	case TCP_ESTABLISHED:
		if (oldstate != TCP_ESTABLISHED)
			TCP_INC_STATS(TcpCurrEstab);
		break;

	case TCP_CLOSE:
		sk->prot->unhash(sk);
		if (sk->prev && !(sk->userlocks&SOCK_BINDPORT_LOCK))
			tcp_put_port(sk);
		/* fall through */
	default:
		if (oldstate==TCP_ESTABLISHED)
			tcp_statistics[smp_processor_id()*2+!in_softirq()].TcpCurrEstab--;
	}

	/* Change state AFTER socket is unhashed to avoid closed
	 * socket sitting in hash tables.
	 */
	sk->state = state;

#ifdef STATE_TRACE
	SOCK_DEBUG(sk, "TCP sk=%p, State %s -> %s\n",sk, statename[oldstate],statename[state]);
#endif
}
static __inline__ void tcp_done(struct sock *sk)
{
	tcp_set_state(sk, TCP_CLOSE);
	tcp_clear_xmit_timers(sk);

	sk->shutdown = SHUTDOWN_MASK;

	if (!sk->dead)
		sk->state_change(sk);
	else
		tcp_destroy_sock(sk);
}

static __inline__ void tcp_sack_reset(struct tcp_opt *tp)
{
	tp->dsack = 0;
	tp->eff_sacks = 0;
	tp->num_sacks = 0;
}
static __inline__ void tcp_build_and_update_options(__u32 *ptr, struct tcp_opt *tp, __u32 tstamp)
{
	if (tp->tstamp_ok) {
		*ptr++ = __constant_htonl((TCPOPT_NOP << 24) |
					  (TCPOPT_NOP << 16) |
					  (TCPOPT_TIMESTAMP << 8) |
					  TCPOLEN_TIMESTAMP);
		*ptr++ = htonl(tstamp);
		*ptr++ = htonl(tp->ts_recent);
	}
	if (tp->eff_sacks) {
		struct tcp_sack_block *sp = tp->dsack ? tp->duplicate_sack : tp->selective_acks;
		int this_sack;

		*ptr++ = __constant_htonl((TCPOPT_NOP << 24) |
					  (TCPOPT_NOP << 16) |
					  (TCPOPT_SACK << 8) |
					  (TCPOLEN_SACK_BASE +
					   (tp->eff_sacks * TCPOLEN_SACK_PERBLOCK)));
		for(this_sack = 0; this_sack < tp->eff_sacks; this_sack++) {
			*ptr++ = htonl(sp[this_sack].start_seq);
			*ptr++ = htonl(sp[this_sack].end_seq);
		}
		if (tp->dsack) {
			tp->dsack = 0;
			tp->eff_sacks--;
		}
	}
}
/* Construct a tcp options header for a SYN or SYN_ACK packet.
 * If this is ever changed make sure to change the definition of
 * MAX_SYN_SIZE to match the new maximum number of options that you
 * can generate.
 */
static inline void tcp_syn_build_options(__u32 *ptr, int mss, int ts, int sack,
					 int offer_wscale, int wscale, __u32 tstamp, __u32 ts_recent)
{
	/* We always get an MSS option.
	 * The option bytes which will be seen in normal data
	 * packets should timestamps be used, must be in the MSS
	 * advertised.  But we subtract them from tp->mss_cache so
	 * that calculations in tcp_sendmsg are simpler etc.
	 * So account for this fact here if necessary.  If we
	 * don't do this correctly, as a receiver we won't
	 * recognize data packets as being full sized when we
	 * should, and thus we won't abide by the delayed ACK
	 * rules correctly.
	 * SACKs don't matter, we never delay an ACK when we
	 * have any of those going out.
	 */
	*ptr++ = htonl((TCPOPT_MSS << 24) | (TCPOLEN_MSS << 16) | mss);
	if (ts) {
		if(sack)
			*ptr++ = __constant_htonl((TCPOPT_SACK_PERM << 24) | (TCPOLEN_SACK_PERM << 16) |
						  (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
		else
			*ptr++ = __constant_htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
						  (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
		*ptr++ = htonl(tstamp);		/* TSVAL */
		*ptr++ = htonl(ts_recent);	/* TSECR */
	} else if(sack)
		*ptr++ = __constant_htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
					  (TCPOPT_SACK_PERM << 8) | TCPOLEN_SACK_PERM);
	if (offer_wscale)
		*ptr++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_WINDOW << 16) | (TCPOLEN_WINDOW << 8) | (wscale));
}
/* Determine a window scaling and initial window to offer.
 * Based on the assumption that the given amount of space
 * will be offered. Store the results in the tp structure.
 * NOTE: for smooth operation initial space offering should
 * be a multiple of mss if possible. We assume here that mss >= 1.
 * This MUST be enforced by all callers.
 */
static inline void tcp_select_initial_window(int space, __u32 mss,
					     __u32 *rcv_wnd,
					     __u32 *window_clamp,
					     int wscale_ok,
					     __u8 *rcv_wscale)
{
	/* If no clamp set the clamp to the max possible scaled window */
	if (*window_clamp == 0)
		(*window_clamp) = (65535<<14);
	space = min(*window_clamp,space);

	/* Quantize space offering to a multiple of mss if possible. */
	if (space > mss)
		space = (space/mss)*mss;

	/* NOTE: offering an initial window larger than 32767
	 * will break some buggy TCP stacks. We try to be nice.
	 * If we are not window scaling, then this truncates
	 * our initial window offering to 32k. There should also
	 * be a sysctl option to stop being nice.
	 */
	(*rcv_wnd) = min(space, MAX_TCP_WINDOW);
	(*rcv_wscale) = 0;
	if (wscale_ok) {
		/* See RFC1323 for an explanation of the limit to 14 */
		while (space > 65535 && (*rcv_wscale) < 14) {
			space >>= 1;
			(*rcv_wscale)++;
		}
		if (*rcv_wscale && sysctl_tcp_app_win && space>=mss &&
		    space - max((space>>sysctl_tcp_app_win), mss>>*rcv_wscale) < 65536/2)
			(*rcv_wscale)--;
	}

	/* Set initial window to value enough for senders,
	 * following RFC2414. Senders, not following this RFC,
	 * will be satisfied with 2.
	 */
	if (mss > (1<<*rcv_wscale)) {
		int init_cwnd = 4;
		if (mss > 1460*3)
			init_cwnd = 2;
		else if (mss > 1460)
			init_cwnd = 3;
		if (*rcv_wnd > init_cwnd*mss)
			*rcv_wnd = init_cwnd*mss;
	}
	/* Set the clamp no higher than max representable value */
	(*window_clamp) = min(65535<<(*rcv_wscale),*window_clamp);
}
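/* Worked example (editor's addition): space = 262144 bytes of receive
 * buffer, mss = 1460, window scaling permitted, no clamp set.  Quantizing
 * gives space = 261340; rcv_wnd starts at min(261340, 32767) = 32767; the
 * while loop halves space twice before it drops to 65535 or below, so
 * rcv_wscale = 2 (and, with the usual tcp_app_win setting, the adjustment
 * step does not fire); the RFC2414-style cap then limits the initial window
 * to 4 * 1460 = 5840; and window_clamp becomes min(65535 << 2, 65535 << 14)
 * = 262140.  So the SYN advertises a window of 5840 and offers wscale 2.
 */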
static inline int tcp_win_from_space(int space)
{
	return sysctl_tcp_adv_win_scale<=0 ?
		(space>>(-sysctl_tcp_adv_win_scale)) :
		space - (space>>sysctl_tcp_adv_win_scale);
}

/* Note: caller must be prepared to deal with negative returns */
static inline int tcp_space(struct sock *sk)
{
	return tcp_win_from_space(sk->rcvbuf - atomic_read(&sk->rmem_alloc));
}

static inline int tcp_full_space(struct sock *sk)
{
	return tcp_win_from_space(sk->rcvbuf);
}

static inline void tcp_acceptq_removed(struct sock *sk)
{
	sk->ack_backlog--;
}

static inline void tcp_acceptq_added(struct sock *sk)
{
	sk->ack_backlog++;
}

static inline int tcp_acceptq_is_full(struct sock *sk)
{
	return sk->ack_backlog > sk->max_ack_backlog;
}
static inline void tcp_acceptq_queue(struct sock *sk, struct open_request *req,
				     struct sock *child)
{
	struct tcp_opt *tp = &sk->tp_pinfo.af_tcp;

	req->sk = child;
	tcp_acceptq_added(sk);

	if (!tp->accept_queue_tail) {
		tp->accept_queue = req;
	} else {
		tp->accept_queue_tail->dl_next = req;
	}
	tp->accept_queue_tail = req;
	req->dl_next = NULL;
}

struct tcp_listen_opt
{
	u8			max_qlen_log;	/* log_2 of maximal queued SYNs */
	int			qlen;
	int			qlen_young;
	int			clock_hand;
	struct open_request	*syn_table[TCP_SYNQ_HSIZE];
};
static inline void
tcp_synq_removed(struct sock *sk, struct open_request *req)
{
	struct tcp_listen_opt *lopt = sk->tp_pinfo.af_tcp.listen_opt;

	if (--lopt->qlen == 0)
		tcp_delete_keepalive_timer(sk);
	if (req->retrans == 0)
		lopt->qlen_young--;
}

static inline void tcp_synq_added(struct sock *sk)
{
	struct tcp_listen_opt *lopt = sk->tp_pinfo.af_tcp.listen_opt;

	if (lopt->qlen++ == 0)
		tcp_reset_keepalive_timer(sk, TCP_TIMEOUT_INIT);
	lopt->qlen_young++;
}

static inline int tcp_synq_len(struct sock *sk)
{
	return sk->tp_pinfo.af_tcp.listen_opt->qlen;
}

static inline int tcp_synq_young(struct sock *sk)
{
	return sk->tp_pinfo.af_tcp.listen_opt->qlen_young;
}

static inline int tcp_synq_is_full(struct sock *sk)
{
	return tcp_synq_len(sk)>>sk->tp_pinfo.af_tcp.listen_opt->max_qlen_log;
}

static inline void tcp_synq_unlink(struct tcp_opt *tp, struct open_request *req,
				   struct open_request **prev)
{
	write_lock(&tp->syn_wait_lock);
	*prev = req->dl_next;
	write_unlock(&tp->syn_wait_lock);
}

static inline void tcp_synq_drop(struct sock *sk, struct open_request *req,
				 struct open_request **prev)
{
	tcp_synq_unlink(&sk->tp_pinfo.af_tcp, req, prev);
	tcp_synq_removed(sk, req);
	tcp_openreq_free(req);
}
static __inline__ void tcp_openreq_init(struct open_request *req,
					struct tcp_opt *tp,
					struct sk_buff *skb)
{
	req->rcv_wnd = 0;		/* So that tcp_send_synack() knows! */
	req->rcv_isn = TCP_SKB_CB(skb)->seq;
	req->mss = tp->mss_clamp;
	req->ts_recent = tp->saw_tstamp ? tp->rcv_tsval : 0;
	req->tstamp_ok = tp->tstamp_ok;
	req->sack_ok = tp->sack_ok;
	req->snd_wscale = tp->snd_wscale;
	req->wscale_ok = tp->wscale_ok;
	req->acked = 0;
	req->ecn_ok = 0;
	req->rmt_port = skb->h.th->source;
}
#define TCP_MEM_QUANTUM	((int)PAGE_SIZE)

static inline void tcp_free_skb(struct sock *sk, struct sk_buff *skb)
{
	sk->tp_pinfo.af_tcp.queue_shrunk = 1;
	sk->wmem_queued -= skb->truesize;
	sk->forward_alloc += skb->truesize;
	__kfree_skb(skb);
}

static inline void tcp_charge_skb(struct sock *sk, struct sk_buff *skb)
{
	sk->wmem_queued += skb->truesize;
	sk->forward_alloc -= skb->truesize;
}

extern void __tcp_mem_reclaim(struct sock *sk);
extern int tcp_mem_schedule(struct sock *sk, int size, int kind);

static inline void tcp_mem_reclaim(struct sock *sk)
{
	if (sk->forward_alloc >= TCP_MEM_QUANTUM)
		__tcp_mem_reclaim(sk);
}

static inline void tcp_enter_memory_pressure(void)
{
	if (!tcp_memory_pressure) {
		NET_INC_STATS(TCPMemoryPressures);
		tcp_memory_pressure = 1;
	}
}
static inline void tcp_moderate_sndbuf(struct sock *sk)
{
	if (!(sk->userlocks&SOCK_SNDBUF_LOCK)) {
		sk->sndbuf = min(sk->sndbuf, sk->wmem_queued/2);
		sk->sndbuf = max(sk->sndbuf, SOCK_MIN_SNDBUF);
	}
}

static inline struct sk_buff *tcp_alloc_skb(struct sock *sk, int size, int gfp)
{
	struct sk_buff *skb = alloc_skb(size, gfp);

	if (skb) {
		if (sk->forward_alloc >= (int)skb->truesize ||
		    tcp_mem_schedule(sk, skb->truesize, 0))
			return skb;
		__kfree_skb(skb);
	} else {
		tcp_enter_memory_pressure();
		tcp_moderate_sndbuf(sk);
	}
	return NULL;
}

static inline void tcp_writequeue_purge(struct sock *sk)
{
	struct sk_buff *skb;

	while ((skb = __skb_dequeue(&sk->write_queue)) != NULL)
		tcp_free_skb(sk, skb);
	tcp_mem_reclaim(sk);
}
extern void tcp_rfree(struct sk_buff *skb);

static inline void tcp_set_owner_r(struct sk_buff *skb, struct sock *sk)
{
	skb->sk = sk;
	skb->destructor = tcp_rfree;
	atomic_add(skb->truesize, &sk->rmem_alloc);
	sk->forward_alloc -= skb->truesize;
}

extern void tcp_listen_wlock(void);

/* - We may sleep inside this lock.
 * - If sleeping is not required (or called from BH),
 *   use plain read_(un)lock(&tcp_lhash_lock).
 */

static inline void tcp_listen_lock(void)
{
	/* read_lock synchronizes to candidates to writers */
	read_lock(&tcp_lhash_lock);
	atomic_inc(&tcp_lhash_users);
	read_unlock(&tcp_lhash_lock);
}

static inline void tcp_listen_unlock(void)
{
	if (atomic_dec_and_test(&tcp_lhash_users))
		wake_up(&tcp_lhash_wait);
}
static inline int keepalive_intvl_when(struct tcp_opt *tp)
{
	return tp->keepalive_intvl ? : sysctl_tcp_keepalive_intvl;
}

static inline int keepalive_time_when(struct tcp_opt *tp)
{
	return tp->keepalive_time ? : sysctl_tcp_keepalive_time;
}

static inline int tcp_fin_time(struct tcp_opt *tp)
{
	int fin_timeout = tp->linger2 ? : sysctl_tcp_fin_timeout;

	if (fin_timeout < (tp->rto<<2) - (tp->rto>>1))
		fin_timeout = (tp->rto<<2) - (tp->rto>>1);

	return fin_timeout;
}
static inline int tcp_paws_check(struct tcp_opt *tp, int rst)
{
	if ((s32)(tp->rcv_tsval - tp->ts_recent) >= 0)
		return 0;
	if (xtime.tv_sec >= tp->ts_recent_stamp + TCP_PAWS_24DAYS)
		return 0;

	/* RST segments are not recommended to carry timestamp,
	   and, if they do, it is recommended to ignore PAWS because
	   "their cleanup function should take precedence over timestamps."
	   Certainly, it is a mistake. It is necessary to understand the reasons
	   of this constraint to relax it: if peer reboots, clock may go
	   out-of-sync and half-open connections will not be reset.
	   Actually, the problem would not exist if all
	   the implementations followed the draft about maintaining clock
	   via reboots. Linux-2.2 DOES NOT!

	   However, we can relax time bounds for RST segments to MSL.
	 */
	if (rst && xtime.tv_sec >= tp->ts_recent_stamp + TCP_PAWS_MSL)
		return 0;
	return 1;
}
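/* Illustrative sketch (editor's addition): a non-RST segment whose timestamp
 * is older than the last one seen (rcv_tsval - ts_recent < 0 as s32) fails
 * PAWS and is dropped, unless ts_recent itself is stale, i.e. more than
 * TCP_PAWS_24DAYS seconds old.  For an RST the bound is relaxed to
 * TCP_PAWS_MSL (60 s), so a peer that rebooted and restarted its timestamp
 * clock can still reset a half-open connection after a minute.
 */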
#define TCP_CHECK_TIMER(sk) do { } while (0);

#endif	/* _TCP_H */