kernel: Backport from 2.6 [NET]: Fix bugs in "Whether sock accept queue is full" checking
[tomato.git] / release / src / linux / linux / include / net / tcp.h
blob: 3f84ca5898e64cd0a17d864a9441e2841682a49f
1 /*
2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
6 * Definitions for the TCP module.
8 * Version: @(#)tcp.h 1.0.5 05/23/93
10 * Authors: Ross Biro, <bir7@leland.Stanford.Edu>
11 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
13 * This program is free software; you can redistribute it and/or
14 * modify it under the terms of the GNU General Public License
15 * as published by the Free Software Foundation; either version
16 * 2 of the License, or (at your option) any later version.
18 #ifndef _TCP_H
19 #define _TCP_H
21 #define TCP_DEBUG 1
22 #define FASTRETRANS_DEBUG 1
24 /* Cancel timers, when they are not required. */
25 #undef TCP_CLEAR_TIMERS
27 #include <linux/config.h>
28 #include <linux/tcp.h>
29 #include <linux/slab.h>
30 #include <linux/cache.h>
31 #include <net/checksum.h>
32 #include <net/sock.h>
33 #include <net/snmp.h>
35 /* This is for all connections with a full identity, no wildcards.
36 * New scheme, half the table is for TIME_WAIT, the other half is
37 * for the rest. I'll experiment with dynamic table growth later.
39 struct tcp_ehash_bucket {
40 rwlock_t lock;
41 struct sock *chain;
42 } __attribute__((__aligned__(8)));
44 /* This is for listening sockets, thus all sockets which possess wildcards. */
45 #define TCP_LHTABLE_SIZE 32 /* Yes, really, this is all you need. */
47 /* There are a few simple rules, which allow for local port reuse by
48 * an application. In essence:
50 * 1) Sockets bound to different interfaces may share a local port.
51 * Failing that, goto test 2.
52 * 2) If all sockets have sk->reuse set, and none of them are in
53 * TCP_LISTEN state, the port may be shared.
54 * Failing that, goto test 3.
55 * 3) If all sockets are bound to a specific sk->rcv_saddr local
56 * address, and none of them are the same, the port may be
57 * shared.
58 * Failing this, the port cannot be shared.
60 * The interesting point is test #2. This is what an FTP server does
61 * all day. To optimize this case we use a specific flag bit defined
62 * below. As we add sockets to a bind bucket list, we perform a
63 * check of: (newsk->reuse && (newsk->state != TCP_LISTEN))
64 * As long as all sockets added to a bind bucket pass this test,
65 * the flag bit will be set.
66 * The resulting situation is that tcp_v[46]_verify_bind() can just check
67 * for this flag bit, if it is set and the socket trying to bind has
68 * sk->reuse set, we don't even have to walk the owners list at all,
69 * we return that it is ok to bind this socket to the requested local port.
71 * Sounds like a lot of work, but it is worth it. In a more naive
72 * implementation (ie. current FreeBSD etc.) the entire list of ports
73 * must be walked for each data port opened by an ftp server. Needless
74 * to say, this does not scale at all. With a couple thousand FTP
75 * users logged onto your box, isn't it nice to know that new data
76 * ports are created in O(1) time? I thought so. ;-) -DaveM
78 struct tcp_bind_bucket {
79 unsigned short port;
80 signed short fastreuse;
81 struct tcp_bind_bucket *next;
82 struct sock *owners;
83 struct tcp_bind_bucket **pprev;
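/*
 * Illustrative sketch (not part of the original header): the fast-path bind
 * check described in the comment above, i.e. the shortcut tcp_v[46]_verify_bind()
 * can take.  tcp_bind_bucket_fastreuse_ok() is a hypothetical helper name.
 */
static inline int tcp_bind_bucket_fastreuse_ok(struct tcp_bind_bucket *tb,
					       struct sock *newsk)
{
	/* Every current owner was added with (reuse && state != TCP_LISTEN)
	 * and so is the newcomer: the port can be shared without walking
	 * tb->owners at all. */
	return tb->fastreuse > 0 && newsk->reuse && newsk->state != TCP_LISTEN;
}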
86 struct tcp_bind_hashbucket {
87 spinlock_t lock;
88 struct tcp_bind_bucket *chain;
91 extern struct tcp_hashinfo {
92 /* This is for sockets with full identity only. Sockets here will
93 * always be without wildcards and will have the following invariant:
95 * TCP_ESTABLISHED <= sk->state < TCP_CLOSE
97 * First half of the table is for sockets not in TIME_WAIT, second half
98 * is for TIME_WAIT sockets only.
100 struct tcp_ehash_bucket *__tcp_ehash;
102 /* Ok, let's try this, I give up, we do need a local binding
103 * TCP hash as well as the others for fast bind/connect.
105 struct tcp_bind_hashbucket *__tcp_bhash;
107 int __tcp_bhash_size;
108 int __tcp_ehash_size;
110 /* All sockets in TCP_LISTEN state will be in here. This is the only
111 * table where wildcard'd TCP sockets can exist. Hash function here
112 * is just local port number.
114 struct sock *__tcp_listening_hash[TCP_LHTABLE_SIZE];
116 /* All the above members are written once at bootup and
117 * never written again _or_ are predominantly read-access.
119 * Now align to a new cache line as all the following members
120 * are often dirty.
122 rwlock_t __tcp_lhash_lock ____cacheline_aligned;
123 atomic_t __tcp_lhash_users;
124 wait_queue_head_t __tcp_lhash_wait;
125 spinlock_t __tcp_portalloc_lock;
126 } tcp_hashinfo;
128 #define tcp_ehash (tcp_hashinfo.__tcp_ehash)
129 #define tcp_bhash (tcp_hashinfo.__tcp_bhash)
130 #define tcp_ehash_size (tcp_hashinfo.__tcp_ehash_size)
131 #define tcp_bhash_size (tcp_hashinfo.__tcp_bhash_size)
132 #define tcp_listening_hash (tcp_hashinfo.__tcp_listening_hash)
133 #define tcp_lhash_lock (tcp_hashinfo.__tcp_lhash_lock)
134 #define tcp_lhash_users (tcp_hashinfo.__tcp_lhash_users)
135 #define tcp_lhash_wait (tcp_hashinfo.__tcp_lhash_wait)
136 #define tcp_portalloc_lock (tcp_hashinfo.__tcp_portalloc_lock)
138 extern kmem_cache_t *tcp_bucket_cachep;
139 extern struct tcp_bind_bucket *tcp_bucket_create(struct tcp_bind_hashbucket *head,
140 unsigned short snum);
141 extern void tcp_bucket_unlock(struct sock *sk);
142 extern int tcp_port_rover;
143 extern struct sock *tcp_v4_lookup_listener(u32 addr, unsigned short hnum, int dif);
145 /* These are AF independent. */
146 static __inline__ int tcp_bhashfn(__u16 lport)
148 return (lport & (tcp_bhash_size - 1));
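/*
 * Illustrative: tcp_bhash_size is a power of two, so the mask above is a
 * cheap modulo -- e.g. with tcp_bhash_size == 512, tcp_bhashfn(8080)
 * == (8080 & 511) == 400 == 8080 % 512.
 */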
151 /* This is a TIME_WAIT bucket. It works around the memory consumption
152 * problems of sockets in such a state on heavily loaded servers, but
153 * without violating the protocol specification.
155 struct tcp_tw_bucket {
156 /* These _must_ match the beginning of struct sock precisely.
157 * XXX Yes I know this is gross, but I'd have to edit every single
158 * XXX networking file if I created a "struct sock_header". -DaveM
160 __u32 daddr;
161 __u32 rcv_saddr;
162 __u16 dport;
163 unsigned short num;
164 int bound_dev_if;
165 struct sock *next;
166 struct sock **pprev;
167 struct sock *bind_next;
168 struct sock **bind_pprev;
169 unsigned char state,
170 substate; /* "zapped" is replaced with "substate" */
171 __u16 sport;
172 unsigned short family;
173 unsigned char reuse,
174 rcv_wscale; /* It is also TW bucket specific */
175 atomic_t refcnt;
177 /* And these are ours. */
178 int hashent;
179 int timeout;
180 __u32 rcv_nxt;
181 __u32 snd_nxt;
182 __u32 rcv_wnd;
183 __u32 ts_recent;
184 long ts_recent_stamp;
185 unsigned long ttd;
186 struct tcp_bind_bucket *tb;
187 struct tcp_tw_bucket *next_death;
188 struct tcp_tw_bucket **pprev_death;
190 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
191 struct in6_addr v6_daddr;
192 struct in6_addr v6_rcv_saddr;
193 #endif
194 #ifdef CONFIG_TCP_RFC2385
195 __u8 *md5_key;
196 __u8 md5_keylen;
197 #endif
200 extern kmem_cache_t *tcp_timewait_cachep;
202 static inline void tcp_tw_put(struct tcp_tw_bucket *tw)
204 if (atomic_dec_and_test(&tw->refcnt)) {
205 #ifdef INET_REFCNT_DEBUG
206 printk(KERN_DEBUG "tw_bucket %p released\n", tw);
207 #endif
208 #ifdef CONFIG_TCP_RFC2385
209 /* Free the memory used for any md5 key */
210 if (tw->md5_key) {
211 kfree (tw->md5_key);
212 tw->md5_key = NULL;
213 tw->md5_keylen = 0;
215 #endif
216 kmem_cache_free(tcp_timewait_cachep, tw);
220 extern atomic_t tcp_orphan_count;
221 extern int tcp_tw_count;
222 extern void tcp_time_wait(struct sock *sk, int state, int timeo);
223 extern void tcp_timewait_kill(struct tcp_tw_bucket *tw);
224 extern void tcp_tw_schedule(struct tcp_tw_bucket *tw, int timeo);
225 extern void tcp_tw_deschedule(struct tcp_tw_bucket *tw);
228 /* Socket demux engine toys. */
229 #ifdef __BIG_ENDIAN
230 #define TCP_COMBINED_PORTS(__sport, __dport) \
231 (((__u32)(__sport)<<16) | (__u32)(__dport))
232 #else /* __LITTLE_ENDIAN */
233 #define TCP_COMBINED_PORTS(__sport, __dport) \
234 (((__u32)(__dport)<<16) | (__u32)(__sport))
235 #endif
237 #if (BITS_PER_LONG == 64)
238 #ifdef __BIG_ENDIAN
239 #define TCP_V4_ADDR_COOKIE(__name, __saddr, __daddr) \
240 __u64 __name = (((__u64)(__saddr))<<32)|((__u64)(__daddr));
241 #else /* __LITTLE_ENDIAN */
242 #define TCP_V4_ADDR_COOKIE(__name, __saddr, __daddr) \
243 __u64 __name = (((__u64)(__daddr))<<32)|((__u64)(__saddr));
244 #endif /* __BIG_ENDIAN */
245 #define TCP_IPV4_MATCH(__sk, __cookie, __saddr, __daddr, __ports, __dif)\
246 (((*((__u64 *)&((__sk)->daddr)))== (__cookie)) && \
247 ((*((__u32 *)&((__sk)->dport)))== (__ports)) && \
248 (!((__sk)->bound_dev_if) || ((__sk)->bound_dev_if == (__dif))))
249 #else /* 32-bit arch */
250 #define TCP_V4_ADDR_COOKIE(__name, __saddr, __daddr)
251 #define TCP_IPV4_MATCH(__sk, __cookie, __saddr, __daddr, __ports, __dif)\
252 (((__sk)->daddr == (__saddr)) && \
253 ((__sk)->rcv_saddr == (__daddr)) && \
254 ((*((__u32 *)&((__sk)->dport)))== (__ports)) && \
255 (!((__sk)->bound_dev_if) || ((__sk)->bound_dev_if == (__dif))))
256 #endif /* 64-bit arch */
258 #define TCP_IPV6_MATCH(__sk, __saddr, __daddr, __ports, __dif) \
259 (((*((__u32 *)&((__sk)->dport)))== (__ports)) && \
260 ((__sk)->family == AF_INET6) && \
261 !ipv6_addr_cmp(&(__sk)->net_pinfo.af_inet6.daddr, (__saddr)) && \
262 !ipv6_addr_cmp(&(__sk)->net_pinfo.af_inet6.rcv_saddr, (__daddr)) && \
263 (!((__sk)->bound_dev_if) || ((__sk)->bound_dev_if == (__dif))))
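/*
 * Illustrative sketch (not part of the original header) of how the demux
 * macros above are used; a simplified version of the established-hash walk
 * done in tcp_ipv4.c.  Here saddr/sport are the remote end (matched against
 * sk->daddr/sk->dport) and daddr/hnum the local address and host-order port.
 */
static inline struct sock *tcp_v4_demux_sketch(struct tcp_ehash_bucket *head,
					       u32 saddr, u16 sport,
					       u32 daddr, u16 hnum, int dif)
{
	__u32 ports = TCP_COMBINED_PORTS(sport, hnum);
	struct sock *sk;
	TCP_V4_ADDR_COOKIE(acookie, saddr, daddr)

	read_lock(&head->lock);
	for (sk = head->chain; sk; sk = sk->next) {
		/* Whole-word compares: one (or two) comparisons per socket
		 * instead of matching each address/port field separately. */
		if (TCP_IPV4_MATCH(sk, acookie, saddr, daddr, ports, dif)) {
			sock_hold(sk);
			break;
		}
	}
	read_unlock(&head->lock);
	return sk;
}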
265 /* These can have wildcards, don't try too hard. */
266 static __inline__ int tcp_lhashfn(unsigned short num)
268 return num & (TCP_LHTABLE_SIZE - 1);
271 static __inline__ int tcp_sk_listen_hashfn(struct sock *sk)
273 return tcp_lhashfn(sk->num);
276 #define MAX_TCP_HEADER (128 + MAX_HEADER)
279 * Never offer a window over 32767 without using window scaling. Some
280 * poor stacks do signed 16bit maths!
282 #define MAX_TCP_WINDOW 32767U
284 /* Minimal accepted MSS. It is (60+60+8) - (20+20). */
285 #define TCP_MIN_MSS 88U
287 /* Minimal RCV_MSS. */
288 #define TCP_MIN_RCVMSS 536U
290 /* After receiving this amount of duplicate ACKs fast retransmit starts. */
291 #define TCP_FASTRETRANS_THRESH 3
293 /* Maximal reordering. */
294 #define TCP_MAX_REORDERING 127
296 /* Maximal number of ACKs sent quickly to accelerate slow-start. */
297 #define TCP_MAX_QUICKACKS 16U
299 /* urg_data states */
300 #define TCP_URG_VALID 0x0100
301 #define TCP_URG_NOTYET 0x0200
302 #define TCP_URG_READ 0x0400
304 #define TCP_RETR1 3 /*
305 * This is how many retries it does before it
306 * tries to figure out if the gateway is
307 * down. Minimal RFC value is 3; it corresponds
308 * to ~3sec-8min depending on RTO.
311 #define TCP_RETR2 15 /*
312 * This should take at least
313 * 90 minutes to time out.
314 * RFC1122 says that the limit is 100 sec.
315 * 15 is ~13-30min depending on RTO.
318 #define TCP_SYN_RETRIES 5 /* number of times to retry active opening a
319 * connection: ~180sec is RFC minimum */
321 #define TCP_SYNACK_RETRIES 5 /* number of times to retry passive opening a
322 * connection: ~180sec is RFC minimum */
325 #define TCP_ORPHAN_RETRIES 7 /* number of times to retry on an orphaned
326 * socket. 7 is ~50sec-16min.
330 #define TCP_TIMEWAIT_LEN (60*HZ) /* how long to wait to destroy TIME-WAIT
331 * state, about 60 seconds */
332 #define TCP_FIN_TIMEOUT TCP_TIMEWAIT_LEN
333 /* BSD style FIN_WAIT2 deadlock breaker.
334 * It used to be 3min, new value is 60sec,
335 * to combine FIN-WAIT-2 timeout with
336 * TIME-WAIT timer.
339 #define TCP_DELACK_MAX ((unsigned)(HZ/5)) /* maximal time to delay before sending an ACK */
340 #if HZ >= 100
341 #define TCP_DELACK_MIN ((unsigned)(HZ/25)) /* minimal time to delay before sending an ACK */
342 #define TCP_ATO_MIN ((unsigned)(HZ/25))
343 #else
344 #define TCP_DELACK_MIN 4U
345 #define TCP_ATO_MIN 4U
346 #endif
347 #define TCP_RTO_MAX ((unsigned)(120*HZ))
348 #define TCP_RTO_MIN ((unsigned)(HZ/5))
349 #define TCP_TIMEOUT_INIT ((unsigned)(3*HZ)) /* RFC 1122 initial RTO value */
351 #define TCP_RESOURCE_PROBE_INTERVAL ((unsigned)(HZ/2U)) /* Maximal interval between probes
352 * for local resources.
355 #define TCP_KEEPALIVE_TIME (120*60*HZ) /* two hours */
356 #define TCP_KEEPALIVE_PROBES 9 /* Max of 9 keepalive probes */
357 #define TCP_KEEPALIVE_INTVL (75*HZ)
359 #define MAX_TCP_KEEPIDLE 32767
360 #define MAX_TCP_KEEPINTVL 32767
361 #define MAX_TCP_KEEPCNT 127
362 #define MAX_TCP_SYNCNT 127
364 /* TIME_WAIT reaping mechanism. */
365 #define TCP_TWKILL_SLOTS 8 /* Please keep this a power of 2. */
366 #define TCP_TWKILL_PERIOD (TCP_TIMEWAIT_LEN/TCP_TWKILL_SLOTS)
368 #define TCP_SYNQ_INTERVAL (HZ/5) /* Period of SYNACK timer */
369 #define TCP_SYNQ_HSIZE 512 /* Size of SYNACK hash table */
371 #define TCP_PAWS_24DAYS (60 * 60 * 24 * 24)
372 #define TCP_PAWS_MSL 60 /* Per-host timestamps are invalidated
373 * after this time. It should be equal
374 * (or greater than) TCP_TIMEWAIT_LEN
375 * to provide reliability equal to one
376 * provided by timewait state.
378 #define TCP_PAWS_WINDOW 1 /* Replay window for per-host
379 * timestamps. It must be less than
380 * minimal timewait lifetime.
383 #define TCP_TW_RECYCLE_SLOTS_LOG 5
384 #define TCP_TW_RECYCLE_SLOTS (1<<TCP_TW_RECYCLE_SLOTS_LOG)
386 /* If time > 4sec, it is the "slow" path and no recycling is required,
387 so we select the tick so that the slots span a range of about 4 seconds.
390 #if HZ <= 16 || HZ > 4096
391 # error Unsupported: HZ <= 16 or HZ > 4096
392 #elif HZ <= 32
393 # define TCP_TW_RECYCLE_TICK (5+2-TCP_TW_RECYCLE_SLOTS_LOG)
394 #elif HZ <= 64
395 # define TCP_TW_RECYCLE_TICK (6+2-TCP_TW_RECYCLE_SLOTS_LOG)
396 #elif HZ <= 128
397 # define TCP_TW_RECYCLE_TICK (7+2-TCP_TW_RECYCLE_SLOTS_LOG)
398 #elif HZ <= 256
399 # define TCP_TW_RECYCLE_TICK (8+2-TCP_TW_RECYCLE_SLOTS_LOG)
400 #elif HZ <= 512
401 # define TCP_TW_RECYCLE_TICK (9+2-TCP_TW_RECYCLE_SLOTS_LOG)
402 #elif HZ <= 1024
403 # define TCP_TW_RECYCLE_TICK (10+2-TCP_TW_RECYCLE_SLOTS_LOG)
404 #elif HZ <= 2048
405 # define TCP_TW_RECYCLE_TICK (11+2-TCP_TW_RECYCLE_SLOTS_LOG)
406 #else
407 # define TCP_TW_RECYCLE_TICK (12+2-TCP_TW_RECYCLE_SLOTS_LOG)
408 #endif
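/*
 * Worked example (illustrative): with HZ == 100 the HZ <= 128 branch gives
 * TCP_TW_RECYCLE_TICK == 7 + 2 - 5 == 4, so one recycle slot spans
 * 2^4 == 16 jiffies (0.16 s) and the 32 slots together cover ~5.1 s --
 * the "about 4 seconds" mentioned above, exactly 4 s when HZ is a power
 * of two (e.g. HZ == 128).
 */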
410 #define BICTCP_BETA_SCALE 1024 /* Scale factor beta calculation
411 * max_cwnd = snd_cwnd * beta
413 #define BICTCP_MAX_INCREMENT 32 /*
414 * Limit on the amount of
415 * increment allowed during
416 * binary search.
418 #define BICTCP_FUNC_OF_MIN_INCR 11 /*
419 * log(B/Smin)/log(B/(B-1))+1,
420 * Smin:min increment
421 * B:log factor
423 #define BICTCP_B 4 /*
424 * In binary search,
425 * go to point (max+min)/N
429 * TCP option
432 #define TCPOPT_NOP 1 /* Padding */
433 #define TCPOPT_EOL 0 /* End of options */
434 #define TCPOPT_MSS 2 /* Segment size negotiating */
435 #define TCPOPT_WINDOW 3 /* Window scaling */
436 #define TCPOPT_SACK_PERM 4 /* SACK Permitted */
437 #define TCPOPT_SACK 5 /* SACK Block */
438 #define TCPOPT_TIMESTAMP 8 /* Better RTT estimations/PAWS */
439 #define TCPOPT_RFC2385 19 /* MD5 protection */
442 * TCP option lengths
445 #define TCPOLEN_MSS 4
446 #define TCPOLEN_WINDOW 3
447 #define TCPOLEN_SACK_PERM 2
448 #define TCPOLEN_TIMESTAMP 10
449 #define TCPOLEN_RFC2385 18
451 /* But this is what stacks really send out. */
452 #define TCPOLEN_TSTAMP_ALIGNED 12
453 #define TCPOLEN_WSCALE_ALIGNED 4
454 #define TCPOLEN_SACKPERM_ALIGNED 4
455 #define TCPOLEN_SACK_BASE 2
456 #define TCPOLEN_SACK_BASE_ALIGNED 4
457 #define TCPOLEN_SACK_PERBLOCK 8
458 #define TCPOLEN_RFC2385_ALIGNED 20
460 #define TCP_TIME_RETRANS 1 /* Retransmit timer */
461 #define TCP_TIME_DACK 2 /* Delayed ack timer */
462 #define TCP_TIME_PROBE0 3 /* Zero window probe timer */
463 #define TCP_TIME_KEEPOPEN 4 /* Keepalive timer */
465 /* sysctl variables for tcp */
466 extern int sysctl_max_syn_backlog;
467 extern int sysctl_tcp_timestamps;
468 extern int sysctl_tcp_window_scaling;
469 extern int sysctl_tcp_sack;
470 extern int sysctl_tcp_fin_timeout;
471 extern int sysctl_tcp_tw_recycle;
472 extern int sysctl_tcp_keepalive_time;
473 extern int sysctl_tcp_keepalive_probes;
474 extern int sysctl_tcp_keepalive_intvl;
475 extern int sysctl_tcp_syn_retries;
476 extern int sysctl_tcp_synack_retries;
477 extern int sysctl_tcp_retries1;
478 extern int sysctl_tcp_retries2;
479 extern int sysctl_tcp_orphan_retries;
480 extern int sysctl_tcp_syncookies;
481 extern int sysctl_tcp_retrans_collapse;
482 extern int sysctl_tcp_stdurg;
483 extern int sysctl_tcp_rfc1337;
484 extern int sysctl_tcp_abort_on_overflow;
485 extern int sysctl_tcp_max_orphans;
486 extern int sysctl_tcp_max_tw_buckets;
487 extern int sysctl_tcp_fack;
488 extern int sysctl_tcp_reordering;
489 extern int sysctl_tcp_ecn;
490 extern int sysctl_tcp_dsack;
491 extern int sysctl_tcp_mem[3];
492 extern int sysctl_tcp_wmem[3];
493 extern int sysctl_tcp_rmem[3];
494 extern int sysctl_tcp_app_win;
495 extern int sysctl_tcp_adv_win_scale;
496 extern int sysctl_tcp_tw_reuse;
497 extern int sysctl_tcp_frto;
498 extern int sysctl_tcp_low_latency;
499 extern int sysctl_tcp_westwood;
500 extern int sysctl_tcp_vegas_cong_avoid;
501 extern int sysctl_tcp_vegas_alpha;
502 extern int sysctl_tcp_vegas_beta;
503 extern int sysctl_tcp_vegas_gamma;
504 extern int sysctl_tcp_nometrics_save;
505 extern int sysctl_tcp_bic;
506 extern int sysctl_tcp_bic_fast_convergence;
507 extern int sysctl_tcp_bic_low_window;
508 extern int sysctl_tcp_bic_beta;
509 extern int sysctl_tcp_default_win_scale;
510 extern int sysctl_tcp_moderate_rcvbuf;
512 extern atomic_t tcp_memory_allocated;
513 extern atomic_t tcp_sockets_allocated;
514 extern int tcp_memory_pressure;
516 struct open_request;
518 struct or_calltable {
519 int family;
520 int (*rtx_syn_ack) (struct sock *sk, struct open_request *req, struct dst_entry*);
521 void (*send_ack) (struct sk_buff *skb, struct open_request *req);
522 void (*destructor) (struct open_request *req);
523 void (*send_reset) (struct sock *sk, struct sk_buff *skb);
526 struct tcp_v4_open_req {
527 __u32 loc_addr;
528 __u32 rmt_addr;
529 struct ip_options *opt;
532 #if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
533 struct tcp_v6_open_req {
534 struct in6_addr loc_addr;
535 struct in6_addr rmt_addr;
536 struct sk_buff *pktopts;
537 int iif;
539 #endif
541 /* this structure is too big */
542 struct open_request {
543 struct open_request *dl_next; /* Must be first member! */
544 __u32 rcv_isn;
545 __u32 snt_isn;
546 __u16 rmt_port;
547 __u16 mss;
548 __u8 retrans;
549 __u8 __pad;
550 __u16 snd_wscale : 4,
551 rcv_wscale : 4,
552 tstamp_ok : 1,
553 sack_ok : 1,
554 wscale_ok : 1,
555 ecn_ok : 1,
556 acked : 1;
557 /* The following two fields can be easily recomputed I think -AK */
558 __u32 window_clamp; /* window clamp at creation time */
559 __u32 rcv_wnd; /* rcv_wnd offered first time */
560 __u32 ts_recent;
561 unsigned long expires;
562 struct or_calltable *class;
563 struct sock *sk;
564 union {
565 struct tcp_v4_open_req v4_req;
566 #if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
567 struct tcp_v6_open_req v6_req;
568 #endif
569 } af;
572 /* SLAB cache for open requests. */
573 extern kmem_cache_t *tcp_openreq_cachep;
575 #define tcp_openreq_alloc() kmem_cache_alloc(tcp_openreq_cachep, SLAB_ATOMIC)
576 #define tcp_openreq_fastfree(req) kmem_cache_free(tcp_openreq_cachep, req)
578 static inline void tcp_openreq_free(struct open_request *req)
580 req->class->destructor(req);
581 tcp_openreq_fastfree(req);
584 #if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
585 #define TCP_INET_FAMILY(fam) ((fam) == AF_INET)
586 #else
587 #define TCP_INET_FAMILY(fam) 1
588 #endif
591 * Pointers to address related TCP functions
592 * (i.e. things that depend on the address family)
594 * BUGGG_FUTURE: all the idea behind this struct is wrong.
595 * It mixes socket frontend with transport function.
596 * With port sharing between IPv6/v4 it gives the only advantage,
597 * only poor IPv6 needs to permanently recheck, that it
598 * is still IPv6 8)8) It must be cleaned up as soon as possible.
599 * --ANK (980802)
602 struct tcp_func {
603 int (*queue_xmit) (struct sk_buff *skb,
604 int ipfragok);
606 void (*send_check) (struct sock *sk,
607 struct tcphdr *th,
608 int len,
609 struct sk_buff *skb);
611 int (*rebuild_header) (struct sock *sk);
613 int (*conn_request) (struct sock *sk,
614 struct sk_buff *skb);
616 struct sock * (*syn_recv_sock) (struct sock *sk,
617 struct sk_buff *skb,
618 struct open_request *req,
619 struct dst_entry *dst);
621 int (*remember_stamp) (struct sock *sk);
623 __u16 net_header_len;
625 int (*setsockopt) (struct sock *sk,
626 int level,
627 int optname,
628 char *optval,
629 int optlen);
631 int (*getsockopt) (struct sock *sk,
632 int level,
633 int optname,
634 char *optval,
635 int *optlen);
638 void (*addr2sockaddr) (struct sock *sk,
639 struct sockaddr *);
641 int sockaddr_len;
645 * The next routines deal with comparing 32 bit unsigned ints
646 * and worry about wraparound (automatic with unsigned arithmetic).
649 static inline int before(__u32 seq1, __u32 seq2)
651 return (__s32)(seq1-seq2) < 0;
654 static inline int after(__u32 seq1, __u32 seq2)
656 return (__s32)(seq2-seq1) < 0;
660 /* is s2<=s1<=s3 ? */
661 static inline int between(__u32 seq1, __u32 seq2, __u32 seq3)
663 return seq3 - seq2 >= seq1 - seq2;
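/*
 * Illustrative: the signed-difference trick above handles sequence-number
 * wraparound.  E.g. before(0xfffffff0, 0x00000010) is true, because
 * (__s32)(0xfffffff0 - 0x00000010) == (__s32)0xffffffe0 < 0, even though
 * 0xfffffff0 is the larger value in an unsigned compare.
 */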
667 extern struct proto tcp_prot;
669 extern struct tcp_mib tcp_statistics[NR_CPUS*2];
670 #define TCP_INC_STATS(field) SNMP_INC_STATS(tcp_statistics, field)
671 #define TCP_INC_STATS_BH(field) SNMP_INC_STATS_BH(tcp_statistics, field)
672 #define TCP_INC_STATS_USER(field) SNMP_INC_STATS_USER(tcp_statistics, field)
673 #define TCP_ADD_STATS_BH(field, val) SNMP_ADD_STATS_BH(tcp_statistics, field, val)
674 #define TCP_ADD_STATS_USER(field, val) SNMP_ADD_STATS_USER(tcp_statistics, field, val)
676 extern void tcp_put_port(struct sock *sk);
677 extern void __tcp_put_port(struct sock *sk);
678 extern void tcp_inherit_port(struct sock *sk, struct sock *child);
680 extern void tcp_v4_err(struct sk_buff *skb, u32);
682 extern void tcp_shutdown (struct sock *sk, int how);
684 extern int tcp_v4_rcv(struct sk_buff *skb);
686 extern int tcp_v4_remember_stamp(struct sock *sk);
688 extern int tcp_v4_tw_remember_stamp(struct tcp_tw_bucket *tw);
690 extern int tcp_sendmsg(struct sock *sk, struct msghdr *msg, int size);
691 extern ssize_t tcp_sendpage(struct socket *sock, struct page *page, int offset, size_t size, int flags);
693 extern int tcp_ioctl(struct sock *sk,
694 int cmd,
695 unsigned long arg);
697 extern int tcp_rcv_state_process(struct sock *sk,
698 struct sk_buff *skb,
699 struct tcphdr *th,
700 unsigned len);
702 extern int tcp_rcv_established(struct sock *sk,
703 struct sk_buff *skb,
704 struct tcphdr *th,
705 unsigned len);
707 extern void tcp_rcv_space_adjust(struct sock *sk);
709 enum tcp_ack_state_t
711 TCP_ACK_SCHED = 1,
712 TCP_ACK_TIMER = 2,
713 TCP_ACK_PUSHED = 4,
714 TCP_ACK_PUSHED2 = 8
717 static inline void tcp_schedule_ack(struct tcp_opt *tp)
719 tp->ack.pending |= TCP_ACK_SCHED;
722 static inline int tcp_ack_scheduled(struct tcp_opt *tp)
724 return tp->ack.pending&TCP_ACK_SCHED;
727 static __inline__ void tcp_dec_quickack_mode(struct tcp_opt *tp)
729 if (tp->ack.quick && --tp->ack.quick == 0) {
730 /* Leaving quickack mode we deflate ATO. */
731 tp->ack.ato = TCP_ATO_MIN;
735 extern void tcp_enter_quickack_mode(struct tcp_opt *tp);
737 static __inline__ void tcp_delack_init(struct tcp_opt *tp)
739 memset(&tp->ack, 0, sizeof(tp->ack));
742 static inline void tcp_clear_options(struct tcp_opt *tp)
744 tp->tstamp_ok = tp->sack_ok = tp->wscale_ok = tp->snd_wscale = 0;
745 #ifdef CONFIG_TCP_RFC2385
746 tp->md5_db_entries = 0;
747 #endif
750 enum tcp_tw_status
752 TCP_TW_SUCCESS = 0,
753 TCP_TW_RST = 1,
754 TCP_TW_ACK = 2,
755 TCP_TW_SYN = 3
759 extern enum tcp_tw_status tcp_timewait_state_process(struct tcp_tw_bucket *tw,
760 struct sk_buff *skb,
761 struct tcphdr *th,
762 unsigned len);
764 extern struct sock * tcp_check_req(struct sock *sk,struct sk_buff *skb,
765 struct open_request *req,
766 struct open_request **prev);
767 extern int tcp_child_process(struct sock *parent,
768 struct sock *child,
769 struct sk_buff *skb);
770 extern void tcp_enter_frto(struct sock *sk);
771 extern void tcp_enter_loss(struct sock *sk, int how);
772 extern void tcp_clear_retrans(struct tcp_opt *tp);
773 extern void tcp_update_metrics(struct sock *sk);
775 extern void tcp_close(struct sock *sk,
776 long timeout);
777 extern struct sock * tcp_accept(struct sock *sk, int flags, int *err);
778 extern unsigned int tcp_poll(struct file * file, struct socket *sock, struct poll_table_struct *wait);
779 extern void tcp_write_space(struct sock *sk);
781 extern int tcp_getsockopt(struct sock *sk, int level,
782 int optname, char *optval,
783 int *optlen);
784 extern int tcp_setsockopt(struct sock *sk, int level,
785 int optname, char *optval,
786 int optlen);
787 extern void tcp_set_keepalive(struct sock *sk, int val);
788 extern int tcp_recvmsg(struct sock *sk,
789 struct msghdr *msg,
790 int len, int nonblock,
791 int flags, int *addr_len);
793 extern int tcp_listen_start(struct sock *sk);
795 extern void tcp_parse_options(struct sk_buff *skb,
796 struct tcp_opt *tp,
797 int estab);
800 * TCP v4 functions exported for the inet6 API
803 extern int tcp_v4_rebuild_header(struct sock *sk);
805 extern int tcp_v4_build_header(struct sock *sk,
806 struct sk_buff *skb);
808 extern void tcp_v4_send_check(struct sock *sk,
809 struct tcphdr *th, int len,
810 struct sk_buff *skb);
812 extern int tcp_v4_conn_request(struct sock *sk,
813 struct sk_buff *skb);
815 extern struct sock * tcp_create_openreq_child(struct sock *sk,
816 struct open_request *req,
817 struct sk_buff *skb);
819 extern struct sock * tcp_v4_syn_recv_sock(struct sock *sk,
820 struct sk_buff *skb,
821 struct open_request *req,
822 struct dst_entry *dst);
824 extern int tcp_v4_do_rcv(struct sock *sk,
825 struct sk_buff *skb);
827 extern int tcp_v4_connect(struct sock *sk,
828 struct sockaddr *uaddr,
829 int addr_len);
831 extern int tcp_connect(struct sock *sk);
833 extern struct sk_buff * tcp_make_synack(struct sock *sk,
834 struct dst_entry *dst,
835 struct open_request *req);
837 extern int tcp_disconnect(struct sock *sk, int flags);
839 extern void tcp_unhash(struct sock *sk);
841 extern int tcp_v4_hash_connecting(struct sock *sk);
844 /* From syncookies.c */
845 extern struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb,
846 struct ip_options *opt);
847 extern __u32 cookie_v4_init_sequence(struct sock *sk, struct sk_buff *skb,
848 __u16 *mss);
850 /* tcp_output.c */
852 extern int tcp_write_xmit(struct sock *, unsigned int mss_now, int nonagle);
853 extern int tcp_retransmit_skb(struct sock *, struct sk_buff *);
854 extern void tcp_xmit_retransmit_queue(struct sock *);
855 extern void tcp_simple_retransmit(struct sock *);
857 extern void tcp_send_probe0(struct sock *);
858 extern void tcp_send_partial(struct sock *);
859 extern int tcp_write_wakeup(struct sock *);
860 extern void tcp_send_fin(struct sock *sk);
861 extern void tcp_send_active_reset(struct sock *sk, int priority);
862 extern int tcp_send_synack(struct sock *);
863 extern int tcp_transmit_skb(struct sock *, struct sk_buff *);
864 extern void tcp_send_skb(struct sock *, struct sk_buff *, int force_queue, unsigned mss_now);
865 extern void tcp_push_one(struct sock *, unsigned mss_now);
866 extern void tcp_send_ack(struct sock *sk);
867 extern void tcp_send_delayed_ack(struct sock *sk);
869 /* tcp_timer.c */
870 extern void tcp_init_xmit_timers(struct sock *);
871 extern void tcp_clear_xmit_timers(struct sock *);
873 extern void tcp_delete_keepalive_timer (struct sock *);
874 extern void tcp_reset_keepalive_timer (struct sock *, unsigned long);
875 extern int tcp_sync_mss(struct sock *sk, u32 pmtu);
877 extern const char timer_bug_msg[];
879 /* Read 'sendfile()'-style from a TCP socket */
880 typedef int (*sk_read_actor_t)(read_descriptor_t *, struct sk_buff *,
881 unsigned int, size_t);
882 extern int tcp_read_sock(struct sock *sk, read_descriptor_t *desc,
883 sk_read_actor_t recv_actor);
885 static inline void tcp_clear_xmit_timer(struct sock *sk, int what)
887 struct tcp_opt *tp = &sk->tp_pinfo.af_tcp;
889 switch (what) {
890 case TCP_TIME_RETRANS:
891 case TCP_TIME_PROBE0:
892 tp->pending = 0;
894 #ifdef TCP_CLEAR_TIMERS
895 if (timer_pending(&tp->retransmit_timer) &&
896 del_timer(&tp->retransmit_timer))
897 __sock_put(sk);
898 #endif
899 break;
900 case TCP_TIME_DACK:
901 tp->ack.blocked = 0;
902 tp->ack.pending = 0;
904 #ifdef TCP_CLEAR_TIMERS
905 if (timer_pending(&tp->delack_timer) &&
906 del_timer(&tp->delack_timer))
907 __sock_put(sk);
908 #endif
909 break;
910 default:
911 printk(timer_bug_msg);
912 return;
918 * Reset the retransmission timer
920 static inline void tcp_reset_xmit_timer(struct sock *sk, int what, unsigned long when)
922 struct tcp_opt *tp = &sk->tp_pinfo.af_tcp;
924 if (when > TCP_RTO_MAX) {
925 #ifdef TCP_DEBUG
926 printk(KERN_DEBUG "reset_xmit_timer sk=%p %d when=0x%lx, caller=%p\n", sk, what, when, current_text_addr());
927 #endif
928 when = TCP_RTO_MAX;
931 switch (what) {
932 case TCP_TIME_RETRANS:
933 case TCP_TIME_PROBE0:
934 tp->pending = what;
935 tp->timeout = jiffies+when;
936 if (!mod_timer(&tp->retransmit_timer, tp->timeout))
937 sock_hold(sk);
938 break;
940 case TCP_TIME_DACK:
941 tp->ack.pending |= TCP_ACK_TIMER;
942 tp->ack.timeout = jiffies+when;
943 if (!mod_timer(&tp->delack_timer, tp->ack.timeout))
944 sock_hold(sk);
945 break;
947 default:
948 printk(KERN_DEBUG "bug: unknown timer value\n");
952 /* Compute the current effective MSS, taking SACKs and IP options,
953 * and even PMTU discovery events into account.
956 static __inline__ unsigned int tcp_current_mss(struct sock *sk)
958 struct tcp_opt *tp = &sk->tp_pinfo.af_tcp;
959 struct dst_entry *dst = __sk_dst_get(sk);
960 int mss_now = tp->mss_cache;
962 if (dst && dst->pmtu != tp->pmtu_cookie)
963 mss_now = tcp_sync_mss(sk, dst->pmtu);
965 if (tp->eff_sacks)
966 mss_now -= (TCPOLEN_SACK_BASE_ALIGNED +
967 (tp->eff_sacks * TCPOLEN_SACK_PERBLOCK));
969 #ifdef CONFIG_TCP_RFC2385
970 if (tcp_v4_md5_lookup (sk, sk->daddr))
971 mss_now -= TCPOLEN_RFC2385_ALIGNED;
972 #endif
974 return mss_now;
977 /* Initialize RCV_MSS value.
978 * RCV_MSS is our guess about the MSS used by the peer.
979 * We have no direct information about the MSS.
980 * It's better to underestimate the RCV_MSS rather than overestimate it.
981 * Overestimations make us ACK less frequently than needed.
982 * Underestimations are easier to detect and fix by tcp_measure_rcv_mss().
985 static inline void tcp_initialize_rcv_mss(struct sock *sk)
987 struct tcp_opt *tp = &sk->tp_pinfo.af_tcp;
988 unsigned int hint = min(tp->advmss, tp->mss_cache);
990 hint = min(hint, tp->rcv_wnd/2);
991 hint = min(hint, TCP_MIN_RCVMSS);
992 hint = max(hint, TCP_MIN_MSS);
994 tp->ack.rcv_mss = hint;
997 static __inline__ void __tcp_fast_path_on(struct tcp_opt *tp, u32 snd_wnd)
999 tp->pred_flags = htonl((tp->tcp_header_len << 26) |
1000 ntohl(TCP_FLAG_ACK) |
1001 snd_wnd);
1004 static __inline__ void tcp_fast_path_on(struct tcp_opt *tp)
1006 __tcp_fast_path_on(tp, tp->snd_wnd>>tp->snd_wscale);
1009 static inline void tcp_fast_path_check(struct sock *sk, struct tcp_opt *tp)
1011 if (skb_queue_len(&tp->out_of_order_queue) == 0 &&
1012 tp->rcv_wnd &&
1013 atomic_read(&sk->rmem_alloc) < sk->rcvbuf &&
1014 !tp->urg_data)
1015 tcp_fast_path_on(tp);
1018 /* Compute the actual receive window we are currently advertising.
1019 * Rcv_nxt can be after the window if our peer pushes more data
1020 * than the offered window.
1022 static __inline__ u32 tcp_receive_window(struct tcp_opt *tp)
1024 s32 win = tp->rcv_wup + tp->rcv_wnd - tp->rcv_nxt;
1026 if (win < 0)
1027 win = 0;
1028 return (u32) win;
1031 /* Choose a new window, without checks for shrinking, and without
1032 * scaling applied to the result. The caller does these things
1033 * if necessary. This is a "raw" window selection.
1035 extern u32 __tcp_select_window(struct sock *sk);
1037 /* TCP timestamps are only 32-bits, this causes a slight
1038 * complication on 64-bit systems since we store a snapshot
1039 * of jiffies in the buffer control blocks below. We deliberately
1040 * use only the low 32-bits of jiffies and hide the ugly
1041 * casts with the following macro.
1043 #define tcp_time_stamp ((__u32)(jiffies))
1045 /* This is what the send packet queueing engine uses to pass
1046 * TCP per-packet control information to the transmission
1047 * code. We also store the host-order sequence numbers in
1048 * here too. This is 36 bytes on 32-bit architectures,
1049 * 40 bytes on 64-bit machines, if this grows please adjust
1050 * skbuff.h:skbuff->cb[xxx] size appropriately.
1052 struct tcp_skb_cb {
1053 union {
1054 struct inet_skb_parm h4;
1055 #if defined(CONFIG_IPV6) || defined (CONFIG_IPV6_MODULE)
1056 struct inet6_skb_parm h6;
1057 #endif
1058 } header; /* For incoming frames */
1059 __u32 seq; /* Starting sequence number */
1060 __u32 end_seq; /* SEQ + FIN + SYN + datalen */
1061 __u32 when; /* used to compute rtt's */
1062 __u8 flags; /* TCP header flags. */
1064 /* NOTE: These must match up to the flags byte in a
1065 * real TCP header.
1067 #define TCPCB_FLAG_FIN 0x01
1068 #define TCPCB_FLAG_SYN 0x02
1069 #define TCPCB_FLAG_RST 0x04
1070 #define TCPCB_FLAG_PSH 0x08
1071 #define TCPCB_FLAG_ACK 0x10
1072 #define TCPCB_FLAG_URG 0x20
1073 #define TCPCB_FLAG_ECE 0x40
1074 #define TCPCB_FLAG_CWR 0x80
1076 __u8 sacked; /* State flags for SACK/FACK. */
1077 #define TCPCB_SACKED_ACKED 0x01 /* SKB ACK'd by a SACK block */
1078 #define TCPCB_SACKED_RETRANS 0x02 /* SKB retransmitted */
1079 #define TCPCB_LOST 0x04 /* SKB is lost */
1080 #define TCPCB_TAGBITS 0x07 /* All tag bits */
1082 #define TCPCB_EVER_RETRANS 0x80 /* Ever retransmitted frame */
1083 #define TCPCB_RETRANS (TCPCB_SACKED_RETRANS|TCPCB_EVER_RETRANS)
1085 #define TCPCB_URG 0x20 /* Urgent pointer advanced here */
1087 #define TCPCB_AT_TAIL (TCPCB_URG)
1089 __u16 urg_ptr; /* Valid when the URG flag is set. */
1090 __u32 ack_seq; /* Sequence number ACK'd */
1093 #define TCP_SKB_CB(__skb) ((struct tcp_skb_cb *)&((__skb)->cb[0]))
1095 #define for_retrans_queue(skb, sk, tp) \
1096 for (skb = (sk)->write_queue.next; \
1097 (skb != (tp)->send_head) && \
1098 (skb != (struct sk_buff *)&(sk)->write_queue); \
1099 skb=skb->next)
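/*
 * Illustrative use of for_retrans_queue() (hypothetical helper, not part of
 * the original header): count how many not-yet-acked segments have ever
 * been retransmitted.  The caller must hold the socket lock.
 */
static inline int tcp_count_ever_retrans(struct sock *sk, struct tcp_opt *tp)
{
	struct sk_buff *skb;
	int cnt = 0;

	for_retrans_queue(skb, sk, tp)
		if (TCP_SKB_CB(skb)->sacked & TCPCB_EVER_RETRANS)
			cnt++;
	return cnt;
}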
1102 #include <net/tcp_ecn.h>
1106 * Compute minimal free write space needed to queue new packets.
1108 static inline int tcp_min_write_space(struct sock *sk)
1110 return sk->wmem_queued/2;
1113 static inline int tcp_wspace(struct sock *sk)
1115 return sk->sndbuf - sk->wmem_queued;
1119 /* This determines how many packets are "in the network" to the best
1120 * of our knowledge. In many cases it is conservative, but where
1121 * detailed information is available from the receiver (via SACK
1122 * blocks etc.) we can make more aggressive calculations.
1124 * Use this for decisions involving congestion control, use just
1125 * tp->packets_out to determine if the send queue is empty or not.
1127 * Read this equation as:
1129 * "Packets sent once on transmission queue" MINUS
1130 * "Packets left network, but not honestly ACKed yet" PLUS
1131 * "Packets fast retransmitted"
1133 static __inline__ unsigned int tcp_packets_in_flight(struct tcp_opt *tp)
1135 return tp->packets_out - tp->left_out + tp->retrans_out;
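/*
 * Worked example (illustrative): with packets_out == 10, sacked_out == 3,
 * lost_out == 2 (so left_out == 5, see tcp_sync_left_out() below) and
 * retrans_out == 1, tcp_packets_in_flight() == 10 - 5 + 1 == 6 segments
 * believed to still be in the network.
 */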
1139 * Which congestion algorithm is in use on the connection.
1141 #define tcp_is_vegas(__tp) ((__tp)->adv_cong == TCP_VEGAS)
1142 #define tcp_is_westwood(__tp) ((__tp)->adv_cong == TCP_WESTWOOD)
1143 #define tcp_is_bic(__tp) ((__tp)->adv_cong == TCP_BIC)
1145 /* Recalculate snd_ssthresh, we want to set it to:
1147 * Reno:
1148 * one half the current congestion window, but no
1149 * less than two segments
1151 * BIC:
1152 * behave like Reno until low_window is reached,
1153 * then increase congestion window slowly
1155 static inline __u32 tcp_recalc_ssthresh(struct tcp_opt *tp)
1157 if (tcp_is_bic(tp)) {
1158 if (sysctl_tcp_bic_fast_convergence &&
1159 tp->snd_cwnd < tp->bictcp.last_max_cwnd)
1160 tp->bictcp.last_max_cwnd = (tp->snd_cwnd *
1161 (BICTCP_BETA_SCALE
1162 + sysctl_tcp_bic_beta))
1163 / (2 * BICTCP_BETA_SCALE);
1164 else
1165 tp->bictcp.last_max_cwnd = tp->snd_cwnd;
1167 if (tp->snd_cwnd > sysctl_tcp_bic_low_window)
1168 return max((tp->snd_cwnd * sysctl_tcp_bic_beta)
1169 / BICTCP_BETA_SCALE, 2U);
1172 return max(tp->snd_cwnd >> 1U, 2U);
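/*
 * Worked example (illustrative, assuming the commonly used default
 * sysctl_tcp_bic_beta == 819): above the low-window threshold a loss with
 * snd_cwnd == 100 gives ssthresh == max(100 * 819 / 1024, 2) == 79, i.e.
 * roughly a 20% reduction instead of Reno's 50%.
 */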
1175 /* Stop taking Vegas samples for now. */
1176 #define tcp_vegas_disable(__tp) ((__tp)->vegas.doing_vegas_now = 0)
1178 static inline void tcp_vegas_enable(struct tcp_opt *tp)
1180 /* There are several situations when we must "re-start" Vegas:
1182 * o when a connection is established
1183 * o after an RTO
1184 * o after fast recovery
1185 * o when we send a packet and there is no outstanding
1186 * unacknowledged data (restarting an idle connection)
1188 * In these circumstances we cannot do a Vegas calculation at the
1189 * end of the first RTT, because any calculation we do is using
1190 * stale info -- both the saved cwnd and congestion feedback are
1191 * stale.
1193 * Instead we must wait until the completion of an RTT during
1194 * which we actually receive ACKs.
1197 /* Begin taking Vegas samples next time we send something. */
1198 tp->vegas.doing_vegas_now = 1;
1200 /* Set the beginning of the next send window. */
1201 tp->vegas.beg_snd_nxt = tp->snd_nxt;
1203 tp->vegas.cntRTT = 0;
1204 tp->vegas.minRTT = 0x7fffffff;
1207 /* Should we be taking Vegas samples right now? */
1208 #define tcp_vegas_enabled(__tp) ((__tp)->vegas.doing_vegas_now)
1210 extern void tcp_ca_init(struct tcp_opt *tp);
1212 static inline void tcp_set_ca_state(struct tcp_opt *tp, u8 ca_state)
1214 if (tcp_is_vegas(tp)) {
1215 if (ca_state == TCP_CA_Open)
1216 tcp_vegas_enable(tp);
1217 else
1218 tcp_vegas_disable(tp);
1220 tp->ca_state = ca_state;
1223 /* If cwnd > ssthresh, we may raise ssthresh to be half-way to cwnd.
1224 * The exception is rate halving phase, when cwnd is decreasing towards
1225 * ssthresh.
1227 static inline __u32 tcp_current_ssthresh(struct tcp_opt *tp)
1229 if ((1<<tp->ca_state)&(TCPF_CA_CWR|TCPF_CA_Recovery))
1230 return tp->snd_ssthresh;
1231 else
1232 return max(tp->snd_ssthresh,
1233 ((tp->snd_cwnd >> 1) +
1234 (tp->snd_cwnd >> 2)));
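/*
 * Illustrative: (snd_cwnd >> 1) + (snd_cwnd >> 2) == 0.75 * snd_cwnd, so
 * outside CWR/Recovery the value reported is at least three quarters of
 * the current congestion window.
 */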
1237 static inline void tcp_sync_left_out(struct tcp_opt *tp)
1239 if (tp->sack_ok && tp->sacked_out >= tp->packets_out - tp->lost_out)
1240 tp->sacked_out = tp->packets_out - tp->lost_out;
1241 tp->left_out = tp->sacked_out + tp->lost_out;
1244 extern void tcp_cwnd_application_limited(struct sock *sk);
1246 /* Congestion window validation. (RFC2861) */
1248 static inline void tcp_cwnd_validate(struct sock *sk, struct tcp_opt *tp)
1250 if (tp->packets_out >= tp->snd_cwnd) {
1251 /* Network is fed fully. */
1252 tp->snd_cwnd_used = 0;
1253 tp->snd_cwnd_stamp = tcp_time_stamp;
1254 } else {
1255 /* Network starves. */
1256 if (tp->packets_out > tp->snd_cwnd_used)
1257 tp->snd_cwnd_used = tp->packets_out;
1259 if ((s32)(tcp_time_stamp - tp->snd_cwnd_stamp) >= tp->rto)
1260 tcp_cwnd_application_limited(sk);
1264 /* Set slow start threshold and cwnd without falling back to slow start */
1265 static inline void __tcp_enter_cwr(struct tcp_opt *tp)
1267 tp->undo_marker = 0;
1268 tp->snd_ssthresh = tcp_recalc_ssthresh(tp);
1269 tp->snd_cwnd = min(tp->snd_cwnd,
1270 tcp_packets_in_flight(tp) + 1U);
1271 tp->snd_cwnd_cnt = 0;
1272 tp->high_seq = tp->snd_nxt;
1273 tp->snd_cwnd_stamp = tcp_time_stamp;
1274 TCP_ECN_queue_cwr(tp);
1277 static inline void tcp_enter_cwr(struct tcp_opt *tp)
1279 tp->prior_ssthresh = 0;
1280 if (tp->ca_state < TCP_CA_CWR) {
1281 __tcp_enter_cwr(tp);
1282 tcp_set_ca_state(tp, TCP_CA_CWR);
1286 extern __u32 tcp_init_cwnd(struct tcp_opt *tp);
1288 /* Slow start with delack produces 3 packets of burst, so that
1289 * it is safe "de facto".
1291 static __inline__ __u32 tcp_max_burst(struct tcp_opt *tp)
1293 return 3;
1296 static __inline__ int tcp_minshall_check(struct tcp_opt *tp)
1298 return after(tp->snd_sml,tp->snd_una) &&
1299 !after(tp->snd_sml, tp->snd_nxt);
1302 static __inline__ void tcp_minshall_update(struct tcp_opt *tp, int mss, struct sk_buff *skb)
1304 if (skb->len < mss)
1305 tp->snd_sml = TCP_SKB_CB(skb)->end_seq;
1308 /* Return 0, if the packet can be sent now without violating Nagle's rules:
1309 1. It is full sized.
1310 2. Or it contains FIN.
1311 3. Or TCP_NODELAY was set.
1312 4. Or TCP_CORK is not set, and all sent packets are ACKed.
1313 With Minshall's modification: all sent small packets are ACKed.
1316 static __inline__ int
1317 tcp_nagle_check(struct tcp_opt *tp, struct sk_buff *skb, unsigned mss_now, int nonagle)
1319 return (skb->len < mss_now &&
1320 !(TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN) &&
1321 (nonagle == 2 ||
1322 (!nonagle &&
1323 tp->packets_out &&
1324 tcp_minshall_check(tp))));
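/*
 * Illustrative reading of tcp_nagle_check() above (nonagle: 0 = Nagle on,
 * 1 = TCP_NODELAY, 2 = TCP_CORK): a segment is held back only if it is
 * smaller than the MSS and carries no FIN, and then only if the socket is
 * corked (nonagle == 2), or Nagle is active (nonagle == 0) with data
 * outstanding and the last small segment still unacknowledged
 * (tcp_minshall_check()).  With nonagle == 1 neither branch matches, so
 * sub-MSS segments go out immediately.
 */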
1327 /* This checks if the data bearing packet SKB (usually tp->send_head)
1328 * should be put on the wire right now.
1330 static __inline__ int tcp_snd_test(struct tcp_opt *tp, struct sk_buff *skb,
1331 unsigned cur_mss, int nonagle)
1333 /* RFC 1122 - section 4.2.3.4
1335 * We must queue if
1337 * a) The right edge of this frame exceeds the window
1338 * b) There are packets in flight and we have a small segment
1339 * [SWS avoidance and Nagle algorithm]
1340 * (part of SWS is done on packetization)
1341 * The Minshall version reads: there are no _small_
1342 * segments in flight. (tcp_nagle_check)
1343 * c) We have too many packets 'in flight'
1345 * Don't use the nagle rule for urgent data (or
1346 * for the final FIN -DaveM).
1348 * Also, Nagle rule does not apply to frames, which
1349 * sit in the middle of queue (they have no chances
1350 * to get new data) and if room at tail of skb is
1351 * not enough to save something seriously (<32 for now).
1354 /* Don't be strict about the congestion window for the
1355 * final FIN frame. -DaveM
1357 return ((nonagle==1 || tp->urg_mode
1358 || !tcp_nagle_check(tp, skb, cur_mss, nonagle)) &&
1359 ((tcp_packets_in_flight(tp) < tp->snd_cwnd) ||
1360 (TCP_SKB_CB(skb)->flags & TCPCB_FLAG_FIN)) &&
1361 !after(TCP_SKB_CB(skb)->end_seq, tp->snd_una + tp->snd_wnd));
1364 static __inline__ void tcp_check_probe_timer(struct sock *sk, struct tcp_opt *tp)
1366 if (!tp->packets_out && !tp->pending)
1367 tcp_reset_xmit_timer(sk, TCP_TIME_PROBE0, tp->rto);
1370 static __inline__ int tcp_skb_is_last(struct sock *sk, struct sk_buff *skb)
1372 return (skb->next == (struct sk_buff*)&sk->write_queue);
1375 /* Push out any pending frames which were held back due to
1376 * TCP_CORK or attempt at coalescing tiny packets.
1377 * The socket must be locked by the caller.
1379 static __inline__ void __tcp_push_pending_frames(struct sock *sk,
1380 struct tcp_opt *tp,
1381 unsigned int cur_mss,
1382 int nonagle)
1384 struct sk_buff *skb = tp->send_head;
1386 if (skb) {
1387 if (tcp_write_xmit(sk, cur_mss, nonagle))
1388 tcp_check_probe_timer(sk, tp);
1390 tcp_cwnd_validate(sk, tp);
1393 static __inline__ void tcp_push_pending_frames(struct sock *sk,
1394 struct tcp_opt *tp)
1396 __tcp_push_pending_frames(sk, tp, tcp_current_mss(sk), tp->nonagle);
1399 static __inline__ int tcp_may_send_now(struct sock *sk, struct tcp_opt *tp)
1401 struct sk_buff *skb = tp->send_head;
1403 return (skb &&
1404 tcp_snd_test(tp, skb, tcp_current_mss(sk),
1405 tcp_skb_is_last(sk, skb) ? 1 : tp->nonagle));
1408 static __inline__ void tcp_init_wl(struct tcp_opt *tp, u32 ack, u32 seq)
1410 tp->snd_wl1 = seq;
1413 static __inline__ void tcp_update_wl(struct tcp_opt *tp, u32 ack, u32 seq)
1415 tp->snd_wl1 = seq;
1418 extern void tcp_destroy_sock(struct sock *sk);
1422 * Calculate(/check) TCP checksum
1424 static __inline__ u16 tcp_v4_check(struct tcphdr *th, int len,
1425 unsigned long saddr, unsigned long daddr,
1426 unsigned long base)
1428 return csum_tcpudp_magic(saddr,daddr,len,IPPROTO_TCP,base);
1431 static __inline__ int __tcp_checksum_complete(struct sk_buff *skb)
1433 return (unsigned short)csum_fold(skb_checksum(skb, 0, skb->len, skb->csum));
1436 static __inline__ int tcp_checksum_complete(struct sk_buff *skb)
1438 return skb->ip_summed != CHECKSUM_UNNECESSARY &&
1439 __tcp_checksum_complete(skb);
1442 /* Prequeue for VJ style copy to user, combined with checksumming. */
1444 static __inline__ void tcp_prequeue_init(struct tcp_opt *tp)
1446 tp->ucopy.task = NULL;
1447 tp->ucopy.len = 0;
1448 tp->ucopy.memory = 0;
1449 skb_queue_head_init(&tp->ucopy.prequeue);
1452 /* Packet is added to VJ-style prequeue for processing in process
1453 * context, if a reader task is waiting. Apparently, this exciting
1454 * idea (VJ's mail "Re: query about TCP header on tcp-ip" of 07 Sep 93)
1455 * failed somewhere. Latency? Burstiness? Well, at least now we will
1456 * see, why it failed. 8)8) --ANK
1458 * NOTE: is this not too big to inline?
1460 static __inline__ int tcp_prequeue(struct sock *sk, struct sk_buff *skb)
1462 struct tcp_opt *tp = &sk->tp_pinfo.af_tcp;
1464 if (!sysctl_tcp_low_latency && tp->ucopy.task) {
1465 __skb_queue_tail(&tp->ucopy.prequeue, skb);
1466 tp->ucopy.memory += skb->truesize;
1467 if (tp->ucopy.memory > sk->rcvbuf) {
1468 struct sk_buff *skb1;
1470 if (sk->lock.users)
1471 out_of_line_bug();
1473 while ((skb1 = __skb_dequeue(&tp->ucopy.prequeue)) != NULL) {
1474 sk->backlog_rcv(sk, skb1);
1475 NET_INC_STATS_BH(TCPPrequeueDropped);
1478 tp->ucopy.memory = 0;
1479 } else if (skb_queue_len(&tp->ucopy.prequeue) == 1) {
1480 wake_up_interruptible(sk->sleep);
1481 if (!tcp_ack_scheduled(tp))
1482 tcp_reset_xmit_timer(sk, TCP_TIME_DACK, (3*TCP_RTO_MIN)/4);
1484 return 1;
1486 return 0;
1490 #undef STATE_TRACE
1492 #ifdef STATE_TRACE
1493 static char *statename[]={
1494 "Unused","Established","Syn Sent","Syn Recv",
1495 "Fin Wait 1","Fin Wait 2","Time Wait", "Close",
1496 "Close Wait","Last ACK","Listen","Closing"
1498 #endif
1500 static __inline__ void tcp_set_state(struct sock *sk, int state)
1502 int oldstate = sk->state;
1504 switch (state) {
1505 case TCP_ESTABLISHED:
1506 if (oldstate != TCP_ESTABLISHED)
1507 TCP_INC_STATS(TcpCurrEstab);
1508 break;
1510 case TCP_CLOSE:
1511 if (oldstate == TCP_CLOSE_WAIT || oldstate == TCP_ESTABLISHED)
1512 TCP_INC_STATS(TcpEstabResets);
1514 sk->prot->unhash(sk);
1515 if (sk->prev && !(sk->userlocks&SOCK_BINDPORT_LOCK))
1516 tcp_put_port(sk);
1517 /* fall through */
1518 default:
1519 if (oldstate==TCP_ESTABLISHED)
1520 tcp_statistics[smp_processor_id()*2+!in_softirq()].TcpCurrEstab--;
1523 /* Change state AFTER socket is unhashed to avoid closed
1524 * socket sitting in hash tables.
1526 sk->state = state;
1528 #ifdef STATE_TRACE
1529 SOCK_DEBUG(sk, "TCP sk=%p, State %s -> %s\n",sk, statename[oldstate],statename[state]);
1530 #endif
1533 static __inline__ void tcp_done(struct sock *sk)
1535 tcp_set_state(sk, TCP_CLOSE);
1536 tcp_clear_xmit_timers(sk);
1538 sk->shutdown = SHUTDOWN_MASK;
1540 if (!sk->dead)
1541 sk->state_change(sk);
1542 else
1543 tcp_destroy_sock(sk);
1546 static __inline__ void tcp_sack_reset(struct tcp_opt *tp)
1548 tp->dsack = 0;
1549 tp->eff_sacks = 0;
1550 tp->num_sacks = 0;
1553 #ifdef CONFIG_TCP_RFC2385
1554 static __inline__ void tcp_build_and_update_options(__u32 *ptr, struct tcp_opt *tp, __u32 tstamp, int md5, __u8 **md5_hash)
1555 #else
1556 static __inline__ void tcp_build_and_update_options(__u32 *ptr, struct tcp_opt *tp, __u32 tstamp)
1557 #endif
1559 if (tp->tstamp_ok) {
1560 *ptr++ = __constant_htonl((TCPOPT_NOP << 24) |
1561 (TCPOPT_NOP << 16) |
1562 (TCPOPT_TIMESTAMP << 8) |
1563 TCPOLEN_TIMESTAMP);
1564 *ptr++ = htonl(tstamp);
1565 *ptr++ = htonl(tp->ts_recent);
1567 if (tp->eff_sacks) {
1568 struct tcp_sack_block *sp = tp->dsack ? tp->duplicate_sack : tp->selective_acks;
1569 int this_sack;
1571 *ptr++ = __constant_htonl((TCPOPT_NOP << 24) |
1572 (TCPOPT_NOP << 16) |
1573 (TCPOPT_SACK << 8) |
1574 (TCPOLEN_SACK_BASE +
1575 (tp->eff_sacks * TCPOLEN_SACK_PERBLOCK)));
1576 for(this_sack = 0; this_sack < tp->eff_sacks; this_sack++) {
1577 *ptr++ = htonl(sp[this_sack].start_seq);
1578 *ptr++ = htonl(sp[this_sack].end_seq);
1580 if (tp->dsack) {
1581 tp->dsack = 0;
1582 tp->eff_sacks--;
1586 #ifdef CONFIG_TCP_RFC2385
1587 if (md5) {
1588 *ptr++ = __constant_htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
1589 (TCPOPT_RFC2385 << 8) | 18);
1590 *md5_hash = (__u8 *)ptr;
1592 #endif
1595 /* Construct a tcp options header for a SYN or SYN_ACK packet.
1596 * If this is ever changed, make sure to change the definition of
1597 * MAX_SYN_SIZE to match the new maximum number of options that you
1598 * can generate.
1601 * Note - that with the CONFIG_TCP_RFC2385 option, we make room for the
1602 * 16 byte MD5 hash. This will be filled in later, so the pointer for the
1603 * location to be filled is passed back up
1605 #ifdef CONFIG_TCP_RFC2385
1606 static inline void tcp_syn_build_options(__u32 *ptr, int mss, int ts, int sack,
1607 int offer_wscale, int wscale, __u32 tstamp, __u32 ts_recent,
1608 __u8 md5_enabled, __u8 **md5_hash)
1609 #else
1610 static inline void tcp_syn_build_options(__u32 *ptr, int mss, int ts, int sack,
1611 int offer_wscale, int wscale, __u32 tstamp, __u32 ts_recent)
1612 #endif
1614 /* We always get an MSS option.
1615 * The option bytes which will be seen in normal data
1616 * packets should timestamps be used, must be in the MSS
1617 * advertised. But we subtract them from tp->mss_cache so
1618 * that calculations in tcp_sendmsg are simpler etc.
1619 * So account for this fact here if necessary. If we
1620 * don't do this correctly, as a receiver we won't
1621 * recognize data packets as being full sized when we
1622 * should, and thus we won't abide by the delayed ACK
1623 * rules correctly.
1624 * SACKs don't matter, we never delay an ACK when we
1625 * have any of those going out.
1627 *ptr++ = htonl((TCPOPT_MSS << 24) | (TCPOLEN_MSS << 16) | mss);
1628 if (ts) {
1629 if(sack)
1630 *ptr++ = __constant_htonl((TCPOPT_SACK_PERM << 24) | (TCPOLEN_SACK_PERM << 16) |
1631 (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
1632 else
1633 *ptr++ = __constant_htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
1634 (TCPOPT_TIMESTAMP << 8) | TCPOLEN_TIMESTAMP);
1635 *ptr++ = htonl(tstamp); /* TSVAL */
1636 *ptr++ = htonl(ts_recent); /* TSECR */
1637 } else if(sack)
1638 *ptr++ = __constant_htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
1639 (TCPOPT_SACK_PERM << 8) | TCPOLEN_SACK_PERM);
1640 if (offer_wscale)
1641 *ptr++ = htonl((TCPOPT_NOP << 24) | (TCPOPT_WINDOW << 16) | (TCPOLEN_WINDOW << 8) | (wscale));
1643 #ifdef CONFIG_TCP_RFC2385
1644 /* If MD5 is enabled, then we set the option, and include the size
1645 * (always 18). The actual MD5 hash is added just before the
1646 * packet is sent */
1647 if (md5_enabled) {
1648 *ptr++ = __constant_htonl((TCPOPT_NOP << 24) | (TCPOPT_NOP << 16) |
1649 (TCPOPT_RFC2385 << 8) | 18);
1650 *md5_hash = (__u8 *)ptr;
1652 #endif
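/*
 * Worked example (illustrative): a SYN built with ts, sack and offer_wscale
 * all set carries 4 (MSS) + 12 (SACK-permitted folded into the timestamp
 * word, plus TSval/TSecr) + 4 (NOP + window scale) == 20 bytes of options,
 * i.e. a 40-byte TCP header.
 */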
1655 /* Determine a window scaling and initial window to offer.
1656 * Based on the assumption that the given amount of space
1657 * will be offered. Store the results in the tp structure.
1658 * NOTE: for smooth operation initial space offering should
1659 * be a multiple of mss if possible. We assume here that mss >= 1.
1660 * This MUST be enforced by all callers.
1662 static inline void tcp_select_initial_window(int __space, __u32 mss,
1663 __u32 *rcv_wnd,
1664 __u32 *window_clamp,
1665 int wscale_ok,
1666 __u8 *rcv_wscale)
1668 unsigned int space = (__space < 0 ? 0 : __space);
1670 /* If no clamp set the clamp to the max possible scaled window */
1671 if (*window_clamp == 0)
1672 (*window_clamp) = (65535 << 14);
1673 space = min(*window_clamp, space);
1675 /* Quantize space offering to a multiple of mss if possible. */
1676 if (space > mss)
1677 space = (space / mss) * mss;
1679 /* NOTE: offering an initial window larger than 32767
1680 * will break some buggy TCP stacks. We try to be nice.
1681 * If we are not window scaling, then this truncates
1682 * our initial window offering to 32k. There should also
1683 * be a sysctl option to stop being nice.
1685 (*rcv_wnd) = min(space, MAX_TCP_WINDOW);
1686 (*rcv_wscale) = 0;
1687 if (wscale_ok) {
1688 /* See RFC1323 for an explanation of the limit to 14 */
1689 while (space > 65535 && (*rcv_wscale) < 14) {
1690 space >>= 1;
1691 (*rcv_wscale)++;
1693 if (*rcv_wscale && sysctl_tcp_app_win && space>=mss &&
1694 space - max((space>>sysctl_tcp_app_win), mss>>*rcv_wscale) < 65536/2)
1695 (*rcv_wscale)--;
1697 *rcv_wscale = max((__u8)sysctl_tcp_default_win_scale,
1698 *rcv_wscale);
1701 /* Set initial window to value enough for senders,
1702 * following RFC 2414. Senders, not following this RFC,
1703 * will be satisfied with 2.
1705 if (mss > (1<<*rcv_wscale)) {
1706 int init_cwnd = 4;
1707 if (mss > 1460*3)
1708 init_cwnd = 2;
1709 else if (mss > 1460)
1710 init_cwnd = 3;
1711 if (*rcv_wnd > init_cwnd*mss)
1712 *rcv_wnd = init_cwnd*mss;
1714 /* Set the clamp no higher than max representable value */
1715 (*window_clamp) = min(65535U << (*rcv_wscale), *window_clamp);
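/*
 * Worked example (illustrative): for a 256000-byte receive space the
 * while() loop above stops at rcv_wscale == 2 (256000 >> 2 < 65536), the
 * unscaled offer is capped at MAX_TCP_WINDOW == 32767, and with a
 * 1460-byte MSS the initial-window clamp then limits rcv_wnd to
 * 4 * 1460 == 5840 bytes.
 */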
1718 static inline int tcp_win_from_space(int space)
1720 return sysctl_tcp_adv_win_scale<=0 ?
1721 (space>>(-sysctl_tcp_adv_win_scale)) :
1722 space - (space>>sysctl_tcp_adv_win_scale);
1725 /* Note: caller must be prepared to deal with negative returns */
1726 static inline int tcp_space(struct sock *sk)
1728 return tcp_win_from_space(sk->rcvbuf - atomic_read(&sk->rmem_alloc));
1731 static inline int tcp_full_space( struct sock *sk)
1733 return tcp_win_from_space(sk->rcvbuf);
1736 static inline void tcp_acceptq_removed(struct sock *sk)
1738 sk->ack_backlog--;
1741 static inline void tcp_acceptq_added(struct sock *sk)
1743 sk->ack_backlog++;
1746 static inline int tcp_acceptq_is_full(struct sock *sk)
1748 return sk->ack_backlog >= sk->max_ack_backlog;
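/*
 * Worked example for the check above -- the "whether sock accept queue is
 * full" fix this backport carries: after listen(fd, 1), max_ack_backlog
 * is 1.  With the old test (ack_backlog > max_ack_backlog) a second
 * un-accept()ed child was still admitted, since 1 > 1 is false; with >=
 * the queue is reported full as soon as ack_backlog reaches the
 * configured backlog.
 */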
1751 static inline void tcp_acceptq_queue(struct sock *sk, struct open_request *req,
1752 struct sock *child)
1754 struct tcp_opt *tp = &sk->tp_pinfo.af_tcp;
1756 req->sk = child;
1757 tcp_acceptq_added(sk);
1759 if (!tp->accept_queue_tail) {
1760 tp->accept_queue = req;
1761 } else {
1762 tp->accept_queue_tail->dl_next = req;
1764 tp->accept_queue_tail = req;
1765 req->dl_next = NULL;
1768 struct tcp_listen_opt
1770 u8 max_qlen_log; /* log_2 of maximal queued SYNs */
1771 int qlen;
1772 int qlen_young;
1773 int clock_hand;
1774 u32 hash_rnd;
1775 struct open_request *syn_table[TCP_SYNQ_HSIZE];
1778 static inline void
1779 tcp_synq_removed(struct sock *sk, struct open_request *req)
1781 struct tcp_listen_opt *lopt = sk->tp_pinfo.af_tcp.listen_opt;
1783 if (--lopt->qlen == 0)
1784 tcp_delete_keepalive_timer(sk);
1785 if (req->retrans == 0)
1786 lopt->qlen_young--;
1789 static inline void tcp_synq_added(struct sock *sk)
1791 struct tcp_listen_opt *lopt = sk->tp_pinfo.af_tcp.listen_opt;
1793 if (lopt->qlen++ == 0)
1794 tcp_reset_keepalive_timer(sk, TCP_TIMEOUT_INIT);
1795 lopt->qlen_young++;
1798 static inline int tcp_synq_len(struct sock *sk)
1800 return sk->tp_pinfo.af_tcp.listen_opt->qlen;
1803 static inline int tcp_synq_young(struct sock *sk)
1805 return sk->tp_pinfo.af_tcp.listen_opt->qlen_young;
1808 static inline int tcp_synq_is_full(struct sock *sk)
1810 return tcp_synq_len(sk)>>sk->tp_pinfo.af_tcp.listen_opt->max_qlen_log;
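/*
 * Illustrative: the shift above is a cheap "qlen >= 2^max_qlen_log" test.
 * With max_qlen_log == 8, tcp_synq_is_full() becomes non-zero once 256
 * connection requests are pending in the SYN queue.
 */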
1813 static inline void tcp_synq_unlink(struct tcp_opt *tp, struct open_request *req,
1814 struct open_request **prev)
1816 write_lock(&tp->syn_wait_lock);
1817 *prev = req->dl_next;
1818 write_unlock(&tp->syn_wait_lock);
1821 static inline void tcp_synq_drop(struct sock *sk, struct open_request *req,
1822 struct open_request **prev)
1824 tcp_synq_unlink(&sk->tp_pinfo.af_tcp, req, prev);
1825 tcp_synq_removed(sk, req);
1826 tcp_openreq_free(req);
1829 static __inline__ void tcp_openreq_init(struct open_request *req,
1830 struct tcp_opt *tp,
1831 struct sk_buff *skb)
1833 req->rcv_wnd = 0; /* So that tcp_send_synack() knows! */
1834 req->rcv_isn = TCP_SKB_CB(skb)->seq;
1835 req->mss = tp->mss_clamp;
1836 req->ts_recent = tp->saw_tstamp ? tp->rcv_tsval : 0;
1837 req->tstamp_ok = tp->tstamp_ok;
1838 req->sack_ok = tp->sack_ok;
1839 req->snd_wscale = tp->snd_wscale;
1840 req->wscale_ok = tp->wscale_ok;
1841 req->acked = 0;
1842 req->ecn_ok = 0;
1843 req->rmt_port = skb->h.th->source;
1846 #define TCP_MEM_QUANTUM ((int)PAGE_SIZE)
1848 static inline void tcp_free_skb(struct sock *sk, struct sk_buff *skb)
1850 sk->tp_pinfo.af_tcp.queue_shrunk = 1;
1851 sk->wmem_queued -= skb->truesize;
1852 sk->forward_alloc += skb->truesize;
1853 __kfree_skb(skb);
static inline void tcp_charge_skb(struct sock *sk, struct sk_buff *skb)
{
	sk->wmem_queued += skb->truesize;
	sk->forward_alloc -= skb->truesize;
}

extern void __tcp_mem_reclaim(struct sock *sk);
extern int tcp_mem_schedule(struct sock *sk, int size, int kind);

static inline void tcp_mem_reclaim(struct sock *sk)
{
	if (sk->forward_alloc >= TCP_MEM_QUANTUM)
		__tcp_mem_reclaim(sk);
}

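/* Accounting sketch: tcp_charge_skb() moves skb->truesize bytes from the
 * socket's prepaid pool (forward_alloc) into wmem_queued, and
 * tcp_free_skb() moves them back, so a charge followed by a free leaves
 * the counters unchanged.  forward_alloc is only handed back to the
 * global TCP memory accounting in TCP_MEM_QUANTUM (one page) units,
 * which is why tcp_mem_reclaim() does nothing until at least a full
 * quantum has accumulated.
 */
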
static inline void tcp_enter_memory_pressure(void)
{
	if (!tcp_memory_pressure) {
		NET_INC_STATS(TCPMemoryPressures);
		tcp_memory_pressure = 1;
	}
}

static inline void tcp_moderate_sndbuf(struct sock *sk)
{
	if (!(sk->userlocks & SOCK_SNDBUF_LOCK)) {
		sk->sndbuf = min(sk->sndbuf, sk->wmem_queued / 2);
		sk->sndbuf = max(sk->sndbuf, SOCK_MIN_SNDBUF);
	}
}

static inline struct sk_buff *tcp_alloc_pskb(struct sock *sk, int size, int mem, int gfp)
{
	struct sk_buff *skb = alloc_skb(size + MAX_TCP_HEADER, gfp);

	if (skb) {
		skb->truesize += mem;
		if (sk->forward_alloc >= (int)skb->truesize ||
		    tcp_mem_schedule(sk, skb->truesize, 0)) {
			skb_reserve(skb, MAX_TCP_HEADER);
			return skb;
		}
		__kfree_skb(skb);
	} else {
		tcp_enter_memory_pressure();
		tcp_moderate_sndbuf(sk);
	}
	return NULL;
}

static inline struct sk_buff *tcp_alloc_skb(struct sock *sk, int size, int gfp)
{
	return tcp_alloc_pskb(sk, size, 0, gfp);
}

static inline struct page *tcp_alloc_page(struct sock *sk)
{
	if (sk->forward_alloc >= (int)PAGE_SIZE ||
	    tcp_mem_schedule(sk, PAGE_SIZE, 0)) {
		struct page *page = alloc_pages(sk->allocation, 0);
		if (page)
			return page;
	}
	tcp_enter_memory_pressure();
	tcp_moderate_sndbuf(sk);
	return NULL;
}

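/* A minimal, hypothetical user of the allocation helpers above (not part
 * of this header), showing the intended pattern on the transmit path:
 * allocate with the socket's GFP flags, and only charge the skb against
 * forward_alloc/wmem_queued once it is actually going to be queued.  On
 * allocation failure the helper itself flips the global memory-pressure
 * flag and shrinks this socket's send buffer.
 */
static inline struct sk_buff *tcp_example_alloc_for_send(struct sock *sk, int size)
{
	struct sk_buff *skb = tcp_alloc_skb(sk, size, sk->allocation);

	if (skb != NULL)
		tcp_charge_skb(sk, skb);	/* account it against forward_alloc */
	return skb;
}
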
static inline void tcp_writequeue_purge(struct sock *sk)
{
	struct sk_buff *skb;

	while ((skb = __skb_dequeue(&sk->write_queue)) != NULL)
		tcp_free_skb(sk, skb);
	tcp_mem_reclaim(sk);
}

extern void tcp_rfree(struct sk_buff *skb);

static inline void tcp_set_owner_r(struct sk_buff *skb, struct sock *sk)
{
	skb->sk = sk;
	skb->destructor = tcp_rfree;
	atomic_add(skb->truesize, &sk->rmem_alloc);
	sk->forward_alloc -= skb->truesize;
}

extern void tcp_listen_wlock(void);

/* - We may sleep inside this lock.
 * - If sleeping is not required (or called from BH),
 *   use plain read_(un)lock(&tcp_lhash_lock).
 */
static inline void tcp_listen_lock(void)
{
	/* read_lock synchronizes us, as a candidate reader, with writers */
	read_lock(&tcp_lhash_lock);
	atomic_inc(&tcp_lhash_users);
	read_unlock(&tcp_lhash_lock);
}

static inline void tcp_listen_unlock(void)
{
	if (atomic_dec_and_test(&tcp_lhash_users))
		wake_up(&tcp_lhash_wait);
}

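/* Usage sketch (illustrative only): readers that may sleep while walking
 * the listening hash bracket the walk with the two helpers above, while
 * writers call tcp_listen_wlock(), which waits until tcp_lhash_users
 * drops back to zero (hence the wake_up() in tcp_listen_unlock()):
 *
 *	tcp_listen_lock();
 *	... walk the listening hash, possibly sleeping ...
 *	tcp_listen_unlock();
 */
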
static inline int keepalive_intvl_when(struct tcp_opt *tp)
{
	return tp->keepalive_intvl ? : sysctl_tcp_keepalive_intvl;
}

static inline int keepalive_time_when(struct tcp_opt *tp)
{
	return tp->keepalive_time ? : sysctl_tcp_keepalive_time;
}

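/* The "? :" above is the GNU C conditional with an omitted middle operand:
 * "a ? : b" evaluates a once and yields it when non-zero, otherwise b.
 * So a per-socket value (set via the TCP_KEEPINTVL / TCP_KEEPIDLE socket
 * options) wins, and the sysctl only provides the default.
 */
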
static inline int tcp_fin_time(struct tcp_opt *tp)
{
	int fin_timeout = tp->linger2 ? : sysctl_tcp_fin_timeout;

	if (fin_timeout < (tp->rto << 2) - (tp->rto >> 1))
		fin_timeout = (tp->rto << 2) - (tp->rto >> 1);

	return fin_timeout;
}

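/* The lower bound above works out to (rto<<2) - (rto>>1) = 4*rto - rto/2
 * = 3.5*rto, so even a very small tcp_fin_timeout / linger2 setting still
 * keeps the socket in FIN_WAIT_2 for at least a few retransmission
 * timeouts' worth of time.
 */
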
static inline int tcp_paws_check(struct tcp_opt *tp, int rst)
{
	if ((s32)(tp->rcv_tsval - tp->ts_recent) >= 0)
		return 0;
	if (xtime.tv_sec >= tp->ts_recent_stamp + TCP_PAWS_24DAYS)
		return 0;

	/* RST segments are not recommended to carry timestamps,
	   and, if they do, it is recommended to ignore PAWS because
	   "their cleanup function should take precedence over timestamps."
	   That is certainly a mistake; to relax this constraint we must
	   understand why it exists: if the peer reboots, its clock may go
	   out of sync and half-open connections would never be reset.
	   The problem would not exist at all if every implementation
	   followed the draft on preserving clock state across reboots.
	   Linux-2.2 DOES NOT!

	   However, we can relax the time bound for RST segments to MSL.
	 */
	if (rst && xtime.tv_sec >= tp->ts_recent_stamp + TCP_PAWS_MSL)
		return 0;
	return 1;
}

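/* Return-value convention: non-zero means "this segment fails PAWS and
 * should be discarded".  A (hypothetical) receive-path fragment would use
 * it roughly like this, passing th->rst so that RSTs get the relaxed MSL
 * bound described above:
 *
 *	if (tp->saw_tstamp && tcp_paws_check(tp, th->rst)) {
 *		... count the PAWS rejection and discard the segment ...
 *	}
 */
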
#define TCP_CHECK_TIMER(sk) do { } while (0)

static inline int tcp_use_frto(const struct sock *sk)
{
	const struct tcp_opt *tp = &sk->tp_pinfo.af_tcp;

	/* F-RTO must be activated in sysctl and there must be some
	 * unsent new data, and the advertised window should allow
	 * sending it.
	 */
	return (sysctl_tcp_frto && tp->send_head &&
		!after(TCP_SKB_CB(tp->send_head)->end_seq,
		       tp->snd_una + tp->snd_wnd));
}

static inline void tcp_mib_init(void)
{
	/* See RFC 2012 */
	TCP_ADD_STATS_USER(TcpRtoAlgorithm, 1);
	TCP_ADD_STATS_USER(TcpRtoMin, TCP_RTO_MIN*1000/HZ);
	TCP_ADD_STATS_USER(TcpRtoMax, TCP_RTO_MAX*1000/HZ);
	TCP_ADD_STATS_USER(TcpMaxConn, -1);
}

/* TCP Westwood functions and constants */

#define TCP_WESTWOOD_INIT_RTT	(20*HZ)	/* maybe too conservative?! */
#define TCP_WESTWOOD_RTT_MIN	(HZ/20)	/* 50ms */

static inline void tcp_westwood_update_rtt(struct tcp_opt *tp, __u32 rtt_seq)
{
	if (tcp_is_westwood(tp))
		tp->westwood.rtt = rtt_seq;
}

void __tcp_westwood_fast_bw(struct sock *, struct sk_buff *);
void __tcp_westwood_slow_bw(struct sock *, struct sk_buff *);

/*
 * This function initializes fields used in TCP Westwood+.  We can't get
 * any information about RTTmin at this time, so we simply set it to
 * TCP_WESTWOOD_INIT_RTT.  This value is deliberately over-conservative,
 * so that it is sure to be replaced by a real measurement as soon as
 * possible; that should reasonably happen within the first RTT of the
 * connection's lifetime.
 */
static inline void __tcp_init_westwood(struct sock *sk)
{
	struct tcp_opt *tp = &(sk->tp_pinfo.af_tcp);

	tp->westwood.bw_ns_est = 0;
	tp->westwood.bw_est = 0;
	tp->westwood.accounted = 0;
	tp->westwood.cumul_ack = 0;
	tp->westwood.rtt_win_sx = tcp_time_stamp;
	tp->westwood.rtt = TCP_WESTWOOD_INIT_RTT;
	tp->westwood.rtt_min = TCP_WESTWOOD_INIT_RTT;
	tp->westwood.snd_una = tp->snd_una;
}

static inline void tcp_init_westwood(struct sock *sk)
{
	__tcp_init_westwood(sk);
}

static inline void tcp_westwood_fast_bw(struct sock *sk, struct sk_buff *skb)
{
	if (tcp_is_westwood(&(sk->tp_pinfo.af_tcp)))
		__tcp_westwood_fast_bw(sk, skb);
}

static inline void tcp_westwood_slow_bw(struct sock *sk, struct sk_buff *skb)
{
	if (tcp_is_westwood(&(sk->tp_pinfo.af_tcp)))
		__tcp_westwood_slow_bw(sk, skb);
}

static inline __u32 __tcp_westwood_bw_rttmin(struct tcp_opt *tp)
{
	return (__u32) ((tp->westwood.bw_est) * (tp->westwood.rtt_min) /
			(__u32) (tp->mss_cache));
}

static inline __u32 tcp_westwood_bw_rttmin(struct tcp_opt *tp)
{
	__u32 ret = 0;

	if (tcp_is_westwood(tp))
		ret = (__u32) (max(__tcp_westwood_bw_rttmin(tp), 2U));

	return ret;
}

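/* What the two helpers above compute: bw_est * rtt_min is the estimated
 * bandwidth-delay product in bytes, and dividing by mss_cache converts it
 * into a segment count suitable for snd_cwnd / snd_ssthresh.  The max()
 * with 2 simply keeps the window from collapsing below two segments while
 * the bandwidth estimate is still tiny.
 */
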
static inline int tcp_westwood_ssthresh(struct tcp_opt *tp)
{
	int ret = 0;
	__u32 ssthresh;

	if (tcp_is_westwood(tp)) {
		if (!(ssthresh = tcp_westwood_bw_rttmin(tp)))
			return ret;

		tp->snd_ssthresh = ssthresh;
		ret = 1;
	}

	return ret;
}

static inline int tcp_westwood_cwnd(struct tcp_opt *tp)
{
	int ret = 0;
	__u32 cwnd;

	if (tcp_is_westwood(tp)) {
		if (!(cwnd = tcp_westwood_bw_rttmin(tp)))
			return ret;

		tp->snd_cwnd = cwnd;
		ret = 1;
	}

	return ret;
}

static inline int tcp_westwood_complete_cwr(struct tcp_opt *tp)
{
	int ret = 0;

	if (tcp_is_westwood(tp)) {
		if (tcp_westwood_cwnd(tp)) {
			tp->snd_ssthresh = tp->snd_cwnd;
			ret = 1;
		}
	}

	return ret;
}

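/* Calling convention for the three tcp_westwood_* window helpers above:
 * each returns 1 only if Westwood+ is active and a non-zero
 * bandwidth-based value was actually installed, so a caller can fall back
 * to its standard (Reno-style) ssthresh/cwnd computation whenever the
 * return value is 0.  A sketch of the intended call-site shape, where
 * standard_reno_ssthresh() is just a stand-in name for whatever default
 * the caller would otherwise use:
 *
 *	if (!tcp_westwood_ssthresh(tp))
 *		tp->snd_ssthresh = standard_reno_ssthresh(tp);
 */
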
#endif	/* _TCP_H */