sys/netinet/tcp_output.c
1 /*
2 * Copyright (c) 2004 Jeffrey M. Hsu. All rights reserved.
3 * Copyright (c) 2004 The DragonFly Project. All rights reserved.
5 * This code is derived from software contributed to The DragonFly Project
6 * by Jeffrey M. Hsu.
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
10 * are met:
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 * 3. Neither the name of The DragonFly Project nor the names of its
17 * contributors may be used to endorse or promote products derived
18 * from this software without specific, prior written permission.
20 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
23 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
24 * COPYRIGHT HOLDERS OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
25 * INCIDENTAL, SPECIAL, EXEMPLARY OR CONSEQUENTIAL DAMAGES (INCLUDING,
26 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
27 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
28 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
29 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
30 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31 * SUCH DAMAGE.
35 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1995
36 * The Regents of the University of California. All rights reserved.
38 * Redistribution and use in source and binary forms, with or without
39 * modification, are permitted provided that the following conditions
40 * are met:
41 * 1. Redistributions of source code must retain the above copyright
42 * notice, this list of conditions and the following disclaimer.
43 * 2. Redistributions in binary form must reproduce the above copyright
44 * notice, this list of conditions and the following disclaimer in the
45 * documentation and/or other materials provided with the distribution.
46 * 3. All advertising materials mentioning features or use of this software
47 * must display the following acknowledgement:
48 * This product includes software developed by the University of
49 * California, Berkeley and its contributors.
50 * 4. Neither the name of the University nor the names of its contributors
51 * may be used to endorse or promote products derived from this software
52 * without specific prior written permission.
54 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
55 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
56 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
57 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
58 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
59 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
60 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
61 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
62 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
63 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
64 * SUCH DAMAGE.
66 * @(#)tcp_output.c 8.4 (Berkeley) 5/24/95
67 * $FreeBSD: src/sys/netinet/tcp_output.c,v 1.39.2.20 2003/01/29 22:45:36 hsu Exp $
68 * $DragonFly: src/sys/netinet/tcp_output.c,v 1.34 2007/04/22 01:13:14 dillon Exp $
71 #include "opt_inet6.h"
72 #include "opt_ipsec.h"
73 #include "opt_tcpdebug.h"
75 #include <sys/param.h>
76 #include <sys/systm.h>
77 #include <sys/kernel.h>
78 #include <sys/sysctl.h>
79 #include <sys/mbuf.h>
80 #include <sys/domain.h>
81 #include <sys/protosw.h>
82 #include <sys/socket.h>
83 #include <sys/socketvar.h>
84 #include <sys/in_cksum.h>
85 #include <sys/thread.h>
86 #include <sys/globaldata.h>
88 #include <net/route.h>
90 #include <netinet/in.h>
91 #include <netinet/in_systm.h>
92 #include <netinet/ip.h>
93 #include <netinet/in_pcb.h>
94 #include <netinet/ip_var.h>
95 #include <netinet6/in6_pcb.h>
96 #include <netinet/ip6.h>
97 #include <netinet6/ip6_var.h>
98 #include <netinet/tcp.h>
99 #define TCPOUTFLAGS
100 #include <netinet/tcp_fsm.h>
101 #include <netinet/tcp_seq.h>
102 #include <netinet/tcp_timer.h>
103 #include <netinet/tcp_timer2.h>
104 #include <netinet/tcp_var.h>
105 #include <netinet/tcpip.h>
106 #ifdef TCPDEBUG
107 #include <netinet/tcp_debug.h>
108 #endif
110 #ifdef IPSEC
111 #include <netinet6/ipsec.h>
112 #endif /*IPSEC*/
114 #ifdef FAST_IPSEC
115 #include <netproto/ipsec/ipsec.h>
116 #define IPSEC
117 #endif /*FAST_IPSEC*/
119 #ifdef notyet
120 extern struct mbuf *m_copypack();
121 #endif
123 int path_mtu_discovery = 0;
124 SYSCTL_INT(_net_inet_tcp, OID_AUTO, path_mtu_discovery, CTLFLAG_RW,
125 &path_mtu_discovery, 1, "Enable Path MTU Discovery");
127 static int avoid_pure_win_update = 1;
128 SYSCTL_INT(_net_inet_tcp, OID_AUTO, avoid_pure_win_update, CTLFLAG_RW,
129 &avoid_pure_win_update, 1, "Avoid pure window updates when possible");
131 int tcp_do_autosndbuf = 1;
132 SYSCTL_INT(_net_inet_tcp, OID_AUTO, sendbuf_auto, CTLFLAG_RW,
133 &tcp_do_autosndbuf, 0, "Enable automatic send buffer sizing");
135 int tcp_autosndbuf_inc = 8*1024;
136 SYSCTL_INT(_net_inet_tcp, OID_AUTO, sendbuf_inc, CTLFLAG_RW,
137 &tcp_autosndbuf_inc, 0, "Incrementor step size of automatic send buffer");
139 int tcp_autosndbuf_max = 2*1024*1024;
140 SYSCTL_INT(_net_inet_tcp, OID_AUTO, sendbuf_max, CTLFLAG_RW,
141 &tcp_autosndbuf_max, 0, "Max size of automatic send buffer");
143 /*
144 * Tcp output routine: figure out what should be sent and send it.
145 */
146 int
147 tcp_output(struct tcpcb *tp)
148 {
149 struct inpcb * const inp = tp->t_inpcb;
150 struct socket *so = inp->inp_socket;
151 long len, recvwin, sendwin;
152 int nsacked = 0;
153 int off, flags, error;
154 struct mbuf *m;
155 struct ip *ip = NULL;
156 struct ipovly *ipov = NULL;
157 struct tcphdr *th;
158 u_char opt[TCP_MAXOLEN];
159 unsigned int ipoptlen, optlen, hdrlen;
160 int idle;
161 boolean_t sendalot;
162 struct ip6_hdr *ip6 = NULL;
163 #ifdef INET6
164 const boolean_t isipv6 = (inp->inp_vflag & INP_IPV6) != 0;
165 #else
166 const boolean_t isipv6 = FALSE;
167 #endif
170 * Determine length of data that should be transmitted,
171 * and flags that will be used.
172 * If there is some data or critical controls (SYN, RST)
173 * to send, then transmit; otherwise, investigate further.
177 * If we have been idle for a while, the send congestion window
178 * could no longer be representative of the current state of the link.
179 * So unless we are expecting more acks to come in, slow-start from
180 * scratch to re-determine the send congestion window.
182 if (tp->snd_max == tp->snd_una &&
183 (ticks - tp->t_rcvtime) >= tp->t_rxtcur) {
184 if (tcp_do_rfc3390) {
185 int initial_cwnd =
186 min(4 * tp->t_maxseg, max(2 * tp->t_maxseg, 4380));
188 tp->snd_cwnd = min(tp->snd_cwnd, initial_cwnd);
189 } else {
190 tp->snd_cwnd = tp->t_maxseg;
192 tp->snd_wacked = 0;
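/*
 * Illustrative numbers (not taken from this file): with a typical
 * t_maxseg of 1460, the RFC 3390 clamp above evaluates to
 * min(4*1460, max(2*1460, 4380)) = 4380 bytes, i.e. exactly three
 * 1460-byte segments, whereas the non-3390 path restarts the
 * congestion window from a single segment.
 */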
196 * Calculate whether the transmit stream was previously idle
197 * and adjust TF_LASTIDLE for the next time.
199 idle = (tp->t_flags & TF_LASTIDLE) || (tp->snd_max == tp->snd_una);
200 if (idle && (tp->t_flags & TF_MORETOCOME))
201 tp->t_flags |= TF_LASTIDLE;
202 else
203 tp->t_flags &= ~TF_LASTIDLE;
205 if (TCP_DO_SACK(tp) && tp->snd_nxt != tp->snd_max &&
206 !IN_FASTRECOVERY(tp))
207 nsacked = tcp_sack_bytes_below(&tp->scb, tp->snd_nxt);
209 again:
210 /* Make use of SACK information when slow-starting after a RTO. */
211 if (TCP_DO_SACK(tp) && tp->snd_nxt != tp->snd_max &&
212 !IN_FASTRECOVERY(tp)) {
213 tcp_seq old_snd_nxt = tp->snd_nxt;
215 tcp_sack_skip_sacked(&tp->scb, &tp->snd_nxt);
216 nsacked += tp->snd_nxt - old_snd_nxt;
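/*
 * nsacked now counts bytes below snd_nxt that the peer has already
 * reported via SACK; they are skipped rather than retransmitted and
 * are credited toward the congestion window when sendwin is computed
 * below.
 */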
219 sendalot = FALSE;
220 off = tp->snd_nxt - tp->snd_una;
221 sendwin = min(tp->snd_wnd, tp->snd_cwnd + nsacked);
222 sendwin = min(sendwin, tp->snd_bwnd);
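/*
 * The usable send window is therefore the tightest of three limits:
 * the peer's advertised window (snd_wnd), the congestion window plus
 * the SACKed-byte credit (snd_cwnd + nsacked), and the bandwidth-delay
 * limited window (snd_bwnd).
 */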
224 flags = tcp_outflags[tp->t_state];
226 * Get standard flags, and add SYN or FIN if requested by 'hidden'
227 * state flags.
229 if (tp->t_flags & TF_NEEDFIN)
230 flags |= TH_FIN;
231 if (tp->t_flags & TF_NEEDSYN)
232 flags |= TH_SYN;
235 * If in persist timeout with window of 0, send 1 byte.
236 * Otherwise, if window is small but nonzero
237 * and timer expired, we will send what we can
238 * and go to transmit state.
240 if (tp->t_flags & TF_FORCE) {
241 if (sendwin == 0) {
243 * If we still have some data to send, then
244 * clear the FIN bit. Usually this would
245 * happen below when it realizes that we
246 * aren't sending all the data. However,
247 * if we have exactly 1 byte of unsent data,
248 * then it won't clear the FIN bit below,
249 * and if we are in persist state, we wind
250 * up sending the packet without recording
251 * that we sent the FIN bit.
253 * We can't just blindly clear the FIN bit,
254 * because if we don't have any more data
255 * to send then the probe will be the FIN
256 * itself.
258 if (off < so->so_snd.ssb_cc)
259 flags &= ~TH_FIN;
260 sendwin = 1;
261 } else {
262 tcp_callout_stop(tp, tp->tt_persist);
263 tp->t_rxtshift = 0;
268 * If snd_nxt == snd_max and we have transmitted a FIN, the
269 * offset will be > 0 even if so_snd.ssb_cc is 0, resulting in
270 * a negative length. This can also occur when TCP opens up
271 * its congestion window while receiving additional duplicate
272 * acks after fast-retransmit because TCP will reset snd_nxt
273 * to snd_max after the fast-retransmit.
275 * In the normal retransmit-FIN-only case, however, snd_nxt will
276 * be set to snd_una, the offset will be 0, and the length may
277 * wind up 0.
279 len = (long)ulmin(so->so_snd.ssb_cc, sendwin) - off;
282 * Lop off SYN bit if it has already been sent. However, if this
283 * is SYN-SENT state and if segment contains data, suppress sending
284 * segment (sending the segment would be an option if we still
285 * did TAO and the remote host supported it).
287 if ((flags & TH_SYN) && SEQ_GT(tp->snd_nxt, tp->snd_una)) {
288 flags &= ~TH_SYN;
289 off--, len++;
290 if (len > 0 && tp->t_state == TCPS_SYN_SENT)
291 return 0;
295 * Be careful not to send data and/or FIN on SYN segments.
296 * This measure is needed to prevent interoperability problems
297 * with not fully conformant TCP implementations.
299 if (flags & TH_SYN) {
300 len = 0;
301 flags &= ~TH_FIN;
304 if (len < 0) {
306 * If FIN has been sent but not acked,
307 * but we haven't been called to retransmit,
308 * len will be < 0. Otherwise, window shrank
309 * after we sent into it. If window shrank to 0,
310 * cancel pending retransmit, pull snd_nxt back
311 * to (closed) window, and set the persist timer
312 * if it isn't already going. If the window didn't
313 * close completely, just wait for an ACK.
315 len = 0;
316 if (sendwin == 0) {
317 tcp_callout_stop(tp, tp->tt_rexmt);
318 tp->t_rxtshift = 0;
319 tp->snd_nxt = tp->snd_una;
320 if (!tcp_callout_active(tp, tp->tt_persist))
321 tcp_setpersist(tp);
325 KASSERT(len >= 0, ("%s: len < 0", __func__));
327 * Automatic sizing of send socket buffer. Often the send buffer
328 * size is not optimally adjusted to the actual network conditions
329 * at hand (delay bandwidth product). Setting the buffer size too
330 * small limits throughput on links with high bandwidth and high
331 * delay (e.g. transcontinental or transoceanic links). Setting the
332 * buffer size too big consumes too much real kernel memory,
333 * especially with many connections on busy servers.
335 * The criteria to step up the send buffer one notch are:
336 * 1. receive window of remote host is larger than send buffer
337 * (with a fudge factor of 5/4th);
338 * 2. send buffer is filled to 7/8th with data (so we actually
339 * have data to make use of it);
340 * 3. send buffer fill has not hit maximal automatic size;
341 * 4. our send window (slow start and congestion controlled) is
342 * larger than sent but unacknowledged data in send buffer.
344 * The remote host receive window scaling factor may limit the
345 * growth of the send buffer before it reaches its allowed
346 * maximum.
348 * It scales directly with slow start or congestion window
349 * and does at most one step per received ACK. This fast
350 * scaling has the drawback of growing the send buffer beyond
351 * what is strictly necessary to make full use of a given
352 * delay*bandwidth product. However, testing has shown this not
353 * to be much of a problem. At worst we are trading the waste
354 * of available bandwidth (the non-use of it) for the waste of
355 * some socket buffer memory.
357 * TODO: Shrink send buffer during idle periods together
358 * with congestion window. Requires another timer. Has to
359 * wait for upcoming tcp timer rewrite.
361 if (tcp_do_autosndbuf && so->so_snd.ssb_flags & SSB_AUTOSIZE) {
362 if ((tp->snd_wnd / 4 * 5) >= so->so_snd.ssb_hiwat &&
363 so->so_snd.ssb_cc >= (so->so_snd.ssb_hiwat / 8 * 7) &&
364 so->so_snd.ssb_cc < tcp_autosndbuf_max &&
365 sendwin >= (so->so_snd.ssb_cc - (tp->snd_nxt - tp->snd_una))) {
366 u_long newsize;
368 newsize = ulmin(so->so_snd.ssb_hiwat +
369 tcp_autosndbuf_inc,
370 tcp_autosndbuf_max);
371 if (!ssb_reserve(&so->so_snd, newsize, so, NULL))
372 so->so_snd.ssb_flags &= ~SSB_AUTOSIZE;
373 if (newsize >= (TCP_MAXWIN << tp->snd_scale))
374 so->so_snd.ssb_flags &= ~SSB_AUTOSIZE;
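/*
 * Example of one sizing step (illustrative values): with ssb_hiwat
 * at 32768 and the default tcp_autosndbuf_inc of 8192, the buffer
 * limit grows to 40960.  SSB_AUTOSIZE is cleared, stopping further
 * growth, once the new size would reach TCP_MAXWIN << snd_scale or
 * the reservation fails; tcp_autosndbuf_max (2MB by default above)
 * bounds each increment as well.
 */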
379 * Truncate to the maximum segment length and ensure that FIN is
380 * removed if the segment no longer covers the last data byte.
382 if (len > tp->t_maxseg) {
383 len = tp->t_maxseg;
384 sendalot = TRUE;
386 if (SEQ_LT(tp->snd_nxt + len, tp->snd_una + so->so_snd.ssb_cc))
387 flags &= ~TH_FIN;
389 recvwin = ssb_space(&so->so_rcv);
392 * Sender silly window avoidance. We transmit under the following
393 * conditions when len is non-zero:
395 * - We have a full segment
396 * - This is the last buffer in a write()/send() and we are
397 * either idle or running NODELAY
398 * - we've timed out (e.g. persist timer)
399 * - we have more than 1/2 the maximum send window's worth of
400 * data (receiver may be limiting the window size)
401 * - we need to retransmit
403 if (len) {
404 if (len == tp->t_maxseg)
405 goto send;
407 * NOTE! on localhost connections an 'ack' from the remote
408 * end may occur synchronously with the output and cause
409 * us to flush a buffer queued with moretocome. XXX
411 * note: the len + off check is almost certainly unnecessary.
413 if (!(tp->t_flags & TF_MORETOCOME) && /* normal case */
414 (idle || (tp->t_flags & TF_NODELAY)) &&
415 len + off >= so->so_snd.ssb_cc &&
416 !(tp->t_flags & TF_NOPUSH)) {
417 goto send;
419 if (tp->t_flags & TF_FORCE) /* typ. timeout case */
420 goto send;
421 if (len >= tp->max_sndwnd / 2 && tp->max_sndwnd > 0)
422 goto send;
423 if (SEQ_LT(tp->snd_nxt, tp->snd_max)) /* retransmit case */
424 goto send;
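/*
 * The Nagle case falls out of the tests above: a small mid-stream
 * write with unacked data outstanding, TF_NODELAY clear and no
 * persist/retransmit pending matches none of the conditions, so the
 * sub-MSS segment is held until the outstanding data is acked.
 */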
428 * Compare available window to amount of window
429 * known to peer (as advertised window less
430 * next expected input). If the difference is at least two
431 * max size segments, or at least 50% of the maximum possible
432 * window, then want to send a window update to peer.
434 if (recvwin > 0) {
436 * "adv" is the amount we can increase the window,
437 * taking into account that we are limited by
438 * TCP_MAXWIN << tp->rcv_scale.
440 long adv = min(recvwin, (long)TCP_MAXWIN << tp->rcv_scale) -
441 (tp->rcv_adv - tp->rcv_nxt);
442 long hiwat;
445 * This ack case typically occurs when the user has drained
446 * the TCP socket buffer sufficiently to warrant an ack
447 * containing a 'pure window update'... that is, an ack that
448 * ONLY updates the tcp window.
450 * It is unclear why we would need to do a pure window update
451 * past 2 segments if we are going to do one at 1/2 the high
452 * water mark anyway, especially since under normal conditions
453 * the user program will drain the socket buffer quickly.
454 * The 2-segment pure window update will often add a large
455 * number of extra, unnecessary acks to the stream.
457 * avoid_pure_win_update now defaults to 1.
459 if (avoid_pure_win_update == 0 ||
460 (tp->t_flags & TF_RXRESIZED)) {
461 if (adv >= (long) (2 * tp->t_maxseg)) {
462 goto send;
465 hiwat = (long)(TCP_MAXWIN << tp->rcv_scale);
466 if (hiwat > (long)so->so_rcv.ssb_hiwat)
467 hiwat = (long)so->so_rcv.ssb_hiwat;
468 if (adv >= hiwat / 2)
469 goto send;
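/*
 * Worked example (illustrative values): with rcv_scale 0 and a
 * 57344-byte receive buffer, hiwat/2 is 28672, so a pure window
 * update goes out once the application has drained enough to widen
 * the advertisable window by 28KB; the 2*t_maxseg test above only
 * applies when TF_RXRESIZED is set or avoid_pure_win_update is
 * disabled.
 */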
473 * Send if we owe the peer an ACK, RST, SYN, or urgent data. ACKNOW
474 * is also a catch-all for the retransmit timer timeout case.
476 if (tp->t_flags & TF_ACKNOW)
477 goto send;
478 if ((flags & TH_RST) ||
479 ((flags & TH_SYN) && !(tp->t_flags & TF_NEEDSYN)))
480 goto send;
481 if (SEQ_GT(tp->snd_up, tp->snd_una))
482 goto send;
484 * If our state indicates that FIN should be sent
485 * and we have not yet done so, then we need to send.
487 if (flags & TH_FIN &&
488 (!(tp->t_flags & TF_SENTFIN) || tp->snd_nxt == tp->snd_una))
489 goto send;
492 * TCP window updates are not reliable; rather, a polling protocol
493 * using ``persist'' packets is used to ensure receipt of window
494 * updates. The three ``states'' for the output side are:
495 * idle not doing retransmits or persists
496 * persisting to move a small or zero window
497 * (re)transmitting and thereby not persisting
499 * tcp_callout_active(tp, tp->tt_persist)
500 * is true when we are in persist state.
501 * The TF_FORCE flag in tp->t_flags
502 * is set when we are called to send a persist packet.
503 * tcp_callout_active(tp, tp->tt_rexmt)
504 * is set when we are retransmitting
505 * The output side is idle when both timers are zero.
507 * If send window is too small, there is data to transmit, and no
508 * retransmit or persist is pending, then go to persist state.
509 * If nothing happens soon, send when timer expires:
510 * if window is nonzero, transmit what we can,
511 * otherwise force out a byte.
513 if (so->so_snd.ssb_cc > 0 &&
514 !tcp_callout_active(tp, tp->tt_rexmt) &&
515 !tcp_callout_active(tp, tp->tt_persist)) {
516 tp->t_rxtshift = 0;
517 tcp_setpersist(tp);
521 * No reason to send a segment, just return.
523 return (0);
525 send:
527 * Before ESTABLISHED, force sending of initial options
528 * unless TCP set not to do any options.
529 * NOTE: we assume that the IP/TCP header plus TCP options
530 * always fit in a single mbuf, leaving room for a maximum
531 * link header, i.e.
532 * max_linkhdr + sizeof(struct tcpiphdr) + optlen <= MCLBYTES
534 optlen = 0;
535 if (isipv6)
536 hdrlen = sizeof(struct ip6_hdr) + sizeof(struct tcphdr);
537 else
538 hdrlen = sizeof(struct tcpiphdr);
539 if (flags & TH_SYN) {
540 tp->snd_nxt = tp->iss;
541 if (!(tp->t_flags & TF_NOOPT)) {
542 u_short mss;
544 opt[0] = TCPOPT_MAXSEG;
545 opt[1] = TCPOLEN_MAXSEG;
546 mss = htons((u_short) tcp_mssopt(tp));
547 memcpy(opt + 2, &mss, sizeof mss);
548 optlen = TCPOLEN_MAXSEG;
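/*
 * On the wire the MSS option built above is 4 bytes: kind 2,
 * length 4, then the 16-bit MSS in network byte order
 * (TCPOLEN_MAXSEG == 4).
 */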
550 if ((tp->t_flags & TF_REQ_SCALE) &&
551 (!(flags & TH_ACK) ||
552 (tp->t_flags & TF_RCVD_SCALE))) {
553 *((u_int32_t *)(opt + optlen)) = htonl(
554 TCPOPT_NOP << 24 |
555 TCPOPT_WINDOW << 16 |
556 TCPOLEN_WINDOW << 8 |
557 tp->request_r_scale);
558 optlen += 4;
561 if ((tcp_do_sack && !(flags & TH_ACK)) ||
562 tp->t_flags & TF_SACK_PERMITTED) {
563 uint32_t *lp = (uint32_t *)(opt + optlen);
565 *lp = htonl(TCPOPT_SACK_PERMITTED_ALIGNED);
566 optlen += TCPOLEN_SACK_PERMITTED_ALIGNED;
572 * Send a timestamp and echo-reply if this is a SYN and our side
573 * wants to use timestamps (TF_REQ_TSTMP is set) or both our side
574 * and our peer have sent timestamps in our SYN's.
576 if ((tp->t_flags & (TF_REQ_TSTMP | TF_NOOPT)) == TF_REQ_TSTMP &&
577 !(flags & TH_RST) &&
578 (!(flags & TH_ACK) || (tp->t_flags & TF_RCVD_TSTMP))) {
579 u_int32_t *lp = (u_int32_t *)(opt + optlen);
581 /* Form timestamp option as shown in appendix A of RFC 1323. */
582 *lp++ = htonl(TCPOPT_TSTAMP_HDR);
583 *lp++ = htonl(ticks);
584 *lp = htonl(tp->ts_recent);
585 optlen += TCPOLEN_TSTAMP_APPA;
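/*
 * TCPOPT_TSTAMP_HDR written above packs NOP, NOP, kind 8, length 10
 * ahead of the two 32-bit timestamp values, giving the 12-byte
 * (TCPOLEN_TSTAMP_APPA) layout from appendix A of RFC 1323.
 */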
588 /* Set receive buffer autosizing timestamp. */
589 if (tp->rfbuf_ts == 0 && (so->so_rcv.ssb_flags & SSB_AUTOSIZE))
590 tp->rfbuf_ts = ticks;
593 * If this is a SACK connection and we have a block to report,
594 * fill in the SACK blocks in the TCP options.
596 if ((tp->t_flags & (TF_SACK_PERMITTED | TF_NOOPT)) ==
597 TF_SACK_PERMITTED &&
598 (!LIST_EMPTY(&tp->t_segq) ||
599 tp->reportblk.rblk_start != tp->reportblk.rblk_end))
600 tcp_sack_fill_report(tp, opt, &optlen);
602 KASSERT(optlen <= TCP_MAXOLEN, ("too many TCP options"));
603 hdrlen += optlen;
605 if (isipv6) {
606 ipoptlen = ip6_optlen(inp);
607 } else {
608 if (inp->inp_options) {
609 ipoptlen = inp->inp_options->m_len -
610 offsetof(struct ipoption, ipopt_list);
611 } else {
612 ipoptlen = 0;
615 #ifdef IPSEC
616 ipoptlen += ipsec_hdrsiz_tcp(tp);
617 #endif
620 * Adjust data length if insertion of options will bump the packet
621 * length beyond the t_maxopd length. Clear FIN to prevent premature
622 * closure since there is still more data to send after this (now
623 * truncated) packet.
625 * If just the options do not fit we are in a no-win situation and
626 * we treat it as an unreachable host.
628 if (len + optlen + ipoptlen > tp->t_maxopd) {
629 if (tp->t_maxopd <= optlen + ipoptlen) {
630 static time_t last_optlen_report;
632 if (last_optlen_report != time_second) {
633 last_optlen_report = time_second;
634 kprintf("tcpcb %p: MSS (%d) too small to hold options!\n", tp, tp->t_maxopd);
636 error = EHOSTUNREACH;
637 goto out;
638 } else {
639 flags &= ~TH_FIN;
640 len = tp->t_maxopd - optlen - ipoptlen;
641 sendalot = TRUE;
645 #ifdef INET6
646 KASSERT(max_linkhdr + hdrlen <= MCLBYTES, ("tcphdr too big"));
647 #else
648 KASSERT(max_linkhdr + hdrlen <= MHLEN, ("tcphdr too big"));
649 #endif
652 * Grab a header mbuf, attaching a copy of data to
653 * be transmitted, and initialize the header from
654 * the template for sends on this connection.
656 if (len) {
657 if ((tp->t_flags & TF_FORCE) && len == 1)
658 tcpstat.tcps_sndprobe++;
659 else if (SEQ_LT(tp->snd_nxt, tp->snd_max)) {
660 if (tp->snd_nxt == tp->snd_una)
661 tp->snd_max_rexmt = tp->snd_max;
662 tcpstat.tcps_sndrexmitpack++;
663 tcpstat.tcps_sndrexmitbyte += len;
664 } else {
665 tcpstat.tcps_sndpack++;
666 tcpstat.tcps_sndbyte += len;
668 #ifdef notyet
669 if ((m = m_copypack(so->so_snd.ssb_mb, off, (int)len,
670 max_linkhdr + hdrlen)) == NULL) {
671 error = ENOBUFS;
672 goto out;
675 * m_copypack left space for our hdr; use it.
677 m->m_len += hdrlen;
678 m->m_data -= hdrlen;
679 #else
680 #ifndef INET6
681 m = m_gethdr(MB_DONTWAIT, MT_HEADER);
682 #else
683 m = m_getl(hdrlen + max_linkhdr, MB_DONTWAIT, MT_HEADER,
684 M_PKTHDR, NULL);
685 #endif
686 if (m == NULL) {
687 error = ENOBUFS;
688 goto out;
690 m->m_data += max_linkhdr;
691 m->m_len = hdrlen;
692 if (len <= MHLEN - hdrlen - max_linkhdr) {
693 m_copydata(so->so_snd.ssb_mb, off, (int) len,
694 mtod(m, caddr_t) + hdrlen);
695 m->m_len += len;
696 } else {
697 m->m_next = m_copy(so->so_snd.ssb_mb, off, (int) len);
698 if (m->m_next == NULL) {
699 m_free(m);
700 error = ENOBUFS;
701 goto out;
704 #endif
706 * If we're sending everything we've got, set PUSH.
707 * (This will keep happy those implementations which only
708 * give data to the user when a buffer fills or
709 * a PUSH comes in.)
711 if (off + len == so->so_snd.ssb_cc)
712 flags |= TH_PUSH;
713 } else {
714 if (tp->t_flags & TF_ACKNOW)
715 tcpstat.tcps_sndacks++;
716 else if (flags & (TH_SYN | TH_FIN | TH_RST))
717 tcpstat.tcps_sndctrl++;
718 else if (SEQ_GT(tp->snd_up, tp->snd_una))
719 tcpstat.tcps_sndurg++;
720 else
721 tcpstat.tcps_sndwinup++;
723 MGETHDR(m, MB_DONTWAIT, MT_HEADER);
724 if (m == NULL) {
725 error = ENOBUFS;
726 goto out;
728 if (isipv6 &&
729 (hdrlen + max_linkhdr > MHLEN) && hdrlen <= MHLEN)
730 MH_ALIGN(m, hdrlen);
731 else
732 m->m_data += max_linkhdr;
733 m->m_len = hdrlen;
735 m->m_pkthdr.rcvif = NULL;
736 if (isipv6) {
737 ip6 = mtod(m, struct ip6_hdr *);
738 th = (struct tcphdr *)(ip6 + 1);
739 tcp_fillheaders(tp, ip6, th);
740 } else {
741 ip = mtod(m, struct ip *);
742 ipov = (struct ipovly *)ip;
743 th = (struct tcphdr *)(ip + 1);
744 /* this picks up the pseudo header (w/o the length) */
745 tcp_fillheaders(tp, ip, th);
749 * Fill in fields, remembering maximum advertised
750 * window for use in delaying messages about window sizes.
751 * If resending a FIN, be sure not to use a new sequence number.
753 if (flags & TH_FIN && tp->t_flags & TF_SENTFIN &&
754 tp->snd_nxt == tp->snd_max)
755 tp->snd_nxt--;
757 * If we are doing retransmissions, then snd_nxt will
758 * not reflect the first unsent octet. For ACK only
759 * packets, we do not want the sequence number of the
760 * retransmitted packet, we want the sequence number
761 * of the next unsent octet. So, if there is no data
762 * (and no SYN or FIN), use snd_max instead of snd_nxt
763 * when filling in ti_seq. But if we are in persist
764 * state, snd_max might reflect one byte beyond the
765 * right edge of the window, so use snd_nxt in that
766 * case, since we know we aren't doing a retransmission.
767 * (retransmit and persist are mutually exclusive...)
769 if (len || (flags & (TH_SYN|TH_FIN)) ||
770 tcp_callout_active(tp, tp->tt_persist))
771 th->th_seq = htonl(tp->snd_nxt);
772 else
773 th->th_seq = htonl(tp->snd_max);
774 th->th_ack = htonl(tp->rcv_nxt);
775 if (optlen) {
776 bcopy(opt, th + 1, optlen);
777 th->th_off = (sizeof(struct tcphdr) + optlen) >> 2;
779 th->th_flags = flags;
782 * Calculate receive window. Don't shrink window, but avoid
783 * silly window syndrome by sending a 0 window if the actual
784 * window is less than one segment.
786 if (recvwin < (long)(so->so_rcv.ssb_hiwat / 4) &&
787 recvwin < (long)tp->t_maxseg)
788 recvwin = 0;
789 if (recvwin < (tcp_seq_diff_t)(tp->rcv_adv - tp->rcv_nxt))
790 recvwin = (tcp_seq_diff_t)(tp->rcv_adv - tp->rcv_nxt);
791 if (recvwin > (long)TCP_MAXWIN << tp->rcv_scale)
792 recvwin = (long)TCP_MAXWIN << tp->rcv_scale;
793 th->th_win = htons((u_short) (recvwin>>tp->rcv_scale));
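/*
 * Example (illustrative values): with rcv_scale 3, an advertised
 * window of 262144 bytes is transmitted as 262144 >> 3 = 32768,
 * which fits the 16-bit th_win field; the clamp to
 * TCP_MAXWIN << rcv_scale above guarantees the shifted value fits.
 */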
796 * Adjust the RXWIN0SENT flag - indicate that we have advertised
797 * a 0 window. This may cause the remote transmitter to stall. This
798 * flag tells soreceive() to disable delayed acknowledgements when
799 * draining the buffer. This can occur if the receiver is attempting
800 * to read more data than can be buffered prior to transmitting on
801 * the connection.
803 if (recvwin == 0)
804 tp->t_flags |= TF_RXWIN0SENT;
805 else
806 tp->t_flags &= ~TF_RXWIN0SENT;
808 if (SEQ_GT(tp->snd_up, tp->snd_nxt)) {
809 th->th_urp = htons((u_short)(tp->snd_up - tp->snd_nxt));
810 th->th_flags |= TH_URG;
811 } else {
813 * If no urgent pointer to send, then we pull
814 * the urgent pointer to the left edge of the send window
815 * so that it doesn't drift into the send window on sequence
816 * number wraparound.
818 tp->snd_up = tp->snd_una; /* drag it along */
822 * Put TCP length in extended header, and then
823 * checksum extended header and data.
825 m->m_pkthdr.len = hdrlen + len; /* in6_cksum() needs this */
826 if (isipv6) {
828 * ip6_plen does not need to be filled now; it will be filled
829 * in ip6_output().
831 th->th_sum = in6_cksum(m, IPPROTO_TCP, sizeof(struct ip6_hdr),
832 sizeof(struct tcphdr) + optlen + len);
833 } else {
834 m->m_pkthdr.csum_flags = CSUM_TCP;
835 m->m_pkthdr.csum_data = offsetof(struct tcphdr, th_sum);
836 if (len + optlen)
837 th->th_sum = in_addword(th->th_sum,
838 htons((u_short)(optlen + len)));
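/*
 * For IPv4, th_sum at this point holds the partial pseudo-header
 * sum set up by tcp_fillheaders(); the option and payload lengths
 * were folded in above, and the checksum over the actual header and
 * data is completed later by checksum-offload hardware or by the
 * software fallback in the IP output path (CSUM_TCP).
 */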
840 /* IP version must be set here for ipv4/ipv6 checking later */
841 KASSERT(ip->ip_v == IPVERSION,
842 ("%s: IP version incorrect: %d", __func__, ip->ip_v));
846 * In transmit state, time the transmission and arrange for
847 * the retransmit. In persist state, just set snd_max.
849 if (!(tp->t_flags & TF_FORCE) ||
850 !tcp_callout_active(tp, tp->tt_persist)) {
851 tcp_seq startseq = tp->snd_nxt;
854 * Advance snd_nxt over sequence space of this segment.
856 if (flags & (TH_SYN | TH_FIN)) {
857 if (flags & TH_SYN)
858 tp->snd_nxt++;
859 if (flags & TH_FIN) {
860 tp->snd_nxt++;
861 tp->t_flags |= TF_SENTFIN;
864 tp->snd_nxt += len;
865 if (SEQ_GT(tp->snd_nxt, tp->snd_max)) {
866 tp->snd_max = tp->snd_nxt;
868 * Time this transmission if not a retransmission and
869 * not currently timing anything.
871 if (tp->t_rtttime == 0) {
872 tp->t_rtttime = ticks;
873 tp->t_rtseq = startseq;
874 tcpstat.tcps_segstimed++;
879 * Set retransmit timer if not currently set,
880 * and not doing a pure ack or a keep-alive probe.
881 * Initial value for retransmit timer is smoothed
882 * round-trip time + 2 * round-trip time variance.
883 * Initialize shift counter which is used for backoff
884 * of retransmit time.
886 if (!tcp_callout_active(tp, tp->tt_rexmt) &&
887 tp->snd_nxt != tp->snd_una) {
888 if (tcp_callout_active(tp, tp->tt_persist)) {
889 tcp_callout_stop(tp, tp->tt_persist);
890 tp->t_rxtshift = 0;
892 tcp_callout_reset(tp, tp->tt_rexmt, tp->t_rxtcur,
893 tcp_timer_rexmt);
895 } else {
897 * Persist case, update snd_max but since we are in
898 * persist mode (no window) we do not update snd_nxt.
900 int xlen = len;
901 if (flags & TH_SYN)
902 ++xlen;
903 if (flags & TH_FIN) {
904 ++xlen;
905 tp->t_flags |= TF_SENTFIN;
907 if (SEQ_GT(tp->snd_nxt + xlen, tp->snd_max))
908 tp->snd_max = tp->snd_nxt + xlen;
911 #ifdef TCPDEBUG
913 * Trace.
915 if (so->so_options & SO_DEBUG)
916 tcp_trace(TA_OUTPUT, tp->t_state, tp, mtod(m, void *), th, 0);
917 #endif
920 * Fill in IP length and desired time to live and
921 * send to IP level. There should be a better way
922 * to handle ttl and tos; we could keep them in
923 * the template, but need a way to checksum without them.
926 * m->m_pkthdr.len should have been set before checksum calculation,
927 * because in6_cksum() needs it.
929 if (isipv6) {
931 * we separately set hoplimit for every segment, since the
932 * user might want to change the value via setsockopt.
933 * Also, desired default hop limit might be changed via
934 * Neighbor Discovery.
936 ip6->ip6_hlim = in6_selecthlim(inp,
937 (inp->in6p_route.ro_rt ?
938 inp->in6p_route.ro_rt->rt_ifp : NULL));
940 /* TODO: IPv6 IP6TOS_ECT bit on */
941 error = ip6_output(m, inp->in6p_outputopts, &inp->in6p_route,
942 (so->so_options & SO_DONTROUTE), NULL, NULL,
943 inp);
944 } else {
945 struct rtentry *rt;
946 ip->ip_len = m->m_pkthdr.len;
947 #ifdef INET6
948 if (INP_CHECK_SOCKAF(so, AF_INET6))
949 ip->ip_ttl = in6_selecthlim(inp,
950 (inp->in6p_route.ro_rt ?
951 inp->in6p_route.ro_rt->rt_ifp : NULL));
952 else
953 #endif
954 ip->ip_ttl = inp->inp_ip_ttl; /* XXX */
956 ip->ip_tos = inp->inp_ip_tos; /* XXX */
958 * See if we should do MTU discovery.
959 * We do it only if the following are true:
960 * 1) we have a valid route to the destination
961 * 2) the MTU is not locked (if it is,
962 * then discovery has been disabled)
964 if (path_mtu_discovery &&
965 (rt = inp->inp_route.ro_rt) && (rt->rt_flags & RTF_UP) &&
966 !(rt->rt_rmx.rmx_locks & RTV_MTU))
967 ip->ip_off |= IP_DF;
969 error = ip_output(m, inp->inp_options, &inp->inp_route,
970 (so->so_options & SO_DONTROUTE) |
971 IP_DEBUGROUTE, NULL, inp);
973 if (error) {
976 * We know that the packet was lost, so back out the
977 * sequence number advance, if any.
979 if (!(tp->t_flags & TF_FORCE) ||
980 !tcp_callout_active(tp, tp->tt_persist)) {
982 * No need to check for TH_FIN here because
983 * the TF_SENTFIN flag handles that case.
985 if (!(flags & TH_SYN))
986 tp->snd_nxt -= len;
989 out:
990 if (error == ENOBUFS) {
992 * If we can't send, make sure there is something
993 * to get us going again later. Persist state
994 * is not necessarily right, but it is close enough.
996 if (!tcp_callout_active(tp, tp->tt_rexmt) &&
997 !tcp_callout_active(tp, tp->tt_persist)) {
998 tp->t_rxtshift = 0;
999 tcp_setpersist(tp);
1001 tcp_quench(inp, 0);
1002 return (0);
1004 if (error == EMSGSIZE) {
1006 * ip_output() will have already fixed the route
1007 * for us. tcp_mtudisc() will, as its last action,
1008 * initiate retransmission, so it is important to
1009 * not do so here.
1011 tcp_mtudisc(inp, 0);
1012 return 0;
1014 if ((error == EHOSTUNREACH || error == ENETDOWN) &&
1015 TCPS_HAVERCVDSYN(tp->t_state)) {
1016 tp->t_softerror = error;
1017 return (0);
1019 return (error);
1021 tcpstat.tcps_sndtotal++;
1024 * Data sent (as far as we can tell).
1026 * If this advertises a larger window than any other segment,
1027 * then remember the size of the advertised window.
1029 * Any pending ACK has now been sent.
1031 if (recvwin > 0 && SEQ_GT(tp->rcv_nxt + recvwin, tp->rcv_adv)) {
1032 tp->rcv_adv = tp->rcv_nxt + recvwin;
1033 tp->t_flags &= ~TF_RXRESIZED;
1035 tp->last_ack_sent = tp->rcv_nxt;
1036 tp->t_flags &= ~TF_ACKNOW;
1037 if (tcp_delack_enabled)
1038 tcp_callout_stop(tp, tp->tt_delack);
1039 if (sendalot)
1040 goto again;
1041 return (0);
1044 void
1045 tcp_setpersist(struct tcpcb *tp)
1047 int t = ((tp->t_srtt >> 2) + tp->t_rttvar) >> 1;
1048 int tt;
1050 if (tcp_callout_active(tp, tp->tt_rexmt))
1051 panic("tcp_setpersist: retransmit pending");
1053 * Start/restart persistence timer.
1055 TCPT_RANGESET(tt, t * tcp_backoff[tp->t_rxtshift], TCPTV_PERSMIN,
1056 TCPTV_PERSMAX);
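/*
 * Successive persist probes back off via tcp_backoff[t_rxtshift],
 * but TCPT_RANGESET above always clamps the interval to the
 * [TCPTV_PERSMIN, TCPTV_PERSMAX] range.
 */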
1057 tcp_callout_reset(tp, tp->tt_persist, tt, tcp_timer_persist);
1058 if (tp->t_rxtshift < TCP_MAXRXTSHIFT)
1059 tp->t_rxtshift++;