slirp: Factor out internal state structure
[qemu.git] / slirp / tcp_input.c
blob 47cf0ad8c72379cc4195cfb0687bc08cb4c9e786
1 /*
2 * Copyright (c) 1982, 1986, 1988, 1990, 1993, 1994
3 * The Regents of the University of California. All rights reserved.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 * 3. Neither the name of the University nor the names of its contributors
14 * may be used to endorse or promote products derived from this software
15 * without specific prior written permission.
17 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * SUCH DAMAGE.
29 * @(#)tcp_input.c 8.5 (Berkeley) 4/10/94
30 * tcp_input.c,v 1.10 1994/10/13 18:36:32 wollman Exp
34 * Changes and additions relating to SLiRP
35 * Copyright (c) 1995 Danny Gasparovski.
37 * Please read the file COPYRIGHT for the
38 * terms and conditions of the copyright.
41 #include <slirp.h>
42 #include "ip_icmp.h"
44 #define TCPREXMTTHRESH 3
46 #define TCP_PAWS_IDLE (24 * 24 * 60 * 60 * PR_SLOWHZ)
48 /* for modulo comparisons of timestamps */
49 #define TSTMP_LT(a,b) ((int)((a)-(b)) < 0)
50 #define TSTMP_GEQ(a,b) ((int)((a)-(b)) >= 0)
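/*
 * Worked example (illustrative sketch, kept out of the build): the
 * two macros above compare timestamps modulo 2^32 by looking at the
 * sign of the difference reinterpreted as a signed int (assuming a
 * 32-bit int).  With a = 0x00000010 and b = 0xFFFFFF00,
 * (int)(a - b) = 0x110 > 0, so TSTMP_GEQ(a, b) holds even though a
 * is numerically smaller: a is treated as a value that has wrapped
 * past b.  TCP sequence numbers are compared the same way via
 * SEQ_LT()/SEQ_GT().
 */
#if 0
#include <stdint.h>
#include <assert.h>

static void tstmp_wrap_demo(void)
{
    uint32_t recent = 0xFFFFFF00u;   /* just before the 32-bit wrap */
    uint32_t now    = 0x00000010u;   /* just after the wrap         */

    /* "now" is logically later than "recent" despite being smaller. */
    assert((int32_t)(now - recent) > 0);   /* TSTMP_GEQ(now, recent) */
    assert((int32_t)(recent - now) < 0);   /* TSTMP_LT(recent, now)  */
}
#endif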
53 * Insert segment ti into reassembly queue of tcp with
54 * control block tp. Return TH_FIN if reassembly now includes
55 * a segment with FIN. The macro form does the common case inline
56 * (segment is the next to be received on an established connection,
57 * and the queue is empty), avoiding linkage into and removal
58 * from the queue and repetition of various conversions.
59 * Set DELACK for segments received in order, but ack immediately
60 * when segments are out of order (so fast retransmit can work).
62 #ifdef TCP_ACK_HACK
63 #define TCP_REASS(tp, ti, m, so, flags) {\
64 if ((ti)->ti_seq == (tp)->rcv_nxt && \
65 tcpfrag_list_empty(tp) && \
66 (tp)->t_state == TCPS_ESTABLISHED) {\
67 if (ti->ti_flags & TH_PUSH) \
68 tp->t_flags |= TF_ACKNOW; \
69 else \
70 tp->t_flags |= TF_DELACK; \
71 (tp)->rcv_nxt += (ti)->ti_len; \
72 flags = (ti)->ti_flags & TH_FIN; \
73 if (so->so_emu) { \
74 if (tcp_emu((so),(m))) sbappend((so), (m)); \
75 } else \
76 sbappend((so), (m)); \
77 } else {\
78 (flags) = tcp_reass((tp), (ti), (m)); \
79 tp->t_flags |= TF_ACKNOW; \
80 } \
82 #else
83 #define TCP_REASS(tp, ti, m, so, flags) { \
84 if ((ti)->ti_seq == (tp)->rcv_nxt && \
85 tcpfrag_list_empty(tp) && \
86 (tp)->t_state == TCPS_ESTABLISHED) { \
87 tp->t_flags |= TF_DELACK; \
88 (tp)->rcv_nxt += (ti)->ti_len; \
89 flags = (ti)->ti_flags & TH_FIN; \
90 if (so->so_emu) { \
91 if (tcp_emu((so),(m))) sbappend(so, (m)); \
92 } else \
93 sbappend((so), (m)); \
94 } else { \
95 (flags) = tcp_reass((tp), (ti), (m)); \
96 tp->t_flags |= TF_ACKNOW; \
97 } \
99 #endif
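/*
 * Illustrative sketch (kept out of the build; the helper name is
 * made up for exposition): the condition under which TCP_REASS()
 * above takes its inline fast path instead of calling tcp_reass().
 * A segment can be appended straight to the socket buffer when it is
 * exactly the next expected byte, nothing is queued for reassembly,
 * and the connection is established; anything else goes through
 * tcp_reass() and forces an immediate ACK so the sender's fast
 * retransmit can trigger.
 */
#if 0
static int tcp_reass_fast_path_ok(struct tcpcb *tp, struct tcpiphdr *ti)
{
    return ti->ti_seq == tp->rcv_nxt &&      /* in-order segment       */
           tcpfrag_list_empty(tp) &&         /* reassembly queue empty */
           tp->t_state == TCPS_ESTABLISHED;  /* connection established */
}
#endif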
100 static void tcp_dooptions(struct tcpcb *tp, u_char *cp, int cnt,
101 struct tcpiphdr *ti);
102 static void tcp_xmit_timer(register struct tcpcb *tp, int rtt);
104 static int
105 tcp_reass(register struct tcpcb *tp, register struct tcpiphdr *ti,
106 struct mbuf *m)
108 register struct tcpiphdr *q;
109 struct socket *so = tp->t_socket;
110 int flags;
113 * Call with ti==NULL after becoming established to
114 * force pre-ESTABLISHED data up to user socket.
116 if (ti == NULL)
117 goto present;
120 * Find a segment which begins after this one does.
122 for (q = tcpfrag_list_first(tp); !tcpfrag_list_end(q, tp);
123 q = tcpiphdr_next(q))
124 if (SEQ_GT(q->ti_seq, ti->ti_seq))
125 break;
128 * If there is a preceding segment, it may provide some of
129 * our data already. If so, drop the data from the incoming
130 * segment. If it provides all of our data, drop us.
132 if (!tcpfrag_list_end(tcpiphdr_prev(q), tp)) {
133 register int i;
134 q = tcpiphdr_prev(q);
135 /* conversion to int (in i) handles seq wraparound */
136 i = q->ti_seq + q->ti_len - ti->ti_seq;
137 if (i > 0) {
138 if (i >= ti->ti_len) {
139 m_freem(m);
141 * Try to present any queued data
142 * at the left window edge to the user.
143 * This is needed after the 3-WHS
144 * completes.
146 goto present; /* ??? */
148 m_adj(m, i);
149 ti->ti_len -= i;
150 ti->ti_seq += i;
152 q = tcpiphdr_next(q);
154 ti->ti_mbuf = m;
157 * While we overlap succeeding segments trim them or,
158 * if they are completely covered, dequeue them.
160 while (!tcpfrag_list_end(q, tp)) {
161 register int i = (ti->ti_seq + ti->ti_len) - q->ti_seq;
162 if (i <= 0)
163 break;
164 if (i < q->ti_len) {
165 q->ti_seq += i;
166 q->ti_len -= i;
167 m_adj(q->ti_mbuf, i);
168 break;
170 q = tcpiphdr_next(q);
171 m = tcpiphdr_prev(q)->ti_mbuf;
172 remque(tcpiphdr2qlink(tcpiphdr_prev(q)));
173 m_freem(m);
177 * Stick new segment in its place.
179 insque(tcpiphdr2qlink(ti), tcpiphdr2qlink(tcpiphdr_prev(q)));
181 present:
183 * Present data to user, advancing rcv_nxt through
184 * completed sequence space.
186 if (!TCPS_HAVEESTABLISHED(tp->t_state))
187 return (0);
188 ti = tcpfrag_list_first(tp);
189 if (tcpfrag_list_end(ti, tp) || ti->ti_seq != tp->rcv_nxt)
190 return (0);
191 if (tp->t_state == TCPS_SYN_RECEIVED && ti->ti_len)
192 return (0);
193 do {
194 tp->rcv_nxt += ti->ti_len;
195 flags = ti->ti_flags & TH_FIN;
196 remque(tcpiphdr2qlink(ti));
197 m = ti->ti_mbuf;
198 ti = tcpiphdr_next(ti);
199 if (so->so_state & SS_FCANTSENDMORE)
200 m_freem(m);
201 else {
202 if (so->so_emu) {
203 if (tcp_emu(so,m)) sbappend(so, m);
204 } else
205 sbappend(so, m);
207 } while (ti != (struct tcpiphdr *)tp && ti->ti_seq == tp->rcv_nxt);
208 return (flags);
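/*
 * Worked example of the overlap arithmetic above (illustrative):
 * suppose a queued segment q covers sequence numbers 100..149
 * (q->ti_seq = 100, q->ti_len = 50) and the incoming segment ti
 * starts at 130 with 40 bytes.  Then
 *     i = q->ti_seq + q->ti_len - ti->ti_seq = 100 + 50 - 130 = 20,
 * so the first 20 bytes of ti duplicate queued data: m_adj(m, 20)
 * drops them and ti now covers 150..169.  Had ti covered 100..119,
 * i (50) would be >= ti_len (20) and the whole segment is a
 * duplicate, so its mbuf is freed.  The while loop above performs
 * the mirror-image trim, shortening or dequeueing queued segments
 * that the new one overlaps on the right.
 */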
212 * TCP input routine, follows pages 65-76 of the
213 * protocol specification dated September, 1981 very closely.
215 void
216 tcp_input(struct mbuf *m, int iphlen, struct socket *inso)
218 struct ip save_ip, *ip;
219 register struct tcpiphdr *ti;
220 caddr_t optp = NULL;
221 int optlen = 0;
222 int len, tlen, off;
223 register struct tcpcb *tp = NULL;
224 register int tiflags;
225 struct socket *so = NULL;
226 int todrop, acked, ourfinisacked, needoutput = 0;
227 int iss = 0;
228 u_long tiwin;
229 int ret;
230 struct ex_list *ex_ptr;
231 Slirp *slirp;
233 DEBUG_CALL("tcp_input");
234 DEBUG_ARGS((dfd," m = %8lx iphlen = %2d inso = %lx\n",
235 (long )m, iphlen, (long )inso ));
238 * If called with m == 0, then we're continuing the connect
240 if (m == NULL) {
241 so = inso;
242 slirp = so->slirp;
244 /* Re-set a few variables */
245 tp = sototcpcb(so);
246 m = so->so_m;
247 so->so_m = NULL;
248 ti = so->so_ti;
249 tiwin = ti->ti_win;
250 tiflags = ti->ti_flags;
252 goto cont_conn;
254 slirp = m->slirp;
257 * Get IP and TCP header together in first mbuf.
258 * Note: IP leaves IP header in first mbuf.
260 ti = mtod(m, struct tcpiphdr *);
261 if (iphlen > sizeof(struct ip )) {
262 ip_stripoptions(m, (struct mbuf *)0);
263 iphlen=sizeof(struct ip );
265 /* XXX Check if too short */
269 * Save a copy of the IP header in case we want to restore it
270 * for sending an ICMP error message in response.
272 ip=mtod(m, struct ip *);
273 save_ip = *ip;
274 save_ip.ip_len+= iphlen;
277 * Checksum extended TCP header and data.
279 tlen = ((struct ip *)ti)->ip_len;
280 tcpiphdr2qlink(ti)->next = tcpiphdr2qlink(ti)->prev = NULL;
281 memset(&ti->ti_i.ih_mbuf, 0 , sizeof(struct mbuf_ptr));
282 ti->ti_x1 = 0;
283 ti->ti_len = htons((u_int16_t)tlen);
284 len = sizeof(struct ip ) + tlen;
285 if(cksum(m, len)) {
286 goto drop;
290 * Check that TCP offset makes sense,
291 * pull out TCP options and adjust length. XXX
293 off = ti->ti_off << 2;
294 if (off < sizeof (struct tcphdr) || off > tlen) {
295 goto drop;
297 tlen -= off;
298 ti->ti_len = tlen;
299 if (off > sizeof (struct tcphdr)) {
300 optlen = off - sizeof (struct tcphdr);
301 optp = mtod(m, caddr_t) + sizeof (struct tcpiphdr);
303 tiflags = ti->ti_flags;
306 * Convert TCP protocol specific fields to host format.
308 NTOHL(ti->ti_seq);
309 NTOHL(ti->ti_ack);
310 NTOHS(ti->ti_win);
311 NTOHS(ti->ti_urp);
314 * Drop TCP, IP headers and TCP options.
316 m->m_data += sizeof(struct tcpiphdr)+off-sizeof(struct tcphdr);
317 m->m_len -= sizeof(struct tcpiphdr)+off-sizeof(struct tcphdr);
319 if (slirp->restricted) {
320 for (ex_ptr = slirp->exec_list; ex_ptr; ex_ptr = ex_ptr->ex_next) {
321 if (ex_ptr->ex_fport == ti->ti_dport &&
322 ti->ti_dst.s_addr == ex_ptr->ex_addr.s_addr) {
323 break;
326 if (!ex_ptr)
327 goto drop;
330 * Locate pcb for segment.
332 findso:
333 so = slirp->tcp_last_so;
334 if (so->so_fport != ti->ti_dport ||
335 so->so_lport != ti->ti_sport ||
336 so->so_laddr.s_addr != ti->ti_src.s_addr ||
337 so->so_faddr.s_addr != ti->ti_dst.s_addr) {
338 so = solookup(&slirp->tcb, ti->ti_src, ti->ti_sport,
339 ti->ti_dst, ti->ti_dport);
340 if (so)
341 slirp->tcp_last_so = so;
345 * If the state is CLOSED (i.e., TCB does not exist) then
346 * all data in the incoming segment is discarded.
347 * If the TCB exists but is in CLOSED state, it is embryonic,
348 * but should either do a listen or a connect soon.
350 * state == CLOSED means we've done socreate() but haven't
351 * attached it to a protocol yet...
353 * XXX If a TCB does not exist, and the TH_SYN flag is
354 * the only flag set, then create a session, mark it
355 * as if it was LISTENING, and continue...
357 if (so == NULL) {
358 if ((tiflags & (TH_SYN|TH_FIN|TH_RST|TH_URG|TH_ACK)) != TH_SYN)
359 goto dropwithreset;
361 if ((so = socreate(slirp)) == NULL)
362 goto dropwithreset;
363 if (tcp_attach(so) < 0) {
364 free(so); /* Not sofree (if it failed, it's not insqued) */
365 goto dropwithreset;
368 sbreserve(&so->so_snd, TCP_SNDSPACE);
369 sbreserve(&so->so_rcv, TCP_RCVSPACE);
371 so->so_laddr = ti->ti_src;
372 so->so_lport = ti->ti_sport;
373 so->so_faddr = ti->ti_dst;
374 so->so_fport = ti->ti_dport;
376 if ((so->so_iptos = tcp_tos(so)) == 0)
377 so->so_iptos = ((struct ip *)ti)->ip_tos;
379 tp = sototcpcb(so);
380 tp->t_state = TCPS_LISTEN;
384 * If this is a still-connecting socket, this is probably
385 * a retransmit of the SYN. Whether it's a retransmit SYN
386 * or something else, we nuke it.
388 if (so->so_state & SS_ISFCONNECTING)
389 goto drop;
391 tp = sototcpcb(so);
393 /* XXX Should never fail */
394 if (tp == NULL)
395 goto dropwithreset;
396 if (tp->t_state == TCPS_CLOSED)
397 goto drop;
399 tiwin = ti->ti_win;
402 * Segment received on connection.
403 * Reset idle time and keep-alive timer.
405 tp->t_idle = 0;
406 if (SO_OPTIONS)
407 tp->t_timer[TCPT_KEEP] = TCPTV_KEEPINTVL;
408 else
409 tp->t_timer[TCPT_KEEP] = TCPTV_KEEP_IDLE;
412 * Process options if not in LISTEN state,
413 * else do it below (after getting remote address).
415 if (optp && tp->t_state != TCPS_LISTEN)
416 tcp_dooptions(tp, (u_char *)optp, optlen, ti);
419 * Header prediction: check for the two common cases
420 * of a uni-directional data xfer. If the packet has
421 * no control flags, is in-sequence, the window didn't
422 * change and we're not retransmitting, it's a
423 * candidate. If the length is zero and the ack moved
424 * forward, we're the sender side of the xfer. Just
425 * free the data acked & wake any higher level process
426 * that was blocked waiting for space. If the length
427 * is non-zero and the ack didn't move, we're the
428 * receiver side. If we're getting packets in-order
429 * (the reassembly queue is empty), add the data to
430 * the socket buffer and note that we need a delayed ack.
432 * XXX Some of these tests are not needed
433 * eg: the tiwin == tp->snd_wnd prevents many more
434 * predictions.. with no *real* advantage..
436 if (tp->t_state == TCPS_ESTABLISHED &&
437 (tiflags & (TH_SYN|TH_FIN|TH_RST|TH_URG|TH_ACK)) == TH_ACK &&
438 ti->ti_seq == tp->rcv_nxt &&
439 tiwin && tiwin == tp->snd_wnd &&
440 tp->snd_nxt == tp->snd_max) {
441 if (ti->ti_len == 0) {
442 if (SEQ_GT(ti->ti_ack, tp->snd_una) &&
443 SEQ_LEQ(ti->ti_ack, tp->snd_max) &&
444 tp->snd_cwnd >= tp->snd_wnd) {
446 * this is a pure ack for outstanding data.
448 if (tp->t_rtt &&
449 SEQ_GT(ti->ti_ack, tp->t_rtseq))
450 tcp_xmit_timer(tp, tp->t_rtt);
451 acked = ti->ti_ack - tp->snd_una;
452 sbdrop(&so->so_snd, acked);
453 tp->snd_una = ti->ti_ack;
454 m_freem(m);
457 * If all outstanding data are acked, stop
458 * retransmit timer, otherwise restart timer
459 * using current (possibly backed-off) value.
460 * If process is waiting for space,
461 * wakeup/selwakeup/signal. If data
462 * are ready to send, let tcp_output
463 * decide between more output or persist.
465 if (tp->snd_una == tp->snd_max)
466 tp->t_timer[TCPT_REXMT] = 0;
467 else if (tp->t_timer[TCPT_PERSIST] == 0)
468 tp->t_timer[TCPT_REXMT] = tp->t_rxtcur;
471 * This is called because sowwakeup might have
472 * put data into so_snd. Since we don't do sowwakeup,
473 * we don't need this.. XXX???
475 if (so->so_snd.sb_cc)
476 (void) tcp_output(tp);
478 return;
480 } else if (ti->ti_ack == tp->snd_una &&
481 tcpfrag_list_empty(tp) &&
482 ti->ti_len <= sbspace(&so->so_rcv)) {
484 * this is a pure, in-sequence data packet
485 * with nothing on the reassembly queue and
486 * we have enough buffer space to take it.
488 tp->rcv_nxt += ti->ti_len;
490 * Add data to socket buffer.
492 if (so->so_emu) {
493 if (tcp_emu(so,m)) sbappend(so, m);
494 } else
495 sbappend(so, m);
498 * If this is a short packet, then ACK now - with Nagle
499 * congestion avoidance sender won't send more until
500 * he gets an ACK.
502 * It is better to not delay acks at all to maximize
503 * TCP throughput. See RFC 2581.
505 tp->t_flags |= TF_ACKNOW;
506 tcp_output(tp);
507 return;
509 } /* header prediction */
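/*
 * Worked example for the header-prediction fast paths above
 * (illustrative): with snd_una = 1000, snd_max = 1500 and a segment
 * that carries no data, ti_ack = 1300, an unchanged window and an
 * empty reassembly queue, the pure-ACK path just computes
 *     acked = ti_ack - snd_una = 300,
 * drops 300 bytes from the send buffer, advances snd_una to 1300 and
 * stops or restarts the retransmit timer.  The data path is the
 * receive-side mirror image: an in-order segment whose ti_ack equals
 * snd_una is appended to the socket buffer and rcv_nxt is advanced
 * by ti_len, with none of the general processing below needed.
 */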
511 * Calculate amount of space in receive window,
512 * and then do TCP input processing.
513 * Receive window is amount of space in rcv queue,
514 * but not less than advertised window.
516 { int win;
517 win = sbspace(&so->so_rcv);
518 if (win < 0)
519 win = 0;
520 tp->rcv_wnd = max(win, (int)(tp->rcv_adv - tp->rcv_nxt));
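/*
 * Worked example for the window computation above (illustrative): if
 * sbspace() currently reports only 2000 free bytes but the peer has
 * already been advertised a window up to rcv_adv = rcv_nxt + 4096,
 * then rcv_wnd = max(2000, 4096) = 4096.  The receive window is
 * never allowed to shrink below what was already advertised, so data
 * the peer was told it may send is still accepted.
 */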
523 switch (tp->t_state) {
526 * If the state is LISTEN then ignore segment if it contains an RST.
527 * If the segment contains an ACK then it is bad and send a RST.
528 * If it does not contain a SYN then it is not interesting; drop it.
529 * Don't bother responding if the destination was a broadcast.
530 * Otherwise initialize tp->rcv_nxt, and tp->irs, select an initial
531 * tp->iss, and send a segment:
532 * <SEQ=ISS><ACK=RCV_NXT><CTL=SYN,ACK>
533 * Also initialize tp->snd_nxt to tp->iss+1 and tp->snd_una to tp->iss.
534 * Fill in remote peer address fields if not previously specified.
535 * Enter SYN_RECEIVED state, and process any other fields of this
536 * segment in this state.
538 case TCPS_LISTEN: {
540 if (tiflags & TH_RST)
541 goto drop;
542 if (tiflags & TH_ACK)
543 goto dropwithreset;
544 if ((tiflags & TH_SYN) == 0)
545 goto drop;
548 * This has way too many gotos...
549 * But a bit of spaghetti code never hurt anybody :)
553 * If this is destined for the control address, then flag to
554 * tcp_ctl once connected, otherwise connect
556 if ((so->so_faddr.s_addr & slirp->vnetwork_mask.s_addr) ==
557 slirp->vnetwork_addr.s_addr) {
558 if (so->so_faddr.s_addr != slirp->vhost_addr.s_addr &&
559 so->so_faddr.s_addr != slirp->vnameserver_addr.s_addr) {
560 /* May be an add exec */
561 for (ex_ptr = slirp->exec_list; ex_ptr;
562 ex_ptr = ex_ptr->ex_next) {
563 if(ex_ptr->ex_fport == so->so_fport &&
564 so->so_faddr.s_addr == ex_ptr->ex_addr.s_addr) {
565 so->so_state |= SS_CTL;
566 break;
569 if (so->so_state & SS_CTL) {
570 goto cont_input;
573 /* CTL_ALIAS: Do nothing, tcp_fconnect will be called on it */
576 if (so->so_emu & EMU_NOCONNECT) {
577 so->so_emu &= ~EMU_NOCONNECT;
578 goto cont_input;
581 if((tcp_fconnect(so) == -1) && (errno != EINPROGRESS) && (errno != EWOULDBLOCK)) {
582 u_char code=ICMP_UNREACH_NET;
583 DEBUG_MISC((dfd," tcp fconnect errno = %d-%s\n",
584 errno,strerror(errno)));
585 if(errno == ECONNREFUSED) {
586 /* ACK the SYN, send RST to refuse the connection */
587 tcp_respond(tp, ti, m, ti->ti_seq+1, (tcp_seq)0,
588 TH_RST|TH_ACK);
589 } else {
590 if(errno == EHOSTUNREACH) code=ICMP_UNREACH_HOST;
591 HTONL(ti->ti_seq); /* restore tcp header */
592 HTONL(ti->ti_ack);
593 HTONS(ti->ti_win);
594 HTONS(ti->ti_urp);
595 m->m_data -= sizeof(struct tcpiphdr)+off-sizeof(struct tcphdr);
596 m->m_len += sizeof(struct tcpiphdr)+off-sizeof(struct tcphdr);
597 *ip=save_ip;
598 icmp_error(m, ICMP_UNREACH,code, 0,strerror(errno));
600 tp = tcp_close(tp);
601 m_free(m);
602 } else {
604 * Haven't connected yet, save the current mbuf
605 * and ti, and return
606 * XXX Some OS's don't tell us whether the connect()
607 * succeeded or not. So we must time it out.
609 so->so_m = m;
610 so->so_ti = ti;
611 tp->t_timer[TCPT_KEEP] = TCPTV_KEEP_INIT;
612 tp->t_state = TCPS_SYN_RECEIVED;
614 return;
616 cont_conn:
617 /* m==NULL
618 * Check if the connect succeeded
620 if (so->so_state & SS_NOFDREF) {
621 tp = tcp_close(tp);
622 goto dropwithreset;
624 cont_input:
625 tcp_template(tp);
627 if (optp)
628 tcp_dooptions(tp, (u_char *)optp, optlen, ti);
630 if (iss)
631 tp->iss = iss;
632 else
633 tp->iss = slirp->tcp_iss;
634 slirp->tcp_iss += TCP_ISSINCR/2;
635 tp->irs = ti->ti_seq;
636 tcp_sendseqinit(tp);
637 tcp_rcvseqinit(tp);
638 tp->t_flags |= TF_ACKNOW;
639 tp->t_state = TCPS_SYN_RECEIVED;
640 tp->t_timer[TCPT_KEEP] = TCPTV_KEEP_INIT;
641 goto trimthenstep6;
642 } /* case TCPS_LISTEN */
645 * If the state is SYN_SENT:
646 * if seg contains an ACK, but not for our SYN, drop the input.
647 * if seg contains a RST, then drop the connection.
648 * if seg does not contain SYN, then drop it.
649 * Otherwise this is an acceptable SYN segment
650 * initialize tp->rcv_nxt and tp->irs
651 * if seg contains ack then advance tp->snd_una
652 * if SYN has been acked change to ESTABLISHED else SYN_RCVD state
653 * arrange for segment to be acked (eventually)
654 * continue processing rest of data/controls, beginning with URG
656 case TCPS_SYN_SENT:
657 if ((tiflags & TH_ACK) &&
658 (SEQ_LEQ(ti->ti_ack, tp->iss) ||
659 SEQ_GT(ti->ti_ack, tp->snd_max)))
660 goto dropwithreset;
662 if (tiflags & TH_RST) {
663 if (tiflags & TH_ACK)
664 tp = tcp_drop(tp,0); /* XXX Check t_softerror! */
665 goto drop;
668 if ((tiflags & TH_SYN) == 0)
669 goto drop;
670 if (tiflags & TH_ACK) {
671 tp->snd_una = ti->ti_ack;
672 if (SEQ_LT(tp->snd_nxt, tp->snd_una))
673 tp->snd_nxt = tp->snd_una;
676 tp->t_timer[TCPT_REXMT] = 0;
677 tp->irs = ti->ti_seq;
678 tcp_rcvseqinit(tp);
679 tp->t_flags |= TF_ACKNOW;
680 if (tiflags & TH_ACK && SEQ_GT(tp->snd_una, tp->iss)) {
681 soisfconnected(so);
682 tp->t_state = TCPS_ESTABLISHED;
684 (void) tcp_reass(tp, (struct tcpiphdr *)0,
685 (struct mbuf *)0);
687 * if we didn't have to retransmit the SYN,
688 * use its rtt as our initial srtt & rtt var.
690 if (tp->t_rtt)
691 tcp_xmit_timer(tp, tp->t_rtt);
692 } else
693 tp->t_state = TCPS_SYN_RECEIVED;
695 trimthenstep6:
697 * Advance ti->ti_seq to correspond to first data byte.
698 * If data, trim to stay within window,
699 * dropping FIN if necessary.
701 ti->ti_seq++;
702 if (ti->ti_len > tp->rcv_wnd) {
703 todrop = ti->ti_len - tp->rcv_wnd;
704 m_adj(m, -todrop);
705 ti->ti_len = tp->rcv_wnd;
706 tiflags &= ~TH_FIN;
708 tp->snd_wl1 = ti->ti_seq - 1;
709 tp->rcv_up = ti->ti_seq;
710 goto step6;
711 } /* switch tp->t_state */
713 * States other than LISTEN or SYN_SENT.
714 * Check that at least some bytes of segment are within
715 * receive window. If segment begins before rcv_nxt,
716 * drop leading data (and SYN); if nothing left, just ack.
718 todrop = tp->rcv_nxt - ti->ti_seq;
719 if (todrop > 0) {
720 if (tiflags & TH_SYN) {
721 tiflags &= ~TH_SYN;
722 ti->ti_seq++;
723 if (ti->ti_urp > 1)
724 ti->ti_urp--;
725 else
726 tiflags &= ~TH_URG;
727 todrop--;
730 * Following if statement from Stevens, vol. 2, p. 960.
732 if (todrop > ti->ti_len
733 || (todrop == ti->ti_len && (tiflags & TH_FIN) == 0)) {
735 * Any valid FIN must be to the left of the window.
736 * At this point the FIN must be a duplicate or out
737 * of sequence; drop it.
739 tiflags &= ~TH_FIN;
742 * Send an ACK to resynchronize and drop any data.
743 * But keep on processing for RST or ACK.
745 tp->t_flags |= TF_ACKNOW;
746 todrop = ti->ti_len;
748 m_adj(m, todrop);
749 ti->ti_seq += todrop;
750 ti->ti_len -= todrop;
751 if (ti->ti_urp > todrop)
752 ti->ti_urp -= todrop;
753 else {
754 tiflags &= ~TH_URG;
755 ti->ti_urp = 0;
759 * If new data are received on a connection after the
760 * user processes are gone, then RST the other end.
762 if ((so->so_state & SS_NOFDREF) &&
763 tp->t_state > TCPS_CLOSE_WAIT && ti->ti_len) {
764 tp = tcp_close(tp);
765 goto dropwithreset;
769 * If segment ends after window, drop trailing data
770 * (and PUSH and FIN); if nothing left, just ACK.
772 todrop = (ti->ti_seq+ti->ti_len) - (tp->rcv_nxt+tp->rcv_wnd);
773 if (todrop > 0) {
774 if (todrop >= ti->ti_len) {
776 * If a new connection request is received
777 * while in TIME_WAIT, drop the old connection
778 * and start over if the sequence numbers
779 * are above the previous ones.
781 if (tiflags & TH_SYN &&
782 tp->t_state == TCPS_TIME_WAIT &&
783 SEQ_GT(ti->ti_seq, tp->rcv_nxt)) {
784 iss = tp->rcv_nxt + TCP_ISSINCR;
785 tp = tcp_close(tp);
786 goto findso;
789 * If window is closed can only take segments at
790 * window edge, and have to drop data and PUSH from
791 * incoming segments. Continue processing, but
792 * remember to ack. Otherwise, drop segment
793 * and ack.
795 if (tp->rcv_wnd == 0 && ti->ti_seq == tp->rcv_nxt) {
796 tp->t_flags |= TF_ACKNOW;
797 } else {
798 goto dropafterack;
801 m_adj(m, -todrop);
802 ti->ti_len -= todrop;
803 tiflags &= ~(TH_PUSH|TH_FIN);
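/*
 * Worked example of the trimming above (illustrative): with
 * rcv_nxt = 5000 and rcv_wnd = 1000, a segment with ti_seq = 4900
 * and ti_len = 1300 is cut down in two steps:
 *     leading:  todrop = rcv_nxt - ti_seq = 100
 *               -> m_adj(m, 100), ti_seq = 5000, ti_len = 1200;
 *     trailing: todrop = (5000 + 1200) - (5000 + 1000) = 200
 *               -> m_adj(m, -200), ti_len = 1000.
 * What remains is exactly the portion that fits the receive window;
 * a positive m_adj() length trims from the front of the mbuf chain,
 * a negative one from the tail.
 */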
807 * If the RST bit is set examine the state:
808 * SYN_RECEIVED STATE:
809 * If passive open, return to LISTEN state.
810 * If active open, inform user that connection was refused.
811 * ESTABLISHED, FIN_WAIT_1, FIN_WAIT2, CLOSE_WAIT STATES:
812 * Inform user that connection was reset, and close tcb.
813 * CLOSING, LAST_ACK, TIME_WAIT STATES
814 * Close the tcb.
816 if (tiflags&TH_RST) switch (tp->t_state) {
818 case TCPS_SYN_RECEIVED:
819 case TCPS_ESTABLISHED:
820 case TCPS_FIN_WAIT_1:
821 case TCPS_FIN_WAIT_2:
822 case TCPS_CLOSE_WAIT:
823 tp->t_state = TCPS_CLOSED;
824 tp = tcp_close(tp);
825 goto drop;
827 case TCPS_CLOSING:
828 case TCPS_LAST_ACK:
829 case TCPS_TIME_WAIT:
830 tp = tcp_close(tp);
831 goto drop;
835 * If a SYN is in the window, then this is an
836 * error and we send an RST and drop the connection.
838 if (tiflags & TH_SYN) {
839 tp = tcp_drop(tp,0);
840 goto dropwithreset;
844 * If the ACK bit is off we drop the segment and return.
846 if ((tiflags & TH_ACK) == 0) goto drop;
849 * Ack processing.
851 switch (tp->t_state) {
853 * In SYN_RECEIVED state if the ack ACKs our SYN then enter
854 * ESTABLISHED state and continue processing, otherwise
855 * send an RST. una<=ack<=max
857 case TCPS_SYN_RECEIVED:
859 if (SEQ_GT(tp->snd_una, ti->ti_ack) ||
860 SEQ_GT(ti->ti_ack, tp->snd_max))
861 goto dropwithreset;
862 tp->t_state = TCPS_ESTABLISHED;
864 * The sent SYN is ack'ed with our sequence number +1
865 * The first data byte already in the buffer will get
866 * lost if no correction is made. This is only needed for
867 * SS_CTL since the buffer is empty otherwise.
868 * tp->snd_una++; or:
870 tp->snd_una=ti->ti_ack;
871 if (so->so_state & SS_CTL) {
872 /* So tcp_ctl reports the right state */
873 ret = tcp_ctl(so);
874 if (ret == 1) {
875 soisfconnected(so);
876 so->so_state &= ~SS_CTL; /* success XXX */
877 } else if (ret == 2) {
878 so->so_state &= SS_PERSISTENT_MASK;
879 so->so_state |= SS_NOFDREF; /* CTL_CMD */
880 } else {
881 needoutput = 1;
882 tp->t_state = TCPS_FIN_WAIT_1;
884 } else {
885 soisfconnected(so);
888 (void) tcp_reass(tp, (struct tcpiphdr *)0, (struct mbuf *)0);
889 tp->snd_wl1 = ti->ti_seq - 1;
890 /* Avoid ack processing; snd_una==ti_ack => dup ack */
891 goto synrx_to_est;
892 /* fall into ... */
895 * In ESTABLISHED state: drop duplicate ACKs; ACK out of range
896 * ACKs. If the ack is in the range
897 * tp->snd_una < ti->ti_ack <= tp->snd_max
898 * then advance tp->snd_una to ti->ti_ack and drop
899 * data from the retransmission queue. If this ACK reflects
900 * more up to date window information we update our window information.
902 case TCPS_ESTABLISHED:
903 case TCPS_FIN_WAIT_1:
904 case TCPS_FIN_WAIT_2:
905 case TCPS_CLOSE_WAIT:
906 case TCPS_CLOSING:
907 case TCPS_LAST_ACK:
908 case TCPS_TIME_WAIT:
910 if (SEQ_LEQ(ti->ti_ack, tp->snd_una)) {
911 if (ti->ti_len == 0 && tiwin == tp->snd_wnd) {
912 DEBUG_MISC((dfd," dup ack m = %lx so = %lx \n",
913 (long )m, (long )so));
915 * If we have outstanding data (other than
916 * a window probe), this is a completely
917 * duplicate ack (ie, window info didn't
918 * change), the ack is the biggest we've
919 * seen and we've seen exactly our rexmt
920 * threshold of them, assume a packet
921 * has been dropped and retransmit it.
922 * Kludge snd_nxt & the congestion
923 * window so we send only this one
924 * packet.
926 * We know we're losing at the current
927 * window size so do congestion avoidance
928 * (set ssthresh to half the current window
929 * and pull our congestion window back to
930 * the new ssthresh).
932 * Dup acks mean that packets have left the
933 * network (they're now cached at the receiver)
934 * so bump cwnd by the amount in the receiver
935 * to keep a constant cwnd packets in the
936 * network.
938 if (tp->t_timer[TCPT_REXMT] == 0 ||
939 ti->ti_ack != tp->snd_una)
940 tp->t_dupacks = 0;
941 else if (++tp->t_dupacks == TCPREXMTTHRESH) {
942 tcp_seq onxt = tp->snd_nxt;
943 u_int win =
944 min(tp->snd_wnd, tp->snd_cwnd) / 2 /
945 tp->t_maxseg;
947 if (win < 2)
948 win = 2;
949 tp->snd_ssthresh = win * tp->t_maxseg;
950 tp->t_timer[TCPT_REXMT] = 0;
951 tp->t_rtt = 0;
952 tp->snd_nxt = ti->ti_ack;
953 tp->snd_cwnd = tp->t_maxseg;
954 (void) tcp_output(tp);
955 tp->snd_cwnd = tp->snd_ssthresh +
956 tp->t_maxseg * tp->t_dupacks;
957 if (SEQ_GT(onxt, tp->snd_nxt))
958 tp->snd_nxt = onxt;
959 goto drop;
960 } else if (tp->t_dupacks > TCPREXMTTHRESH) {
961 tp->snd_cwnd += tp->t_maxseg;
962 (void) tcp_output(tp);
963 goto drop;
965 } else
966 tp->t_dupacks = 0;
967 break;
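/*
 * Worked example of the fast-retransmit arithmetic above
 * (illustrative): with snd_wnd = snd_cwnd = 16384 and
 * t_maxseg = 1460, the third duplicate ACK computes
 *     win = min(16384, 16384) / 2 / 1460 = 5 segments,
 *     snd_ssthresh = 5 * 1460 = 7300.
 * snd_cwnd is collapsed to one segment so tcp_output() resends only
 * the presumed-lost segment, and is then set to
 *     snd_ssthresh + t_maxseg * t_dupacks = 7300 + 1460 * 3 = 11680,
 * growing by one further segment per additional duplicate ACK until
 * new data is acked, at which point synrx_to_est deflates the window
 * back to snd_ssthresh.
 */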
969 synrx_to_est:
971 * If the congestion window was inflated to account
972 * for the other side's cached packets, retract it.
974 if (tp->t_dupacks > TCPREXMTTHRESH &&
975 tp->snd_cwnd > tp->snd_ssthresh)
976 tp->snd_cwnd = tp->snd_ssthresh;
977 tp->t_dupacks = 0;
978 if (SEQ_GT(ti->ti_ack, tp->snd_max)) {
979 goto dropafterack;
981 acked = ti->ti_ack - tp->snd_una;
984 * If transmit timer is running and timed sequence
985 * number was acked, update smoothed round trip time.
986 * Since we now have an rtt measurement, cancel the
987 * timer backoff (cf., Phil Karn's retransmit alg.).
988 * Recompute the initial retransmit timer.
990 if (tp->t_rtt && SEQ_GT(ti->ti_ack, tp->t_rtseq))
991 tcp_xmit_timer(tp,tp->t_rtt);
994 * If all outstanding data is acked, stop retransmit
995 * timer and remember to restart (more output or persist).
996 * If there is more data to be acked, restart retransmit
997 * timer, using current (possibly backed-off) value.
999 if (ti->ti_ack == tp->snd_max) {
1000 tp->t_timer[TCPT_REXMT] = 0;
1001 needoutput = 1;
1002 } else if (tp->t_timer[TCPT_PERSIST] == 0)
1003 tp->t_timer[TCPT_REXMT] = tp->t_rxtcur;
1005 * When new data is acked, open the congestion window.
1006 * If the window gives us less than ssthresh packets
1007 * in flight, open exponentially (maxseg per packet).
1008 * Otherwise open linearly: maxseg per window
1009 * (maxseg^2 / cwnd per packet).
1012 register u_int cw = tp->snd_cwnd;
1013 register u_int incr = tp->t_maxseg;
1015 if (cw > tp->snd_ssthresh)
1016 incr = incr * incr / cw;
1017 tp->snd_cwnd = min(cw + incr, TCP_MAXWIN<<tp->snd_scale);
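/*
 * Worked example of the window opening above (illustrative), with
 * t_maxseg = 1460:
 *     slow start (cwnd <= ssthresh):
 *         cwnd = 2920  -> incr = 1460, new cwnd = 4380;
 *     congestion avoidance (cwnd > ssthresh):
 *         cwnd = 14600 -> incr = 1460*1460/14600 = 146,
 *                         new cwnd = 14746.
 * Below ssthresh the window grows by one segment per ACK (roughly
 * doubling per RTT); above it, by about one segment per window
 * (linear per RTT), capped at TCP_MAXWIN << snd_scale.
 */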
1019 if (acked > so->so_snd.sb_cc) {
1020 tp->snd_wnd -= so->so_snd.sb_cc;
1021 sbdrop(&so->so_snd, (int )so->so_snd.sb_cc);
1022 ourfinisacked = 1;
1023 } else {
1024 sbdrop(&so->so_snd, acked);
1025 tp->snd_wnd -= acked;
1026 ourfinisacked = 0;
1028 tp->snd_una = ti->ti_ack;
1029 if (SEQ_LT(tp->snd_nxt, tp->snd_una))
1030 tp->snd_nxt = tp->snd_una;
1032 switch (tp->t_state) {
1035 * In FIN_WAIT_1 STATE in addition to the processing
1036 * for the ESTABLISHED state if our FIN is now acknowledged
1037 * then enter FIN_WAIT_2.
1039 case TCPS_FIN_WAIT_1:
1040 if (ourfinisacked) {
1042 * If we can't receive any more
1043 * data, then closing user can proceed.
1044 * Starting the timer is contrary to the
1045 * specification, but if we don't get a FIN
1046 * we'll hang forever.
1048 if (so->so_state & SS_FCANTRCVMORE) {
1049 tp->t_timer[TCPT_2MSL] = TCP_MAXIDLE;
1051 tp->t_state = TCPS_FIN_WAIT_2;
1053 break;
1056 * In CLOSING STATE in addition to the processing for
1057 * the ESTABLISHED state if the ACK acknowledges our FIN
1058 * then enter the TIME-WAIT state, otherwise ignore
1059 * the segment.
1061 case TCPS_CLOSING:
1062 if (ourfinisacked) {
1063 tp->t_state = TCPS_TIME_WAIT;
1064 tcp_canceltimers(tp);
1065 tp->t_timer[TCPT_2MSL] = 2 * TCPTV_MSL;
1067 break;
1070 * In LAST_ACK, we may still be waiting for data to drain
1071 * and/or to be acked, as well as for the ack of our FIN.
1072 * If our FIN is now acknowledged, delete the TCB,
1073 * enter the closed state and return.
1075 case TCPS_LAST_ACK:
1076 if (ourfinisacked) {
1077 tp = tcp_close(tp);
1078 goto drop;
1080 break;
1083 * In TIME_WAIT state the only thing that should arrive
1084 * is a retransmission of the remote FIN. Acknowledge
1085 * it and restart the finack timer.
1087 case TCPS_TIME_WAIT:
1088 tp->t_timer[TCPT_2MSL] = 2 * TCPTV_MSL;
1089 goto dropafterack;
1091 } /* switch(tp->t_state) */
1093 step6:
1095 * Update window information.
1096 * Don't look at window if no ACK: TAC's send garbage on first SYN.
1098 if ((tiflags & TH_ACK) &&
1099 (SEQ_LT(tp->snd_wl1, ti->ti_seq) ||
1100 (tp->snd_wl1 == ti->ti_seq && (SEQ_LT(tp->snd_wl2, ti->ti_ack) ||
1101 (tp->snd_wl2 == ti->ti_ack && tiwin > tp->snd_wnd))))) {
1102 tp->snd_wnd = tiwin;
1103 tp->snd_wl1 = ti->ti_seq;
1104 tp->snd_wl2 = ti->ti_ack;
1105 if (tp->snd_wnd > tp->max_sndwnd)
1106 tp->max_sndwnd = tp->snd_wnd;
1107 needoutput = 1;
1111 * Process segments with URG.
1113 if ((tiflags & TH_URG) && ti->ti_urp &&
1114 TCPS_HAVERCVDFIN(tp->t_state) == 0) {
1116 * This is a kludge, but if we receive and accept
1117 * random urgent pointers, we'll crash in
1118 * soreceive. It's hard to imagine someone
1119 * actually wanting to send this much urgent data.
1121 if (ti->ti_urp + so->so_rcv.sb_cc > so->so_rcv.sb_datalen) {
1122 ti->ti_urp = 0;
1123 tiflags &= ~TH_URG;
1124 goto dodata;
1127 * If this segment advances the known urgent pointer,
1128 * then mark the data stream. This should not happen
1129 * in CLOSE_WAIT, CLOSING, LAST_ACK or TIME_WAIT STATES since
1130 * a FIN has been received from the remote side.
1131 * In these states we ignore the URG.
1133 * According to RFC961 (Assigned Protocols),
1134 * the urgent pointer points to the last octet
1135 * of urgent data. We continue, however,
1136 * to consider it to indicate the first octet
1137 * of data past the urgent section as the original
1138 * spec states (in one of two places).
1140 if (SEQ_GT(ti->ti_seq+ti->ti_urp, tp->rcv_up)) {
1141 tp->rcv_up = ti->ti_seq + ti->ti_urp;
1142 so->so_urgc = so->so_rcv.sb_cc +
1143 (tp->rcv_up - tp->rcv_nxt); /* -1; */
1144 tp->rcv_up = ti->ti_seq + ti->ti_urp;
1147 } else
1149 * If no out of band data is expected,
1150 * pull receive urgent pointer along
1151 * with the receive window.
1153 if (SEQ_GT(tp->rcv_nxt, tp->rcv_up))
1154 tp->rcv_up = tp->rcv_nxt;
1155 dodata:
1158 * Process the segment text, merging it into the TCP sequencing queue,
1159 * and arranging for acknowledgment of receipt if necessary.
1160 * This process logically involves adjusting tp->rcv_wnd as data
1161 * is presented to the user (this happens in tcp_usrreq.c,
1162 * case PRU_RCVD). If a FIN has already been received on this
1163 * connection then we just ignore the text.
1165 if ((ti->ti_len || (tiflags&TH_FIN)) &&
1166 TCPS_HAVERCVDFIN(tp->t_state) == 0) {
1167 TCP_REASS(tp, ti, m, so, tiflags);
1169 * Note the amount of data that peer has sent into
1170 * our window, in order to estimate the sender's
1171 * buffer size.
1173 len = so->so_rcv.sb_datalen - (tp->rcv_adv - tp->rcv_nxt);
1174 } else {
1175 m_free(m);
1176 tiflags &= ~TH_FIN;
1180 * If FIN is received ACK the FIN and let the user know
1181 * that the connection is closing.
1183 if (tiflags & TH_FIN) {
1184 if (TCPS_HAVERCVDFIN(tp->t_state) == 0) {
1186 * If we receive a FIN we can't send more data,
1187 * set it to SS_FDRAIN
1188 * Shutdown the socket if there is no rx data in the
1189 * buffer.
1190 * soread() is called on completion of shutdown() and
1191 * will go to TCPS_LAST_ACK, and use tcp_output()
1192 * to send the FIN.
1194 sofwdrain(so);
1196 tp->t_flags |= TF_ACKNOW;
1197 tp->rcv_nxt++;
1199 switch (tp->t_state) {
1202 * In SYN_RECEIVED and ESTABLISHED STATES
1203 * enter the CLOSE_WAIT state.
1205 case TCPS_SYN_RECEIVED:
1206 case TCPS_ESTABLISHED:
1207 if(so->so_emu == EMU_CTL) /* no shutdown on socket */
1208 tp->t_state = TCPS_LAST_ACK;
1209 else
1210 tp->t_state = TCPS_CLOSE_WAIT;
1211 break;
1214 * If still in FIN_WAIT_1 STATE FIN has not been acked so
1215 * enter the CLOSING state.
1217 case TCPS_FIN_WAIT_1:
1218 tp->t_state = TCPS_CLOSING;
1219 break;
1222 * In FIN_WAIT_2 state enter the TIME_WAIT state,
1223 * starting the time-wait timer, turning off the other
1224 * standard timers.
1226 case TCPS_FIN_WAIT_2:
1227 tp->t_state = TCPS_TIME_WAIT;
1228 tcp_canceltimers(tp);
1229 tp->t_timer[TCPT_2MSL] = 2 * TCPTV_MSL;
1230 break;
1233 * In TIME_WAIT state restart the 2 MSL time_wait timer.
1235 case TCPS_TIME_WAIT:
1236 tp->t_timer[TCPT_2MSL] = 2 * TCPTV_MSL;
1237 break;
1242 * If this is a small packet, then ACK now - with Nagle
1243 * congestion avoidance sender won't send more until
1244 * he gets an ACK.
1246 * See above.
1248 if (ti->ti_len && (unsigned)ti->ti_len <= 5 &&
1249 ((struct tcpiphdr_2 *)ti)->first_char == (char)27) {
1250 tp->t_flags |= TF_ACKNOW;
1254 * Return any desired output.
1256 if (needoutput || (tp->t_flags & TF_ACKNOW)) {
1257 (void) tcp_output(tp);
1259 return;
1261 dropafterack:
1263 * Generate an ACK dropping incoming segment if it occupies
1264 * sequence space, where the ACK reflects our state.
1266 if (tiflags & TH_RST)
1267 goto drop;
1268 m_freem(m);
1269 tp->t_flags |= TF_ACKNOW;
1270 (void) tcp_output(tp);
1271 return;
1273 dropwithreset:
1274 /* reuses m if m!=NULL, m_free() unnecessary */
1275 if (tiflags & TH_ACK)
1276 tcp_respond(tp, ti, m, (tcp_seq)0, ti->ti_ack, TH_RST);
1277 else {
1278 if (tiflags & TH_SYN) ti->ti_len++;
1279 tcp_respond(tp, ti, m, ti->ti_seq+ti->ti_len, (tcp_seq)0,
1280 TH_RST|TH_ACK);
1283 return;
1285 drop:
1287 * Drop space held by incoming segment and return.
1289 m_free(m);
1291 return;
1294 static void
1295 tcp_dooptions(struct tcpcb *tp, u_char *cp, int cnt, struct tcpiphdr *ti)
1297 u_int16_t mss;
1298 int opt, optlen;
1300 DEBUG_CALL("tcp_dooptions");
1301 DEBUG_ARGS((dfd," tp = %lx cnt=%i \n", (long )tp, cnt));
1303 for (; cnt > 0; cnt -= optlen, cp += optlen) {
1304 opt = cp[0];
1305 if (opt == TCPOPT_EOL)
1306 break;
1307 if (opt == TCPOPT_NOP)
1308 optlen = 1;
1309 else {
1310 optlen = cp[1];
1311 if (optlen <= 0)
1312 break;
1314 switch (opt) {
1316 default:
1317 continue;
1319 case TCPOPT_MAXSEG:
1320 if (optlen != TCPOLEN_MAXSEG)
1321 continue;
1322 if (!(ti->ti_flags & TH_SYN))
1323 continue;
1324 memcpy((char *) &mss, (char *) cp + 2, sizeof(mss));
1325 NTOHS(mss);
1326 (void) tcp_mss(tp, mss); /* sets t_maxseg */
1327 break;
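/*
 * Worked example for the option walk above (illustrative): a SYN
 * carrying the raw option bytes
 *     0x02 0x04 0x05 0xb4   0x01 0x01   0x00
 * is parsed as MAXSEG (kind 2, length 4, mss 0x05b4 = 1460), two
 * NOPs (kind 1, one byte each) and EOL (kind 0), which terminates
 * the loop.  Only MAXSEG is acted on here, and only when the segment
 * also carries SYN; every other option kind falls through to
 * "continue" and is skipped using its length byte.
 */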
1334 * Pull out of band byte out of a segment so
1335 * it doesn't appear in the user's data queue.
1336 * It is still reflected in the segment length for
1337 * sequencing purposes.
1340 #ifdef notdef
1342 void
1343 tcp_pulloutofband(so, ti, m)
1344 struct socket *so;
1345 struct tcpiphdr *ti;
1346 register struct mbuf *m;
1348 int cnt = ti->ti_urp - 1;
1350 while (cnt >= 0) {
1351 if (m->m_len > cnt) {
1352 char *cp = mtod(m, caddr_t) + cnt;
1353 struct tcpcb *tp = sototcpcb(so);
1355 tp->t_iobc = *cp;
1356 tp->t_oobflags |= TCPOOB_HAVEDATA;
1357 memcpy(cp, cp+1, (unsigned)(m->m_len - cnt - 1));
1358 m->m_len--;
1359 return;
1361 cnt -= m->m_len;
1362 m = m->m_next; /* XXX WRONG! Fix it! */
1363 if (m == 0)
1364 break;
1366 panic("tcp_pulloutofband");
1369 #endif /* notdef */
1372 * Collect new round-trip time estimate
1373 * and update averages and current timeout.
1376 static void
1377 tcp_xmit_timer(register struct tcpcb *tp, int rtt)
1379 register short delta;
1381 DEBUG_CALL("tcp_xmit_timer");
1382 DEBUG_ARG("tp = %lx", (long)tp);
1383 DEBUG_ARG("rtt = %d", rtt);
1385 if (tp->t_srtt != 0) {
1387 * srtt is stored as fixed point with 3 bits after the
1388 * binary point (i.e., scaled by 8). The following magic
1389 * is equivalent to the smoothing algorithm in rfc793 with
1390 * an alpha of .875 (srtt = rtt/8 + srtt*7/8 in fixed
1391 * point). Adjust rtt to origin 0.
1393 delta = rtt - 1 - (tp->t_srtt >> TCP_RTT_SHIFT);
1394 if ((tp->t_srtt += delta) <= 0)
1395 tp->t_srtt = 1;
1397 * We accumulate a smoothed rtt variance (actually, a
1398 * smoothed mean difference), then set the retransmit
1399 * timer to smoothed rtt + 4 times the smoothed variance.
1400 * rttvar is stored as fixed point with 2 bits after the
1401 * binary point (scaled by 4). The following is
1402 * equivalent to rfc793 smoothing with an alpha of .75
1403 * (rttvar = rttvar*3/4 + |delta| / 4). This replaces
1404 * rfc793's wired-in beta.
1406 if (delta < 0)
1407 delta = -delta;
1408 delta -= (tp->t_rttvar >> TCP_RTTVAR_SHIFT);
1409 if ((tp->t_rttvar += delta) <= 0)
1410 tp->t_rttvar = 1;
1411 } else {
1413 * No rtt measurement yet - use the unsmoothed rtt.
1414 * Set the variance to half the rtt (so our first
1415 * retransmit happens at 3*rtt).
1417 tp->t_srtt = rtt << TCP_RTT_SHIFT;
1418 tp->t_rttvar = rtt << (TCP_RTTVAR_SHIFT - 1);
1420 tp->t_rtt = 0;
1421 tp->t_rxtshift = 0;
1424 * the retransmit should happen at rtt + 4 * rttvar.
1425 * Because of the way we do the smoothing, srtt and rttvar
1426 * will each average +1/2 tick of bias. When we compute
1427 * the retransmit timer, we want 1/2 tick of rounding and
1428 * 1 extra tick because of +-1/2 tick uncertainty in the
1429 * firing of the timer. The bias will give us exactly the
1430 * 1.5 tick we need. But, because the bias is
1431 * statistical, we have to test that we don't drop below
1432 * the minimum feasible timer (which is 2 ticks).
1434 TCPT_RANGESET(tp->t_rxtcur, TCP_REXMTVAL(tp),
1435 (short)tp->t_rttmin, TCPTV_REXMTMAX); /* XXX */
1438 * We received an ack for a packet that wasn't retransmitted;
1439 * it is probably safe to discard any error indications we've
1440 * received recently. This isn't quite right, but close enough
1441 * for now (a route might have failed after we sent a segment,
1442 * and the return path might not be symmetrical).
1444 tp->t_softerror = 0;
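/*
 * Worked example of the smoothing above (illustrative), using the
 * scaling described in the comments (srtt scaled by 8 via
 * TCP_RTT_SHIFT, rttvar scaled by 4 via TCP_RTTVAR_SHIFT).  Assume
 * t_srtt = 40 (5 ticks), t_rttvar = 8 (2 ticks) and a new sample
 * rtt = 8 ticks:
 *     delta     = 8 - 1 - (40 >> 3) = 2
 *     t_srtt   += 2                 -> 42   (about 5.25 ticks)
 *     delta     = |2| - (8 >> 2)    = 0
 *     t_rttvar += 0                 -> 8    (still 2 ticks)
 * The retransmit timeout then becomes roughly srtt + 4*rttvar, about
 * 5.25 + 8 = 13 ticks here, clamped by TCPT_RANGESET() to the range
 * [t_rttmin, TCPTV_REXMTMAX].
 */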
1448 * Determine a reasonable value for maxseg size.
1449 * If the route is known, check route for mtu.
1450 * If none, use an mss that can be handled on the outgoing
1451 * interface without forcing IP to fragment; if bigger than
1452 * an mbuf cluster (MCLBYTES), round down to nearest multiple of MCLBYTES
1453 * to utilize large mbufs. If no route is found, route has no mtu,
1454 * or the destination isn't local, use a default, hopefully conservative
1455 * size (usually 512 or the default IP max size, but no more than the mtu
1456 * of the interface), as we can't discover anything about intervening
1457 * gateways or networks. We also initialize the congestion/slow start
1458 * window to be a single segment if the destination isn't local.
1459 * While looking at the routing entry, we also initialize other path-dependent
1460 * parameters from pre-set or cached values in the routing entry.
1464 tcp_mss(struct tcpcb *tp, u_int offer)
1466 struct socket *so = tp->t_socket;
1467 int mss;
1469 DEBUG_CALL("tcp_mss");
1470 DEBUG_ARG("tp = %lx", (long)tp);
1471 DEBUG_ARG("offer = %d", offer);
1473 mss = min(IF_MTU, IF_MRU) - sizeof(struct tcpiphdr);
1474 if (offer)
1475 mss = min(mss, offer);
1476 mss = max(mss, 32);
1477 if (mss < tp->t_maxseg || offer != 0)
1478 tp->t_maxseg = mss;
1480 tp->snd_cwnd = mss;
1482 sbreserve(&so->so_snd, TCP_SNDSPACE + ((TCP_SNDSPACE % mss) ?
1483 (mss - (TCP_SNDSPACE % mss)) :
1484 0));
1485 sbreserve(&so->so_rcv, TCP_RCVSPACE + ((TCP_RCVSPACE % mss) ?
1486 (mss - (TCP_RCVSPACE % mss)) :
1487 0));
1489 DEBUG_MISC((dfd, " returning mss = %d\n", mss));
1491 return mss;
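/*
 * Worked example for tcp_mss() above (illustrative; the numbers are
 * assumed, not taken from the headers): with IF_MTU = IF_MRU = 1500
 * the starting point is mss = 1500 - sizeof(struct tcpiphdr), which
 * is then clamped to any MSS the peer offered and to a floor of 32.
 * The socket buffers are rounded up to the next multiple of the
 * resulting mss; if TCP_SNDSPACE were 8192 and mss 1460:
 *     8192 % 1460 = 892  ->  reserve 8192 + (1460 - 892) = 8760,
 * i.e. exactly six segments, so a full buffer always drains in whole
 * segments.
 */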