/* SCTP kernel implementation
 * (C) Copyright IBM Corp. 2001, 2004
 * Copyright (c) 1999-2000 Cisco, Inc.
 * Copyright (c) 1999-2001 Motorola, Inc.
 * Copyright (c) 2001-2003 Intel Corp.
 *
 * This file is part of the SCTP kernel implementation
 *
 * These functions implement the sctp_outq class.   The outqueue handles
 * bundling and queueing of outgoing SCTP chunks.
 *
 * This SCTP implementation is free software;
 * you can redistribute it and/or modify it under the terms of
 * the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This SCTP implementation is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; without even the implied
 *                 ************************
 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with GNU CC; see the file COPYING.  If not, write to
 * the Free Software Foundation, 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 *
 * Please send any bug reports or fixes you make to the
 * email address(es):
 *    lksctp developers <lksctp-developers@lists.sourceforge.net>
 *
 * Or submit a bug report through the following website:
 *    http://www.sf.net/projects/lksctp
 *
 * Written or modified by:
 *    La Monte H.P. Yarroll <piggy@acm.org>
 *    Karl Knutson          <karl@athena.chicago.il.us>
 *    Perry Melange         <pmelange@null.cc.uic.edu>
 *    Xingang Guo           <xingang.guo@intel.com>
 *    Hui Huang             <hui.huang@nokia.com>
 *    Sridhar Samudrala     <sri@us.ibm.com>
 *    Jon Grimm             <jgrimm@us.ibm.com>
 *
 * Any bugs reported given to us we will try to fix... any fixes shared will
 * be incorporated into the next SCTP release.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/list.h>   /* For struct list_head */
#include <linux/socket.h>
#include <linux/ip.h>
#include <linux/slab.h>
#include <net/sock.h>	  /* For skb_set_owner_w */

#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>
/* Declare internal functions here.  */
static int sctp_acked(struct sctp_sackhdr *sack, __u32 tsn);
static void sctp_check_transmitted(struct sctp_outq *q,
				   struct list_head *transmitted_queue,
				   struct sctp_transport *transport,
				   struct sctp_sackhdr *sack,
				   __u32 *highest_new_tsn);

static void sctp_mark_missing(struct sctp_outq *q,
			      struct list_head *transmitted_queue,
			      struct sctp_transport *transport,
			      __u32 highest_new_tsn,
			      int count_of_newacks);

static void sctp_generate_fwdtsn(struct sctp_outq *q, __u32 sack_ctsn);

static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout);
/* Add data to the front of the queue. */
static inline void sctp_outq_head_data(struct sctp_outq *q,
					struct sctp_chunk *ch)
{
	list_add(&ch->list, &q->out_chunk_list);
	q->out_qlen += ch->skb->len;
}

/* Take data from the front of the queue. */
static inline struct sctp_chunk *sctp_outq_dequeue_data(struct sctp_outq *q)
{
	struct sctp_chunk *ch = NULL;

	if (!list_empty(&q->out_chunk_list)) {
		struct list_head *entry = q->out_chunk_list.next;

		ch = list_entry(entry, struct sctp_chunk, list);
		list_del_init(entry);
		q->out_qlen -= ch->skb->len;
	}
	return ch;
}

/* Add data chunk to the end of the queue. */
static inline void sctp_outq_tail_data(struct sctp_outq *q,
				       struct sctp_chunk *ch)
{
	list_add_tail(&ch->list, &q->out_chunk_list);
	q->out_qlen += ch->skb->len;
}
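/* Note: q->out_qlen tracks queued payload in bytes (the sum of the skb
 * lengths), not a count of chunks; the three helpers above keep it in
 * sync on every head/tail add and dequeue.
 */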
/*
 * SFR-CACC algorithm:
 * D) If count_of_newacks is greater than or equal to 2
 * and t was not sent to the current primary then the
 * sender MUST NOT increment missing report count for t.
 */
static inline int sctp_cacc_skip_3_1_d(struct sctp_transport *primary,
				       struct sctp_transport *transport,
				       int count_of_newacks)
{
	if (count_of_newacks >= 2 && transport != primary)
		return 1;
	return 0;
}

/*
 * SFR-CACC algorithm:
 * F) If count_of_newacks is less than 2, let d be the
 * destination to which t was sent. If cacc_saw_newack
 * is 0 for destination d, then the sender MUST NOT
 * increment missing report count for t.
 */
static inline int sctp_cacc_skip_3_1_f(struct sctp_transport *transport,
				       int count_of_newacks)
{
	if (count_of_newacks < 2 &&
	    (transport && !transport->cacc.cacc_saw_newack))
		return 1;
	return 0;
}

/*
 * SFR-CACC algorithm:
 * 3.1) If CYCLING_CHANGEOVER is 0, the sender SHOULD
 * execute steps C, D, F.
 *
 * C has been implemented in sctp_outq_sack
 */
static inline int sctp_cacc_skip_3_1(struct sctp_transport *primary,
				     struct sctp_transport *transport,
				     int count_of_newacks)
{
	if (!primary->cacc.cycling_changeover) {
		if (sctp_cacc_skip_3_1_d(primary, transport, count_of_newacks))
			return 1;
		if (sctp_cacc_skip_3_1_f(transport, count_of_newacks))
			return 1;
		return 0;
	}
	return 0;
}

/*
 * SFR-CACC algorithm:
 * 3.2) Else if CYCLING_CHANGEOVER is 1, and t is less
 * than next_tsn_at_change of the current primary, then
 * the sender MUST NOT increment missing report count
 * for t.
 */
static inline int sctp_cacc_skip_3_2(struct sctp_transport *primary, __u32 tsn)
{
	if (primary->cacc.cycling_changeover &&
	    TSN_lt(tsn, primary->cacc.next_tsn_at_change))
		return 1;
	return 0;
}

/*
 * SFR-CACC algorithm:
 * 3) If the missing report count for TSN t is to be
 * incremented according to [RFC2960] and
 * [SCTP_STEWART-2002], and CHANGEOVER_ACTIVE is set,
 * then the sender MUST further execute steps 3.1 and
 * 3.2 to determine if the missing report count for
 * TSN t SHOULD NOT be incremented.
 *
 * 3.3) If 3.1 and 3.2 do not dictate that the missing
 * report count for t should not be incremented, then
 * the sender SHOULD increment missing report count for
 * t (according to [RFC2960] and [SCTP_STEWART_2002]).
 */
static inline int sctp_cacc_skip(struct sctp_transport *primary,
				 struct sctp_transport *transport,
				 int count_of_newacks,
				 __u32 tsn)
{
	if (primary->cacc.changeover_active &&
	    (sctp_cacc_skip_3_1(primary, transport, count_of_newacks) ||
	     sctp_cacc_skip_3_2(primary, tsn)))
		return 1;
	return 0;
}
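/* Putting the pieces together: sctp_cacc_skip() is the single entry point
 * used by sctp_mark_missing() below.  With CHANGEOVER_ACTIVE set, the
 * missing-report increment for TSN t is skipped when either
 *   - CYCLING_CHANGEOVER is 0 and step D (>= 2 new acks, t not sent on the
 *     primary) or step F (< 2 new acks, no new ack seen on t's destination)
 *     applies, or
 *   - CYCLING_CHANGEOVER is 1 and t predates next_tsn_at_change (step 3.2).
 * With CHANGEOVER_ACTIVE clear, nothing is skipped and plain RFC 2960
 * missing-report counting applies.
 */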
/* Initialize an existing sctp_outq.  This does the boring stuff.
 * You still need to define handlers if you really want to DO
 * something with this structure...
 */
void sctp_outq_init(struct sctp_association *asoc, struct sctp_outq *q)
{
	q->asoc = asoc;
	INIT_LIST_HEAD(&q->out_chunk_list);
	INIT_LIST_HEAD(&q->control_chunk_list);
	INIT_LIST_HEAD(&q->retransmit);
	INIT_LIST_HEAD(&q->sacked);
	INIT_LIST_HEAD(&q->abandoned);

	q->fast_rtx = 0;
	q->outstanding_bytes = 0;
	q->empty = 1;
	q->cork  = 0;

	q->malloced = 0;
	q->out_qlen = 0;
}
/* Free the outqueue structure and any related pending chunks.
 */
void sctp_outq_teardown(struct sctp_outq *q)
{
	struct sctp_transport *transport;
	struct list_head *lchunk, *temp;
	struct sctp_chunk *chunk, *tmp;

	/* Throw away unacknowledged chunks. */
	list_for_each_entry(transport, &q->asoc->peer.transport_addr_list,
			transports) {
		while ((lchunk = sctp_list_dequeue(&transport->transmitted)) != NULL) {
			chunk = list_entry(lchunk, struct sctp_chunk,
					   transmitted_list);
			/* Mark as part of a failed message. */
			sctp_chunk_fail(chunk, q->error);
			sctp_chunk_free(chunk);
		}
	}

	/* Throw away chunks that have been gap ACKed.  */
	list_for_each_safe(lchunk, temp, &q->sacked) {
		list_del_init(lchunk);
		chunk = list_entry(lchunk, struct sctp_chunk,
				   transmitted_list);
		sctp_chunk_fail(chunk, q->error);
		sctp_chunk_free(chunk);
	}

	/* Throw away any chunks in the retransmit queue. */
	list_for_each_safe(lchunk, temp, &q->retransmit) {
		list_del_init(lchunk);
		chunk = list_entry(lchunk, struct sctp_chunk,
				   transmitted_list);
		sctp_chunk_fail(chunk, q->error);
		sctp_chunk_free(chunk);
	}

	/* Throw away any chunks that are in the abandoned queue. */
	list_for_each_safe(lchunk, temp, &q->abandoned) {
		list_del_init(lchunk);
		chunk = list_entry(lchunk, struct sctp_chunk,
				   transmitted_list);
		sctp_chunk_fail(chunk, q->error);
		sctp_chunk_free(chunk);
	}

	/* Throw away any leftover data chunks. */
	while ((chunk = sctp_outq_dequeue_data(q)) != NULL) {

		/* Mark as send failure. */
		sctp_chunk_fail(chunk, q->error);
		sctp_chunk_free(chunk);
	}

	q->error = 0;

	/* Throw away any leftover control chunks. */
	list_for_each_entry_safe(chunk, tmp, &q->control_chunk_list, list) {
		list_del_init(&chunk->list);
		sctp_chunk_free(chunk);
	}
}
/* Free the outqueue structure and any related pending chunks.  */
void sctp_outq_free(struct sctp_outq *q)
{
	/* Throw away leftover chunks. */
	sctp_outq_teardown(q);

	/* If we were kmalloc()'d, free the memory.  */
	if (q->malloced)
		kfree(q);
}
/* Put a new chunk in an sctp_outq.  */
int sctp_outq_tail(struct sctp_outq *q, struct sctp_chunk *chunk)
{
	int error = 0;

	SCTP_DEBUG_PRINTK("sctp_outq_tail(%p, %p[%s])\n",
			  q, chunk, chunk && chunk->chunk_hdr ?
			  sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type))
			  : "Illegal Chunk");

	/* If it is data, queue it up, otherwise, send it
	 * immediately.
	 */
	if (sctp_chunk_is_data(chunk)) {
		/* Is it OK to queue data chunks?  */
		/* From 9. Termination of Association
		 *
		 * When either endpoint performs a shutdown, the
		 * association on each peer will stop accepting new
		 * data from its user and only deliver data in queue
		 * at the time of sending or receiving the SHUTDOWN
		 * chunk.
		 */
		switch (q->asoc->state) {
		case SCTP_STATE_CLOSED:
		case SCTP_STATE_SHUTDOWN_PENDING:
		case SCTP_STATE_SHUTDOWN_SENT:
		case SCTP_STATE_SHUTDOWN_RECEIVED:
		case SCTP_STATE_SHUTDOWN_ACK_SENT:
			/* Cannot send after transport endpoint shutdown */
			error = -ESHUTDOWN;
			break;

		default:
			SCTP_DEBUG_PRINTK("outqueueing (%p, %p[%s])\n",
			  q, chunk, chunk && chunk->chunk_hdr ?
			  sctp_cname(SCTP_ST_CHUNK(chunk->chunk_hdr->type))
			  : "Illegal Chunk");

			sctp_outq_tail_data(q, chunk);
			if (chunk->chunk_hdr->flags & SCTP_DATA_UNORDERED)
				SCTP_INC_STATS(SCTP_MIB_OUTUNORDERCHUNKS);
			else
				SCTP_INC_STATS(SCTP_MIB_OUTORDERCHUNKS);
			q->empty = 0;
			break;
		}
	} else {
		list_add_tail(&chunk->list, &q->control_chunk_list);
		SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS);
	}

	if (error < 0)
		return error;

	if (!q->cork)
		error = sctp_outq_flush(q, 0);

	return error;
}
/* Insert a chunk into the sorted list based on the TSNs.  The retransmit list
 * and the abandoned list are in ascending order.
 */
static void sctp_insert_list(struct list_head *head, struct list_head *new)
{
	struct list_head *pos;
	struct sctp_chunk *nchunk, *lchunk;
	__u32 ntsn, ltsn;
	int done = 0;

	nchunk = list_entry(new, struct sctp_chunk, transmitted_list);
	ntsn = ntohl(nchunk->subh.data_hdr->tsn);

	list_for_each(pos, head) {
		lchunk = list_entry(pos, struct sctp_chunk, transmitted_list);
		ltsn = ntohl(lchunk->subh.data_hdr->tsn);
		if (TSN_lt(ntsn, ltsn)) {
			list_add(new, pos->prev);
			done = 1;
			break;
		}
	}
	if (!done)
		list_add_tail(new, head);
}
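/* For example, inserting TSN 5 into a list holding TSNs [3, 7, 9] stops at
 * the first entry with a larger TSN (7) and links the new chunk just before
 * it, yielding [3, 5, 7, 9]; a TSN larger than all entries falls through to
 * list_add_tail().  Since TSN_lt() compares in serial-number arithmetic,
 * the ordering stays correct across 32-bit TSN wraparound.
 */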
/* Mark all the eligible packets on a transport for retransmission.  */
void sctp_retransmit_mark(struct sctp_outq *q,
			  struct sctp_transport *transport,
			  __u8 reason)
{
	struct list_head *lchunk, *ltemp;
	struct sctp_chunk *chunk;

	/* Walk through the specified transmitted queue.  */
	list_for_each_safe(lchunk, ltemp, &transport->transmitted) {
		chunk = list_entry(lchunk, struct sctp_chunk,
				   transmitted_list);

		/* If the chunk is abandoned, move it to abandoned list. */
		if (sctp_chunk_abandoned(chunk)) {
			list_del_init(lchunk);
			sctp_insert_list(&q->abandoned, lchunk);

			/* If this chunk has not been previously acked,
			 * stop considering it 'outstanding'.  Our peer
			 * will most likely never see it since it will
			 * not be retransmitted
			 */
			if (!chunk->tsn_gap_acked) {
				if (chunk->transport)
					chunk->transport->flight_size -=
							sctp_data_size(chunk);
				q->outstanding_bytes -= sctp_data_size(chunk);
				q->asoc->peer.rwnd += (sctp_data_size(chunk) +
							sizeof(struct sk_buff));
			}
			continue;
		}

		/* If we are doing retransmission due to a timeout or pmtu
		 * discovery, only the chunks that are not yet acked should
		 * be added to the retransmit queue.
		 */
		if ((reason == SCTP_RTXR_FAST_RTX  &&
			    (chunk->fast_retransmit == SCTP_NEED_FRTX)) ||
		    (reason != SCTP_RTXR_FAST_RTX  && !chunk->tsn_gap_acked)) {
			/* RFC 2960 6.2.1 Processing a Received SACK
			 *
			 * C) Any time a DATA chunk is marked for
			 * retransmission (via either T3-rtx timer expiration
			 * (Section 6.3.3) or via fast retransmit
			 * (Section 7.2.4)), add the data size of those
			 * chunks to the rwnd.
			 */
			q->asoc->peer.rwnd += (sctp_data_size(chunk) +
						sizeof(struct sk_buff));
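			/* The sizeof(struct sk_buff) term added above appears
			 * to mirror the per-chunk overhead estimate charged
			 * against the peer's rwnd when the chunk was first
			 * sent, so marking for retransmission credits back
			 * what transmission debited (inferred from the
			 * matching arithmetic here and in the abandoned-chunk
			 * path above).
			 */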
			q->outstanding_bytes -= sctp_data_size(chunk);
			if (chunk->transport)
				transport->flight_size -= sctp_data_size(chunk);

			/* sctpimpguide-05 Section 2.8.2
			 * M5) If a T3-rtx timer expires, the
			 * 'TSN.Missing.Report' of all affected TSNs is set
			 * to 0.
			 */
			chunk->tsn_missing_report = 0;

			/* If a chunk that is being used for RTT measurement
			 * has to be retransmitted, we cannot use this chunk
			 * anymore for RTT measurements.  Reset rto_pending so
			 * that a new RTT measurement is started when a new
			 * data chunk is sent.
			 */
			if (chunk->rtt_in_progress) {
				chunk->rtt_in_progress = 0;
				transport->rto_pending = 0;
			}

			/* Move the chunk to the retransmit queue.  The chunks
			 * on the retransmit queue are always kept in order.
			 */
			list_del_init(lchunk);
			sctp_insert_list(&q->retransmit, lchunk);
		}
	}

	SCTP_DEBUG_PRINTK("%s: transport: %p, reason: %d, "
			  "cwnd: %d, ssthresh: %d, flight_size: %d, "
			  "pba: %d\n", __func__,
			  transport, reason,
			  transport->cwnd, transport->ssthresh,
			  transport->flight_size,
			  transport->partial_bytes_acked);
}
/* Mark all the eligible packets on a transport for retransmission and force
 * one packet out.
 */
void sctp_retransmit(struct sctp_outq *q, struct sctp_transport *transport,
		     sctp_retransmit_reason_t reason)
{
	int error = 0;

	switch (reason) {
	case SCTP_RTXR_T3_RTX:
		SCTP_INC_STATS(SCTP_MIB_T3_RETRANSMITS);
		sctp_transport_lower_cwnd(transport, SCTP_LOWER_CWND_T3_RTX);
		/* Update the retran path if the T3-rtx timer has expired for
		 * the current retran path.
		 */
		if (transport == transport->asoc->peer.retran_path)
			sctp_assoc_update_retran_path(transport->asoc);
		transport->asoc->rtx_data_chunks +=
			transport->asoc->unack_data;
		break;
	case SCTP_RTXR_FAST_RTX:
		SCTP_INC_STATS(SCTP_MIB_FAST_RETRANSMITS);
		sctp_transport_lower_cwnd(transport, SCTP_LOWER_CWND_FAST_RTX);
		q->fast_rtx = 1;
		break;
	case SCTP_RTXR_PMTUD:
		SCTP_INC_STATS(SCTP_MIB_PMTUD_RETRANSMITS);
		break;
	case SCTP_RTXR_T1_RTX:
		SCTP_INC_STATS(SCTP_MIB_T1_RETRANSMITS);
		transport->asoc->init_retries++;
		break;
	default:
		BUG();
	}

	sctp_retransmit_mark(q, transport, reason);

	/* PR-SCTP A5) Any time the T3-rtx timer expires, on any destination,
	 * the sender SHOULD try to advance the "Advanced.Peer.Ack.Point" by
	 * following the procedures outlined in C1 - C5.
	 */
	if (reason == SCTP_RTXR_T3_RTX)
		sctp_generate_fwdtsn(q, q->asoc->ctsn_ack_point);

	/* Flush the queues only on timeout, since fast_rtx is only
	 * triggered during sack processing and the queue
	 * will be flushed at the end.
	 */
	if (reason != SCTP_RTXR_FAST_RTX)
		error = sctp_outq_flush(q, /* rtx_timeout */ 1);

	if (error)
		q->asoc->base.sk->sk_err = -error;
}
/*
 * Transmit DATA chunks on the retransmit queue.  Upon return from
 * sctp_outq_flush_rtx() the packet 'pkt' may contain chunks which
 * need to be transmitted by the caller.
 * We assume that pkt->transport has already been set.
 *
 * The return value is a normal kernel error return value.
 */
static int sctp_outq_flush_rtx(struct sctp_outq *q, struct sctp_packet *pkt,
			       int rtx_timeout, int *start_timer)
{
	struct list_head *lqueue;
	struct sctp_transport *transport = pkt->transport;
	sctp_xmit_t status;
	struct sctp_chunk *chunk, *chunk1;
	int fast_rtx;
	int error = 0;
	int timer = 0;
	int done = 0;

	lqueue = &q->retransmit;
	fast_rtx = q->fast_rtx;

	/* This loop handles time-out retransmissions, fast retransmissions,
	 * and retransmissions due to opening of window.
	 *
	 * RFC 2960 6.3.3 Handle T3-rtx Expiration
	 *
	 * E3) Determine how many of the earliest (i.e., lowest TSN)
	 * outstanding DATA chunks for the address for which the
	 * T3-rtx has expired will fit into a single packet, subject
	 * to the MTU constraint for the path corresponding to the
	 * destination transport address to which the retransmission
	 * is being sent (this may be different from the address for
	 * which the timer expires [see Section 6.4]).  Call this value
	 * K. Bundle and retransmit those K DATA chunks in a single
	 * packet to the destination endpoint.
	 *
	 * [Just to be painfully clear, if we are retransmitting
	 * because a timeout just happened, we should send only ONE
	 * packet of retransmitted data.]
	 *
	 * For fast retransmissions we also send only ONE packet.  However,
	 * if we are just flushing the queue due to open window, we'll
	 * try to send as much as possible.
	 */
	list_for_each_entry_safe(chunk, chunk1, lqueue, transmitted_list) {
		/* If the chunk is abandoned, move it to abandoned list. */
		if (sctp_chunk_abandoned(chunk)) {
			list_del_init(&chunk->transmitted_list);
			sctp_insert_list(&q->abandoned,
					 &chunk->transmitted_list);
			continue;
		}

		/* Make sure that Gap Acked TSNs are not retransmitted.  A
		 * simple approach is just to move such TSNs out of the
		 * way and into a 'transmitted' queue and skip to the
		 * next chunk.
		 */
		if (chunk->tsn_gap_acked) {
			list_del(&chunk->transmitted_list);
			list_add_tail(&chunk->transmitted_list,
					&transport->transmitted);
			continue;
		}

		/* If we are doing fast retransmit, ignore chunks that are
		 * not marked for fast retransmit.
		 */
		if (fast_rtx && !chunk->fast_retransmit)
			continue;

redo:
		/* Attempt to append this chunk to the packet. */
		status = sctp_packet_append_chunk(pkt, chunk);

		switch (status) {
		case SCTP_XMIT_PMTU_FULL:
			if (!pkt->has_data && !pkt->has_cookie_echo) {
				/* If this packet did not contain DATA then
				 * retransmission did not happen, so do it
				 * again.  We'll ignore the error here since
				 * control chunks are already freed so there
				 * is nothing we can do.
				 */
				sctp_packet_transmit(pkt);
				goto redo;
			}

			/* Send this packet.  */
			error = sctp_packet_transmit(pkt);

			/* If we are retransmitting, we should only
			 * send a single packet.
			 * Otherwise, try appending this chunk again.
			 */
			if (rtx_timeout || fast_rtx)
				done = 1;
			else
				goto redo;

			/* Bundle next chunk in the next round.  */
			break;

		case SCTP_XMIT_RWND_FULL:
			/* Send this packet. */
			error = sctp_packet_transmit(pkt);

			/* Stop sending DATA as there is no more room
			 * at the receiver.
			 */
			done = 1;
			break;

		case SCTP_XMIT_NAGLE_DELAY:
			/* Send this packet. */
			error = sctp_packet_transmit(pkt);

			/* Stop sending DATA because of nagle delay. */
			done = 1;
			break;

		default:
			/* The append was successful, so add this chunk to
			 * the transmitted list.
			 */
			list_del(&chunk->transmitted_list);
			list_add_tail(&chunk->transmitted_list,
					&transport->transmitted);

			/* Mark the chunk as ineligible for fast retransmit
			 * after it is retransmitted.
			 */
			if (chunk->fast_retransmit == SCTP_NEED_FRTX)
				chunk->fast_retransmit = SCTP_DONT_FRTX;

			q->empty = 0;
			break;
		}

		/* Set the timer if there were no errors */
		if (!error && !timer)
			timer = 1;

		if (done)
			break;
	}

	/* If we are here due to a retransmit timeout or a fast
	 * retransmit and if there are any chunks left in the retransmit
	 * queue that could not fit in the PMTU sized packet, they need
	 * to be marked as ineligible for a subsequent fast retransmit.
	 */
	if (rtx_timeout || fast_rtx) {
		list_for_each_entry(chunk1, lqueue, transmitted_list) {
			if (chunk1->fast_retransmit == SCTP_NEED_FRTX)
				chunk1->fast_retransmit = SCTP_DONT_FRTX;
		}
	}

	*start_timer = timer;

	/* Clear fast retransmit hint */
	if (fast_rtx)
		q->fast_rtx = 0;

	return error;
}
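/* Note on start_timer: the loop above sets it whenever at least one chunk
 * was processed without a transmit error; the caller in sctp_outq_flush()
 * then restarts the T3-rtx timer on the transport when it comes back set.
 */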
/* Uncork the outqueue and flush any chunks that were queued up while it
 * was corked.
 */
int sctp_outq_uncork(struct sctp_outq *q)
{
	int error = 0;
	if (q->cork)
		q->cork = 0;
	error = sctp_outq_flush(q, 0);
	return error;
}
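/* Illustrative usage sketch (not a real call site in this file): with the
 * queue corked, sctp_outq_tail() only queues, and a single uncork lets the
 * flush bundle everything queued in between.
 *
 *	q->cork = 1;
 *	sctp_outq_tail(q, chunk1);	// queued; flush skipped while corked
 *	sctp_outq_tail(q, chunk2);	// queued as well
 *	error = sctp_outq_uncork(q);	// one flush covers both chunks
 */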
/*
 * Try to flush an outqueue.
 *
 * Description: Send everything in q which we legally can, subject to
 * congestion limitations.
 *
 * Note: This function can be called from multiple contexts so appropriate
 * locking concerns must be made.  Today we use the sock lock to protect
 * this function.
 */
static int sctp_outq_flush(struct sctp_outq *q, int rtx_timeout)
{
	struct sctp_packet *packet;
	struct sctp_packet singleton;
	struct sctp_association *asoc = q->asoc;
	__u16 sport = asoc->base.bind_addr.port;
	__u16 dport = asoc->peer.port;
	__u32 vtag = asoc->peer.i.init_tag;
	struct sctp_transport *transport = NULL;
	struct sctp_transport *new_transport;
	struct sctp_chunk *chunk, *tmp;
	sctp_xmit_t status;
	int error = 0;
	int start_timer = 0;
	int one_packet = 0;

	/* These transports have chunks to send. */
	struct list_head transport_list;
	struct list_head *ltransport;

	INIT_LIST_HEAD(&transport_list);
	packet = NULL;

	/*
	 * 6.10 Bundling
	 *   ...
	 *   When bundling control chunks with DATA chunks, an
	 *   endpoint MUST place control chunks first in the outbound
	 *   SCTP packet.  The transmitter MUST transmit DATA chunks
	 *   within a SCTP packet in increasing order of TSN.
	 *   ...
	 */

	list_for_each_entry_safe(chunk, tmp, &q->control_chunk_list, list) {
		list_del_init(&chunk->list);

		/* Pick the right transport to use. */
		new_transport = chunk->transport;

		if (!new_transport) {
			/*
			 * If we have a prior transport pointer, see if
			 * the destination address of the chunk
			 * matches the destination address of the
			 * current transport.  If not a match, then
			 * try to look up the transport with a given
			 * destination address.  We do this because
			 * after processing ASCONFs, we may have new
			 * transports created.
			 */
			if (transport &&
			    sctp_cmp_addr_exact(&chunk->dest,
						&transport->ipaddr))
				new_transport = transport;
			else
				new_transport = sctp_assoc_lookup_paddr(asoc,
								&chunk->dest);

			/* if we still don't have a new transport, then
			 * use the current active path.
			 */
			if (!new_transport)
				new_transport = asoc->peer.active_path;
		} else if ((new_transport->state == SCTP_INACTIVE) ||
			   (new_transport->state == SCTP_UNCONFIRMED)) {
			/* If the chunk is Heartbeat or Heartbeat Ack,
			 * send it to chunk->transport, even if it's
			 * inactive.
			 *
			 * 3.3.6 Heartbeat Acknowledgement:
			 * ...
			 * A HEARTBEAT ACK is always sent to the source IP
			 * address of the IP datagram containing the
			 * HEARTBEAT chunk to which this ack is responding.
			 * ...
			 *
			 * ASCONF_ACKs also must be sent to the source.
			 */
			if (chunk->chunk_hdr->type != SCTP_CID_HEARTBEAT &&
			    chunk->chunk_hdr->type != SCTP_CID_HEARTBEAT_ACK &&
			    chunk->chunk_hdr->type != SCTP_CID_ASCONF_ACK)
				new_transport = asoc->peer.active_path;
		}

		/* Are we switching transports?
		 * Take care of transport locks.
		 */
		if (new_transport != transport) {
			transport = new_transport;
			if (list_empty(&transport->send_ready)) {
				list_add_tail(&transport->send_ready,
					      &transport_list);
			}
			packet = &transport->packet;
			sctp_packet_config(packet, vtag,
					   asoc->peer.ecn_capable);
		}

		switch (chunk->chunk_hdr->type) {
		/*
		 * 6.10 Bundling
		 *   ...
		 *   An endpoint MUST NOT bundle INIT, INIT ACK or SHUTDOWN
		 *   COMPLETE with any other chunks.  [Send them immediately.]
		 */
		case SCTP_CID_INIT:
		case SCTP_CID_INIT_ACK:
		case SCTP_CID_SHUTDOWN_COMPLETE:
			sctp_packet_init(&singleton, transport, sport, dport);
			sctp_packet_config(&singleton, vtag, 0);
			sctp_packet_append_chunk(&singleton, chunk);
			error = sctp_packet_transmit(&singleton);
			if (error < 0)
				return error;
			break;

		case SCTP_CID_ABORT:
			if (sctp_test_T_bit(chunk)) {
				packet->vtag = asoc->c.my_vtag;
			}
		/* The following chunks are "response" chunks, i.e.
		 * they are generated in response to something we
		 * received.  If we are sending these, then we can
		 * send only 1 packet containing these chunks.
		 */
		case SCTP_CID_HEARTBEAT_ACK:
		case SCTP_CID_SHUTDOWN_ACK:
		case SCTP_CID_COOKIE_ACK:
		case SCTP_CID_COOKIE_ECHO:
		case SCTP_CID_ERROR:
		case SCTP_CID_ECN_CWR:
		case SCTP_CID_ASCONF_ACK:
			one_packet = 1;
			/* Fall through */

		case SCTP_CID_SACK:
		case SCTP_CID_HEARTBEAT:
		case SCTP_CID_SHUTDOWN:
		case SCTP_CID_ECN_ECNE:
		case SCTP_CID_ASCONF:
		case SCTP_CID_FWD_TSN:
			status = sctp_packet_transmit_chunk(packet, chunk,
							    one_packet);
			if (status != SCTP_XMIT_OK) {
				/* put the chunk back */
				list_add(&chunk->list, &q->control_chunk_list);
			} else if (chunk->chunk_hdr->type == SCTP_CID_FWD_TSN) {
				/* PR-SCTP C5) If a FORWARD TSN is sent, the
				 * sender MUST assure that at least one T3-rtx
				 * timer is running.
				 */
				sctp_transport_reset_timers(transport);
			}
			break;

		default:
			/* We built a chunk with an illegal type! */
			BUG();
		}
	}
	/* Is it OK to send data chunks?  */
	switch (asoc->state) {
	case SCTP_STATE_COOKIE_ECHOED:
		/* Only allow bundling when this packet has a COOKIE-ECHO
		 * chunk.
		 */
		if (!packet || !packet->has_cookie_echo)
			break;

		/* fallthru */
	case SCTP_STATE_ESTABLISHED:
	case SCTP_STATE_SHUTDOWN_PENDING:
	case SCTP_STATE_SHUTDOWN_RECEIVED:
		/*
		 * RFC 2960 6.1  Transmission of DATA Chunks
		 *
		 * C) When the time comes for the sender to transmit,
		 * before sending new DATA chunks, the sender MUST
		 * first transmit any outstanding DATA chunks which
		 * are marked for retransmission (limited by the
		 * current cwnd).
		 */
		if (!list_empty(&q->retransmit)) {
			if (transport == asoc->peer.retran_path)
				goto retran;

			/* Switch transports & prepare the packet.  */

			transport = asoc->peer.retran_path;

			if (list_empty(&transport->send_ready)) {
				list_add_tail(&transport->send_ready,
					      &transport_list);
			}

			packet = &transport->packet;
			sctp_packet_config(packet, vtag,
					   asoc->peer.ecn_capable);
		retran:
			error = sctp_outq_flush_rtx(q, packet,
						    rtx_timeout, &start_timer);

			if (start_timer)
				sctp_transport_reset_timers(transport);

			/* This can happen on COOKIE-ECHO resend.  Only
			 * one chunk can get bundled with a COOKIE-ECHO.
			 */
			if (packet->has_cookie_echo)
				goto sctp_flush_out;

			/* Don't send new data if there is still data
			 * waiting to retransmit.
			 */
			if (!list_empty(&q->retransmit))
				goto sctp_flush_out;
		}

		/* Apply Max.Burst limitation to the current transport in
		 * case it will be used for new data.  We are going to
		 * reset it before we return, but we want to apply the limit
		 * to the currently queued data.
		 */
		if (transport)
			sctp_transport_burst_limited(transport);
		/* Finally, transmit new packets.  */
		while ((chunk = sctp_outq_dequeue_data(q)) != NULL) {
			/* RFC 2960 6.5 Every DATA chunk MUST carry a valid
			 * stream identifier.
			 */
			if (chunk->sinfo.sinfo_stream >=
			    asoc->c.sinit_num_ostreams) {

				/* Mark as failed send. */
				sctp_chunk_fail(chunk, SCTP_ERROR_INV_STRM);
				sctp_chunk_free(chunk);
				continue;
			}

			/* Has this chunk expired? */
			if (sctp_chunk_abandoned(chunk)) {
				sctp_chunk_fail(chunk, 0);
				sctp_chunk_free(chunk);
				continue;
			}

			/* If there is a specified transport, use it.
			 * Otherwise, we want to use the active path.
			 */
			new_transport = chunk->transport;
			if (!new_transport ||
			    ((new_transport->state == SCTP_INACTIVE) ||
			     (new_transport->state == SCTP_UNCONFIRMED)))
				new_transport = asoc->peer.active_path;

			/* Change packets if necessary.  */
			if (new_transport != transport) {
				transport = new_transport;

				/* Schedule to have this transport's
				 * packet flushed.
				 */
				if (list_empty(&transport->send_ready)) {
					list_add_tail(&transport->send_ready,
						      &transport_list);
				}

				packet = &transport->packet;
				sctp_packet_config(packet, vtag,
						   asoc->peer.ecn_capable);
				/* We've switched transports, so apply the
				 * Burst limit to the new transport.
				 */
				sctp_transport_burst_limited(transport);
			}

			SCTP_DEBUG_PRINTK("sctp_outq_flush(%p, %p[%s]), ",
					  q, chunk,
					  chunk && chunk->chunk_hdr ?
					  sctp_cname(SCTP_ST_CHUNK(
						  chunk->chunk_hdr->type))
					  : "Illegal Chunk");

			SCTP_DEBUG_PRINTK("TX TSN 0x%x skb->head "
					"%p skb->users %d.\n",
					ntohl(chunk->subh.data_hdr->tsn),
					chunk->skb ? chunk->skb->head : NULL,
					chunk->skb ?
					atomic_read(&chunk->skb->users) : -1);

			/* Add the chunk to the packet.  */
			status = sctp_packet_transmit_chunk(packet, chunk, 0);

			switch (status) {
			case SCTP_XMIT_PMTU_FULL:
			case SCTP_XMIT_RWND_FULL:
			case SCTP_XMIT_NAGLE_DELAY:
				/* We could not append this chunk, so put
				 * the chunk back on the output queue.
				 */
				SCTP_DEBUG_PRINTK("sctp_outq_flush: could "
					"not transmit TSN: 0x%x, status: %d\n",
					ntohl(chunk->subh.data_hdr->tsn),
					status);
				sctp_outq_head_data(q, chunk);
				goto sctp_flush_out;
				break;

			case SCTP_XMIT_OK:
				/* If the sender is in the SHUTDOWN-PENDING
				 * state, the sender MAY set the I-bit in the
				 * DATA chunk header.
				 */
				if (asoc->state == SCTP_STATE_SHUTDOWN_PENDING)
					chunk->chunk_hdr->flags |= SCTP_DATA_SACK_IMM;

				break;

			default:
				BUG();
			}

			/* BUG: We assume that the sctp_packet_transmit()
			 * call below will succeed all the time and add the
			 * chunk to the transmitted list and restart the
			 * timers.
			 * It is possible that the call can fail under OOM
			 * conditions.
			 *
			 * Is this really a problem?  Won't this behave
			 * like a lost TSN?
			 */
			list_add_tail(&chunk->transmitted_list,
				      &transport->transmitted);

			sctp_transport_reset_timers(transport);

			q->empty = 0;

			/* Only let one DATA chunk get bundled with a
			 * COOKIE-ECHO chunk.
			 */
			if (packet->has_cookie_echo)
				goto sctp_flush_out;
		}
		break;

	default:
		/* Do nothing. */
		break;
	}

sctp_flush_out:

	/* Before returning, examine all the transports touched in
	 * this call.  Right now, we bluntly force clear all the
	 * transports.  Things might change after we implement Nagle.
	 * But such an examination is still required.
	 *
	 * --xguo
	 */
	while ((ltransport = sctp_list_dequeue(&transport_list)) != NULL) {
		struct sctp_transport *t = list_entry(ltransport,
						      struct sctp_transport,
						      send_ready);
		packet = &t->packet;
		if (!sctp_packet_empty(packet))
			error = sctp_packet_transmit(packet);

		/* Clear the burst limited state, if any */
		sctp_transport_burst_reset(t);
	}

	return error;
}
/* Update unack_data based on the incoming SACK chunk */
static void sctp_sack_update_unack_data(struct sctp_association *assoc,
					struct sctp_sackhdr *sack)
{
	sctp_sack_variable_t *frags;
	__u16 unack_data;
	int i;

	unack_data = assoc->next_tsn - assoc->ctsn_ack_point - 1;

	frags = sack->variable;
	for (i = 0; i < ntohs(sack->num_gap_ack_blocks); i++) {
		unack_data -= ((ntohs(frags[i].gab.end) -
				ntohs(frags[i].gab.start) + 1));
	}

	assoc->unack_data = unack_data;
}
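/* Worked example: with next_tsn = 111 and ctsn_ack_point = 100, TSNs
 * 101..110 are unaccounted for, so unack_data starts at 111 - 100 - 1 = 10.
 * A gap ack block of start = 3, end = 5 (offsets relative to the cumulative
 * ack) covers the three TSNs 103..105, so end - start + 1 = 3 is subtracted,
 * leaving unack_data = 7.
 */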
/* This is where we REALLY process a SACK.
 *
 * Process the SACK against the outqueue.  Mostly, this just frees
 * things off the transmitted queue.
 */
int sctp_outq_sack(struct sctp_outq *q, struct sctp_sackhdr *sack)
{
	struct sctp_association *asoc = q->asoc;
	struct sctp_transport *transport;
	struct sctp_chunk *tchunk = NULL;
	struct list_head *lchunk, *transport_list, *temp;
	sctp_sack_variable_t *frags = sack->variable;
	__u32 sack_ctsn, ctsn, tsn;
	__u32 highest_tsn, highest_new_tsn;
	__u32 sack_a_rwnd;
	unsigned outstanding;
	struct sctp_transport *primary = asoc->peer.primary_path;
	int count_of_newacks = 0;
	int gap_ack_blocks;
	u8 accum_moved = 0;

	/* Grab the association's destination address list. */
	transport_list = &asoc->peer.transport_addr_list;

	sack_ctsn = ntohl(sack->cum_tsn_ack);
	gap_ack_blocks = ntohs(sack->num_gap_ack_blocks);
	/*
	 * SFR-CACC algorithm:
	 * On receipt of a SACK the sender SHOULD execute the
	 * following statements.
	 *
	 * 1) If the cumulative ack in the SACK passes next tsn_at_change
	 * on the current primary, the CHANGEOVER_ACTIVE flag SHOULD be
	 * cleared. The CYCLING_CHANGEOVER flag SHOULD also be cleared for
	 * all destinations.
	 * 2) If the SACK contains gap acks and the flag CHANGEOVER_ACTIVE
	 * is set the receiver of the SACK MUST take the following actions:
	 *
	 * A) Initialize the cacc_saw_newack to 0 for all destination
	 * addresses.
	 *
	 * Only bother if changeover_active is set. Otherwise, this is
	 * totally suboptimal to do on every SACK.
	 */
	if (primary->cacc.changeover_active) {
		u8 clear_cycling = 0;

		if (TSN_lte(primary->cacc.next_tsn_at_change, sack_ctsn)) {
			primary->cacc.changeover_active = 0;
			clear_cycling = 1;
		}

		if (clear_cycling || gap_ack_blocks) {
			list_for_each_entry(transport, transport_list,
					transports) {
				if (clear_cycling)
					transport->cacc.cycling_changeover = 0;
				if (gap_ack_blocks)
					transport->cacc.cacc_saw_newack = 0;
			}
		}
	}

	/* Get the highest TSN in the sack. */
	highest_tsn = sack_ctsn;
	if (gap_ack_blocks)
		highest_tsn += ntohs(frags[gap_ack_blocks - 1].gab.end);

	if (TSN_lt(asoc->highest_sacked, highest_tsn))
		asoc->highest_sacked = highest_tsn;

	highest_new_tsn = sack_ctsn;

	/* Run through the retransmit queue.  Credit bytes received
	 * and free those chunks that we can.
	 */
	sctp_check_transmitted(q, &q->retransmit, NULL, sack, &highest_new_tsn);

	/* Run through the transmitted queue.
	 * Credit bytes received and free those chunks which we can.
	 *
	 * This is a MASSIVE candidate for optimization.
	 */
	list_for_each_entry(transport, transport_list, transports) {
		sctp_check_transmitted(q, &transport->transmitted,
				       transport, sack, &highest_new_tsn);
		/*
		 * SFR-CACC algorithm:
		 * C) Let count_of_newacks be the number of
		 * destinations for which cacc_saw_newack is set.
		 */
		if (transport->cacc.cacc_saw_newack)
			count_of_newacks++;
	}

	/* Move the Cumulative TSN Ack Point if appropriate.  */
	if (TSN_lt(asoc->ctsn_ack_point, sack_ctsn)) {
		asoc->ctsn_ack_point = sack_ctsn;
		accum_moved = 1;
	}

	if (gap_ack_blocks) {

		if (asoc->fast_recovery && accum_moved)
			highest_new_tsn = highest_tsn;

		list_for_each_entry(transport, transport_list, transports)
			sctp_mark_missing(q, &transport->transmitted, transport,
					  highest_new_tsn, count_of_newacks);
	}

	/* Update unack_data field in the assoc. */
	sctp_sack_update_unack_data(asoc, sack);

	ctsn = asoc->ctsn_ack_point;

	/* Throw away stuff rotting on the sack queue.  */
	list_for_each_safe(lchunk, temp, &q->sacked) {
		tchunk = list_entry(lchunk, struct sctp_chunk,
				    transmitted_list);
		tsn = ntohl(tchunk->subh.data_hdr->tsn);
		if (TSN_lte(tsn, ctsn)) {
			list_del_init(&tchunk->transmitted_list);
			sctp_chunk_free(tchunk);
		}
	}

	/* ii) Set rwnd equal to the newly received a_rwnd minus the
	 *     number of bytes still outstanding after processing the
	 *     Cumulative TSN Ack and the Gap Ack Blocks.
	 */

	sack_a_rwnd = ntohl(sack->a_rwnd);
	outstanding = q->outstanding_bytes;

	if (outstanding < sack_a_rwnd)
		sack_a_rwnd -= outstanding;
	else
		sack_a_rwnd = 0;

	asoc->peer.rwnd = sack_a_rwnd;
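	/* E.g. if the peer advertises a_rwnd = 5000 with 1200 bytes still
	 * outstanding, the usable window becomes 3800; if outstanding data
	 * meets or exceeds a_rwnd, the window is clamped to 0 rather than
	 * allowed to underflow.
	 */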
	sctp_generate_fwdtsn(q, sack_ctsn);

	SCTP_DEBUG_PRINTK("%s: sack Cumulative TSN Ack is 0x%x.\n",
			  __func__, sack_ctsn);
	SCTP_DEBUG_PRINTK("%s: Cumulative TSN Ack of association, "
			  "%p is 0x%x. Adv peer ack point: 0x%x\n",
			  __func__, asoc, ctsn, asoc->adv_peer_ack_point);

	/* See if all chunks are acked.
	 * Make sure the empty queue handler will get run later.
	 */
	q->empty = (list_empty(&q->out_chunk_list) &&
		    list_empty(&q->retransmit));
	if (!q->empty)
		goto finish;

	list_for_each_entry(transport, transport_list, transports) {
		q->empty = q->empty && list_empty(&transport->transmitted);
		if (!q->empty)
			goto finish;
	}

	SCTP_DEBUG_PRINTK("sack queue is empty.\n");
finish:
	return q->empty;
}

/* Is the outqueue empty?  */
int sctp_outq_is_empty(const struct sctp_outq *q)
{
	return q->empty;
}
/********************************************************************
 * 2nd Level Abstractions
 ********************************************************************/

/* Go through a transport's transmitted list or the association's retransmit
 * list and move chunks that are acked by the Cumulative TSN Ack to q->sacked.
 * The retransmit list will not have an associated transport.
 *
 * I added coherent debug information output.	--xguo
 *
 * Instead of printing 'sacked' or 'kept' for each TSN on the
 * transmitted_queue, we print a range: SACKED: TSN1-TSN2, TSN3, TSN4-TSN5.
 * KEPT TSN6-TSN7, etc.
 */
static void sctp_check_transmitted(struct sctp_outq *q,
				   struct list_head *transmitted_queue,
				   struct sctp_transport *transport,
				   struct sctp_sackhdr *sack,
				   __u32 *highest_new_tsn_in_sack)
{
	struct list_head *lchunk;
	struct sctp_chunk *tchunk;
	struct list_head tlist;
	__u32 tsn;
	__u32 sack_ctsn;
	__u32 rtt;
	__u8 restart_timer = 0;
	int bytes_acked = 0;
	int migrate_bytes = 0;

	/* These state variables are for coherent debug output. --xguo */

#if SCTP_DEBUG
	__u32 dbg_ack_tsn = 0;	/* An ACKed TSN range starts here... */
	__u32 dbg_last_ack_tsn = 0;  /* ...and finishes here.	     */
	__u32 dbg_kept_tsn = 0;	/* An un-ACKed range starts here...  */
	__u32 dbg_last_kept_tsn = 0; /* ...and finishes here.	     */

	/* 0 : The last TSN was ACKed.
	 * 1 : The last TSN was NOT ACKed (i.e. KEPT).
	 * -1: We need to initialize.
	 */
	int dbg_prt_state = -1;
#endif /* SCTP_DEBUG */

	sack_ctsn = ntohl(sack->cum_tsn_ack);

	INIT_LIST_HEAD(&tlist);
	/* The while loop will skip empty transmitted queues. */
	while (NULL != (lchunk = sctp_list_dequeue(transmitted_queue))) {
		tchunk = list_entry(lchunk, struct sctp_chunk,
				    transmitted_list);

		if (sctp_chunk_abandoned(tchunk)) {
			/* Move the chunk to abandoned list. */
			sctp_insert_list(&q->abandoned, lchunk);

			/* If this chunk has not been acked, stop
			 * considering it as 'outstanding'.
			 */
			if (!tchunk->tsn_gap_acked) {
				if (tchunk->transport)
					tchunk->transport->flight_size -=
							sctp_data_size(tchunk);
				q->outstanding_bytes -= sctp_data_size(tchunk);
			}
			continue;
		}

		tsn = ntohl(tchunk->subh.data_hdr->tsn);
		if (sctp_acked(sack, tsn)) {
			/* If this queue is the retransmit queue, the
			 * retransmit timer has already reclaimed
			 * the outstanding bytes for this chunk, so only
			 * count bytes associated with a transport.
			 */
			if (transport) {
				/* If this chunk is being used for RTT
				 * measurement, calculate the RTT and update
				 * the RTO using this value.
				 *
				 * 6.3.1 C5) Karn's algorithm: RTT measurements
				 * MUST NOT be made using packets that were
				 * retransmitted (and thus for which it is
				 * ambiguous whether the reply was for the
				 * first instance of the packet or a later
				 * instance).
				 */
				if (!tchunk->tsn_gap_acked &&
				    tchunk->rtt_in_progress) {
					tchunk->rtt_in_progress = 0;
					rtt = jiffies - tchunk->sent_at;
					sctp_transport_update_rto(transport,
								  rtt);
				}
			}

			/* If the chunk hasn't been marked as ACKED,
			 * mark it and account bytes_acked if the
			 * chunk had a valid transport (it will not
			 * have a transport if ASCONF had deleted it
			 * while DATA was outstanding).
			 */
			if (!tchunk->tsn_gap_acked) {
				tchunk->tsn_gap_acked = 1;
				*highest_new_tsn_in_sack = tsn;
				bytes_acked += sctp_data_size(tchunk);
				if (!tchunk->transport)
					migrate_bytes += sctp_data_size(tchunk);
			}

			if (TSN_lte(tsn, sack_ctsn)) {
				/* RFC 2960  6.3.2 Retransmission Timer Rules
				 *
				 * R3) Whenever a SACK is received
				 * that acknowledges the DATA chunk
				 * with the earliest outstanding TSN
				 * for that address, restart T3-rtx
				 * timer for that address with its
				 * current RTO.
				 */
				restart_timer = 1;

				if (!tchunk->tsn_gap_acked) {
					/*
					 * SFR-CACC algorithm:
					 * 2) If the SACK contains gap acks
					 * and the flag CHANGEOVER_ACTIVE is
					 * set the receiver of the SACK MUST
					 * take the following action:
					 *
					 * B) For each TSN t being acked that
					 * has not been acked in any SACK so
					 * far, set cacc_saw_newack to 1 for
					 * the destination that the TSN was
					 * sent to.
					 */
					if (transport &&
					    sack->num_gap_ack_blocks &&
					    q->asoc->peer.primary_path->cacc.
					    changeover_active)
						transport->cacc.cacc_saw_newack
							= 1;
				}

				list_add_tail(&tchunk->transmitted_list,
					      &q->sacked);
			} else {
				/* RFC2960 7.2.4, sctpimpguide-05 2.8.2
				 * M2) Each time a SACK arrives reporting
				 * 'Stray DATA chunk(s)' record the highest TSN
				 * reported as newly acknowledged, call this
				 * value 'HighestTSNinSack'. A newly
				 * acknowledged DATA chunk is one not
				 * previously acknowledged in a SACK.
				 *
				 * When the SCTP sender of data receives a SACK
				 * chunk that acknowledges, for the first time,
				 * the receipt of a DATA chunk, all the still
				 * unacknowledged DATA chunks whose TSN is
				 * older than that newly acknowledged DATA
				 * chunk, are qualified as 'Stray DATA chunks'.
				 */
				list_add_tail(lchunk, &tlist);
			}

#if SCTP_DEBUG
			switch (dbg_prt_state) {
			case 0:	/* last TSN was ACKed */
				if (dbg_last_ack_tsn + 1 == tsn) {
					/* This TSN belongs to the
					 * current ACK range.
					 */
					break;
				}

				if (dbg_last_ack_tsn != dbg_ack_tsn) {
					/* Display the end of the
					 * current range.
					 */
					SCTP_DEBUG_PRINTK_CONT("-%08x",
							       dbg_last_ack_tsn);
				}

				/* Start a new range.  */
				SCTP_DEBUG_PRINTK_CONT(",%08x", tsn);
				dbg_ack_tsn = tsn;
				break;

			case 1:	/* The last TSN was NOT ACKed. */
				if (dbg_last_kept_tsn != dbg_kept_tsn) {
					/* Display the end of current range. */
					SCTP_DEBUG_PRINTK_CONT("-%08x",
							       dbg_last_kept_tsn);
				}

				SCTP_DEBUG_PRINTK_CONT("\n");

				/* FALL THROUGH... */
			default:
				/* This is the first-ever TSN we examined.  */
				/* Start a new range of ACK-ed TSNs.  */
				SCTP_DEBUG_PRINTK("ACKed: %08x", tsn);
				dbg_prt_state = 0;
				dbg_ack_tsn = tsn;
			}

			dbg_last_ack_tsn = tsn;
#endif /* SCTP_DEBUG */

		} else {
			if (tchunk->tsn_gap_acked) {
				SCTP_DEBUG_PRINTK("%s: Receiver reneged on "
						  "data TSN: 0x%x\n",
						  __func__,
						  tsn);
				tchunk->tsn_gap_acked = 0;

				if (tchunk->transport)
					bytes_acked -= sctp_data_size(tchunk);

				/* RFC 2960 6.3.2 Retransmission Timer Rules
				 *
				 * R4) Whenever a SACK is received missing a
				 * TSN that was previously acknowledged via a
				 * Gap Ack Block, start T3-rtx for the
				 * destination address to which the DATA
				 * chunk was originally
				 * transmitted if it is not already running.
				 */
				restart_timer = 1;
			}

			list_add_tail(lchunk, &tlist);

#if SCTP_DEBUG
			/* See the above comments on ACK-ed TSNs. */
			switch (dbg_prt_state) {
			case 1:
				if (dbg_last_kept_tsn + 1 == tsn)
					break;

				if (dbg_last_kept_tsn != dbg_kept_tsn)
					SCTP_DEBUG_PRINTK_CONT("-%08x",
							       dbg_last_kept_tsn);

				SCTP_DEBUG_PRINTK_CONT(",%08x", tsn);
				dbg_kept_tsn = tsn;
				break;

			case 0:
				if (dbg_last_ack_tsn != dbg_ack_tsn)
					SCTP_DEBUG_PRINTK_CONT("-%08x",
							       dbg_last_ack_tsn);
				SCTP_DEBUG_PRINTK_CONT("\n");

				/* FALL THROUGH... */
			default:
				SCTP_DEBUG_PRINTK("KEPT: %08x", tsn);
				dbg_prt_state = 1;
				dbg_kept_tsn = tsn;
			}

			dbg_last_kept_tsn = tsn;
#endif /* SCTP_DEBUG */
		}
	}

#if SCTP_DEBUG
	/* Finish off the last range, displaying its ending TSN.  */
	switch (dbg_prt_state) {
	case 0:
		if (dbg_last_ack_tsn != dbg_ack_tsn) {
			SCTP_DEBUG_PRINTK_CONT("-%08x\n", dbg_last_ack_tsn);
		} else {
			SCTP_DEBUG_PRINTK_CONT("\n");
		}
		break;

	case 1:
		if (dbg_last_kept_tsn != dbg_kept_tsn) {
			SCTP_DEBUG_PRINTK_CONT("-%08x\n", dbg_last_kept_tsn);
		} else {
			SCTP_DEBUG_PRINTK_CONT("\n");
		}
	}
#endif /* SCTP_DEBUG */
	if (transport) {
		if (bytes_acked) {
			/* We may have counted DATA that was migrated
			 * to this transport due to DEL-IP operation.
			 * Subtract those bytes, since they were never
			 * sent on this transport and shouldn't be
			 * credited to this transport.
			 */
			bytes_acked -= migrate_bytes;

			/* 8.2. When an outstanding TSN is acknowledged,
			 * the endpoint shall clear the error counter of
			 * the destination transport address to which the
			 * DATA chunk was last sent.
			 * The association's overall error counter is
			 * also cleared.
			 */
			transport->error_count = 0;
			transport->asoc->overall_error_count = 0;

			/* Mark the destination transport address as
			 * active if it is not so marked.
			 */
			if ((transport->state == SCTP_INACTIVE) ||
			    (transport->state == SCTP_UNCONFIRMED)) {
				sctp_assoc_control_transport(
					transport->asoc,
					transport,
					SCTP_TRANSPORT_UP,
					SCTP_RECEIVED_SACK);
			}

			sctp_transport_raise_cwnd(transport, sack_ctsn,
						  bytes_acked);

			transport->flight_size -= bytes_acked;
			if (transport->flight_size == 0)
				transport->partial_bytes_acked = 0;
			q->outstanding_bytes -= bytes_acked + migrate_bytes;
		} else {
			/* RFC 2960 6.1, sctpimpguide-06 2.15.2
			 * When a sender is doing zero window probing, it
			 * should not timeout the association if it continues
			 * to receive new packets from the receiver. The
			 * reason is that the receiver MAY keep its window
			 * closed for an indefinite time.
			 * A sender is doing zero window probing when the
			 * receiver's advertised window is zero, and there is
			 * only one data chunk in flight to the receiver.
			 */
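			/* (In this implementation: asoc->next_tsn is the next
			 * TSN to be assigned, so the sack_ctsn + 2 == next_tsn
			 * check below means exactly one TSN, sack_ctsn + 1,
			 * is still in flight.)
			 */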
			if (!q->asoc->peer.rwnd &&
			    !list_empty(&tlist) &&
			    (sack_ctsn+2 == q->asoc->next_tsn)) {
				SCTP_DEBUG_PRINTK("%s: SACK received for zero "
						  "window probe: %u\n",
						  __func__, sack_ctsn);
				q->asoc->overall_error_count = 0;
				transport->error_count = 0;
			}
		}

		/* RFC 2960 6.3.2 Retransmission Timer Rules
		 *
		 * R2) Whenever all outstanding data sent to an address have
		 * been acknowledged, turn off the T3-rtx timer of that
		 * address.
		 */
		if (!transport->flight_size) {
			if (timer_pending(&transport->T3_rtx_timer) &&
			    del_timer(&transport->T3_rtx_timer)) {
				sctp_transport_put(transport);
			}
		} else if (restart_timer) {
			if (!mod_timer(&transport->T3_rtx_timer,
				       jiffies + transport->rto))
				sctp_transport_hold(transport);
		}
	}

	list_splice(&tlist, transmitted_queue);
}
/* Mark chunks as missing; as a consequence they may get retransmitted. */
static void sctp_mark_missing(struct sctp_outq *q,
			      struct list_head *transmitted_queue,
			      struct sctp_transport *transport,
			      __u32 highest_new_tsn_in_sack,
			      int count_of_newacks)
{
	struct sctp_chunk *chunk;
	__u32 tsn;
	char do_fast_retransmit = 0;
	struct sctp_association *asoc = q->asoc;
	struct sctp_transport *primary = asoc->peer.primary_path;

	list_for_each_entry(chunk, transmitted_queue, transmitted_list) {

		tsn = ntohl(chunk->subh.data_hdr->tsn);

		/* RFC 2960 7.2.4, sctpimpguide-05 2.8.2 M3) Examine all
		 * 'Unacknowledged TSN's', if the TSN number of an
		 * 'Unacknowledged TSN' is smaller than the 'HighestTSNinSack'
		 * value, increment the 'TSN.Missing.Report' count on that
		 * chunk if it has NOT been fast retransmitted or marked for
		 * fast retransmit already.
		 */
		if (chunk->fast_retransmit == SCTP_CAN_FRTX &&
		    !chunk->tsn_gap_acked &&
		    TSN_lt(tsn, highest_new_tsn_in_sack)) {

			/* SFR-CACC may require us to skip marking
			 * this chunk as missing.
			 */
			if (!transport || !sctp_cacc_skip(primary,
						chunk->transport,
						count_of_newacks, tsn)) {
				chunk->tsn_missing_report++;

				SCTP_DEBUG_PRINTK(
					"%s: TSN 0x%x missing counter: %d\n",
					__func__, tsn,
					chunk->tsn_missing_report);
			}
		}
		/*
		 * M4) If any DATA chunk is found to have a
		 * 'TSN.Missing.Report'
		 * value larger than or equal to 3, mark that chunk for
		 * retransmission and start the fast retransmit procedure.
		 */

		if (chunk->tsn_missing_report >= 3) {
			chunk->fast_retransmit = SCTP_NEED_FRTX;
			do_fast_retransmit = 1;
		}
	}

	if (transport) {
		if (do_fast_retransmit)
			sctp_retransmit(q, transport, SCTP_RTXR_FAST_RTX);

		SCTP_DEBUG_PRINTK("%s: transport: %p, cwnd: %d, "
				  "ssthresh: %d, flight_size: %d, pba: %d\n",
				  __func__, transport, transport->cwnd,
				  transport->ssthresh, transport->flight_size,
				  transport->partial_bytes_acked);
	}
}
/* Is the given TSN acked by this packet?  */
static int sctp_acked(struct sctp_sackhdr *sack, __u32 tsn)
{
	int i;
	sctp_sack_variable_t *frags;
	__u16 gap;
	__u32 ctsn = ntohl(sack->cum_tsn_ack);

	if (TSN_lte(tsn, ctsn))
		goto pass;

	/* 3.3.4 Selective Acknowledgement (SACK) (3):
	 *
	 * Gap Ack Blocks:
	 *  These fields contain the Gap Ack Blocks. They are repeated
	 *  for each Gap Ack Block up to the number of Gap Ack Blocks
	 *  defined in the Number of Gap Ack Blocks field. All DATA
	 *  chunks with TSNs greater than or equal to (Cumulative TSN
	 *  Ack + Gap Ack Block Start) and less than or equal to
	 *  (Cumulative TSN Ack + Gap Ack Block End) of each Gap Ack
	 *  Block are assumed to have been received correctly.
	 */

	frags = sack->variable;
	gap = tsn - ctsn;
	for (i = 0; i < ntohs(sack->num_gap_ack_blocks); ++i) {
		if (TSN_lte(ntohs(frags[i].gab.start), gap) &&
		    TSN_lte(gap, ntohs(frags[i].gab.end)))
			goto pass;
	}

	return 0;
pass:
	return 1;
}
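/* Worked example: for a SACK with cum_tsn_ack = 100 and one gap ack block of
 * start = 2, end = 4 (covering TSNs 102..104), a query for tsn = 103 gives
 * gap = 103 - 100 = 3, and 2 <= 3 <= 4 holds, so the TSN counts as acked;
 * tsn = 101 gives gap = 1, matches no block, and is reported unacked.
 */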
static inline int sctp_get_skip_pos(struct sctp_fwdtsn_skip *skiplist,
				    int nskips, __be16 stream)
{
	int i;

	for (i = 0; i < nskips; i++) {
		if (skiplist[i].stream == stream)
			return i;
	}
	return i;
}
/* Create and add a fwdtsn chunk to the outq's control queue if needed. */
static void sctp_generate_fwdtsn(struct sctp_outq *q, __u32 ctsn)
{
	struct sctp_association *asoc = q->asoc;
	struct sctp_chunk *ftsn_chunk = NULL;
	struct sctp_fwdtsn_skip ftsn_skip_arr[10];
	int nskips = 0;
	int skip_pos = 0;
	__u32 tsn;
	struct sctp_chunk *chunk;
	struct list_head *lchunk, *temp;

	if (!asoc->peer.prsctp_capable)
		return;

	/* PR-SCTP C1) Let SackCumAck be the Cumulative TSN ACK carried in the
	 * received SACK.
	 *
	 * If (Advanced.Peer.Ack.Point < SackCumAck), then update
	 * Advanced.Peer.Ack.Point to be equal to SackCumAck.
	 */
	if (TSN_lt(asoc->adv_peer_ack_point, ctsn))
		asoc->adv_peer_ack_point = ctsn;

	/* PR-SCTP C2) Try to further advance the "Advanced.Peer.Ack.Point"
	 * locally, that is, to move "Advanced.Peer.Ack.Point" up as long as
	 * the chunk next in the out-queue space is marked as "abandoned" as
	 * shown in the following example:
	 *
	 * Assuming that a SACK arrived with the Cumulative TSN ACK 102
	 * and the Advanced.Peer.Ack.Point is updated to this value:
	 *
	 *   out-queue at the end of  ==>   out-queue after Adv.Ack.Point
	 *   normal SACK processing         local advancement
	 *                ...                           ...
	 *   Adv.Ack.Pt-> 102 acked                     102 acked
	 *                103 abandoned                 103 abandoned
	 *                104 abandoned   Adv.Ack.P->   104 abandoned
	 *                105                           105
	 *                106 acked                     106 acked
	 *                ...                           ...
	 *
	 * In this example, the data sender successfully advanced the
	 * "Advanced.Peer.Ack.Point" from 102 to 104 locally.
	 */
	list_for_each_safe(lchunk, temp, &q->abandoned) {
		chunk = list_entry(lchunk, struct sctp_chunk,
				   transmitted_list);
		tsn = ntohl(chunk->subh.data_hdr->tsn);

		/* Remove any chunks in the abandoned queue that are acked by
		 * the ctsn.
		 */
		if (TSN_lte(tsn, ctsn)) {
			list_del_init(lchunk);
			sctp_chunk_free(chunk);
		} else {
			if (TSN_lte(tsn, asoc->adv_peer_ack_point+1)) {
				asoc->adv_peer_ack_point = tsn;
				if (chunk->chunk_hdr->flags &
					 SCTP_DATA_UNORDERED)
					continue;
				skip_pos = sctp_get_skip_pos(&ftsn_skip_arr[0],
						nskips,
						chunk->subh.data_hdr->stream);
				ftsn_skip_arr[skip_pos].stream =
					chunk->subh.data_hdr->stream;
				ftsn_skip_arr[skip_pos].ssn =
					 chunk->subh.data_hdr->ssn;
				if (skip_pos == nskips)
					nskips++;
				if (nskips == 10)
					break;
			} else
				break;
		}
	}

	/* PR-SCTP C3) If, after step C1 and C2, the "Advanced.Peer.Ack.Point"
	 * is greater than the Cumulative TSN ACK carried in the received
	 * SACK, the data sender MUST send the data receiver a FORWARD TSN
	 * chunk containing the latest value of the
	 * "Advanced.Peer.Ack.Point".
	 *
	 * C4) For each "abandoned" TSN the sender of the FORWARD TSN SHOULD
	 * list each stream and sequence number in the forwarded TSN. This
	 * information will enable the receiver to easily find any
	 * stranded TSN's waiting on stream reorder queues. Each stream
	 * SHOULD only be reported once; this means that if multiple
	 * abandoned messages occur in the same stream then only the
	 * highest abandoned stream sequence number is reported. If the
	 * total size of the FORWARD TSN does NOT fit in a single MTU then
	 * the sender of the FORWARD TSN SHOULD lower the
	 * Advanced.Peer.Ack.Point to the last TSN that will fit in a
	 * single MTU.
	 */
	if (asoc->adv_peer_ack_point > ctsn)
		ftsn_chunk = sctp_make_fwdtsn(asoc, asoc->adv_peer_ack_point,
					      nskips, &ftsn_skip_arr[0]);

	if (ftsn_chunk) {
		list_add_tail(&ftsn_chunk->list, &q->control_chunk_list);
		SCTP_INC_STATS(SCTP_MIB_OUTCTRLCHUNKS);
	}
}