/* SCTP kernel implementation
 * (C) Copyright IBM Corp. 2001, 2004
 * Copyright (c) 1999-2000 Cisco, Inc.
 * Copyright (c) 1999-2001 Motorola, Inc.
 * Copyright (c) 2001 Intel Corp.
 * Copyright (c) 2001 Nokia, Inc.
 * Copyright (c) 2001 La Monte H.P. Yarroll
 *
 * This abstraction carries sctp events to the ULP (sockets).
 *
 * This SCTP implementation is free software;
 * you can redistribute it and/or modify it under the terms of
 * the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This SCTP implementation is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; without even the implied
 *                 ************************
 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with GNU CC; see the file COPYING.  If not, write to
 * the Free Software Foundation, 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 *
 * Please send any bug reports or fixes you make to the
 * email address(es):
 *    lksctp developers <lksctp-developers@lists.sourceforge.net>
 *
 * Or submit a bug report through the following website:
 *    http://www.sf.net/projects/lksctp
 *
 * Written or modified by:
 *    Jon Grimm             <jgrimm@us.ibm.com>
 *    La Monte H.P. Yarroll <piggy@acm.org>
 *    Sridhar Samudrala     <sri@us.ibm.com>
 *
 * Any bugs reported given to us we will try to fix... any fixes shared will
 * be incorporated into the next SCTP release.
 */
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/sctp/structs.h>
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>
/* Forward declarations for internal helpers.  */
static struct sctp_ulpevent *sctp_ulpq_reasm(struct sctp_ulpq *ulpq,
					     struct sctp_ulpevent *);
static struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *,
					     struct sctp_ulpevent *);
static void sctp_ulpq_reasm_drain(struct sctp_ulpq *ulpq);
/* 1st Level Abstractions */

/* Initialize a ULP queue from a block of memory.  */
struct sctp_ulpq *sctp_ulpq_init(struct sctp_ulpq *ulpq,
				 struct sctp_association *asoc)
{
	memset(ulpq, 0, sizeof(struct sctp_ulpq));

	ulpq->asoc = asoc;
	skb_queue_head_init(&ulpq->reasm);
	skb_queue_head_init(&ulpq->lobby);
	ulpq->pd_mode  = 0;
	ulpq->malloced = 0;

	return ulpq;
}
/* Flush the reassembly and ordering queues.  */
void sctp_ulpq_flush(struct sctp_ulpq *ulpq)
{
	struct sk_buff *skb;
	struct sctp_ulpevent *event;

	while ((skb = __skb_dequeue(&ulpq->lobby)) != NULL) {
		event = sctp_skb2event(skb);
		sctp_ulpevent_free(event);
	}

	while ((skb = __skb_dequeue(&ulpq->reasm)) != NULL) {
		event = sctp_skb2event(skb);
		sctp_ulpevent_free(event);
	}
}
/* Dispose of a ulpqueue.  */
void sctp_ulpq_free(struct sctp_ulpq *ulpq)
{
	sctp_ulpq_flush(ulpq);
	if (ulpq->malloced)
		kfree(ulpq);
}
/* Process an incoming DATA chunk.  */
int sctp_ulpq_tail_data(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
			gfp_t gfp)
{
	struct sk_buff_head temp;
	sctp_data_chunk_t *hdr;
	struct sctp_ulpevent *event;

	hdr = (sctp_data_chunk_t *) chunk->chunk_hdr;

	/* Create an event from the incoming chunk. */
	event = sctp_ulpevent_make_rcvmsg(chunk->asoc, chunk, gfp);
	if (!event)
		return -ENOMEM;

	/* Do reassembly if needed.  */
	event = sctp_ulpq_reasm(ulpq, event);

	/* Do ordering if needed.  */
	if ((event) && (event->msg_flags & MSG_EOR)) {
		/* Create a temporary list to collect chunks on.  */
		skb_queue_head_init(&temp);
		__skb_queue_tail(&temp, sctp_event2skb(event));

		event = sctp_ulpq_order(ulpq, event);
	}

	/* Send event to the ULP.  'event' is the sctp_ulpevent for
	 * very first SKB on the 'temp' list.
	 */
	if (event)
		sctp_ulpq_tail_event(ulpq, event);

	return 0;
}
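/* Note on the receive path: every inbound DATA chunk flows through
 * sctp_ulpq_tail_data() above.  The chunk is first turned into a
 * sctp_ulpevent, reassembled by TSN (sctp_ulpq_reasm), and, once a complete
 * message (MSG_EOR) is available, ordered by stream/SSN (sctp_ulpq_order)
 * before being queued on the socket's receive queue by
 * sctp_ulpq_tail_event().
 */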
/* Clear the partial delivery mode for this socket.  Note: This
 * assumes that no association is currently in partial delivery mode.
 */
int sctp_clear_pd(struct sock *sk, struct sctp_association *asoc)
{
	struct sctp_sock *sp = sctp_sk(sk);

	if (atomic_dec_and_test(&sp->pd_mode)) {
		/* This means there are no other associations in PD, so
		 * we can go ahead and clear out the lobby in one shot.
		 */
		if (!skb_queue_empty(&sp->pd_lobby)) {
			struct list_head *list;
			sctp_skb_list_tail(&sp->pd_lobby, &sk->sk_receive_queue);
			list = (struct list_head *)&sctp_sk(sk)->pd_lobby;
			INIT_LIST_HEAD(list);
			return 1;
		}
	} else {
		/* There are other associations in PD, so we only need to
		 * pull stuff out of the lobby that belongs to the
		 * association that is exiting PD (all of its notifications
		 * are posted here).
		 */
		if (!skb_queue_empty(&sp->pd_lobby) && asoc) {
			struct sk_buff *skb, *tmp;
			struct sctp_ulpevent *event;

			sctp_skb_for_each(skb, &sp->pd_lobby, tmp) {
				event = sctp_skb2event(skb);
				if (event->asoc == asoc) {
					__skb_unlink(skb, &sp->pd_lobby);
					__skb_queue_tail(&sk->sk_receive_queue,
							 skb);
				}
			}
		}
	}

	return 0;
}
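/* The return value of sctp_clear_pd() tells the caller whether queued
 * events were moved onto sk_receive_queue (1) or not (0).  The value is
 * propagated through sctp_ulpq_clear_pd(), and sctp_ulpq_abort_pd() uses it
 * to decide whether to wake the socket with sk_data_ready().
 */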
/* Set the pd_mode on the socket and ulpq */
static void sctp_ulpq_set_pd(struct sctp_ulpq *ulpq)
{
	struct sctp_sock *sp = sctp_sk(ulpq->asoc->base.sk);

	atomic_inc(&sp->pd_mode);
	ulpq->pd_mode = 1;
}
/* Clear the pd_mode and restart any pending messages waiting for delivery. */
static int sctp_ulpq_clear_pd(struct sctp_ulpq *ulpq)
{
	ulpq->pd_mode = 0;
	sctp_ulpq_reasm_drain(ulpq);
	return sctp_clear_pd(ulpq->asoc->base.sk, ulpq->asoc);
}
/* If the SKB of 'event' is on a list, it is the first such member
 * of that list.
 */
int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
{
	struct sock *sk = ulpq->asoc->base.sk;
	struct sk_buff_head *queue, *skb_list;
	struct sk_buff *skb = sctp_event2skb(event);
	int clear_pd = 0;

	skb_list = (struct sk_buff_head *) skb->prev;

	/* If the socket is just going to throw this away, do not
	 * even try to deliver it.
	 */
	if (sock_flag(sk, SOCK_DEAD) || (sk->sk_shutdown & RCV_SHUTDOWN))
		goto out_free;

	/* Check if the user wishes to receive this event.  */
	if (!sctp_ulpevent_is_enabled(event, &sctp_sk(sk)->subscribe))
		goto out_free;

	/* If we are in partial delivery mode, post to the lobby until
	 * partial delivery is cleared, unless, of course, _this_
	 * association is the cause of the partial delivery.
	 */
	if (atomic_read(&sctp_sk(sk)->pd_mode) == 0) {
		queue = &sk->sk_receive_queue;
	} else {
		if (ulpq->pd_mode) {
			/* If the association is in partial delivery, we
			 * need to finish delivering the partially processed
			 * packet before passing any other data.  This is
			 * because we don't truly support stream interleaving.
			 */
			if ((event->msg_flags & MSG_NOTIFICATION) ||
			    (SCTP_DATA_NOT_FRAG ==
				    (event->msg_flags & SCTP_DATA_FRAG_MASK)))
				queue = &sctp_sk(sk)->pd_lobby;
			else {
				clear_pd = event->msg_flags & MSG_EOR;
				queue = &sk->sk_receive_queue;
			}
		} else {
			/*
			 * If fragment interleave is enabled, we
			 * can queue this to the receive queue instead
			 * of the lobby.
			 */
			if (sctp_sk(sk)->frag_interleave)
				queue = &sk->sk_receive_queue;
			else
				queue = &sctp_sk(sk)->pd_lobby;
		}
	}

	/* If we are harvesting multiple skbs they will be
	 * collected on a list.
	 */
	if (skb_list)
		sctp_skb_list_tail(skb_list, queue);
	else
		__skb_queue_tail(queue, skb);

	/* Did we just complete partial delivery and need to get
	 * rolling again?  Move pending data to the receive
	 * queue.
	 */
	if (clear_pd)
		sctp_ulpq_clear_pd(ulpq);

	if (queue == &sk->sk_receive_queue)
		sk->sk_data_ready(sk, 0);
	return 1;

out_free:
	if (skb_list)
		sctp_queue_purge_ulpevents(skb_list);
	else
		sctp_ulpevent_free(event);

	return 0;
}
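/* The frag_interleave test above corresponds to the SCTP_FRAGMENT_INTERLEAVE
 * socket option.  As a rough illustration (user-space sketch, not kernel
 * code), an application would opt in with something like:
 *
 *	int level = 1;	// 0, 1 or 2; see sctp(7) for the exact semantics
 *	setsockopt(fd, IPPROTO_SCTP, SCTP_FRAGMENT_INTERLEAVE,
 *		   &level, sizeof(level));
 *
 * which allows data from other associations to reach the receive queue
 * while one association is in partial delivery.
 */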
/* 2nd Level Abstractions */

/* Helper function to store chunks that need to be reassembled.  */
static void sctp_ulpq_store_reasm(struct sctp_ulpq *ulpq,
				  struct sctp_ulpevent *event)
{
	struct sk_buff *pos;
	struct sctp_ulpevent *cevent;
	__u32 tsn, ctsn;

	tsn = event->tsn;

	/* See if it belongs at the end. */
	pos = skb_peek_tail(&ulpq->reasm);
	if (!pos) {
		__skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
		return;
	}

	/* Short circuit just dropping it at the end. */
	cevent = sctp_skb2event(pos);
	ctsn = cevent->tsn;
	if (TSN_lt(ctsn, tsn)) {
		__skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
		return;
	}

	/* Find the right place in this list. We store them by TSN.  */
	skb_queue_walk(&ulpq->reasm, pos) {
		cevent = sctp_skb2event(pos);
		ctsn = cevent->tsn;

		if (TSN_lt(tsn, ctsn))
			break;
	}

	/* Insert before pos. */
	__skb_queue_before(&ulpq->reasm, pos, sctp_event2skb(event));
}
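/* TSN_lt()/TSN_lte() compare 32-bit TSNs using serial-number arithmetic, so
 * the insertion above keeps the reasm queue correctly sorted even across a
 * TSN wrap-around.
 */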
/* Helper function to return an event corresponding to the reassembled
 * datagram.
 * This routine creates a re-assembled skb given the first and last skb's
 * as stored in the reassembly queue. The skb's may be non-linear if the sctp
 * payload was fragmented on the way and ip had to reassemble them.
 * We add the rest of skb's to the first skb's fraglist.
 */
static struct sctp_ulpevent *sctp_make_reassembled_event(struct sk_buff_head *queue, struct sk_buff *f_frag, struct sk_buff *l_frag)
{
	struct sk_buff *pos;
	struct sk_buff *new = NULL;
	struct sctp_ulpevent *event;
	struct sk_buff *pnext, *last;
	struct sk_buff *list = skb_shinfo(f_frag)->frag_list;

	/* Store the pointer to the 2nd skb */
	if (f_frag == l_frag)
		pos = NULL;
	else
		pos = f_frag->next;

	/* Get the last skb in the f_frag's frag_list if present. */
	for (last = list; list; last = list, list = list->next);

	/* Add the list of remaining fragments to the first fragments
	 * frag_list.
	 */
	if (last)
		last->next = pos;
	else {
		if (skb_cloned(f_frag)) {
			/* This is a cloned skb, we can't just modify
			 * the frag_list.  We need a new skb to do that.
			 * Instead of calling skb_unshare(), we'll do it
			 * ourselves since we need to delay the free.
			 */
			new = skb_copy(f_frag, GFP_ATOMIC);
			if (!new)
				return NULL;	/* try again later */

			sctp_skb_set_owner_r(new, f_frag->sk);

			skb_shinfo(new)->frag_list = pos;
		} else
			skb_shinfo(f_frag)->frag_list = pos;
	}

	/* Remove the first fragment from the reassembly queue.  */
	__skb_unlink(f_frag, queue);

	/* if we did unshare, then free the old skb and re-assign */
	if (new) {
		kfree_skb(f_frag);
		f_frag = new;
	}

	while (pos) {

		pnext = pos->next;

		/* Update the len and data_len fields of the first fragment. */
		f_frag->len += pos->len;
		f_frag->data_len += pos->len;

		/* Remove the fragment from the reassembly queue.  */
		__skb_unlink(pos, queue);

		/* Break if we have reached the last fragment.  */
		if (pos == l_frag)
			break;
		pos->next = pnext;
		pos = pnext;
	}

	event = sctp_skb2event(f_frag);
	SCTP_INC_STATS(SCTP_MIB_REASMUSRMSGS);

	return event;
}
/* Helper function to check if an incoming chunk has filled up the last
 * missing fragment in a SCTP datagram and return the corresponding event.
 */
static struct sctp_ulpevent *sctp_ulpq_retrieve_reassembled(struct sctp_ulpq *ulpq)
{
	struct sk_buff *pos;
	struct sctp_ulpevent *cevent;
	struct sk_buff *first_frag = NULL;
	__u32 ctsn, next_tsn;
	struct sctp_ulpevent *retval = NULL;
	struct sk_buff *pd_first = NULL;
	struct sk_buff *pd_last = NULL;
	size_t pd_len = 0;
	struct sctp_association *asoc;
	u32 pd_point;

	/* Initialized to 0 just to avoid compiler warning message.  Will
	 * never be used with this value. It is referenced only after it
	 * is set when we find the first fragment of a message.
	 */
	next_tsn = 0;

	/* The chunks are held in the reasm queue sorted by TSN.
	 * Walk through the queue sequentially and look for a sequence of
	 * fragmented chunks that complete a datagram.
	 * 'first_frag' and next_tsn are reset when we find a chunk which
	 * is the first fragment of a datagram. Once these 2 fields are set
	 * we expect to find the remaining middle fragments and the last
	 * fragment in order. If not, first_frag is reset to NULL and we
	 * start the next pass when we find another first fragment.
	 *
	 * There is a potential to do partial delivery if the user sets the
	 * SCTP_PARTIAL_DELIVERY_POINT option.  Let's count some things here
	 * to see if we can do PD.
	 */
	skb_queue_walk(&ulpq->reasm, pos) {
		cevent = sctp_skb2event(pos);
		ctsn = cevent->tsn;

		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
		case SCTP_DATA_FIRST_FRAG:
			/* If this "FIRST_FRAG" is the first
			 * element in the queue, then count it towards
			 * possible PD.
			 */
			if (pos == ulpq->reasm.next) {
				pd_first = pos;
				pd_last = pos;
				pd_len = pos->len;
			} else {
				pd_first = NULL;
				pd_last = NULL;
				pd_len = 0;
			}

			first_frag = pos;
			next_tsn = ctsn + 1;
			break;

		case SCTP_DATA_MIDDLE_FRAG:
			if ((first_frag) && (ctsn == next_tsn)) {
				next_tsn++;
				if (pd_first) {
					pd_last = pos;
					pd_len += pos->len;
				}
			} else
				first_frag = NULL;
			break;

		case SCTP_DATA_LAST_FRAG:
			if (first_frag && (ctsn == next_tsn))
				goto found;
			else
				first_frag = NULL;
			break;
		}
	}

	asoc = ulpq->asoc;
	if (pd_first) {
		/* Make sure we can enter partial delivery.
		 * We can trigger partial delivery only if fragment
		 * interleave is set, or the socket is not already
		 * in partial delivery.
		 */
		if (!sctp_sk(asoc->base.sk)->frag_interleave &&
		    atomic_read(&sctp_sk(asoc->base.sk)->pd_mode))
			goto done;

		cevent = sctp_skb2event(pd_first);
		pd_point = sctp_sk(asoc->base.sk)->pd_point;
		if (pd_point && pd_point <= pd_len) {
			retval = sctp_make_reassembled_event(&ulpq->reasm,
							     pd_first,
							     pd_last);
			if (retval)
				sctp_ulpq_set_pd(ulpq);
		}
	}
done:
	return retval;
found:
	retval = sctp_make_reassembled_event(&ulpq->reasm, first_frag, pos);
	if (retval)
		retval->msg_flags |= MSG_EOR;
	goto done;
}
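/* pd_point above is controlled by the SCTP_PARTIAL_DELIVERY_POINT socket
 * option.  An illustrative user-space sketch (not kernel code):
 *
 *	uint32_t pd_point = 8192;	// start partial delivery at 8 KB
 *	setsockopt(fd, IPPROTO_SCTP, SCTP_PARTIAL_DELIVERY_POINT,
 *		   &pd_point, sizeof(pd_point));
 *
 * Once at least pd_point bytes of in-order leading fragments are queued,
 * the code above hands them to the user as a partial message.
 */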
/* Retrieve the next set of fragments of a partial message.  */
static struct sctp_ulpevent *sctp_ulpq_retrieve_partial(struct sctp_ulpq *ulpq)
{
	struct sk_buff *pos, *last_frag, *first_frag;
	struct sctp_ulpevent *cevent;
	__u32 ctsn, next_tsn;
	int is_last;
	struct sctp_ulpevent *retval;

	/* The chunks are held in the reasm queue sorted by TSN.
	 * Walk through the queue sequentially and look for the first
	 * sequence of fragmented chunks.
	 */

	if (skb_queue_empty(&ulpq->reasm))
		return NULL;

	last_frag = first_frag = NULL;
	retval = NULL;
	next_tsn = 0;
	is_last = 0;

	skb_queue_walk(&ulpq->reasm, pos) {
		cevent = sctp_skb2event(pos);
		ctsn = cevent->tsn;

		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
		case SCTP_DATA_MIDDLE_FRAG:
			if (!first_frag) {
				first_frag = pos;
				next_tsn = ctsn + 1;
				last_frag = pos;
			} else if (next_tsn == ctsn)
				next_tsn++;
			else
				goto done;
			break;
		case SCTP_DATA_LAST_FRAG:
			if (!first_frag)
				first_frag = pos;
			else if (ctsn != next_tsn)
				goto done;
			last_frag = pos;
			is_last = 1;
			goto done;
		default:
			return NULL;
		}
	}

	/* We have the reassembled event. There is no need to look
	 * further.
	 */
done:
	retval = sctp_make_reassembled_event(&ulpq->reasm, first_frag, last_frag);
	if (retval && is_last)
		retval->msg_flags |= MSG_EOR;

	return retval;
}
/* Helper function to reassemble chunks.  Hold chunks on the reasm queue that
 * need reassembling.
 */
static struct sctp_ulpevent *sctp_ulpq_reasm(struct sctp_ulpq *ulpq,
					     struct sctp_ulpevent *event)
{
	struct sctp_ulpevent *retval = NULL;

	/* Check if this is part of a fragmented message.  */
	if (SCTP_DATA_NOT_FRAG == (event->msg_flags & SCTP_DATA_FRAG_MASK)) {
		event->msg_flags |= MSG_EOR;
		return event;
	}

	sctp_ulpq_store_reasm(ulpq, event);
	if (!ulpq->pd_mode)
		retval = sctp_ulpq_retrieve_reassembled(ulpq);
	else {
		__u32 ctsn, ctsnap;

		/* Do not even bother unless this is the next tsn to
		 * be delivered.
		 */
		ctsn = event->tsn;
		ctsnap = sctp_tsnmap_get_ctsn(&ulpq->asoc->peer.tsn_map);
		if (TSN_lte(ctsn, ctsnap))
			retval = sctp_ulpq_retrieve_partial(ulpq);
	}

	return retval;
}
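/* While in partial delivery we only try to extend the message already being
 * delivered, and only when the new fragment's TSN is at or below the
 * cumulative TSN ack point, i.e. when no earlier fragment can still be
 * missing in front of it.
 */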
/* Retrieve the first part (sequential fragments) for partial delivery.  */
static struct sctp_ulpevent *sctp_ulpq_retrieve_first(struct sctp_ulpq *ulpq)
{
	struct sk_buff *pos, *last_frag, *first_frag;
	struct sctp_ulpevent *cevent;
	__u32 ctsn, next_tsn;
	struct sctp_ulpevent *retval;

	/* The chunks are held in the reasm queue sorted by TSN.
	 * Walk through the queue sequentially and look for a sequence of
	 * fragmented chunks that start a datagram.
	 */

	if (skb_queue_empty(&ulpq->reasm))
		return NULL;

	last_frag = first_frag = NULL;
	retval = NULL;
	next_tsn = 0;

	skb_queue_walk(&ulpq->reasm, pos) {
		cevent = sctp_skb2event(pos);
		ctsn = cevent->tsn;

		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
		case SCTP_DATA_FIRST_FRAG:
			if (!first_frag) {
				first_frag = pos;
				next_tsn = ctsn + 1;
				last_frag = pos;
			} else
				goto done;
			break;

		case SCTP_DATA_MIDDLE_FRAG:
			if (!first_frag)
				return NULL;
			if (ctsn == next_tsn) {
				next_tsn++;
				last_frag = pos;
			} else
				goto done;
			break;
		default:
			return NULL;
		}
	}

	/* We have the reassembled event. There is no need to look
	 * further.
	 */
done:
	retval = sctp_make_reassembled_event(&ulpq->reasm, first_frag, last_frag);
	return retval;
}
/*
 * Flush out stale fragments from the reassembly queue when processing
 * a Forward TSN.
 *
 * RFC 3758, Section 3.6
 *
 * After receiving and processing a FORWARD TSN, the data receiver MUST
 * take cautions in updating its re-assembly queue.  The receiver MUST
 * remove any partially reassembled message, which is still missing one
 * or more TSNs earlier than or equal to the new cumulative TSN point.
 * In the event that the receiver has invoked the partial delivery API,
 * a notification SHOULD also be generated to inform the upper layer API
 * that the message being partially delivered will NOT be completed.
 */
void sctp_ulpq_reasm_flushtsn(struct sctp_ulpq *ulpq, __u32 fwd_tsn)
{
	struct sk_buff *pos, *tmp;
	struct sctp_ulpevent *event;
	__u32 tsn;

	if (skb_queue_empty(&ulpq->reasm))
		return;

	skb_queue_walk_safe(&ulpq->reasm, pos, tmp) {
		event = sctp_skb2event(pos);
		tsn = event->tsn;

		/* Since the entire message must be abandoned by the
		 * sender (item A3 in Section 3.5, RFC 3758), we can
		 * free all fragments on the list that are less than
		 * or equal to ctsn_point.
		 */
		if (TSN_lte(tsn, fwd_tsn)) {
			__skb_unlink(pos, &ulpq->reasm);
			sctp_ulpevent_free(event);
		} else
			break;
	}
}
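/* Example: if fragments with TSNs 101, 102 and 104 are queued and a
 * FORWARD TSN advances the cumulative point to 103, the walk above frees
 * 101 and 102 and stops at 104, which may still become deliverable.
 */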
/*
 * Drain the reassembly queue.  If we just cleared partial delivery, it
 * is possible that the reassembly queue will contain already reassembled
 * messages.  Retrieve any such messages and give them to the user.
 */
static void sctp_ulpq_reasm_drain(struct sctp_ulpq *ulpq)
{
	struct sctp_ulpevent *event = NULL;
	struct sk_buff_head temp;

	if (skb_queue_empty(&ulpq->reasm))
		return;

	while ((event = sctp_ulpq_retrieve_reassembled(ulpq)) != NULL) {
		/* Do ordering if needed.  */
		if ((event) && (event->msg_flags & MSG_EOR)) {
			skb_queue_head_init(&temp);
			__skb_queue_tail(&temp, sctp_event2skb(event));

			event = sctp_ulpq_order(ulpq, event);
		}

		/* Send event to the ULP.  'event' is the
		 * sctp_ulpevent for very first SKB on the 'temp' list.
		 */
		if (event)
			sctp_ulpq_tail_event(ulpq, event);
	}
}
/* Helper function to gather skbs that have possibly become
 * ordered by an incoming chunk.
 */
static void sctp_ulpq_retrieve_ordered(struct sctp_ulpq *ulpq,
				       struct sctp_ulpevent *event)
{
	struct sk_buff_head *event_list;
	struct sk_buff *pos, *tmp;
	struct sctp_ulpevent *cevent;
	struct sctp_stream *in;
	__u16 sid, csid;
	__u16 ssn, cssn;

	sid = event->stream;
	ssn = event->ssn;
	in = &ulpq->asoc->ssnmap->in;

	event_list = (struct sk_buff_head *) sctp_event2skb(event)->prev;

	/* We are holding the chunks by stream, by SSN.  */
	sctp_skb_for_each(pos, &ulpq->lobby, tmp) {
		cevent = (struct sctp_ulpevent *) pos->cb;
		csid = cevent->stream;
		cssn = cevent->ssn;

		/* Have we gone too far?  */
		if (csid > sid)
			break;

		/* Have we not gone far enough?  */
		if (csid < sid)
			continue;

		if (cssn != sctp_ssn_peek(in, sid))
			break;

		/* Found it, so mark in the ssnmap. */
		sctp_ssn_next(in, sid);

		__skb_unlink(pos, &ulpq->lobby);

		/* Attach all gathered skbs to the event.  */
		__skb_queue_tail(event_list, pos);
	}
}
/* Helper function to store chunks needing ordering.  */
static void sctp_ulpq_store_ordered(struct sctp_ulpq *ulpq,
				    struct sctp_ulpevent *event)
{
	struct sk_buff *pos;
	struct sctp_ulpevent *cevent;
	__u16 sid, csid;
	__u16 ssn, cssn;

	pos = skb_peek_tail(&ulpq->lobby);
	if (!pos) {
		__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
		return;
	}

	sid = event->stream;
	ssn = event->ssn;

	cevent = (struct sctp_ulpevent *) pos->cb;
	csid = cevent->stream;
	cssn = cevent->ssn;
	if (sid > csid) {
		__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
		return;
	}

	if ((sid == csid) && SSN_lt(cssn, ssn)) {
		__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
		return;
	}

	/* Find the right place in this list.  We store them by
	 * stream ID and then by SSN.
	 */
	skb_queue_walk(&ulpq->lobby, pos) {
		cevent = (struct sctp_ulpevent *) pos->cb;
		csid = cevent->stream;
		cssn = cevent->ssn;

		if (csid > sid)
			break;
		if (csid == sid && SSN_lt(ssn, cssn))
			break;
	}

	/* Insert before pos. */
	__skb_queue_before(&ulpq->lobby, pos, sctp_event2skb(event));
}
static struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *ulpq,
					     struct sctp_ulpevent *event)
{
	__u16 sid, ssn;
	struct sctp_stream *in;

	/* Check if this message needs ordering.  */
	if (SCTP_DATA_UNORDERED & event->msg_flags)
		return event;

	/* Note: The stream ID must be verified before this routine.  */
	sid = event->stream;
	ssn = event->ssn;
	in = &ulpq->asoc->ssnmap->in;

	/* Is this the expected SSN for this stream ID?  */
	if (ssn != sctp_ssn_peek(in, sid)) {
		/* We've received something out of order, so find where it
		 * needs to be placed.  We order by stream and then by SSN.
		 */
		sctp_ulpq_store_ordered(ulpq, event);
		return NULL;
	}

	/* Mark that the next chunk has been found.  */
	sctp_ssn_next(in, sid);

	/* Go find any other chunks that were waiting for
	 * ordering.
	 */
	sctp_ulpq_retrieve_ordered(ulpq, event);

	return event;
}
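/* Example of the ordering logic: if a stream expects SSN 5 and SSNs 6 and 7
 * arrive first, they are parked in the lobby by sctp_ulpq_store_ordered().
 * When SSN 5 finally arrives it is returned for delivery, and
 * sctp_ulpq_retrieve_ordered() pulls 6 and 7 off the lobby behind it.
 */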
/* Helper function to gather skbs that have possibly become
 * ordered by forward tsn skipping their dependencies.
 */
static void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq, __u16 sid)
{
	struct sk_buff *pos, *tmp;
	struct sctp_ulpevent *cevent;
	struct sctp_ulpevent *event;
	struct sctp_stream *in;
	struct sk_buff_head temp;
	struct sk_buff_head *lobby = &ulpq->lobby;
	__u16 csid, cssn;

	in = &ulpq->asoc->ssnmap->in;

	/* We are holding the chunks by stream, by SSN.  */
	skb_queue_head_init(&temp);
	event = NULL;
	sctp_skb_for_each(pos, lobby, tmp) {
		cevent = (struct sctp_ulpevent *) pos->cb;
		csid = cevent->stream;
		cssn = cevent->ssn;

		/* Have we gone too far?  */
		if (csid > sid)
			break;

		/* Have we not gone far enough?  */
		if (csid < sid)
			continue;

		/* see if this ssn has been marked by skipping */
		if (!SSN_lt(cssn, sctp_ssn_peek(in, csid)))
			break;

		__skb_unlink(pos, lobby);
		if (!event)
			/* Create a temporary list to collect chunks on.  */
			event = sctp_skb2event(pos);

		/* Attach all gathered skbs to the event.  */
		__skb_queue_tail(&temp, pos);
	}

	/* If we didn't reap any data, see if the next expected SSN
	 * is next on the queue and if so, use that.
	 */
	if (event == NULL && pos != (struct sk_buff *)lobby) {
		cevent = (struct sctp_ulpevent *) pos->cb;
		csid = cevent->stream;
		cssn = cevent->ssn;

		if (csid == sid && cssn == sctp_ssn_peek(in, csid)) {
			sctp_ssn_next(in, csid);
			__skb_unlink(pos, lobby);
			__skb_queue_tail(&temp, pos);
			event = sctp_skb2event(pos);
		}
	}

	/* Send event to the ULP.  'event' is the sctp_ulpevent for
	 * very first SKB on the 'temp' list.
	 */
	if (event) {
		/* see if we have more ordered data that we can deliver */
		sctp_ulpq_retrieve_ordered(ulpq, event);
		sctp_ulpq_tail_event(ulpq, event);
	}
}
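/* Note: after the walk above, 'pos' points at the lobby's list head if every
 * entry was examined; the 'pos != (struct sk_buff *)lobby' check guards
 * against dereferencing that sentinel when nothing was reaped.
 */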
/* Skip over an SSN. This is used during the processing of a
 * Forward TSN chunk to skip over the abandoned ordered data.
 */
void sctp_ulpq_skip(struct sctp_ulpq *ulpq, __u16 sid, __u16 ssn)
{
	struct sctp_stream *in;

	/* Note: The stream ID must be verified before this routine.  */
	in = &ulpq->asoc->ssnmap->in;

	/* Is this an old SSN?  If so ignore. */
	if (SSN_lt(ssn, sctp_ssn_peek(in, sid)))
		return;

	/* Mark that we are no longer expecting this SSN or lower. */
	sctp_ssn_skip(in, sid, ssn);

	/* Go find any other chunks that were waiting for
	 * ordering and deliver them if needed.
	 */
	sctp_ulpq_reap_ordered(ulpq, sid);
}
static __u16 sctp_ulpq_renege_list(struct sctp_ulpq *ulpq,
				   struct sk_buff_head *list, __u16 needed)
{
	__u16 freed = 0;
	__u32 tsn;
	struct sk_buff *skb;
	struct sctp_ulpevent *event;
	struct sctp_tsnmap *tsnmap;

	tsnmap = &ulpq->asoc->peer.tsn_map;

	while ((skb = __skb_dequeue_tail(list)) != NULL) {
		freed += skb_headlen(skb);
		event = sctp_skb2event(skb);
		tsn = event->tsn;

		sctp_ulpevent_free(event);
		sctp_tsnmap_renege(tsnmap, tsn);
		if (freed >= needed)
			return freed;
	}

	return freed;
}
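/* Reneging frees events that were previously received but not yet delivered;
 * sctp_tsnmap_renege() clears the corresponding TSN from the peer's tsn_map
 * so it is no longer reported as received and the peer can retransmit it.
 */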
/* Renege 'needed' bytes from the ordering queue. */
static __u16 sctp_ulpq_renege_order(struct sctp_ulpq *ulpq, __u16 needed)
{
	return sctp_ulpq_renege_list(ulpq, &ulpq->lobby, needed);
}

/* Renege 'needed' bytes from the reassembly queue. */
static __u16 sctp_ulpq_renege_frags(struct sctp_ulpq *ulpq, __u16 needed)
{
	return sctp_ulpq_renege_list(ulpq, &ulpq->reasm, needed);
}
/* Partial deliver the first message as there is pressure on rwnd. */
void sctp_ulpq_partial_delivery(struct sctp_ulpq *ulpq,
				struct sctp_chunk *chunk,
				gfp_t gfp)
{
	struct sctp_ulpevent *event;
	struct sctp_association *asoc;
	struct sctp_sock *sp;

	asoc = ulpq->asoc;
	sp = sctp_sk(asoc->base.sk);

	/* If the association is already in Partial Delivery mode
	 * we have nothing to do.
	 */
	if (ulpq->pd_mode)
		return;

	/* If the user enabled fragment interleave socket option,
	 * multiple associations can enter partial delivery.
	 * Otherwise, we can only enter partial delivery if the
	 * socket is not in partial delivery mode.
	 */
	if (sp->frag_interleave || atomic_read(&sp->pd_mode) == 0) {
		/* Is partial delivery possible?  */
		event = sctp_ulpq_retrieve_first(ulpq);
		/* Send event to the ULP.  */
		if (event) {
			sctp_ulpq_tail_event(ulpq, event);
			sctp_ulpq_set_pd(ulpq);
			return;
		}
	}
}
/* Renege some packets to make room for an incoming chunk.  */
void sctp_ulpq_renege(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
		      gfp_t gfp)
{
	struct sctp_association *asoc;
	__u16 needed, freed;

	asoc = ulpq->asoc;

	if (chunk) {
		needed = ntohs(chunk->chunk_hdr->length);
		needed -= sizeof(sctp_data_chunk_t);
	} else
		needed = SCTP_DEFAULT_MAXWINDOW;

	freed = 0;

	if (skb_queue_empty(&asoc->base.sk->sk_receive_queue)) {
		freed = sctp_ulpq_renege_order(ulpq, needed);
		if (freed < needed) {
			freed += sctp_ulpq_renege_frags(ulpq, needed - freed);
		}
	}
	/* If able to free enough room, accept this chunk. */
	if (chunk && (freed >= needed)) {
		__u32 tsn;
		tsn = ntohl(chunk->subh.data_hdr->tsn);
		sctp_tsnmap_mark(&asoc->peer.tsn_map, tsn);
		sctp_ulpq_tail_data(ulpq, chunk, gfp);

		sctp_ulpq_partial_delivery(ulpq, chunk, gfp);
	}

	sk_mem_reclaim(asoc->base.sk);
}
/* Notify the application if an association is aborted and in
 * partial delivery mode.  Send up any pending received messages.
 */
void sctp_ulpq_abort_pd(struct sctp_ulpq *ulpq, gfp_t gfp)
{
	struct sctp_ulpevent *ev = NULL;
	struct sock *sk;

	if (!ulpq->pd_mode)
		return;

	sk = ulpq->asoc->base.sk;
	if (sctp_ulpevent_type_enabled(SCTP_PARTIAL_DELIVERY_EVENT,
				       &sctp_sk(sk)->subscribe))
		ev = sctp_ulpevent_make_pdapi(ulpq->asoc,
					      SCTP_PARTIAL_DELIVERY_ABORTED,
					      gfp);
	if (ev)
		__skb_queue_tail(&sk->sk_receive_queue, sctp_event2skb(ev));

	/* If there is data waiting, send it up the socket now. */
	if (sctp_ulpq_clear_pd(ulpq) || ev)
		sk->sk_data_ready(sk, 0);
}
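/* The SCTP_PARTIAL_DELIVERY_ABORTED notification generated above is only
 * queued if the application subscribed to partial delivery events.  An
 * illustrative user-space sketch of that subscription (not kernel code):
 *
 *	struct sctp_event_subscribe subscribe = { 0 };
 *	subscribe.sctp_data_io_event = 1;
 *	subscribe.sctp_partial_delivery_event = 1;
 *	setsockopt(fd, IPPROTO_SCTP, SCTP_EVENTS,
 *		   &subscribe, sizeof(subscribe));
 */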