/* SCTP kernel reference Implementation
 * (C) Copyright IBM Corp. 2001, 2004
 * Copyright (c) 1999-2000 Cisco, Inc.
 * Copyright (c) 1999-2001 Motorola, Inc.
 * Copyright (c) 2001 Intel Corp.
 * Copyright (c) 2001 Nokia, Inc.
 * Copyright (c) 2001 La Monte H.P. Yarroll
 *
 * This abstraction carries sctp events to the ULP (sockets).
 *
 * The SCTP reference implementation is free software;
 * you can redistribute it and/or modify it under the terms of
 * the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * The SCTP reference implementation is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; without even the implied
 *                 ************************
 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with GNU CC; see the file COPYING.  If not, write to
 * the Free Software Foundation, 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 *
 * Please send any bug reports or fixes you make to the
 * email address(es):
 *    lksctp developers <lksctp-developers@lists.sourceforge.net>
 *
 * Or submit a bug report through the following website:
 *    http://www.sf.net/projects/lksctp
 *
 * Written or modified by:
 *    Jon Grimm             <jgrimm@us.ibm.com>
 *    La Monte H.P. Yarroll <piggy@acm.org>
 *    Sridhar Samudrala     <sri@us.ibm.com>
 *
 * Any bugs reported given to us we will try to fix... any fixes shared will
 * be incorporated into the next SCTP release.
 */
#include <linux/types.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/sctp/structs.h>
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>
/* Forward declarations for internal helpers.  */
static struct sctp_ulpevent * sctp_ulpq_reasm(struct sctp_ulpq *ulpq,
					      struct sctp_ulpevent *);
static struct sctp_ulpevent * sctp_ulpq_order(struct sctp_ulpq *,
					      struct sctp_ulpevent *);

/* 1st Level Abstractions */
/* Initialize a ULP queue from a block of memory.  */
struct sctp_ulpq *sctp_ulpq_init(struct sctp_ulpq *ulpq,
				 struct sctp_association *asoc)
{
	memset(ulpq, 0, sizeof(struct sctp_ulpq));

	ulpq->asoc = asoc;
	skb_queue_head_init(&ulpq->reasm);
	skb_queue_head_init(&ulpq->lobby);
	ulpq->pd_mode  = 0;
	ulpq->malloced = 0;

	return ulpq;
}
/* Flush the reassembly and ordering queues.  */
void sctp_ulpq_flush(struct sctp_ulpq *ulpq)
{
	struct sk_buff *skb;
	struct sctp_ulpevent *event;

	while ((skb = __skb_dequeue(&ulpq->lobby)) != NULL) {
		event = sctp_skb2event(skb);
		sctp_ulpevent_free(event);
	}

	while ((skb = __skb_dequeue(&ulpq->reasm)) != NULL) {
		event = sctp_skb2event(skb);
		sctp_ulpevent_free(event);
	}
}
/* Dispose of a ulpqueue.  */
void sctp_ulpq_free(struct sctp_ulpq *ulpq)
{
	sctp_ulpq_flush(ulpq);
	if (ulpq->malloced)
		kfree(ulpq);
}
/* Process an incoming DATA chunk.  */
int sctp_ulpq_tail_data(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
			gfp_t gfp)
{
	struct sk_buff_head temp;
	sctp_data_chunk_t *hdr;
	struct sctp_ulpevent *event;

	hdr = (sctp_data_chunk_t *) chunk->chunk_hdr;

	/* Create an event from the incoming chunk. */
	event = sctp_ulpevent_make_rcvmsg(chunk->asoc, chunk, gfp);
	if (!event)
		return -ENOMEM;

	/* Do reassembly if needed.  */
	event = sctp_ulpq_reasm(ulpq, event);

	/* Do ordering if needed.  */
	if ((event) && (event->msg_flags & MSG_EOR)) {
		/* Create a temporary list to collect chunks on.  */
		skb_queue_head_init(&temp);
		__skb_queue_tail(&temp, sctp_event2skb(event));
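		/* Note: 'temp' lives on this stack frame.  Once the event's
		 * skb is queued on it, the skb's prev pointer aims back at
		 * the list head, which is how sctp_ulpq_tail_event() will
		 * recover the whole list further down.
		 */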
		event = sctp_ulpq_order(ulpq, event);
	}

	/* Send event to the ULP.  'event' is the sctp_ulpevent for the
	 * very first SKB on the 'temp' list.
	 */
	if (event)
		sctp_ulpq_tail_event(ulpq, event);

	return 0;
}
/* Clear the partial delivery mode for this socket.  Note: This
 * assumes that no association is currently in partial delivery mode.
 */
int sctp_clear_pd(struct sock *sk)
{
	struct sctp_sock *sp = sctp_sk(sk);

	sp->pd_mode = 0;
	if (!skb_queue_empty(&sp->pd_lobby)) {
		struct list_head *list;
		sctp_skb_list_tail(&sp->pd_lobby, &sk->sk_receive_queue);
		list = (struct list_head *)&sctp_sk(sk)->pd_lobby;
		INIT_LIST_HEAD(list);
		return 1;
	}
	return 0;
}
/* Clear the pd_mode and restart any pending messages waiting for delivery. */
static int sctp_ulpq_clear_pd(struct sctp_ulpq *ulpq)
{
	ulpq->pd_mode = 0;
	return sctp_clear_pd(ulpq->asoc->base.sk);
}
/* Add a new event for propagation to the ULP.  If the SKB of 'event' is
 * on a list, it is the first such member of that list.
 */
int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
{
	struct sock *sk = ulpq->asoc->base.sk;
	struct sk_buff_head *queue, *skb_list;
	struct sk_buff *skb = sctp_event2skb(event);
	int clear_pd = 0;
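
	/* Recover the caller's temporary list, if any: on this kernel
	 * generation the sk_buff list is circular, so when this skb is the
	 * first member of a list its prev pointer points back at the
	 * struct sk_buff_head itself.
	 */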
	skb_list = (struct sk_buff_head *) skb->prev;

	/* If the socket is just going to throw this away, do not
	 * even try to deliver it.
	 */
	if (sock_flag(sk, SOCK_DEAD) || (sk->sk_shutdown & RCV_SHUTDOWN))
		goto out_free;

	/* Check if the user wishes to receive this event.  */
	if (!sctp_ulpevent_is_enabled(event, &sctp_sk(sk)->subscribe))
		goto out_free;

	/* If we are in partial delivery mode, post to the lobby until
	 * partial delivery is cleared, unless, of course, _this_
	 * association is the cause of the partial delivery.
	 */
	if (!sctp_sk(sk)->pd_mode) {
		queue = &sk->sk_receive_queue;
	} else if (ulpq->pd_mode) {
		/* If the association is in partial delivery, we
		 * need to finish delivering the partially processed
		 * packet before passing any other data.  This is
		 * because we don't truly support stream interleaving.
		 */
		if ((event->msg_flags & MSG_NOTIFICATION) ||
		    (SCTP_DATA_NOT_FRAG ==
			    (event->msg_flags & SCTP_DATA_FRAG_MASK)))
			queue = &sctp_sk(sk)->pd_lobby;
		else {
			clear_pd = event->msg_flags & MSG_EOR;
			queue = &sk->sk_receive_queue;
		}
	} else
		queue = &sctp_sk(sk)->pd_lobby;

	/* If we are harvesting multiple skbs they will be
	 * collected on a list.
	 */
	if (skb_list)
		sctp_skb_list_tail(skb_list, queue);
	else
		__skb_queue_tail(queue, skb);

	/* Did we just complete partial delivery and need to get
	 * rolling again?  Move pending data to the receive
	 * queue.
	 */
	if (clear_pd)
		sctp_ulpq_clear_pd(ulpq);

	if (queue == &sk->sk_receive_queue)
		sk->sk_data_ready(sk, 0);
	return 1;

out_free:
	if (skb_list)
		sctp_queue_purge_ulpevents(skb_list);
	else
		sctp_ulpevent_free(event);

	return 0;
}
/* 2nd Level Abstractions */
/* Helper function to store chunks that need to be reassembled.  */
static inline void sctp_ulpq_store_reasm(struct sctp_ulpq *ulpq,
					 struct sctp_ulpevent *event)
{
	struct sk_buff *pos;
	struct sctp_ulpevent *cevent;
	__u32 tsn, ctsn;

	tsn = event->tsn;

	/* See if it belongs at the end. */
	pos = skb_peek_tail(&ulpq->reasm);
	if (!pos) {
		__skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
		return;
	}

	/* Short circuit just dropping it at the end. */
	cevent = sctp_skb2event(pos);
	ctsn = cevent->tsn;
	if (TSN_lt(ctsn, tsn)) {
		__skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
		return;
	}

	/* Find the right place in this list. We store them by TSN.  */
	skb_queue_walk(&ulpq->reasm, pos) {
		cevent = sctp_skb2event(pos);
		ctsn = cevent->tsn;

		if (TSN_lt(tsn, ctsn))
			break;
	}

	/* Insert before pos. */
	__skb_insert(sctp_event2skb(event), pos->prev, pos, &ulpq->reasm);
}
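
/* The sort above relies on TSN_lt(), which compares TSNs using serial
 * number arithmetic (RFC 1982 style), so the queue order is preserved
 * across 32-bit TSN wraparound.
 */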
/* Helper function to return an event corresponding to the reassembled
 * datagram.
 * This routine creates a re-assembled skb given the first and last skb's
 * as stored in the reassembly queue. The skb's may be non-linear if the sctp
 * payload was fragmented on the way and ip had to reassemble them.
 * We add the rest of skb's to the first skb's fraglist.
 */
static struct sctp_ulpevent *sctp_make_reassembled_event(struct sk_buff_head *queue, struct sk_buff *f_frag, struct sk_buff *l_frag)
{
	struct sk_buff *pos;
	struct sk_buff *new = NULL;
	struct sctp_ulpevent *event;
	struct sk_buff *pnext, *last;
	struct sk_buff *list = skb_shinfo(f_frag)->frag_list;

	/* Store the pointer to the 2nd skb */
	if (f_frag == l_frag)
		pos = NULL;
	else
		pos = f_frag->next;

	/* Get the last skb in the f_frag's frag_list if present. */
	for (last = list; list; last = list, list = list->next);

	/* Add the list of remaining fragments to the first fragments
	 * frag_list.
	 */
	if (last)
		last->next = pos;
	else {
		if (skb_cloned(f_frag)) {
			/* This is a cloned skb, we can't just modify
			 * the frag_list.  We need a new skb to do that.
			 * Instead of calling skb_unshare(), we'll do it
			 * ourselves since we need to delay the free.
			 */
			new = skb_copy(f_frag, GFP_ATOMIC);
			if (!new)
				return NULL;	/* try again later */

			sctp_skb_set_owner_r(new, f_frag->sk);

			skb_shinfo(new)->frag_list = pos;
		} else
			skb_shinfo(f_frag)->frag_list = pos;
	}

	/* Remove the first fragment from the reassembly queue.  */
	__skb_unlink(f_frag, queue);

	/* If we did unshare, then free the old skb and re-assign. */
	if (new) {
		kfree_skb(f_frag);
		f_frag = new;
	}

	while (pos) {

		pnext = pos->next;

		/* Update the len and data_len fields of the first fragment. */
		f_frag->len += pos->len;
		f_frag->data_len += pos->len;

		/* Remove the fragment from the reassembly queue.  */
		__skb_unlink(pos, queue);

		/* Break if we have reached the last fragment.  */
		if (pos == l_frag)
			break;
		pos->next = pnext;
		pos = pnext;
	}

	event = sctp_skb2event(f_frag);
	SCTP_INC_STATS(SCTP_MIB_REASMUSRMSGS);

	return event;
}
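
/* For example, reassembling fragments of 100, 100 and 52 bytes yields
 * one event whose skb has len == 252 and data_len == 152: the first
 * fragment keeps its linear data and the other two hang off its
 * frag_list, so consumers must be prepared for a non-linear skb.
 */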
/* Helper function to check if an incoming chunk has filled up the last
 * missing fragment in a SCTP datagram and return the corresponding event.
 */
static inline struct sctp_ulpevent *sctp_ulpq_retrieve_reassembled(struct sctp_ulpq *ulpq)
{
	struct sk_buff *pos;
	struct sctp_ulpevent *cevent;
	struct sk_buff *first_frag = NULL;
	__u32 ctsn, next_tsn;
	struct sctp_ulpevent *retval = NULL;

	/* Initialized to 0 just to avoid compiler warning message.  Will
	 * never be used with this value. It is referenced only after it
	 * is set when we find the first fragment of a message.
	 */
	next_tsn = 0;

	/* The chunks are held in the reasm queue sorted by TSN.
	 * Walk through the queue sequentially and look for a sequence of
	 * fragmented chunks that complete a datagram.
	 * 'first_frag' and next_tsn are reset when we find a chunk which
	 * is the first fragment of a datagram. Once these 2 fields are set
	 * we expect to find the remaining middle fragments and the last
	 * fragment in order. If not, first_frag is reset to NULL and we
	 * start the next pass when we find another first fragment.
	 */
	skb_queue_walk(&ulpq->reasm, pos) {
		cevent = sctp_skb2event(pos);
		ctsn = cevent->tsn;

		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
		case SCTP_DATA_FIRST_FRAG:
			first_frag = pos;
			next_tsn = ctsn + 1;
			break;

		case SCTP_DATA_MIDDLE_FRAG:
			if ((first_frag) && (ctsn == next_tsn))
				next_tsn++;
			else
				first_frag = NULL;
			break;

		case SCTP_DATA_LAST_FRAG:
			if (first_frag && (ctsn == next_tsn))
				goto found;
			else
				first_frag = NULL;
			break;
		}
	}
done:
	return retval;

found:
	retval = sctp_make_reassembled_event(&ulpq->reasm, first_frag, pos);
	if (retval)
		retval->msg_flags |= MSG_EOR;
	goto done;
}
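
/* Example walk: with the reasm queue holding FIRST(tsn 10), MIDDLE(11)
 * and LAST(12), the walk reaches the last fragment with next_tsn == 12
 * and jumps to 'found'; a gap, such as a missing MIDDLE(11), instead
 * resets first_frag and the message stays queued.
 */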
/* Retrieve the next set of fragments of a partial message. */
static inline struct sctp_ulpevent *sctp_ulpq_retrieve_partial(struct sctp_ulpq *ulpq)
{
	struct sk_buff *pos, *last_frag, *first_frag;
	struct sctp_ulpevent *cevent;
	__u32 ctsn, next_tsn;
	int is_last;
	struct sctp_ulpevent *retval;

	/* The chunks are held in the reasm queue sorted by TSN.
	 * Walk through the queue sequentially and look for the first
	 * sequence of fragmented chunks.
	 */

	if (skb_queue_empty(&ulpq->reasm))
		return NULL;

	last_frag = first_frag = NULL;
	retval = NULL;
	next_tsn = 0;
	is_last = 0;

	skb_queue_walk(&ulpq->reasm, pos) {
		cevent = sctp_skb2event(pos);
		ctsn = cevent->tsn;

		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
		case SCTP_DATA_MIDDLE_FRAG:
			if (!first_frag) {
				first_frag = pos;
				next_tsn = ctsn + 1;
				last_frag = pos;
			} else if (next_tsn == ctsn)
				next_tsn++;
			else
				goto done;
			break;
		case SCTP_DATA_LAST_FRAG:
			if (!first_frag)
				first_frag = pos;
			else if (ctsn != next_tsn)
				goto done;
			last_frag = pos;
			is_last = 1;
			goto done;
		default:
			return NULL;
		}
	}

	/* We have the reassembled event. There is no need to look
	 * further.
	 */
done:
	retval = sctp_make_reassembled_event(&ulpq->reasm, first_frag, last_frag);
	if (retval && is_last)
		retval->msg_flags |= MSG_EOR;

	return retval;
}
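
/* sctp_ulpq_reasm() invokes this only while the association is already
 * in partial delivery and the new fragment falls at or below the
 * cumulative TSN ack point, so the remaining pieces of the partially
 * delivered message are released strictly in TSN order.
 */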
/* Helper function to reassemble chunks.  Hold chunks on the reasm queue that
 * need reassembling.
 */
static struct sctp_ulpevent *sctp_ulpq_reasm(struct sctp_ulpq *ulpq,
					     struct sctp_ulpevent *event)
{
	struct sctp_ulpevent *retval = NULL;

	/* Check if this is part of a fragmented message.  */
	if (SCTP_DATA_NOT_FRAG == (event->msg_flags & SCTP_DATA_FRAG_MASK)) {
		event->msg_flags |= MSG_EOR;
		return event;
	}

	sctp_ulpq_store_reasm(ulpq, event);
	if (!ulpq->pd_mode)
		retval = sctp_ulpq_retrieve_reassembled(ulpq);
	else {
		__u32 ctsn, ctsnap;

		/* Do not even bother unless this is the next tsn to
		 * be delivered.
		 */
		ctsn = event->tsn;
		ctsnap = sctp_tsnmap_get_ctsn(&ulpq->asoc->peer.tsn_map);
		if (TSN_lte(ctsn, ctsnap))
			retval = sctp_ulpq_retrieve_partial(ulpq);
	}

	return retval;
}
/* Retrieve the first part (sequential fragments) for partial delivery.  */
static inline struct sctp_ulpevent *sctp_ulpq_retrieve_first(struct sctp_ulpq *ulpq)
{
	struct sk_buff *pos, *last_frag, *first_frag;
	struct sctp_ulpevent *cevent;
	__u32 ctsn, next_tsn;
	struct sctp_ulpevent *retval;

	/* The chunks are held in the reasm queue sorted by TSN.
	 * Walk through the queue sequentially and look for a sequence of
	 * fragmented chunks that start a datagram.
	 */

	if (skb_queue_empty(&ulpq->reasm))
		return NULL;

	last_frag = first_frag = NULL;
	retval = NULL;
	next_tsn = 0;

	skb_queue_walk(&ulpq->reasm, pos) {
		cevent = sctp_skb2event(pos);
		ctsn = cevent->tsn;

		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
		case SCTP_DATA_FIRST_FRAG:
			if (!first_frag) {
				first_frag = pos;
				next_tsn = ctsn + 1;
				last_frag = pos;
			} else
				goto done;
			break;

		case SCTP_DATA_MIDDLE_FRAG:
			if (!first_frag)
				return NULL;
			if (ctsn == next_tsn) {
				next_tsn++;
				last_frag = pos;
			} else
				goto done;
			break;
		default:
			return NULL;
		}
	}

	/* We have the reassembled event. There is no need to look
	 * further.
	 */
done:
	retval = sctp_make_reassembled_event(&ulpq->reasm, first_frag, last_frag);
	return retval;
}
/* Helper function to gather skbs that have possibly become
 * ordered by an incoming chunk.
 */
static inline void sctp_ulpq_retrieve_ordered(struct sctp_ulpq *ulpq,
					      struct sctp_ulpevent *event)
{
	struct sk_buff_head *event_list;
	struct sk_buff *pos, *tmp;
	struct sctp_ulpevent *cevent;
	struct sctp_stream *in;
	__u16 sid, csid;
	__u16 ssn, cssn;

	sid = event->stream;
	ssn = event->ssn;
	in  = &ulpq->asoc->ssnmap->in;

	event_list = (struct sk_buff_head *) sctp_event2skb(event)->prev;

	/* We are holding the chunks by stream, by SSN.  */
	sctp_skb_for_each(pos, &ulpq->lobby, tmp) {
		cevent = (struct sctp_ulpevent *) pos->cb;
		csid = cevent->stream;
		cssn = cevent->ssn;

		/* Have we gone too far?  */
		if (csid > sid)
			break;

		/* Have we not gone far enough?  */
		if (csid < sid)
			continue;

		if (cssn != sctp_ssn_peek(in, sid))
			break;

		/* Found it, so mark in the ssnmap. */
		sctp_ssn_next(in, sid);

		__skb_unlink(pos, &ulpq->lobby);

		/* Attach all gathered skbs to the event.  */
		__skb_queue_tail(event_list, pos);
	}
}
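
/* The early 'break' tests above are safe because the lobby is kept
 * sorted by stream id first and SSN second (see
 * sctp_ulpq_store_ordered() below), so once csid passes sid nothing
 * later in the queue can match.
 */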
/* Helper function to store chunks needing ordering.  */
static inline void sctp_ulpq_store_ordered(struct sctp_ulpq *ulpq,
					   struct sctp_ulpevent *event)
{
	struct sk_buff *pos;
	struct sctp_ulpevent *cevent;
	__u16 sid, csid;
	__u16 ssn, cssn;

	pos = skb_peek_tail(&ulpq->lobby);
	if (!pos) {
		__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
		return;
	}

	sid = event->stream;
	ssn = event->ssn;

	cevent = (struct sctp_ulpevent *) pos->cb;
	csid = cevent->stream;
	cssn = cevent->ssn;
	if (sid > csid) {
		__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
		return;
	}

	if ((sid == csid) && SSN_lt(cssn, ssn)) {
		__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
		return;
	}

	/* Find the right place in this list.  We store them by
	 * stream ID and then by SSN.
	 */
	skb_queue_walk(&ulpq->lobby, pos) {
		cevent = (struct sctp_ulpevent *) pos->cb;
		csid = cevent->stream;
		cssn = cevent->ssn;

		if (csid > sid)
			break;
		if (csid == sid && SSN_lt(ssn, cssn))
			break;
	}

	/* Insert before pos. */
	__skb_insert(sctp_event2skb(event), pos->prev, pos, &ulpq->lobby);
}
static struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *ulpq,
					     struct sctp_ulpevent *event)
{
	__u16 sid, ssn;
	struct sctp_stream *in;

	/* Check if this message needs ordering.  */
	if (SCTP_DATA_UNORDERED & event->msg_flags)
		return event;

	/* Note: The stream ID must be verified before this routine.  */
	sid = event->stream;
	ssn = event->ssn;
	in  = &ulpq->asoc->ssnmap->in;

	/* Is this the expected SSN for this stream ID?  */
	if (ssn != sctp_ssn_peek(in, sid)) {
		/* We've received something out of order, so find where it
		 * needs to be placed.  We order by stream and then by SSN.
		 */
		sctp_ulpq_store_ordered(ulpq, event);
		return NULL;
	}

	/* Mark that the next chunk has been found.  */
	sctp_ssn_next(in, sid);

	/* Go find any other chunks that were waiting for
	 * ordering.
	 */
	sctp_ulpq_retrieve_ordered(ulpq, event);

	return event;
}
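
/* Example: if stream 2 expects SSN 5 (sctp_ssn_peek() == 5), a chunk
 * with ssn 7 is parked in the lobby; when ssn 5 finally arrives it is
 * delivered, the expected SSN advances, and any parked 6, 7, ... are
 * gathered onto the same event list by sctp_ulpq_retrieve_ordered().
 */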
/* Helper function to gather skbs that have possibly become
 * ordered by forward tsn skipping their dependencies.
 */
static inline void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq)
{
	struct sk_buff *pos, *tmp;
	struct sctp_ulpevent *cevent;
	struct sctp_ulpevent *event;
	struct sctp_stream *in;
	struct sk_buff_head temp;
	__u16 csid, cssn;

	in  = &ulpq->asoc->ssnmap->in;

	/* We are holding the chunks by stream, by SSN.  */
	skb_queue_head_init(&temp);
	event = NULL;
	sctp_skb_for_each(pos, &ulpq->lobby, tmp) {
		cevent = (struct sctp_ulpevent *) pos->cb;
		csid = cevent->stream;
		cssn = cevent->ssn;

		if (cssn != sctp_ssn_peek(in, csid))
			break;

		/* Found it, so mark in the ssnmap. */
		sctp_ssn_next(in, csid);

		__skb_unlink(pos, &ulpq->lobby);
		if (!event) {
			/* Create a temporary list to collect chunks on.  */
			event = sctp_skb2event(pos);
			__skb_queue_tail(&temp, sctp_event2skb(event));
		} else {
			/* Attach all gathered skbs to the event.  */
			__skb_queue_tail(&temp, pos);
		}
	}

	/* Send event to the ULP.  'event' is the sctp_ulpevent for the
	 * very first SKB on the 'temp' list.
	 */
	if (event)
		sctp_ulpq_tail_event(ulpq, event);
}
/* Skip over an SSN. */
void sctp_ulpq_skip(struct sctp_ulpq *ulpq, __u16 sid, __u16 ssn)
{
	struct sctp_stream *in;

	/* Note: The stream ID must be verified before this routine.  */
	in = &ulpq->asoc->ssnmap->in;

	/* Is this an old SSN?  If so ignore. */
	if (SSN_lt(ssn, sctp_ssn_peek(in, sid)))
		return;

	/* Mark that we are no longer expecting this SSN or lower. */
	sctp_ssn_skip(in, sid, ssn);

	/* Go find any other chunks that were waiting for
	 * ordering and deliver them if needed.
	 */
	sctp_ulpq_reap_ordered(ulpq);

	return;
}
/* Renege 'needed' bytes from the ordering queue. */
static __u16 sctp_ulpq_renege_order(struct sctp_ulpq *ulpq, __u16 needed)
{
	__u16 freed = 0;
	__u32 tsn;
	struct sk_buff *skb;
	struct sctp_ulpevent *event;
	struct sctp_tsnmap *tsnmap;

	tsnmap = &ulpq->asoc->peer.tsn_map;

	while ((skb = __skb_dequeue_tail(&ulpq->lobby)) != NULL) {
		freed += skb_headlen(skb);
		event = sctp_skb2event(skb);
		tsn = event->tsn;

		sctp_ulpevent_free(event);
		sctp_tsnmap_renege(tsnmap, tsn);
		if (freed >= needed)
			return freed;
	}

	return freed;
}
/* Renege 'needed' bytes from the reassembly queue. */
static __u16 sctp_ulpq_renege_frags(struct sctp_ulpq *ulpq, __u16 needed)
{
	__u16 freed = 0;
	__u32 tsn;
	struct sk_buff *skb;
	struct sctp_ulpevent *event;
	struct sctp_tsnmap *tsnmap;

	tsnmap = &ulpq->asoc->peer.tsn_map;

	/* Walk backwards through the list, reneging the newest TSNs. */
	while ((skb = __skb_dequeue_tail(&ulpq->reasm)) != NULL) {
		freed += skb_headlen(skb);
		event = sctp_skb2event(skb);
		tsn = event->tsn;

		sctp_ulpevent_free(event);
		sctp_tsnmap_renege(tsnmap, tsn);
		if (freed >= needed)
			return freed;
	}

	return freed;
}
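
/* Both renege helpers drop events from the tail of their queue, i.e.
 * the newest TSNs: sctp_tsnmap_renege() clears their marks, so later
 * SACKs report them as missing and the peer can retransmit once the
 * receive window recovers.
 */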
/* Partial deliver the first message as there is pressure on rwnd. */
void sctp_ulpq_partial_delivery(struct sctp_ulpq *ulpq,
				struct sctp_chunk *chunk,
				gfp_t gfp)
{
	struct sctp_ulpevent *event;
	struct sctp_association *asoc;

	asoc = ulpq->asoc;

	/* Are we already in partial delivery mode?  */
	if (!sctp_sk(asoc->base.sk)->pd_mode) {

		/* Is partial delivery possible?  */
		event = sctp_ulpq_retrieve_first(ulpq);
		/* Send event to the ULP.  */
		if (event) {
			sctp_ulpq_tail_event(ulpq, event);
			sctp_sk(asoc->base.sk)->pd_mode = 1;
			ulpq->pd_mode = 1;
			return;
		}
	}
}
/* Renege some packets to make room for an incoming chunk. */
void sctp_ulpq_renege(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
		      gfp_t gfp)
{
	struct sctp_association *asoc;
	__u16 needed, freed;

	asoc = ulpq->asoc;

	if (chunk) {
		needed = ntohs(chunk->chunk_hdr->length);
		needed -= sizeof(sctp_data_chunk_t);
	} else
		needed = SCTP_DEFAULT_MAXWINDOW;

	freed = 0;

	if (skb_queue_empty(&asoc->base.sk->sk_receive_queue)) {
		freed = sctp_ulpq_renege_order(ulpq, needed);
		if (freed < needed) {
			freed += sctp_ulpq_renege_frags(ulpq, needed - freed);
		}
	}
	/* If able to free enough room, accept this chunk. */
	if (chunk && (freed >= needed)) {
		__u32 tsn;
		tsn = ntohl(chunk->subh.data_hdr->tsn);
		sctp_tsnmap_mark(&asoc->peer.tsn_map, tsn);
		sctp_ulpq_tail_data(ulpq, chunk, gfp);

		sctp_ulpq_partial_delivery(ulpq, chunk, gfp);
	}

	return;
}
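
/* 'needed' above is the payload size of the incoming chunk: the length
 * from the chunk header minus sizeof(sctp_data_chunk_t).  For example,
 * a 1452-byte DATA chunk with its 16-byte chunk plus data header needs
 * 1436 bytes of queue space freed.
 */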
/* Notify the application if an association is aborted and in
 * partial delivery mode.  Send up any pending received messages.
 */
void sctp_ulpq_abort_pd(struct sctp_ulpq *ulpq, gfp_t gfp)
{
	struct sctp_ulpevent *ev = NULL;
	struct sock *sk;

	if (!ulpq->pd_mode)
		return;

	sk = ulpq->asoc->base.sk;
	if (sctp_ulpevent_type_enabled(SCTP_PARTIAL_DELIVERY_EVENT,
				       &sctp_sk(sk)->subscribe))
		ev = sctp_ulpevent_make_pdapi(ulpq->asoc,
					      SCTP_PARTIAL_DELIVERY_ABORTED,
					      gfp);
	if (ev)
		__skb_queue_tail(&sk->sk_receive_queue, sctp_event2skb(ev));

	/* If there is data waiting, send it up the socket now. */
	if (sctp_ulpq_clear_pd(ulpq) || ev)
		sk->sk_data_ready(sk, 0);
}