/* SCTP kernel reference Implementation
 * (C) Copyright IBM Corp. 2001, 2004
 * Copyright (c) 1999-2000 Cisco, Inc.
 * Copyright (c) 1999-2001 Motorola, Inc.
 * Copyright (c) 2001 Intel Corp.
 * Copyright (c) 2001 Nokia, Inc.
 * Copyright (c) 2001 La Monte H.P. Yarroll
 *
 * This abstraction carries sctp events to the ULP (sockets).
 *
 * The SCTP reference implementation is free software;
 * you can redistribute it and/or modify it under the terms of
 * the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * The SCTP reference implementation is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; without even the implied
 *                 ************************
 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with GNU CC; see the file COPYING.  If not, write to
 * the Free Software Foundation, 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 *
 * Please send any bug reports or fixes you make to the
 * email address(es):
 *    lksctp developers <lksctp-developers@lists.sourceforge.net>
 *
 * Or submit a bug report through the following website:
 *    http://www.sf.net/projects/lksctp
 *
 * Written or modified by:
 *    Jon Grimm             <jgrimm@us.ibm.com>
 *    La Monte H.P. Yarroll <piggy@acm.org>
 *    Sridhar Samudrala     <sri@us.ibm.com>
 *
 * Any bugs reported given to us we will try to fix... any fixes shared will
 * be incorporated into the next SCTP release.
 */
#include <linux/types.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/sctp/structs.h>
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>

/* Forward declarations for internal helpers.  */
static struct sctp_ulpevent *sctp_ulpq_reasm(struct sctp_ulpq *ulpq,
                                             struct sctp_ulpevent *);
static struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *,
                                             struct sctp_ulpevent *);

/* 1st Level Abstractions */
/* Initialize a ULP queue from a block of memory.  */
struct sctp_ulpq *sctp_ulpq_init(struct sctp_ulpq *ulpq,
                                 struct sctp_association *asoc)
{
        memset(ulpq, 0, sizeof(struct sctp_ulpq));

        ulpq->asoc = asoc;
        skb_queue_head_init(&ulpq->reasm);
        skb_queue_head_init(&ulpq->lobby);
        ulpq->pd_mode  = 0;
        ulpq->malloced = 0;

        return ulpq;
}
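
/* Ownership note (illustrative sketch, not from the original source): the
 * init above clears 'malloced', so a caller that kmalloc'ed the ulpq rather
 * than embedding it in a larger structure must set the flag itself if it
 * wants sctp_ulpq_free() to kfree the memory:
 *
 *      ulpq = kmalloc(sizeof(*ulpq), GFP_KERNEL);
 *      if (ulpq) {
 *              sctp_ulpq_init(ulpq, asoc);
 *              ulpq->malloced = 1;
 *      }
 */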
/* Flush the reassembly and ordering queues.  */
static void sctp_ulpq_flush(struct sctp_ulpq *ulpq)
{
        struct sk_buff *skb;
        struct sctp_ulpevent *event;

        while ((skb = __skb_dequeue(&ulpq->lobby)) != NULL) {
                event = sctp_skb2event(skb);
                sctp_ulpevent_free(event);
        }

        while ((skb = __skb_dequeue(&ulpq->reasm)) != NULL) {
                event = sctp_skb2event(skb);
                sctp_ulpevent_free(event);
        }
}

/* Dispose of a ulpqueue.  */
void sctp_ulpq_free(struct sctp_ulpq *ulpq)
{
        sctp_ulpq_flush(ulpq);
        if (ulpq->malloced)
                kfree(ulpq);
}
/* Process an incoming DATA chunk.  */
int sctp_ulpq_tail_data(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
                        int gfp)
{
        struct sk_buff_head temp;
        sctp_data_chunk_t *hdr;
        struct sctp_ulpevent *event;

        hdr = (sctp_data_chunk_t *) chunk->chunk_hdr;

        /* Create an event from the incoming chunk. */
        event = sctp_ulpevent_make_rcvmsg(chunk->asoc, chunk, gfp);
        if (!event)
                return -ENOMEM;

        /* Do reassembly if needed.  */
        event = sctp_ulpq_reasm(ulpq, event);

        /* Do ordering if needed.  */
        if (event && (event->msg_flags & MSG_EOR)) {
                /* Create a temporary list to collect chunks on.  */
                skb_queue_head_init(&temp);
                __skb_queue_tail(&temp, sctp_event2skb(event));

                event = sctp_ulpq_order(ulpq, event);
        }

        /* Send event to the ULP.  */
        if (event)
                sctp_ulpq_tail_event(ulpq, event);

        return 0;
}
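
/* Delivery pipeline for a DATA chunk (summary of the function above):
 * chunk -> sctp_ulpevent_make_rcvmsg() -> sctp_ulpq_reasm(), which may
 * return NULL while fragments are still missing -> sctp_ulpq_order() for
 * complete (MSG_EOR) messages -> sctp_ulpq_tail_event() to hand the skb(s)
 * to the socket's receive queue.
 */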
/* Clear the partial delivery mode for this socket.   Note: This
 * assumes that no association is currently in partial delivery mode.
 */
int sctp_clear_pd(struct sock *sk)
{
        struct sctp_sock *sp = sctp_sk(sk);

        sp->pd_mode = 0;
        if (!skb_queue_empty(&sp->pd_lobby)) {
                struct list_head *list;
                sctp_skb_list_tail(&sp->pd_lobby, &sk->sk_receive_queue);
                list = (struct list_head *)&sctp_sk(sk)->pd_lobby;
                INIT_LIST_HEAD(list);
                return 1;
        }
        return 0;
}
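
/* Example (illustrative): while one association monopolizes the receive
 * queue in partial delivery mode, events from other associations pile up
 * in pd_lobby; the splice above moves them all onto sk_receive_queue in
 * one shot once partial delivery ends, returning 1 so the caller knows
 * to wake the socket.
 */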
/* Clear the pd_mode and restart any pending messages waiting for delivery. */
static int sctp_ulpq_clear_pd(struct sctp_ulpq *ulpq)
{
        ulpq->pd_mode = 0;
        return sctp_clear_pd(ulpq->asoc->base.sk);
}
/* Add a new event for propagation to the ULP.  */
int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
{
        struct sock *sk = ulpq->asoc->base.sk;
        struct sk_buff_head *queue;
        int clear_pd = 0;

        /* If the socket is just going to throw this away, do not
         * even try to deliver it.
         */
        if (sock_flag(sk, SOCK_DEAD) || (sk->sk_shutdown & RCV_SHUTDOWN))
                goto out_free;

        /* Check if the user wishes to receive this event.  */
        if (!sctp_ulpevent_is_enabled(event, &sctp_sk(sk)->subscribe))
                goto out_free;

        /* If we are in partial delivery mode, post to the lobby until
         * partial delivery is cleared, unless, of course, _this_
         * association is the cause of the partial delivery.
         */
        if (!sctp_sk(sk)->pd_mode) {
                queue = &sk->sk_receive_queue;
        } else if (ulpq->pd_mode) {
                if (event->msg_flags & MSG_NOTIFICATION)
                        queue = &sctp_sk(sk)->pd_lobby;
                else {
                        clear_pd = event->msg_flags & MSG_EOR;
                        queue = &sk->sk_receive_queue;
                }
        } else
                queue = &sctp_sk(sk)->pd_lobby;

        /* If we are harvesting multiple skbs they will be
         * collected on a list.
         */
        if (sctp_event2skb(event)->list)
                sctp_skb_list_tail(sctp_event2skb(event)->list, queue);
        else
                __skb_queue_tail(queue, sctp_event2skb(event));

        /* Did we just complete partial delivery and need to get
         * rolling again?  Move pending data to the receive
         * queue.
         */
        if (clear_pd)
                sctp_ulpq_clear_pd(ulpq);

        if (queue == &sk->sk_receive_queue)
                sk->sk_data_ready(sk, 0);
        return 1;

out_free:
        if (sctp_event2skb(event)->list)
                sctp_queue_purge_ulpevents(sctp_event2skb(event)->list);
        else
                sctp_ulpevent_free(event);
        return 0;
}
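
/* Queue selection above, by example (illustrative):
 * - socket not in pd_mode: everything goes straight to sk_receive_queue;
 * - socket in pd_mode because of _this_ association: notifications wait in
 *   pd_lobby, data is delivered immediately, and a data event carrying
 *   MSG_EOR ends partial delivery;
 * - socket in pd_mode because of some other association: the event is
 *   parked in pd_lobby until sctp_clear_pd() splices it back.
 */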
/* 2nd Level Abstractions */

/* Helper function to store chunks that need to be reassembled.  */
static inline void sctp_ulpq_store_reasm(struct sctp_ulpq *ulpq,
                                         struct sctp_ulpevent *event)
{
        struct sk_buff *pos;
        struct sctp_ulpevent *cevent;
        __u32 tsn, ctsn;

        tsn = event->tsn;

        /* See if it belongs at the end. */
        pos = skb_peek_tail(&ulpq->reasm);
        if (!pos) {
                __skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
                return;
        }

        /* Short circuit just dropping it at the end. */
        cevent = sctp_skb2event(pos);
        ctsn = cevent->tsn;
        if (TSN_lt(ctsn, tsn)) {
                __skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
                return;
        }

        /* Find the right place in this list. We store them by TSN.  */
        skb_queue_walk(&ulpq->reasm, pos) {
                cevent = sctp_skb2event(pos);
                ctsn = cevent->tsn;

                if (TSN_lt(tsn, ctsn))
                        break;
        }

        /* Insert before pos. */
        __skb_insert(sctp_event2skb(event), pos->prev, pos, &ulpq->reasm);
}
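
/* Insertion order, by example (illustrative): with TSNs 5, 7, 8 queued,
 * storing TSN 6 fails the tail short-circuit (8 is not less than 6), the
 * walk stops at 7 (the first TSN greater than 6), and the insert lands
 * between 5 and 7, keeping the queue sorted: 5, 6, 7, 8.
 */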
/* Helper function to return an event corresponding to the reassembled
 * datagram.
 * This routine creates a re-assembled skb given the first and last skbs
 * as stored in the reassembly queue. The skbs may be non-linear if the
 * sctp payload was fragmented on the way and IP had to reassemble them.
 * We add the rest of the skbs to the first skb's frag_list.
 */
static struct sctp_ulpevent *sctp_make_reassembled_event(struct sk_buff *f_frag, struct sk_buff *l_frag)
{
        struct sk_buff *pos;
        struct sctp_ulpevent *event;
        struct sk_buff *pnext, *last;
        struct sk_buff *list = skb_shinfo(f_frag)->frag_list;

        /* Store the pointer to the 2nd skb */
        if (f_frag == l_frag)
                pos = NULL;
        else
                pos = f_frag->next;

        /* Get the last skb in the f_frag's frag_list if present. */
        for (last = list; list; last = list, list = list->next);

        /* Add the list of remaining fragments to the first fragment's
         * frag_list.
         */
        if (last)
                last->next = pos;
        else
                skb_shinfo(f_frag)->frag_list = pos;

        /* Remove the first fragment from the reassembly queue.  */
        __skb_unlink(f_frag, f_frag->list);
        while (pos) {
                pnext = pos->next;

                /* Update the len and data_len fields of the first fragment. */
                f_frag->len += pos->len;
                f_frag->data_len += pos->len;

                /* Remove the fragment from the reassembly queue.  */
                __skb_unlink(pos, pos->list);

                /* Break if we have reached the last fragment.  */
                if (pos == l_frag)
                        break;
                pos->next = pnext;
                pos = pnext;
        }

        event = sctp_skb2event(f_frag);
        SCTP_INC_STATS(SCTP_MIB_REASMUSRMSGS);

        return event;
}
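
/* Worked example (illustrative): reassembling fragments with TSNs 7 (first),
 * 8 and 9 (last). Fragment 7 is unlinked from the reasm queue; 8 and 9 are
 * unlinked and chained onto 7's frag_list, after any frags IP reassembly
 * already put there; and 7's len/data_len grow by the lengths of 8 and 9,
 * so the one skb now describes the whole datagram.
 */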
/* Helper function to check if an incoming chunk has filled up the last
 * missing fragment in a SCTP datagram and return the corresponding event.
 */
static inline struct sctp_ulpevent *sctp_ulpq_retrieve_reassembled(struct sctp_ulpq *ulpq)
{
        struct sk_buff *pos;
        struct sctp_ulpevent *cevent;
        struct sk_buff *first_frag = NULL;
        __u32 ctsn, next_tsn;
        struct sctp_ulpevent *retval = NULL;

        /* Initialized to 0 just to avoid compiler warning message.  Will
         * never be used with this value. It is referenced only after it
         * is set when we find the first fragment of a message.
         */
        next_tsn = 0;

        /* The chunks are held in the reasm queue sorted by TSN.
         * Walk through the queue sequentially and look for a sequence of
         * fragmented chunks that complete a datagram.
         * 'first_frag' and next_tsn are reset when we find a chunk which
         * is the first fragment of a datagram. Once these 2 fields are set
         * we expect to find the remaining middle fragments and the last
         * fragment in order. If not, first_frag is reset to NULL and we
         * start the next pass when we find another first fragment.
         */
        skb_queue_walk(&ulpq->reasm, pos) {
                cevent = sctp_skb2event(pos);
                ctsn = cevent->tsn;

                switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
                case SCTP_DATA_FIRST_FRAG:
                        first_frag = pos;
                        next_tsn = ctsn + 1;
                        break;

                case SCTP_DATA_MIDDLE_FRAG:
                        if (first_frag && (ctsn == next_tsn))
                                next_tsn++;
                        else
                                first_frag = NULL;
                        break;

                case SCTP_DATA_LAST_FRAG:
                        if (first_frag && (ctsn == next_tsn))
                                goto found;
                        else
                                first_frag = NULL;
                        break;
                }
        }
done:
        return retval;
found:
        retval = sctp_make_reassembled_event(first_frag, pos);
        if (retval)
                retval->msg_flags |= MSG_EOR;
        goto done;
}
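
/* Walk above, by example (illustrative): with the queue holding TSNs
 * 10 (FIRST), 11 (MIDDLE), 12 (LAST), the FIRST sets first_frag and
 * next_tsn = 11; each in-sequence MIDDLE advances next_tsn; the LAST
 * matching next_tsn jumps to 'found', and the whole 10-12 run is folded
 * into one event flagged MSG_EOR. A gap (say 11 missing) resets first_frag
 * and the scan waits for a later FIRST fragment.
 */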
/* Retrieve the next set of fragments of a partial message. */
static inline struct sctp_ulpevent *sctp_ulpq_retrieve_partial(struct sctp_ulpq *ulpq)
{
        struct sk_buff *pos, *last_frag, *first_frag;
        struct sctp_ulpevent *cevent;
        __u32 ctsn, next_tsn;
        int is_last;
        struct sctp_ulpevent *retval;

        /* The chunks are held in the reasm queue sorted by TSN.
         * Walk through the queue sequentially and look for the first
         * sequence of fragmented chunks.
         */

        if (skb_queue_empty(&ulpq->reasm))
                return NULL;

        last_frag = first_frag = NULL;
        retval = NULL;
        next_tsn = 0;
        is_last = 0;

        skb_queue_walk(&ulpq->reasm, pos) {
                cevent = sctp_skb2event(pos);
                ctsn = cevent->tsn;

                switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
                case SCTP_DATA_MIDDLE_FRAG:
                        if (!first_frag) {
                                first_frag = pos;
                                next_tsn = ctsn + 1;
                                last_frag = pos;
                        } else if (next_tsn == ctsn) {
                                /* Track the newest in-sequence fragment so
                                 * the reassembled event covers the whole run.
                                 */
                                last_frag = pos;
                                next_tsn++;
                        } else
                                goto done;
                        break;
                case SCTP_DATA_LAST_FRAG:
                        if (!first_frag)
                                first_frag = pos;
                        else if (ctsn != next_tsn)
                                goto done;
                        last_frag = pos;
                        is_last = 1;
                        goto done;
                default:
                        return NULL;
                }
        }

        /* We have the reassembled event. There is no need to look
         * further.
         */
done:
        retval = sctp_make_reassembled_event(first_frag, last_frag);
        if (retval && is_last)
                retval->msg_flags |= MSG_EOR;

        return retval;
}
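
/* Example (illustrative): in partial delivery mode with the queue holding
 * MIDDLE 13, MIDDLE 14, LAST 15, the walk collects the in-sequence run and
 * returns one event covering 13-15 with MSG_EOR set; if the LAST fragment
 * were still missing, the run would be delivered without MSG_EOR and the
 * message would remain open.
 */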
/* Helper function to reassemble chunks.  Hold chunks on the reasm queue that
 * need reassembling.
 */
static struct sctp_ulpevent *sctp_ulpq_reasm(struct sctp_ulpq *ulpq,
                                             struct sctp_ulpevent *event)
{
        struct sctp_ulpevent *retval = NULL;

        /* Check if this is part of a fragmented message.  */
        if (SCTP_DATA_NOT_FRAG == (event->msg_flags & SCTP_DATA_FRAG_MASK)) {
                event->msg_flags |= MSG_EOR;
                return event;
        }

        sctp_ulpq_store_reasm(ulpq, event);
        if (!ulpq->pd_mode)
                retval = sctp_ulpq_retrieve_reassembled(ulpq);
        else {
                __u32 ctsn, ctsnap;

                /* Do not even bother unless this is the next tsn to
                 * be delivered.
                 */
                ctsn = event->tsn;
                ctsnap = sctp_tsnmap_get_ctsn(&ulpq->asoc->peer.tsn_map);
                if (TSN_lte(ctsn, ctsnap))
                        retval = sctp_ulpq_retrieve_partial(ulpq);
        }

        return retval;
}
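
/* The TSN_lte() gate above, restated (illustrative): during partial
 * delivery we only try to pull more fragments when the new event's TSN is
 * at or below the cumulative TSN ack point, i.e. when it sits inside the
 * contiguous received region and can actually extend the run currently
 * being delivered; anything beyond the ack point still has holes ahead
 * of it.
 */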
/* Retrieve the first part (sequential fragments) for partial delivery.  */
static inline struct sctp_ulpevent *sctp_ulpq_retrieve_first(struct sctp_ulpq *ulpq)
{
        struct sk_buff *pos, *last_frag, *first_frag;
        struct sctp_ulpevent *cevent;
        __u32 ctsn, next_tsn;
        struct sctp_ulpevent *retval;

        /* The chunks are held in the reasm queue sorted by TSN.
         * Walk through the queue sequentially and look for a sequence of
         * fragmented chunks that start a datagram.
         */

        if (skb_queue_empty(&ulpq->reasm))
                return NULL;

        last_frag = first_frag = NULL;
        retval = NULL;
        next_tsn = 0;

        skb_queue_walk(&ulpq->reasm, pos) {
                cevent = sctp_skb2event(pos);
                ctsn = cevent->tsn;

                switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
                case SCTP_DATA_FIRST_FRAG:
                        if (!first_frag) {
                                first_frag = pos;
                                next_tsn = ctsn + 1;
                                last_frag = pos;
                        } else
                                goto done;
                        break;

                case SCTP_DATA_MIDDLE_FRAG:
                        if (!first_frag)
                                return NULL;
                        if (ctsn == next_tsn) {
                                next_tsn++;
                                last_frag = pos;
                        } else
                                goto done;
                        break;
                default:
                        return NULL;
                }
        }

        /* We have the reassembled event. There is no need to look
         * further.
         */
done:
        retval = sctp_make_reassembled_event(first_frag, last_frag);
        return retval;
}
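
/* Example (illustrative): with FIRST 20 and MIDDLE 21 queued but the LAST
 * fragment still outstanding, this returns one event covering 20-21 without
 * MSG_EOR; delivering it starts partial delivery, and later fragments are
 * then fed out through sctp_ulpq_retrieve_partial().
 */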
/* Helper function to gather skbs that have possibly become
 * ordered by an incoming chunk.
 */
static inline void sctp_ulpq_retrieve_ordered(struct sctp_ulpq *ulpq,
                                              struct sctp_ulpevent *event)
{
        struct sk_buff *pos, *tmp;
        struct sctp_ulpevent *cevent;
        struct sctp_stream *in;
        __u16 sid, csid;
        __u16 ssn, cssn;

        sid = event->stream;
        ssn = event->ssn;
        in  = &ulpq->asoc->ssnmap->in;

        /* We are holding the chunks by stream, by SSN.  */
        sctp_skb_for_each(pos, &ulpq->lobby, tmp) {
                cevent = (struct sctp_ulpevent *) pos->cb;
                csid = cevent->stream;
                cssn = cevent->ssn;

                /* Have we gone too far?  */
                if (csid > sid)
                        break;

                /* Have we not gone far enough?  */
                if (csid < sid)
                        continue;

                if (cssn != sctp_ssn_peek(in, sid))
                        break;

                /* Found it, so mark in the ssnmap. */
                sctp_ssn_next(in, sid);

                __skb_unlink(pos, pos->list);

                /* Attach all gathered skbs to the event.  */
                __skb_queue_tail(sctp_event2skb(event)->list, pos);
        }
}
/* Helper function to store chunks needing ordering.  */
static inline void sctp_ulpq_store_ordered(struct sctp_ulpq *ulpq,
                                           struct sctp_ulpevent *event)
{
        struct sk_buff *pos;
        struct sctp_ulpevent *cevent;
        __u16 sid, csid;
        __u16 ssn, cssn;

        pos = skb_peek_tail(&ulpq->lobby);
        if (!pos) {
                __skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
                return;
        }

        sid = event->stream;
        ssn = event->ssn;

        cevent = (struct sctp_ulpevent *) pos->cb;
        csid = cevent->stream;
        cssn = cevent->ssn;
        if (sid > csid) {
                __skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
                return;
        }

        if ((sid == csid) && SSN_lt(cssn, ssn)) {
                __skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
                return;
        }

        /* Find the right place in this list.  We store them by
         * stream ID and then by SSN.
         */
        skb_queue_walk(&ulpq->lobby, pos) {
                cevent = (struct sctp_ulpevent *) pos->cb;
                csid = cevent->stream;
                cssn = cevent->ssn;

                if (csid > sid)
                        break;
                if (csid == sid && SSN_lt(ssn, cssn))
                        break;
        }

        /* Insert before pos. */
        __skb_insert(sctp_event2skb(event), pos->prev, pos, &ulpq->lobby);
}
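
/* Insertion order, by example (illustrative): the lobby is sorted first by
 * stream id, then by SSN, so with (sid 1, ssn 2), (sid 1, ssn 4) and
 * (sid 3, ssn 0) queued, storing (sid 1, ssn 3) walks past ssn 2 and
 * inserts before ssn 4.
 */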
static struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *ulpq,
                                             struct sctp_ulpevent *event)
{
        __u16 sid, ssn;
        struct sctp_stream *in;

        /* Check if this message needs ordering.  */
        if (SCTP_DATA_UNORDERED & event->msg_flags)
                return event;

        /* Note: The stream ID must be verified before this routine.  */
        sid = event->stream;
        ssn = event->ssn;
        in  = &ulpq->asoc->ssnmap->in;

        /* Is this the expected SSN for this stream ID?  */
        if (ssn != sctp_ssn_peek(in, sid)) {
                /* We've received something out of order, so find where it
                 * needs to be placed.  We order by stream and then by SSN.
                 */
                sctp_ulpq_store_ordered(ulpq, event);
                return NULL;
        }

        /* Mark that the next chunk has been found.  */
        sctp_ssn_next(in, sid);

        /* Go find any other chunks that were waiting for
         * ordering.
         */
        sctp_ulpq_retrieve_ordered(ulpq, event);

        return event;
}
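
/* Example (illustrative): if stream 1 expects SSN 3 and SSN 5 arrives, the
 * event is parked in the lobby and NULL is returned; when SSN 3 finally
 * shows up it is delivered at once, the expected SSN advances, and
 * sctp_ulpq_retrieve_ordered() also delivers any consecutive SSNs (4, then
 * 5, ...) already waiting in the lobby.
 */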
/* Helper function to gather skbs that have possibly become
 * ordered by forward tsn skipping their dependencies.
 */
static inline void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq)
{
        struct sk_buff *pos, *tmp;
        struct sctp_ulpevent *cevent;
        struct sctp_ulpevent *event = NULL;
        struct sctp_stream *in;
        struct sk_buff_head temp;
        __u16 csid, cssn;

        in = &ulpq->asoc->ssnmap->in;

        /* We are holding the chunks by stream, by SSN.  */
        sctp_skb_for_each(pos, &ulpq->lobby, tmp) {
                cevent = (struct sctp_ulpevent *) pos->cb;
                csid = cevent->stream;
                cssn = cevent->ssn;

                if (cssn != sctp_ssn_peek(in, csid))
                        break;

                /* Found it, so mark in the ssnmap. */
                sctp_ssn_next(in, csid);

                __skb_unlink(pos, pos->list);
                if (!event) {
                        /* Create a temporary list to collect chunks on.  */
                        event = sctp_skb2event(pos);
                        skb_queue_head_init(&temp);
                        __skb_queue_tail(&temp, sctp_event2skb(event));
                } else {
                        /* Attach all gathered skbs to the event.  */
                        __skb_queue_tail(sctp_event2skb(event)->list, pos);
                }
        }

        /* Send event to the ULP.  */
        if (event)
                sctp_ulpq_tail_event(ulpq, event);
}
/* Skip over an SSN. */
void sctp_ulpq_skip(struct sctp_ulpq *ulpq, __u16 sid, __u16 ssn)
{
        struct sctp_stream *in;

        /* Note: The stream ID must be verified before this routine.  */
        in = &ulpq->asoc->ssnmap->in;

        /* Is this an old SSN?  If so ignore. */
        if (SSN_lt(ssn, sctp_ssn_peek(in, sid)))
                return;

        /* Mark that we are no longer expecting this SSN or lower. */
        sctp_ssn_skip(in, sid, ssn);

        /* Go find any other chunks that were waiting for
         * ordering and deliver them if needed.
         */
        sctp_ulpq_reap_ordered(ulpq);
        return;
}
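
/* Example (illustrative): a peer's FORWARD-TSN tells us to skip up to
 * (sid 2, ssn 7). sctp_ssn_skip() advances stream 2's expected SSN past 7,
 * and sctp_ulpq_reap_ordered() then delivers an SSN-8 message that had
 * been stuck in the lobby behind the abandoned ones.
 */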
/* Renege 'needed' bytes from the ordering queue. */
static __u16 sctp_ulpq_renege_order(struct sctp_ulpq *ulpq, __u16 needed)
{
        __u16 freed = 0;
        __u32 tsn;
        struct sk_buff *skb;
        struct sctp_ulpevent *event;
        struct sctp_tsnmap *tsnmap;

        tsnmap = &ulpq->asoc->peer.tsn_map;

        while ((skb = __skb_dequeue_tail(&ulpq->lobby)) != NULL) {
                freed += skb_headlen(skb);
                event = sctp_skb2event(skb);
                tsn = event->tsn;

                sctp_ulpevent_free(event);
                sctp_tsnmap_renege(tsnmap, tsn);
                if (freed >= needed)
                        return freed;
        }

        return freed;
}
/* Renege 'needed' bytes from the reassembly queue. */
static __u16 sctp_ulpq_renege_frags(struct sctp_ulpq *ulpq, __u16 needed)
{
        __u16 freed = 0;
        __u32 tsn;
        struct sk_buff *skb;
        struct sctp_ulpevent *event;
        struct sctp_tsnmap *tsnmap;

        tsnmap = &ulpq->asoc->peer.tsn_map;

        /* Walk backwards through the list, reneging the newest TSNs first. */
        while ((skb = __skb_dequeue_tail(&ulpq->reasm)) != NULL) {
                freed += skb_headlen(skb);
                event = sctp_skb2event(skb);
                tsn = event->tsn;

                sctp_ulpevent_free(event);
                sctp_tsnmap_renege(tsnmap, tsn);
                if (freed >= needed)
                        return freed;
        }

        return freed;
}
/* Partially deliver the first message, as there is pressure on rwnd. */
void sctp_ulpq_partial_delivery(struct sctp_ulpq *ulpq,
                                struct sctp_chunk *chunk, int gfp)
{
        struct sctp_ulpevent *event;
        struct sctp_association *asoc;

        asoc = ulpq->asoc;

        /* Are we already in partial delivery mode?  */
        if (!sctp_sk(asoc->base.sk)->pd_mode) {

                /* Is partial delivery possible?  */
                event = sctp_ulpq_retrieve_first(ulpq);
                /* Send event to the ULP.  */
                if (event) {
                        sctp_ulpq_tail_event(ulpq, event);
                        sctp_sk(asoc->base.sk)->pd_mode = 1;
                        ulpq->pd_mode = 1;
                        return;
                }
        }
}
/* Renege some packets to make room for an incoming chunk.  */
void sctp_ulpq_renege(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
                      int gfp)
{
        struct sctp_association *asoc;
        __u16 needed, freed;

        asoc = ulpq->asoc;

        if (chunk) {
                needed = ntohs(chunk->chunk_hdr->length);
                needed -= sizeof(sctp_data_chunk_t);
        } else
                needed = SCTP_DEFAULT_MAXWINDOW;

        freed = 0;

        if (skb_queue_empty(&asoc->base.sk->sk_receive_queue)) {
                freed = sctp_ulpq_renege_order(ulpq, needed);
                if (freed < needed) {
                        freed += sctp_ulpq_renege_frags(ulpq, needed - freed);
                }
        }
        /* If able to free enough room, accept this chunk. */
        if (chunk && (freed >= needed)) {
                __u32 tsn;
                tsn = ntohl(chunk->subh.data_hdr->tsn);
                sctp_tsnmap_mark(&asoc->peer.tsn_map, tsn);
                sctp_ulpq_tail_data(ulpq, chunk, gfp);

                sctp_ulpq_partial_delivery(ulpq, chunk, gfp);
        }

        return;
}
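
/* Accounting example (illustrative): for a DATA chunk whose
 * chunk_hdr->length is 1452 bytes, needed = 1452 - sizeof(sctp_data_chunk_t)
 * = 1436 payload bytes. Ordering-queue skbs are freed first (newest first),
 * then reassembly skbs, until 'freed' covers 'needed'; only then is the
 * chunk's TSN marked and the data queued.
 */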
/* Notify the application if an association is aborted and in
 * partial delivery mode.  Send up any pending received messages.
 */
void sctp_ulpq_abort_pd(struct sctp_ulpq *ulpq, int gfp)
{
        struct sctp_ulpevent *ev = NULL;
        struct sock *sk;

        if (!ulpq->pd_mode)
                return;

        sk = ulpq->asoc->base.sk;
        if (sctp_ulpevent_type_enabled(SCTP_PARTIAL_DELIVERY_EVENT,
                                       &sctp_sk(sk)->subscribe))
                ev = sctp_ulpevent_make_pdapi(ulpq->asoc,
                                              SCTP_PARTIAL_DELIVERY_ABORTED,
                                              gfp);
        if (ev)
                __skb_queue_tail(&sk->sk_receive_queue, sctp_event2skb(ev));

        /* If there is data waiting, send it up the socket now. */
        if (sctp_ulpq_clear_pd(ulpq) || ev)
                sk->sk_data_ready(sk, 0);
}