net/sctp/ulpqueue.c
/* SCTP kernel reference Implementation
 * (C) Copyright IBM Corp. 2001, 2004
 * Copyright (c) 1999-2000 Cisco, Inc.
 * Copyright (c) 1999-2001 Motorola, Inc.
 * Copyright (c) 2001 Intel Corp.
 * Copyright (c) 2001 Nokia, Inc.
 * Copyright (c) 2001 La Monte H.P. Yarroll
 *
 * This abstraction carries sctp events to the ULP (sockets).
 *
 * The SCTP reference implementation is free software;
 * you can redistribute it and/or modify it under the terms of
 * the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * The SCTP reference implementation is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; without even the implied
 *                 ************************
 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with GNU CC; see the file COPYING.  If not, write to
 * the Free Software Foundation, 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 *
 * Please send any bug reports or fixes you make to the
 * email address(es):
 *    lksctp developers <lksctp-developers@lists.sourceforge.net>
 *
 * Or submit a bug report through the following website:
 *    http://www.sf.net/projects/lksctp
 *
 * Written or modified by:
 *    Jon Grimm             <jgrimm@us.ibm.com>
 *    La Monte H.P. Yarroll <piggy@acm.org>
 *    Sridhar Samudrala     <sri@us.ibm.com>
 *
 * Any bugs reported given to us we will try to fix... any fixes shared will
 * be incorporated into the next SCTP release.
 */

#include <linux/types.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/sctp/structs.h>
#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>

/* Forward declarations for internal helpers. */
static struct sctp_ulpevent * sctp_ulpq_reasm(struct sctp_ulpq *ulpq,
					      struct sctp_ulpevent *);
static struct sctp_ulpevent * sctp_ulpq_order(struct sctp_ulpq *,
					      struct sctp_ulpevent *);

/* 1st Level Abstractions */

/* Create a new ULP queue. */
struct sctp_ulpq *sctp_ulpq_new(struct sctp_association *asoc, int gfp)
{
	struct sctp_ulpq *ulpq;

	ulpq = kmalloc(sizeof(struct sctp_ulpq), gfp);
	if (!ulpq)
		goto fail;
	if (!sctp_ulpq_init(ulpq, asoc))
		goto fail_init;
	ulpq->malloced = 1;
	return ulpq;

fail_init:
	kfree(ulpq);
fail:
	return NULL;
}

/* Initialize a ULP queue from a block of memory. */
struct sctp_ulpq *sctp_ulpq_init(struct sctp_ulpq *ulpq,
				 struct sctp_association *asoc)
{
	memset(ulpq, 0, sizeof(struct sctp_ulpq));

	ulpq->asoc = asoc;
	skb_queue_head_init(&ulpq->reasm);
	skb_queue_head_init(&ulpq->lobby);
	ulpq->pd_mode = 0;
	ulpq->malloced = 0;

	return ulpq;
}

/* Flush the reassembly and ordering queues. */
void sctp_ulpq_flush(struct sctp_ulpq *ulpq)
{
	struct sk_buff *skb;
	struct sctp_ulpevent *event;

	while ((skb = __skb_dequeue(&ulpq->lobby)) != NULL) {
		event = sctp_skb2event(skb);
		sctp_ulpevent_free(event);
	}

	while ((skb = __skb_dequeue(&ulpq->reasm)) != NULL) {
		event = sctp_skb2event(skb);
		sctp_ulpevent_free(event);
	}
}

/* Dispose of a ulpqueue. */
void sctp_ulpq_free(struct sctp_ulpq *ulpq)
{
	sctp_ulpq_flush(ulpq);
	if (ulpq->malloced)
		kfree(ulpq);
}

/* Process an incoming DATA chunk. */
int sctp_ulpq_tail_data(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
			int gfp)
{
	struct sk_buff_head temp;
	sctp_data_chunk_t *hdr;
	struct sctp_ulpevent *event;

	hdr = (sctp_data_chunk_t *) chunk->chunk_hdr;

	/* Create an event from the incoming chunk. */
	event = sctp_ulpevent_make_rcvmsg(chunk->asoc, chunk, gfp);
	if (!event)
		return -ENOMEM;

	/* Do reassembly if needed. */
	event = sctp_ulpq_reasm(ulpq, event);

	/* Do ordering if needed. */
	if (event && (event->msg_flags & MSG_EOR)) {
		/* Create a temporary list to collect chunks on. */
		skb_queue_head_init(&temp);
		__skb_queue_tail(&temp, sctp_event2skb(event));

		event = sctp_ulpq_order(ulpq, event);
	}

	/* Send event to the ULP. */
	if (event)
		sctp_ulpq_tail_event(ulpq, event);

	return 0;
}
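
/* Illustrative sketch only, not part of this file: the chunk-processing
 * side of the stack is expected to feed inbound DATA into this queue
 * roughly as
 *
 *	error = sctp_ulpq_tail_data(&asoc->ulpq, chunk, GFP_ATOMIC);
 *	if (error == -ENOMEM)
 *		... renege or drop handling ...
 *
 * The exact call sites live elsewhere in the SCTP stack; the helper
 * names above are the ones defined in this file, everything else is a
 * hedged usage sketch.
 */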

/* Add a new event for propagation to the ULP. */
/* Clear the partial delivery mode for this socket.  Note: This
 * assumes that no association is currently in partial delivery mode.
 */
int sctp_clear_pd(struct sock *sk)
{
	struct sctp_opt *sp;
	sp = sctp_sk(sk);

	sp->pd_mode = 0;
	if (!skb_queue_empty(&sp->pd_lobby)) {
		struct list_head *list;
		sctp_skb_list_tail(&sp->pd_lobby, &sk->sk_receive_queue);
		list = (struct list_head *)&sctp_sk(sk)->pd_lobby;
		INIT_LIST_HEAD(list);
		return 1;
	}
	return 0;
}

/* Clear the pd_mode and restart any pending messages waiting for delivery. */
static int sctp_ulpq_clear_pd(struct sctp_ulpq *ulpq)
{
	ulpq->pd_mode = 0;
	return sctp_clear_pd(ulpq->asoc->base.sk);
}
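
/* Deliver an event to the ULP.  Depending on the partial delivery state
 * of the socket and of this association, the event's skb (or its whole
 * skb list) is appended either to sk_receive_queue or to the socket's
 * pd_lobby, and the socket is only woken for the receive queue case.
 */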
int sctp_ulpq_tail_event(struct sctp_ulpq *ulpq, struct sctp_ulpevent *event)
{
	struct sock *sk = ulpq->asoc->base.sk;
	struct sk_buff_head *queue;
	int clear_pd = 0;

	/* If the socket is just going to throw this away, do not
	 * even try to deliver it.
	 */
	if (sock_flag(sk, SOCK_DEAD) || (sk->sk_shutdown & RCV_SHUTDOWN))
		goto out_free;

	/* Check if the user wishes to receive this event. */
	if (!sctp_ulpevent_is_enabled(event, &sctp_sk(sk)->subscribe))
		goto out_free;

	/* If we are in partial delivery mode, post to the lobby until
	 * partial delivery is cleared, unless, of course, _this_
	 * association is the cause of the partial delivery.
	 */
	if (!sctp_sk(sk)->pd_mode) {
		queue = &sk->sk_receive_queue;
	} else if (ulpq->pd_mode) {
		if (event->msg_flags & MSG_NOTIFICATION)
			queue = &sctp_sk(sk)->pd_lobby;
		else {
			clear_pd = event->msg_flags & MSG_EOR;
			queue = &sk->sk_receive_queue;
		}
	} else
		queue = &sctp_sk(sk)->pd_lobby;

	/* If we are harvesting multiple skbs they will be
	 * collected on a list.
	 */
	if (sctp_event2skb(event)->list)
		sctp_skb_list_tail(sctp_event2skb(event)->list, queue);
	else
		__skb_queue_tail(queue, sctp_event2skb(event));

	/* Did we just complete partial delivery and need to get
	 * rolling again?  Move pending data to the receive
	 * queue.
	 */
	if (clear_pd)
		sctp_ulpq_clear_pd(ulpq);

	if (queue == &sk->sk_receive_queue)
		sk->sk_data_ready(sk, 0);
	return 1;

out_free:
	if (sctp_event2skb(event)->list)
		sctp_queue_purge_ulpevents(sctp_event2skb(event)->list);
	else
		sctp_ulpevent_free(event);
	return 0;
}

/* 2nd Level Abstractions */

/* Helper function to store chunks that need to be reassembled. */
static inline void sctp_ulpq_store_reasm(struct sctp_ulpq *ulpq,
					 struct sctp_ulpevent *event)
{
	struct sk_buff *pos;
	struct sctp_ulpevent *cevent;
	__u32 tsn, ctsn;

	tsn = event->tsn;

	/* See if it belongs at the end. */
	pos = skb_peek_tail(&ulpq->reasm);
	if (!pos) {
		__skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
		return;
	}

	/* Short circuit just dropping it at the end. */
	cevent = sctp_skb2event(pos);
	ctsn = cevent->tsn;
	if (TSN_lt(ctsn, tsn)) {
		__skb_queue_tail(&ulpq->reasm, sctp_event2skb(event));
		return;
	}

	/* Find the right place in this list.  We store them by TSN. */
	skb_queue_walk(&ulpq->reasm, pos) {
		cevent = sctp_skb2event(pos);
		ctsn = cevent->tsn;

		if (TSN_lt(tsn, ctsn))
			break;
	}

	/* Insert before pos. */
	__skb_insert(sctp_event2skb(event), pos->prev, pos, &ulpq->reasm);
}

/* Helper function to return an event corresponding to the reassembled
 * datagram.
 * This routine creates a re-assembled skb given the first and last skbs
 * as stored in the reassembly queue.  The skbs may be non-linear if the
 * sctp payload was fragmented on the way and ip had to reassemble them.
 * We add the rest of the skbs to the first skb's frag_list.
 */
static struct sctp_ulpevent *sctp_make_reassembled_event(struct sk_buff *f_frag, struct sk_buff *l_frag)
{
	struct sk_buff *pos;
	struct sctp_ulpevent *event;
	struct sk_buff *pnext, *last;
	struct sk_buff *list = skb_shinfo(f_frag)->frag_list;

	/* Store the pointer to the 2nd skb. */
	if (f_frag == l_frag)
		pos = NULL;
	else
		pos = f_frag->next;

	/* Get the last skb in the f_frag's frag_list if present. */
	for (last = list; list; last = list, list = list->next)
		;

	/* Add the list of remaining fragments to the first fragment's
	 * frag_list.
	 */
	if (last)
		last->next = pos;
	else
		skb_shinfo(f_frag)->frag_list = pos;

	/* Remove the first fragment from the reassembly queue. */
	__skb_unlink(f_frag, f_frag->list);
	while (pos) {
		pnext = pos->next;

		/* Update the len and data_len fields of the first fragment. */
		f_frag->len += pos->len;
		f_frag->data_len += pos->len;

		/* Remove the fragment from the reassembly queue. */
		__skb_unlink(pos, pos->list);

		/* Break if we have reached the last fragment. */
		if (pos == l_frag)
			break;
		pos->next = pnext;
		pos = pnext;
	}

	event = sctp_skb2event(f_frag);
	SCTP_INC_STATS(SCTP_MIB_REASMUSRMSGS);

	return event;
}

/* Helper function to check if an incoming chunk has filled up the last
 * missing fragment in an SCTP datagram and return the corresponding event.
 */
static inline struct sctp_ulpevent *sctp_ulpq_retrieve_reassembled(struct sctp_ulpq *ulpq)
{
	struct sk_buff *pos;
	struct sctp_ulpevent *cevent;
	struct sk_buff *first_frag = NULL;
	__u32 ctsn, next_tsn;
	struct sctp_ulpevent *retval = NULL;

	/* Initialized to 0 just to avoid a compiler warning.  It will
	 * never be used with this value; it is referenced only after it
	 * is set when we find the first fragment of a message.
	 */
	next_tsn = 0;

	/* The chunks are held in the reasm queue sorted by TSN.
	 * Walk through the queue sequentially and look for a sequence of
	 * fragmented chunks that complete a datagram.
	 * 'first_frag' and next_tsn are reset when we find a chunk which
	 * is the first fragment of a datagram.  Once these 2 fields are set
	 * we expect to find the remaining middle fragments and the last
	 * fragment in order.  If not, first_frag is reset to NULL and we
	 * start the next pass when we find another first fragment.
	 */
	skb_queue_walk(&ulpq->reasm, pos) {
		cevent = sctp_skb2event(pos);
		ctsn = cevent->tsn;

		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
		case SCTP_DATA_FIRST_FRAG:
			first_frag = pos;
			next_tsn = ctsn + 1;
			break;

		case SCTP_DATA_MIDDLE_FRAG:
			if (first_frag && (ctsn == next_tsn))
				next_tsn++;
			else
				first_frag = NULL;
			break;

		case SCTP_DATA_LAST_FRAG:
			if (first_frag && (ctsn == next_tsn))
				goto found;
			else
				first_frag = NULL;
			break;
		}
	}
done:
	return retval;

found:
	retval = sctp_make_reassembled_event(first_frag, pos);
	if (retval)
		retval->msg_flags |= MSG_EOR;
	goto done;
}
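
/* Example of the walk above: if the reasm queue holds TSNs 10 (FIRST),
 * 11 (MIDDLE), 12 (LAST) and 20 (FIRST), the walk latches first_frag at
 * TSN 10, advances next_tsn through 11, and jumps to 'found' at 12, so a
 * single MSG_EOR event covering 10-12 is built while TSN 20 stays queued
 * waiting for its remaining fragments.
 */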

/* Retrieve the next set of fragments of a partial message. */
static inline struct sctp_ulpevent *sctp_ulpq_retrieve_partial(struct sctp_ulpq *ulpq)
{
	struct sk_buff *pos, *last_frag, *first_frag;
	struct sctp_ulpevent *cevent;
	__u32 ctsn, next_tsn;
	int is_last;
	struct sctp_ulpevent *retval;

	/* The chunks are held in the reasm queue sorted by TSN.
	 * Walk through the queue sequentially and look for the first
	 * sequence of fragmented chunks.
	 */
	if (skb_queue_empty(&ulpq->reasm))
		return NULL;

	last_frag = first_frag = NULL;
	retval = NULL;
	next_tsn = 0;
	is_last = 0;

	skb_queue_walk(&ulpq->reasm, pos) {
		cevent = sctp_skb2event(pos);
		ctsn = cevent->tsn;

		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
		case SCTP_DATA_MIDDLE_FRAG:
			if (!first_frag) {
				first_frag = pos;
				next_tsn = ctsn + 1;
				last_frag = pos;
			} else if (next_tsn == ctsn)
				next_tsn++;
			else
				goto done;
			break;
		case SCTP_DATA_LAST_FRAG:
			if (!first_frag)
				first_frag = pos;
			else if (ctsn != next_tsn)
				goto done;
			last_frag = pos;
			is_last = 1;
			goto done;
		default:
			return NULL;
		}
	}

	/* We have the reassembled event.  There is no need to look
	 * further.
	 */
done:
	retval = sctp_make_reassembled_event(first_frag, last_frag);
	if (retval && is_last)
		retval->msg_flags |= MSG_EOR;

	return retval;
}
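
/* Note: unlike sctp_ulpq_retrieve_reassembled() above, this helper only
 * runs once partial delivery is already in progress, so it will accept a
 * run that begins with a MIDDLE fragment and it only sets MSG_EOR when a
 * LAST fragment completes the message.
 */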

/* Helper function to reassemble chunks.  Hold chunks on the reasm queue
 * that need reassembling.
 */
static struct sctp_ulpevent *sctp_ulpq_reasm(struct sctp_ulpq *ulpq,
					     struct sctp_ulpevent *event)
{
	struct sctp_ulpevent *retval = NULL;

	/* Check if this is part of a fragmented message. */
	if (SCTP_DATA_NOT_FRAG == (event->msg_flags & SCTP_DATA_FRAG_MASK)) {
		event->msg_flags |= MSG_EOR;
		return event;
	}

	sctp_ulpq_store_reasm(ulpq, event);
	if (!ulpq->pd_mode)
		retval = sctp_ulpq_retrieve_reassembled(ulpq);
	else {
		__u32 ctsn, ctsnap;

		/* Do not even bother unless this is the next tsn to
		 * be delivered.
		 */
		ctsn = event->tsn;
		ctsnap = sctp_tsnmap_get_ctsn(&ulpq->asoc->peer.tsn_map);
		if (TSN_lte(ctsn, ctsnap))
			retval = sctp_ulpq_retrieve_partial(ulpq);
	}

	return retval;
}

/* Retrieve the first part (sequential fragments) for partial delivery. */
static inline struct sctp_ulpevent *sctp_ulpq_retrieve_first(struct sctp_ulpq *ulpq)
{
	struct sk_buff *pos, *last_frag, *first_frag;
	struct sctp_ulpevent *cevent;
	__u32 ctsn, next_tsn;
	struct sctp_ulpevent *retval;

	/* The chunks are held in the reasm queue sorted by TSN.
	 * Walk through the queue sequentially and look for a sequence of
	 * fragmented chunks that start a datagram.
	 */
	if (skb_queue_empty(&ulpq->reasm))
		return NULL;

	last_frag = first_frag = NULL;
	retval = NULL;
	next_tsn = 0;

	skb_queue_walk(&ulpq->reasm, pos) {
		cevent = sctp_skb2event(pos);
		ctsn = cevent->tsn;

		switch (cevent->msg_flags & SCTP_DATA_FRAG_MASK) {
		case SCTP_DATA_FIRST_FRAG:
			if (!first_frag) {
				first_frag = pos;
				next_tsn = ctsn + 1;
				last_frag = pos;
			} else
				goto done;
			break;

		case SCTP_DATA_MIDDLE_FRAG:
			if (!first_frag)
				return NULL;
			if (ctsn == next_tsn) {
				next_tsn++;
				last_frag = pos;
			} else
				goto done;
			break;
		default:
			return NULL;
		}
	}

	/* We have the reassembled event.  There is no need to look
	 * further.
	 */
done:
	retval = sctp_make_reassembled_event(first_frag, last_frag);
	return retval;
}
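
/* Note: this helper never sets MSG_EOR; it only hands back the leading
 * FIRST/MIDDLE run so that sctp_ulpq_partial_delivery() below can start
 * pushing a large, still-incomplete message up to the ULP.
 */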

/* Helper function to gather skbs that have possibly become
 * ordered by an incoming chunk.
 */
static inline void sctp_ulpq_retrieve_ordered(struct sctp_ulpq *ulpq,
					      struct sctp_ulpevent *event)
{
	struct sk_buff *pos, *tmp;
	struct sctp_ulpevent *cevent;
	struct sctp_stream *in;
	__u16 sid, csid;
	__u16 ssn, cssn;

	sid = event->stream;
	ssn = event->ssn;
	in  = &ulpq->asoc->ssnmap->in;

	/* We are holding the chunks by stream, by SSN. */
	sctp_skb_for_each(pos, &ulpq->lobby, tmp) {
		cevent = (struct sctp_ulpevent *) pos->cb;
		csid = cevent->stream;
		cssn = cevent->ssn;

		/* Have we gone too far? */
		if (csid > sid)
			break;

		/* Have we not gone far enough? */
		if (csid < sid)
			continue;

		if (cssn != sctp_ssn_peek(in, sid))
			break;

		/* Found it, so mark in the ssnmap. */
		sctp_ssn_next(in, sid);

		__skb_unlink(pos, pos->list);

		/* Attach all gathered skbs to the event. */
		__skb_queue_tail(sctp_event2skb(event)->list, pos);
	}
}

/* Helper function to store chunks needing ordering. */
static inline void sctp_ulpq_store_ordered(struct sctp_ulpq *ulpq,
					   struct sctp_ulpevent *event)
{
	struct sk_buff *pos;
	struct sctp_ulpevent *cevent;
	__u16 sid, csid;
	__u16 ssn, cssn;

	pos = skb_peek_tail(&ulpq->lobby);
	if (!pos) {
		__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
		return;
	}

	sid = event->stream;
	ssn = event->ssn;

	cevent = (struct sctp_ulpevent *) pos->cb;
	csid = cevent->stream;
	cssn = cevent->ssn;
	if (sid > csid) {
		__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
		return;
	}

	if ((sid == csid) && SSN_lt(cssn, ssn)) {
		__skb_queue_tail(&ulpq->lobby, sctp_event2skb(event));
		return;
	}

	/* Find the right place in this list.  We store them by
	 * stream ID and then by SSN.
	 */
	skb_queue_walk(&ulpq->lobby, pos) {
		cevent = (struct sctp_ulpevent *) pos->cb;
		csid = cevent->stream;
		cssn = cevent->ssn;

		if (csid > sid)
			break;
		if (csid == sid && SSN_lt(ssn, cssn))
			break;
	}

	/* Insert before pos. */
	__skb_insert(sctp_event2skb(event), pos->prev, pos, &ulpq->lobby);
}
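
/* Deliver the event in stream order, if possible.  If the event carries
 * the expected SSN for its stream, it is returned (with any queued
 * successors gathered onto its skb list); otherwise it is parked in the
 * lobby and NULL is returned.
 */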
static struct sctp_ulpevent *sctp_ulpq_order(struct sctp_ulpq *ulpq,
					     struct sctp_ulpevent *event)
{
	__u16 sid, ssn;
	struct sctp_stream *in;

	/* Check if this message needs ordering. */
	if (SCTP_DATA_UNORDERED & event->msg_flags)
		return event;

	/* Note: The stream ID must be verified before this routine. */
	sid = event->stream;
	ssn = event->ssn;
	in  = &ulpq->asoc->ssnmap->in;

	/* Is this the expected SSN for this stream ID? */
	if (ssn != sctp_ssn_peek(in, sid)) {
		/* We've received something out of order, so find where it
		 * needs to be placed.  We order by stream and then by SSN.
		 */
		sctp_ulpq_store_ordered(ulpq, event);
		return NULL;
	}

	/* Mark that the next chunk has been found. */
	sctp_ssn_next(in, sid);

	/* Go find any other chunks that were waiting for
	 * ordering.
	 */
	sctp_ulpq_retrieve_ordered(ulpq, event);

	return event;
}

/* Helper function to gather skbs that have possibly become
 * ordered by forward tsn skipping their dependencies.
 */
static inline void sctp_ulpq_reap_ordered(struct sctp_ulpq *ulpq)
{
	struct sk_buff *pos, *tmp;
	struct sctp_ulpevent *cevent;
	struct sctp_ulpevent *event = NULL;
	struct sctp_stream *in;
	struct sk_buff_head temp;
	__u16 csid, cssn;

	in  = &ulpq->asoc->ssnmap->in;

	/* We are holding the chunks by stream, by SSN. */
	sctp_skb_for_each(pos, &ulpq->lobby, tmp) {
		cevent = (struct sctp_ulpevent *) pos->cb;
		csid = cevent->stream;
		cssn = cevent->ssn;

		if (cssn != sctp_ssn_peek(in, csid))
			break;

		/* Found it, so mark in the ssnmap. */
		sctp_ssn_next(in, csid);

		__skb_unlink(pos, pos->list);
		if (!event) {
			/* Create a temporary list to collect chunks on. */
			event = sctp_skb2event(pos);
			skb_queue_head_init(&temp);
			__skb_queue_tail(&temp, sctp_event2skb(event));
		} else {
			/* Attach all gathered skbs to the event. */
			__skb_queue_tail(sctp_event2skb(event)->list, pos);
		}
	}

	/* Send event to the ULP. */
	if (event)
		sctp_ulpq_tail_event(ulpq, event);
}
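
/* The SSN skip below is typically driven by FORWARD TSN processing
 * elsewhere in the stack: the peer has indicated that certain chunks
 * will never be retransmitted, so the expected SSN for the stream is
 * advanced and anything already waiting behind it is delivered.
 */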

/* Skip over an SSN. */
void sctp_ulpq_skip(struct sctp_ulpq *ulpq, __u16 sid, __u16 ssn)
{
	struct sctp_stream *in;

	/* Note: The stream ID must be verified before this routine. */
	in = &ulpq->asoc->ssnmap->in;

	/* Is this an old SSN?  If so, ignore it. */
	if (SSN_lt(ssn, sctp_ssn_peek(in, sid)))
		return;

	/* Mark that we are no longer expecting this SSN or lower. */
	sctp_ssn_skip(in, sid, ssn);

	/* Go find any other chunks that were waiting for
	 * ordering and deliver them if needed.
	 */
	sctp_ulpq_reap_ordered(ulpq);
	return;
}

/* Renege 'needed' bytes from the ordering queue. */
static __u16 sctp_ulpq_renege_order(struct sctp_ulpq *ulpq, __u16 needed)
{
	__u16 freed = 0;
	__u32 tsn;
	struct sk_buff *skb;
	struct sctp_ulpevent *event;
	struct sctp_tsnmap *tsnmap;

	tsnmap = &ulpq->asoc->peer.tsn_map;

	while ((skb = __skb_dequeue_tail(&ulpq->lobby)) != NULL) {
		freed += skb_headlen(skb);
		event = sctp_skb2event(skb);
		tsn = event->tsn;

		sctp_ulpevent_free(event);
		sctp_tsnmap_renege(tsnmap, tsn);
		if (freed >= needed)
			return freed;
	}

	return freed;
}

/* Renege 'needed' bytes from the reassembly queue. */
static __u16 sctp_ulpq_renege_frags(struct sctp_ulpq *ulpq, __u16 needed)
{
	__u16 freed = 0;
	__u32 tsn;
	struct sk_buff *skb;
	struct sctp_ulpevent *event;
	struct sctp_tsnmap *tsnmap;

	tsnmap = &ulpq->asoc->peer.tsn_map;

	/* Walk backwards through the list, reneging the newest TSNs. */
	while ((skb = __skb_dequeue_tail(&ulpq->reasm)) != NULL) {
		freed += skb_headlen(skb);
		event = sctp_skb2event(skb);
		tsn = event->tsn;

		sctp_ulpevent_free(event);
		sctp_tsnmap_renege(tsnmap, tsn);
		if (freed >= needed)
			return freed;
	}

	return freed;
}

/* Partially deliver the first message, as there is pressure on rwnd. */
void sctp_ulpq_partial_delivery(struct sctp_ulpq *ulpq,
				struct sctp_chunk *chunk, int gfp)
{
	struct sctp_ulpevent *event;
	struct sctp_association *asoc;

	asoc = ulpq->asoc;

	/* Are we already in partial delivery mode? */
	if (!sctp_sk(asoc->base.sk)->pd_mode) {

		/* Is partial delivery possible? */
		event = sctp_ulpq_retrieve_first(ulpq);
		/* Send event to the ULP. */
		if (event) {
			sctp_ulpq_tail_event(ulpq, event);
			sctp_sk(asoc->base.sk)->pd_mode = 1;
			ulpq->pd_mode = 1;
			return;
		}
	}
}
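
/* Reneging frees receive buffer space by discarding events that have
 * been acknowledged but not yet delivered to the ULP.  The ordering
 * queue is raided first, then the reassembly queue, and only while the
 * socket's receive queue is empty; if enough room is freed, the
 * triggering chunk is accepted and partial delivery is attempted.
 */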
/* Renege some packets to make room for an incoming chunk. */
void sctp_ulpq_renege(struct sctp_ulpq *ulpq, struct sctp_chunk *chunk,
		      int gfp)
{
	struct sctp_association *asoc;
	__u16 needed, freed;

	asoc = ulpq->asoc;

	if (chunk) {
		needed = ntohs(chunk->chunk_hdr->length);
		needed -= sizeof(sctp_data_chunk_t);
	} else
		needed = SCTP_DEFAULT_MAXWINDOW;

	freed = 0;

	if (skb_queue_empty(&asoc->base.sk->sk_receive_queue)) {
		freed = sctp_ulpq_renege_order(ulpq, needed);
		if (freed < needed) {
			freed += sctp_ulpq_renege_frags(ulpq, needed - freed);
		}
	}
	/* If able to free enough room, accept this chunk. */
	if (chunk && (freed >= needed)) {
		__u32 tsn;
		tsn = ntohl(chunk->subh.data_hdr->tsn);
		sctp_tsnmap_mark(&asoc->peer.tsn_map, tsn);
		sctp_ulpq_tail_data(ulpq, chunk, gfp);

		sctp_ulpq_partial_delivery(ulpq, chunk, gfp);
	}

	return;
}

/* Notify the application if an association is aborted and in
 * partial delivery mode.  Send up any pending received messages.
 */
void sctp_ulpq_abort_pd(struct sctp_ulpq *ulpq, int gfp)
{
	struct sctp_ulpevent *ev = NULL;
	struct sock *sk;

	if (!ulpq->pd_mode)
		return;

	sk = ulpq->asoc->base.sk;
	if (sctp_ulpevent_type_enabled(SCTP_PARTIAL_DELIVERY_EVENT,
				       &sctp_sk(sk)->subscribe))
		ev = sctp_ulpevent_make_pdapi(ulpq->asoc,
					      SCTP_PARTIAL_DELIVERY_ABORTED,
					      gfp);
	if (ev)
		__skb_queue_tail(&sk->sk_receive_queue, sctp_event2skb(ev));

	/* If there is data waiting, send it up the socket now. */
	if (sctp_ulpq_clear_pd(ulpq) || ev)
		sk->sk_data_ready(sk, 0);
}