net/sctp/output.c
/* SCTP kernel implementation
 * (C) Copyright IBM Corp. 2001, 2004
 * Copyright (c) 1999-2000 Cisco, Inc.
 * Copyright (c) 1999-2001 Motorola, Inc.
 *
 * This file is part of the SCTP kernel implementation
 *
 * These functions handle output processing.
 *
 * This SCTP implementation is free software;
 * you can redistribute it and/or modify it under the terms of
 * the GNU General Public License as published by
 * the Free Software Foundation; either version 2, or (at your option)
 * any later version.
 *
 * This SCTP implementation is distributed in the hope that it
 * will be useful, but WITHOUT ANY WARRANTY; without even the implied
 *                 ************************
 * warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
 * See the GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with GNU CC; see the file COPYING.  If not, write to
 * the Free Software Foundation, 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 *
 * Please send any bug reports or fixes you make to the
 * email address(es):
 *    lksctp developers <lksctp-developers@lists.sourceforge.net>
 *
 * Or submit a bug report through the following website:
 *    http://www.sf.net/projects/lksctp
 *
 * Written or modified by:
 *    La Monte H.P. Yarroll <piggy@acm.org>
 *    Karl Knutson          <karl@athena.chicago.il.us>
 *    Jon Grimm             <jgrimm@austin.ibm.com>
 *    Sridhar Samudrala     <sri@us.ibm.com>
 *
 * Any bugs reported given to us we will try to fix... any fixes shared will
 * be incorporated into the next SCTP release.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/wait.h>
#include <linux/time.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <net/inet_ecn.h>
#include <net/ip.h>
#include <net/icmp.h>
#include <net/net_namespace.h>

#include <linux/socket.h> /* for sa_family_t */
#include <net/sock.h>

#include <net/sctp/sctp.h>
#include <net/sctp/sm.h>
#include <net/sctp/checksum.h>
/* Forward declarations for private helpers. */
static sctp_xmit_t __sctp_packet_append_chunk(struct sctp_packet *packet,
                                              struct sctp_chunk *chunk);
static sctp_xmit_t sctp_packet_can_append_data(struct sctp_packet *packet,
                                               struct sctp_chunk *chunk);
static void sctp_packet_append_data(struct sctp_packet *packet,
                                    struct sctp_chunk *chunk);
static sctp_xmit_t sctp_packet_will_fit(struct sctp_packet *packet,
                                        struct sctp_chunk *chunk,
                                        u16 chunk_len);
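
/* Reset per-packet bookkeeping so the structure describes an empty packet
 * again: only the fixed header overhead is counted and all bundling state
 * is cleared.
 */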
static void sctp_packet_reset(struct sctp_packet *packet)
{
        packet->size = packet->overhead;
        packet->has_cookie_echo = 0;
        packet->has_sack = 0;
        packet->has_data = 0;
        packet->has_auth = 0;
        packet->ipfragok = 0;
        packet->auth = NULL;
}
/* Config a packet.
 * This appears to be a followup set of initializations.
 */
struct sctp_packet *sctp_packet_config(struct sctp_packet *packet,
                                       __u32 vtag, int ecn_capable)
{
        struct sctp_chunk *chunk = NULL;

        SCTP_DEBUG_PRINTK("%s: packet:%p vtag:0x%x\n", __func__,
                          packet, vtag);

        packet->vtag = vtag;

        if (ecn_capable && sctp_packet_empty(packet)) {
                chunk = sctp_get_ecne_prepend(packet->transport->asoc);

                /* If there is a prepend chunk, stick it on the list before
                 * any other chunks get appended.
                 */
                if (chunk)
                        sctp_packet_append_chunk(packet, chunk);
        }

        return packet;
}
/* Initialize the packet structure. */
struct sctp_packet *sctp_packet_init(struct sctp_packet *packet,
                                     struct sctp_transport *transport,
                                     __u16 sport, __u16 dport)
{
        struct sctp_association *asoc = transport->asoc;
        size_t overhead;

        SCTP_DEBUG_PRINTK("%s: packet:%p transport:%p\n", __func__,
                          packet, transport);

        packet->transport = transport;
        packet->source_port = sport;
        packet->destination_port = dport;
        INIT_LIST_HEAD(&packet->chunk_list);
        if (asoc) {
                struct sctp_sock *sp = sctp_sk(asoc->base.sk);
                overhead = sp->pf->af->net_header_len;
        } else {
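                /* No association yet, so the address family is unknown;
                 * reserve room for the larger (IPv6) network header.
                 */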
                overhead = sizeof(struct ipv6hdr);
        }
        overhead += sizeof(struct sctphdr);
        packet->overhead = overhead;
        sctp_packet_reset(packet);
        packet->vtag = 0;

        return packet;
}
/* Free a packet. */
void sctp_packet_free(struct sctp_packet *packet)
{
        struct sctp_chunk *chunk, *tmp;

        SCTP_DEBUG_PRINTK("%s: packet:%p\n", __func__, packet);

        list_for_each_entry_safe(chunk, tmp, &packet->chunk_list, list) {
                list_del_init(&chunk->list);
                sctp_chunk_free(chunk);
        }
}
/* This routine tries to append the chunk to the offered packet. If adding
 * the chunk causes the packet to exceed the path MTU and a COOKIE_ECHO chunk
 * is not present in the packet, it transmits the input packet.
 * Data can be bundled with a packet containing a COOKIE_ECHO chunk as long
 * as it can fit in the packet, but any more data that does not fit in this
 * packet can be sent only after receiving the COOKIE_ACK.
 */
sctp_xmit_t sctp_packet_transmit_chunk(struct sctp_packet *packet,
                                       struct sctp_chunk *chunk,
                                       int one_packet)
{
        sctp_xmit_t retval;
        int error = 0;

        SCTP_DEBUG_PRINTK("%s: packet:%p chunk:%p\n", __func__,
                          packet, chunk);

        switch ((retval = (sctp_packet_append_chunk(packet, chunk)))) {
        case SCTP_XMIT_PMTU_FULL:
                if (!packet->has_cookie_echo) {
                        error = sctp_packet_transmit(packet);
                        if (error < 0)
                                chunk->skb->sk->sk_err = -error;

                        /* If we have an empty packet, then we can NOT ever
                         * return PMTU_FULL.
                         */
                        if (!one_packet)
                                retval = sctp_packet_append_chunk(packet,
                                                                  chunk);
                }
                break;

        case SCTP_XMIT_RWND_FULL:
        case SCTP_XMIT_OK:
        case SCTP_XMIT_NAGLE_DELAY:
                break;
        }

        return retval;
}
/* Try to bundle an auth chunk into the packet. */
static sctp_xmit_t sctp_packet_bundle_auth(struct sctp_packet *pkt,
                                           struct sctp_chunk *chunk)
{
        struct sctp_association *asoc = pkt->transport->asoc;
        struct sctp_chunk *auth;
        sctp_xmit_t retval = SCTP_XMIT_OK;

        /* if we don't have an association, we can't do authentication */
        if (!asoc)
                return retval;

        /* See if this is an auth chunk we are bundling or if
         * auth is already bundled.
         */
        if (chunk->chunk_hdr->type == SCTP_CID_AUTH || pkt->has_auth)
                return retval;

        /* if the peer did not request this chunk to be authenticated,
         * don't do it
         */
        if (!chunk->auth)
                return retval;

        auth = sctp_make_auth(asoc);
        if (!auth)
                return retval;

        retval = __sctp_packet_append_chunk(pkt, auth);
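
        /* The AUTH chunk was created just for this packet; if it could not
         * be appended, nothing else owns it, so free it here.
         */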
        if (retval != SCTP_XMIT_OK)
                sctp_chunk_free(auth);

        return retval;
}
/* Try to bundle a SACK with the packet. */
static sctp_xmit_t sctp_packet_bundle_sack(struct sctp_packet *pkt,
                                           struct sctp_chunk *chunk)
{
        sctp_xmit_t retval = SCTP_XMIT_OK;

        /* If sending DATA and haven't already bundled a SACK, try to
         * bundle one into the packet.
         */
        if (sctp_chunk_is_data(chunk) && !pkt->has_sack &&
            !pkt->has_cookie_echo) {
                struct sctp_association *asoc;
                struct timer_list *timer;
                asoc = pkt->transport->asoc;
                timer = &asoc->timers[SCTP_EVENT_TIMEOUT_SACK];

                /* If the SACK timer is running, we have a pending SACK */
                if (timer_pending(timer)) {
                        struct sctp_chunk *sack;
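
                        /* Only bundle a SACK if this transport has caught up
                         * with the association's current SACK generation.
                         */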
                        if (pkt->transport->sack_generation !=
                            pkt->transport->asoc->peer.sack_generation)
                                return retval;

                        asoc->a_rwnd = asoc->rwnd;
                        sack = sctp_make_sack(asoc);
                        if (sack) {
                                retval = __sctp_packet_append_chunk(pkt, sack);
                                if (retval != SCTP_XMIT_OK) {
                                        sctp_chunk_free(sack);
                                        goto out;
                                }
                                asoc->peer.sack_needed = 0;
                                if (del_timer(timer))
                                        sctp_association_put(asoc);
                        }
                }
        }
out:
        return retval;
}
/* Append a chunk to the offered packet, reporting back any inability to do
 * so.
 */
static sctp_xmit_t __sctp_packet_append_chunk(struct sctp_packet *packet,
                                              struct sctp_chunk *chunk)
{
        sctp_xmit_t retval = SCTP_XMIT_OK;
        __u16 chunk_len = WORD_ROUND(ntohs(chunk->chunk_hdr->length));

        /* Check to see if this chunk will fit into the packet */
        retval = sctp_packet_will_fit(packet, chunk, chunk_len);
        if (retval != SCTP_XMIT_OK)
                goto finish;

        /* We believe that this chunk is OK to add to the packet */
        switch (chunk->chunk_hdr->type) {
        case SCTP_CID_DATA:
                /* Account for the data being in the packet */
                sctp_packet_append_data(packet, chunk);
                /* Disallow SACK bundling after DATA. */
                packet->has_sack = 1;
                /* Disallow AUTH bundling after DATA */
                packet->has_auth = 1;
                /* Let it be known that the packet has DATA in it */
                packet->has_data = 1;
                /* timestamp the chunk for rtx purposes */
                chunk->sent_at = jiffies;
                break;
        case SCTP_CID_COOKIE_ECHO:
                packet->has_cookie_echo = 1;
                break;

        case SCTP_CID_SACK:
                packet->has_sack = 1;
                if (chunk->asoc)
                        chunk->asoc->stats.osacks++;
                break;

        case SCTP_CID_AUTH:
                packet->has_auth = 1;
                packet->auth = chunk;
                break;
        }

        /* It is OK to send this chunk. */
        list_add_tail(&chunk->list, &packet->chunk_list);
        packet->size += chunk_len;
        chunk->transport = packet->transport;
finish:
        return retval;
}
/* Append a chunk to the offered packet, reporting back any inability to do
 * so.
 */
sctp_xmit_t sctp_packet_append_chunk(struct sctp_packet *packet,
                                     struct sctp_chunk *chunk)
{
        sctp_xmit_t retval = SCTP_XMIT_OK;

        SCTP_DEBUG_PRINTK("%s: packet:%p chunk:%p\n", __func__, packet,
                          chunk);

        /* Data chunks are special. Before seeing what else we can
         * bundle into this packet, check to see if we are allowed to
         * send this DATA.
         */
        if (sctp_chunk_is_data(chunk)) {
                retval = sctp_packet_can_append_data(packet, chunk);
                if (retval != SCTP_XMIT_OK)
                        goto finish;
        }

        /* Try to bundle AUTH chunk */
        retval = sctp_packet_bundle_auth(packet, chunk);
        if (retval != SCTP_XMIT_OK)
                goto finish;

        /* Try to bundle SACK chunk */
        retval = sctp_packet_bundle_sack(packet, chunk);
        if (retval != SCTP_XMIT_OK)
                goto finish;
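
        /* Any AUTH and SACK to be bundled are now in place; append the
         * chunk itself.
         */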
        retval = __sctp_packet_append_chunk(packet, chunk);

finish:
        return retval;
}
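
/* skb destructor: drop the sk_wmem_alloc reference taken in
 * sctp_packet_set_owner_w() below, which may free the socket.
 */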
static void sctp_packet_release_owner(struct sk_buff *skb)
{
        sk_free(skb->sk);
}

static void sctp_packet_set_owner_w(struct sk_buff *skb, struct sock *sk)
{
        skb_orphan(skb);
        skb->sk = sk;
        skb->destructor = sctp_packet_release_owner;

        /*
         * The data chunks have already been accounted for in sctp_sendmsg(),
         * therefore only reserve a single byte to keep socket around until
         * the packet has been transmitted.
         */
        atomic_inc(&sk->sk_wmem_alloc);
}
/* All packets are sent to the network through this function from
 * sctp_outq_tail().
 *
 * The return value is a normal kernel error return value.
 */
int sctp_packet_transmit(struct sctp_packet *packet)
{
        struct sctp_transport *tp = packet->transport;
        struct sctp_association *asoc = tp->asoc;
        struct sctphdr *sh;
        struct sk_buff *nskb;
        struct sctp_chunk *chunk, *tmp;
        struct sock *sk;
        int err = 0;
        int padding;            /* How much padding do we need? */
        __u8 has_data = 0;
        struct dst_entry *dst = tp->dst;
        unsigned char *auth = NULL;     /* pointer to auth in skb data */
        __u32 cksum_buf_len = sizeof(struct sctphdr);

        SCTP_DEBUG_PRINTK("%s: packet:%p\n", __func__, packet);

        /* Do NOT generate a chunkless packet. */
        if (list_empty(&packet->chunk_list))
                return err;

        /* Set up convenience variables... */
        chunk = list_entry(packet->chunk_list.next, struct sctp_chunk, list);
        sk = chunk->skb->sk;

        /* Allocate the new skb. */
        nskb = alloc_skb(packet->size + LL_MAX_HEADER, GFP_ATOMIC);
        if (!nskb)
                goto nomem;

        /* Make sure the outbound skb has enough header room reserved. */
        skb_reserve(nskb, packet->overhead + LL_MAX_HEADER);

        /* Set the owning socket so that we know where to get the
         * destination IP address.
         */
        sctp_packet_set_owner_w(nskb, sk);
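
        /* The cached route may have been invalidated; re-route the transport
         * and, if PMTU discovery is enabled, resync the association's path
         * MTU before committing to a destination.
         */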
        if (!sctp_transport_dst_check(tp)) {
                sctp_transport_route(tp, NULL, sctp_sk(sk));
                if (asoc && (asoc->param_flags & SPP_PMTUD_ENABLE)) {
                        sctp_assoc_sync_pmtu(sk, asoc);
                }
        }
        dst = dst_clone(tp->dst);
        skb_dst_set(nskb, dst);
        if (!dst)
                goto no_route;
        /* Build the SCTP header. */
        sh = (struct sctphdr *)skb_push(nskb, sizeof(struct sctphdr));
        skb_reset_transport_header(nskb);
        sh->source = htons(packet->source_port);
        sh->dest = htons(packet->destination_port);

        /* From 6.8 Adler-32 Checksum Calculation:
         * After the packet is constructed (containing the SCTP common
         * header and one or more control or DATA chunks), the
         * transmitter shall:
         *
         * 1) Fill in the proper Verification Tag in the SCTP common
         *    header and initialize the checksum field to 0's.
         */
        sh->vtag = htonl(packet->vtag);
        sh->checksum = 0;

        /* 6.10 Bundling
         *
         *    An endpoint bundles chunks by simply including multiple
         *    chunks in one outbound SCTP packet. ...
         */

        /* 3.2 Chunk Field Descriptions
         *
         * The total length of a chunk (including Type, Length and
         * Value fields) MUST be a multiple of 4 bytes. If the length
         * of the chunk is not a multiple of 4 bytes, the sender MUST
         * pad the chunk with all zero bytes and this padding is not
         * included in the chunk length field. The sender should
         * never pad with more than 3 bytes.
         *
         * [This whole comment explains WORD_ROUND() below.]
         */
        SCTP_DEBUG_PRINTK("***sctp_transmit_packet***\n");
        list_for_each_entry_safe(chunk, tmp, &packet->chunk_list, list) {
                list_del_init(&chunk->list);
                if (sctp_chunk_is_data(chunk)) {
                        /* 6.3.1 C4) When data is in flight and when allowed
                         * by rule C5, a new RTT measurement MUST be made each
                         * round trip. Furthermore, new RTT measurements
                         * SHOULD be made no more than once per round-trip
                         * for a given destination transport address.
                         */
                        if (!tp->rto_pending) {
                                chunk->rtt_in_progress = 1;
                                tp->rto_pending = 1;
                        }
                        has_data = 1;
                }

                padding = WORD_ROUND(chunk->skb->len) - chunk->skb->len;
                if (padding)
                        memset(skb_put(chunk->skb, padding), 0, padding);

                /* If this is the auth chunk that we are adding, remember
                 * where it lands in the new skb so the HMAC can be filled
                 * in after all chunks have been copied.
                 */
                if (chunk == packet->auth)
                        auth = skb_tail_pointer(nskb);

                cksum_buf_len += chunk->skb->len;
                memcpy(skb_put(nskb, chunk->skb->len),
                       chunk->skb->data, chunk->skb->len);

                SCTP_DEBUG_PRINTK("%s %p[%s] %s 0x%x, %s %d, %s %d, %s %d\n",
                                  "*** Chunk", chunk,
                                  sctp_cname(SCTP_ST_CHUNK(
                                          chunk->chunk_hdr->type)),
                                  chunk->has_tsn ? "TSN" : "No TSN",
                                  chunk->has_tsn ?
                                  ntohl(chunk->subh.data_hdr->tsn) : 0,
                                  "length", ntohs(chunk->chunk_hdr->length),
                                  "chunk->skb->len", chunk->skb->len,
                                  "rtt_in_progress", chunk->rtt_in_progress);

                /* If this is a control chunk, this is our last
                 * reference. Free data chunks after they've been
                 * acknowledged or have failed.
                 */
                if (!sctp_chunk_is_data(chunk))
                        sctp_chunk_free(chunk);
        }

        /* SCTP-AUTH, Section 6.2
         *    The sender MUST calculate the MAC as described in RFC2104 [2]
         *    using the hash function H as described by the MAC Identifier and
         *    the shared association key K based on the endpoint pair shared key
         *    described by the shared key identifier. The 'data' used for the
         *    computation of the AUTH-chunk is given by the AUTH chunk with its
         *    HMAC field set to zero (as shown in Figure 6) followed by all
         *    chunks that are placed after the AUTH chunk in the SCTP packet.
         */
        if (auth)
                sctp_auth_calculate_hmac(asoc, nskb,
                                         (struct sctp_auth_chunk *)auth,
                                         GFP_ATOMIC);

        /* 2) Calculate the Adler-32 checksum of the whole packet,
         *    including the SCTP common header and all the chunks.
         *
         * Note: Adler-32 is no longer applicable, as it has been replaced
         * by CRC32-C as described in <draft-ietf-tsvwg-sctpcsum-02.txt>.
         */
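        /* If the device offloads SCTP CRC32c (NETIF_F_SCTP_CSUM), leave the
         * checksum to the hardware and only record where the field lives;
         * otherwise compute the CRC32c here over the whole packet.
         */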
        if (!sctp_checksum_disable) {
                if (!(dst->dev->features & NETIF_F_SCTP_CSUM)) {
                        __u32 crc32 = sctp_start_cksum((__u8 *)sh, cksum_buf_len);

                        /* 3) Put the resultant value into the checksum field in the
                         *    common header, and leave the rest of the bits unchanged.
                         */
                        sh->checksum = sctp_end_cksum(crc32);
                } else {
                        /* no need to seed pseudo checksum for SCTP */
                        nskb->ip_summed = CHECKSUM_PARTIAL;
                        nskb->csum_start = (skb_transport_header(nskb) -
                                            nskb->head);
                        nskb->csum_offset = offsetof(struct sctphdr, checksum);
                }
        }

        /* IP layer ECN support
         * From RFC 2481
         *  "The ECN-Capable Transport (ECT) bit would be set by the
         *   data sender to indicate that the end-points of the
         *   transport protocol are ECN-capable."
         *
         * Now setting the ECT bit all the time, as it should not cause
         * any problems protocol-wise even if our peer ignores it.
         *
         * Note: the IPv6 layer checks this bit again later in the
         * transmit path. See IP6_ECN_flow_xmit().
         */
        (*tp->af_specific->ecn_capable)(nskb->sk);
        /* Set up the IP options. */
        /* BUG: not implemented
         * For v4 this all lives somewhere in sk->sk_opt...
         */

        /* Dump that on IP! */
        if (asoc) {
                asoc->stats.opackets++;
                if (asoc->peer.last_sent_to != tp)
                        /* Considering the multiple CPU scenario, this is a
                         * "correcter" place for last_sent_to. --xguo
                         */
                        asoc->peer.last_sent_to = tp;
        }

        if (has_data) {
                struct timer_list *timer;
                unsigned long timeout;

                /* Restart the AUTOCLOSE timer when sending data. */
                if (sctp_state(asoc, ESTABLISHED) && asoc->autoclose) {
                        timer = &asoc->timers[SCTP_EVENT_TIMEOUT_AUTOCLOSE];
                        timeout = asoc->timeouts[SCTP_EVENT_TIMEOUT_AUTOCLOSE];

                        if (!mod_timer(timer, jiffies + timeout))
                                sctp_association_hold(asoc);
                }
        }

        SCTP_DEBUG_PRINTK("***sctp_transmit_packet*** skb len %d\n",
                          nskb->len);
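
        /* Allow the IP layer to fragment this skb only when
         * sctp_packet_will_fit() explicitly permitted it (packet->ipfragok).
         */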
        nskb->local_df = packet->ipfragok;
        (*tp->af_specific->sctp_xmit)(nskb, tp);

out:
        sctp_packet_reset(packet);
        return err;
no_route:
        kfree_skb(nskb);
        IP_INC_STATS_BH(sock_net(asoc->base.sk), IPSTATS_MIB_OUTNOROUTES);

        /* FIXME: Returning the 'err' will affect all the associations
         * associated with a socket, although only one of the paths of the
         * association is unreachable.
         * The real failure of a transport or association can be passed on
         * to the user via notifications. So setting this error may not be
         * required.
         */
         /* err = -EHOSTUNREACH; */
err:
        /* Control chunks are unreliable, so just drop them. DATA chunks
         * will get resent or dropped later.
         */
        list_for_each_entry_safe(chunk, tmp, &packet->chunk_list, list) {
                list_del_init(&chunk->list);
                if (!sctp_chunk_is_data(chunk))
                        sctp_chunk_free(chunk);
        }
        goto out;
nomem:
        err = -ENOMEM;
        goto err;
}
/********************************************************************
 * 2nd Level Abstractions
 ********************************************************************/

/* This private function checks whether a chunk can be added */
static sctp_xmit_t sctp_packet_can_append_data(struct sctp_packet *packet,
                                               struct sctp_chunk *chunk)
{
        sctp_xmit_t retval = SCTP_XMIT_OK;
        size_t datasize, rwnd, inflight, flight_size;
        struct sctp_transport *transport = packet->transport;
        struct sctp_association *asoc = transport->asoc;
        struct sctp_outq *q = &asoc->outqueue;

        /* RFC 2960 6.1 Transmission of DATA Chunks
         *
         * A) At any given time, the data sender MUST NOT transmit new data to
         * any destination transport address if its peer's rwnd indicates
         * that the peer has no buffer space (i.e. rwnd is 0, see Section
         * 6.2.1). However, regardless of the value of rwnd (including if it
         * is 0), the data sender can always have one DATA chunk in flight to
         * the receiver if allowed by cwnd (see rule B below). This rule
         * allows the sender to probe for a change in rwnd that the sender
         * missed due to the SACK having been lost in transit from the data
         * receiver to the data sender.
         */

        rwnd = asoc->peer.rwnd;
        inflight = q->outstanding_bytes;
        flight_size = transport->flight_size;

        datasize = sctp_data_size(chunk);

        if (datasize > rwnd) {
                if (inflight > 0) {
                        /* We have (at least) one data chunk in flight,
                         * so we can't fall back to rule 6.1 B).
                         */
                        retval = SCTP_XMIT_RWND_FULL;
                        goto finish;
                }
        }

        /* RFC 2960 6.1 Transmission of DATA Chunks
         *
         * B) At any given time, the sender MUST NOT transmit new data
         * to a given transport address if it has cwnd or more bytes
         * of data outstanding to that transport address.
         */
        /* RFC 2960 7.2.4 & the Implementers Guide 2.8.
         *
         * 3) ...
         *    When a Fast Retransmit is being performed the sender SHOULD
         *    ignore the value of cwnd and SHOULD NOT delay retransmission.
         */
        if (chunk->fast_retransmit != SCTP_NEED_FRTX)
                if (flight_size >= transport->cwnd) {
                        retval = SCTP_XMIT_RWND_FULL;
                        goto finish;
                }

        /* Nagle's algorithm to solve small-packet problem:
         * Inhibit the sending of new chunks when new outgoing data arrives
         * if any previously transmitted data on the connection remains
         * unacknowledged.
         */
        if (!sctp_sk(asoc->base.sk)->nodelay && sctp_packet_empty(packet) &&
            inflight && sctp_state(asoc, ESTABLISHED)) {
                unsigned int max = transport->pathmtu - packet->overhead;
                unsigned int len = chunk->skb->len + q->out_qlen;

                /* Check whether this chunk and all the rest of the pending
                 * data will fit, or delay in hopes of bundling a full-sized
                 * packet.
                 * Don't delay large message writes that may have been
                 * fragmented into small pieces.
                 */
                if ((len < max) && chunk->msg->can_delay) {
                        retval = SCTP_XMIT_NAGLE_DELAY;
                        goto finish;
                }
        }

finish:
        return retval;
}
/* This private function does management things when adding a DATA chunk */
static void sctp_packet_append_data(struct sctp_packet *packet,
                                    struct sctp_chunk *chunk)
{
        struct sctp_transport *transport = packet->transport;
        size_t datasize = sctp_data_size(chunk);
        struct sctp_association *asoc = transport->asoc;
        u32 rwnd = asoc->peer.rwnd;

        /* Keep track of how many bytes are in flight over this transport. */
        transport->flight_size += datasize;

        /* Keep track of how many bytes are in flight to the receiver. */
        asoc->outqueue.outstanding_bytes += datasize;

        /* Update our view of the receiver's rwnd. */
        if (datasize < rwnd)
                rwnd -= datasize;
        else
                rwnd = 0;

        asoc->peer.rwnd = rwnd;
        /* Has been accepted for transmission. */
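        /* If the peer does not support PR-SCTP, the message may no longer
         * be abandoned once any of its chunks has been queued for the wire.
         */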
        if (!asoc->peer.prsctp_capable)
                chunk->msg->can_abandon = 0;
        sctp_chunk_assign_tsn(chunk);
        sctp_chunk_assign_ssn(chunk);
}
static sctp_xmit_t sctp_packet_will_fit(struct sctp_packet *packet,
                                        struct sctp_chunk *chunk,
                                        u16 chunk_len)
{
        size_t psize;
        size_t pmtu;
        int too_big;
        sctp_xmit_t retval = SCTP_XMIT_OK;

        psize = packet->size;
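        /* An association tracks the smallest path MTU across its transports;
         * prefer it when available, otherwise use this transport's own MTU.
         */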
        pmtu = ((packet->transport->asoc) ?
                (packet->transport->asoc->pathmtu) :
                (packet->transport->pathmtu));

        too_big = (psize + chunk_len > pmtu);

        /* Decide if we need to fragment or resubmit later. */
        if (too_big) {
                /* It's OK to fragment at the IP level if any one of the
                 * following is true:
                 *      1. The packet is empty (meaning this chunk is greater
                 *         than the MTU)
                 *      2. The chunk we are adding is a control chunk
                 *      3. The packet doesn't have any data in it yet and data
                 *         requires authentication.
                 */
                if (sctp_packet_empty(packet) || !sctp_chunk_is_data(chunk) ||
                    (!packet->has_data && chunk->auth)) {
                        /* We no longer do re-fragmentation.
                         * Just fragment at the IP layer, if we
                         * actually hit this condition.
                         */
                        packet->ipfragok = 1;
                } else {
                        retval = SCTP_XMIT_PMTU_FULL;
                }
        }

        return retval;
}