/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		The Internet Protocol (IP) output module.
 *
 * Version:	$Id: ip_output.c,v 1.100 2002/02/01 22:01:03 davem Exp $
 *
 * Authors:	Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Donald Becker, <becker@super.org>
 *		Alan Cox, <Alan.Cox@linux.org>
 *		Stefan Becker, <stefanb@yello.ping.de>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Hirokazu Takahashi, <taka@valinux.co.jp>
 *
 *	See ip_input.c for original log
 *
 *	Fixes:
 *		Alan Cox	:	Missing nonblock feature in ip_build_xmit.
 *		Mike Kilburn	:	htons() missing in ip_build_xmit.
 *		Bradford Johnson:	Fix faulty handling of some frames when
 *					no route is found.
 *		Alexander Demenshin:	Missing sk/skb free in ip_queue_xmit
 *					(in case the packet is not accepted by
 *					output firewall rules)
 *		Mike McLagan	:	Routing by source
 *		Alexey Kuznetsov:	use new route cache
 *		Andi Kleen	:	Fix broken PMTU recovery and remove
 *					some redundant tests.
 *		Vitaly E. Lavrov:	Transparent proxy revived after year coma.
 *		Andi Kleen	:	Replace ip_reply with ip_send_reply.
 *		Andi Kleen	:	Split fast and slow ip_build_xmit path
 *					for decreased register pressure on x86
 *					and more readability.
 *		Marc Boucher	:	When call_out_firewall returns FW_QUEUE,
 *					silently drop skb instead of failing with -EPERM.
 *		Detlev Wengorz	:	Copy protocol for fragments.
 *		Hirokazu Takahashi:	HW checksumming for outgoing UDP
 *					datagrams.
 *		Hirokazu Takahashi:	sendfile() on UDP works now.
 */
#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/highmem.h>

#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/init.h>

#include <net/snmp.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/route.h>
#include <net/xfrm.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/arp.h>
#include <net/icmp.h>
#include <net/checksum.h>
#include <net/inetpeer.h>
#include <linux/igmp.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_bridge.h>
#include <linux/mroute.h>
#include <linux/netlink.h>
#include <linux/tcp.h>
int sysctl_ip_default_ttl __read_mostly = IPDEFTTL;
/* Generate a checksum for an outgoing IP datagram. */
__inline__ void ip_send_check(struct iphdr *iph)
{
	iph->check = 0;
	iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
}
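
/*
 * Illustrative sketch (added; not part of the original file): any code
 * that rewrites IP header fields must recompute the checksum afterwards,
 * e.g. a hypothetical TTL-rewriting helper:
 *
 *	static void example_set_ttl(struct iphdr *iph, __u8 ttl)
 *	{
 *		iph->ttl = ttl;
 *		ip_send_check(iph);	(re-derives iph->check over ihl words)
 *	}
 */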
/* dev_loopback_xmit for use with netfilter. */
static int ip_dev_loopback_xmit(struct sk_buff *newskb)
{
	skb_reset_mac_header(newskb);
	__skb_pull(newskb, skb_network_offset(newskb));
	newskb->pkt_type = PACKET_LOOPBACK;
	newskb->ip_summed = CHECKSUM_UNNECESSARY;
	BUG_TRAP(newskb->dst);
	netif_rx(newskb);
	return 0;
}
static inline int ip_select_ttl(struct inet_sock *inet, struct dst_entry *dst)
{
	int ttl = inet->uc_ttl;

	if (ttl < 0)
		ttl = dst_metric(dst, RTAX_HOPLIMIT);
	return ttl;
}
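
/*
 * Note (added): inet->uc_ttl is negative unless the application set an
 * explicit IP_TTL, so unicast sockets normally fall back to the route's
 * RTAX_HOPLIMIT metric, which in turn defaults to sysctl_ip_default_ttl.
 */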
/*
 *		Add an ip header to a skbuff and send it out.
 */
int ip_build_and_send_pkt(struct sk_buff *skb, struct sock *sk,
			  __be32 saddr, __be32 daddr, struct ip_options *opt)
{
	struct inet_sock *inet = inet_sk(sk);
	struct rtable *rt = (struct rtable *)skb->dst;
	struct iphdr *iph;

	/* Build the IP header. */
	skb_push(skb, sizeof(struct iphdr) + (opt ? opt->optlen : 0));
	skb_reset_network_header(skb);
	iph = ip_hdr(skb);
	iph->version  = 4;
	iph->ihl      = 5;
	iph->tos      = inet->tos;
	if (ip_dont_fragment(sk, &rt->u.dst))
		iph->frag_off = htons(IP_DF);
	else
		iph->frag_off = 0;
	iph->ttl      = ip_select_ttl(inet, &rt->u.dst);
	iph->daddr    = rt->rt_dst;
	iph->saddr    = rt->rt_src;
	iph->protocol = sk->sk_protocol;
	iph->tot_len  = htons(skb->len);
	ip_select_ident(iph, &rt->u.dst, sk);

	if (opt && opt->optlen) {
		iph->ihl += opt->optlen>>2;
		ip_options_build(skb, opt, daddr, rt, 0);
	}
	ip_send_check(iph);

	skb->priority = sk->sk_priority;

	/* Send it out. */
	return NF_HOOK(PF_INET, NF_IP_LOCAL_OUT, skb, NULL, rt->u.dst.dev,
		       dst_output);
}

EXPORT_SYMBOL_GPL(ip_build_and_send_pkt);
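
/*
 * Usage sketch (added for illustration): a connection-oriented caller
 * that already holds a routed skb can hand it straight to this helper.
 * The TCP SYN-ACK path does roughly the following; field names are
 * those of struct inet_request_sock, so treat this as an approximation
 * of that caller rather than a verbatim copy:
 *
 *	err = ip_build_and_send_pkt(skb, sk, ireq->loc_addr,
 *				    ireq->rmt_addr, ireq->opt);
 *	err = net_xmit_eval(err);
 */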
static inline int ip_finish_output2(struct sk_buff *skb)
{
	struct dst_entry *dst = skb->dst;
	struct net_device *dev = dst->dev;
	int hh_len = LL_RESERVED_SPACE(dev);

	/* Be paranoid, rather than too clever. */
	if (unlikely(skb_headroom(skb) < hh_len && dev->hard_header)) {
		struct sk_buff *skb2;

		skb2 = skb_realloc_headroom(skb, LL_RESERVED_SPACE(dev));
		if (skb2 == NULL) {
			kfree_skb(skb);
			return -ENOMEM;
		}
		if (skb->sk)
			skb_set_owner_w(skb2, skb->sk);
		kfree_skb(skb);
		skb = skb2;
	}

	if (dst->hh)
		return neigh_hh_output(dst->hh, skb);
	else if (dst->neighbour)
		return dst->neighbour->output(skb);

	if (net_ratelimit())
		printk(KERN_DEBUG "ip_finish_output2: No header cache and no neighbour!\n");
	kfree_skb(skb);
	return -EINVAL;
}
static inline int ip_skb_dst_mtu(struct sk_buff *skb)
{
	struct inet_sock *inet = skb->sk ? inet_sk(skb->sk) : NULL;

	return (inet && inet->pmtudisc == IP_PMTUDISC_PROBE) ?
	       skb->dst->dev->mtu : dst_mtu(skb->dst);
}
static inline int ip_finish_output(struct sk_buff *skb)
{
#if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM)
	/* Policy lookup after SNAT yielded a new policy */
	if (skb->dst->xfrm != NULL) {
		IPCB(skb)->flags |= IPSKB_REROUTED;
		return dst_output(skb);
	}
#endif
	if (skb->len > ip_skb_dst_mtu(skb) && !skb_is_gso(skb))
		return ip_fragment(skb, ip_finish_output2);
	else
		return ip_finish_output2(skb);
}
int ip_mc_output(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;
	struct rtable *rt = (struct rtable *)skb->dst;
	struct net_device *dev = rt->u.dst.dev;

	/*
	 *	If the indicated interface is up and running, send the packet.
	 */
	IP_INC_STATS(IPSTATS_MIB_OUTREQUESTS);

	skb->dev = dev;
	skb->protocol = htons(ETH_P_IP);

	/*
	 *	Multicasts are looped back for other local users
	 */

	if (rt->rt_flags&RTCF_MULTICAST) {
		if ((!sk || inet_sk(sk)->mc_loop)
#ifdef CONFIG_IP_MROUTE
		/* Small optimization: do not loop back non-local frames
		   that returned after forwarding; they will be dropped
		   by ip_mr_input in any case.
		   Note that local frames are looped back to be delivered
		   to local recipients.

		   This check is duplicated in ip_mr_input at the moment.
		 */
		    && ((rt->rt_flags&RTCF_LOCAL) || !(IPCB(skb)->flags&IPSKB_FORWARDED))
#endif
		) {
			struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
			if (newskb)
				NF_HOOK(PF_INET, NF_IP_POST_ROUTING, newskb,
					NULL, newskb->dev,
					ip_dev_loopback_xmit);
		}

		/* Multicasts with ttl 0 must not go beyond the host */

		if (ip_hdr(skb)->ttl == 0) {
			kfree_skb(skb);
			return 0;
		}
	}

	if (rt->rt_flags&RTCF_BROADCAST) {
		struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
		if (newskb)
			NF_HOOK(PF_INET, NF_IP_POST_ROUTING, newskb, NULL,
				newskb->dev, ip_dev_loopback_xmit);
	}

	return NF_HOOK_COND(PF_INET, NF_IP_POST_ROUTING, skb, NULL, skb->dev,
			    ip_finish_output,
			    !(IPCB(skb)->flags & IPSKB_REROUTED));
}
int ip_output(struct sk_buff *skb)
{
	struct net_device *dev = skb->dst->dev;

	IP_INC_STATS(IPSTATS_MIB_OUTREQUESTS);

	skb->dev = dev;
	skb->protocol = htons(ETH_P_IP);

	return NF_HOOK_COND(PF_INET, NF_IP_POST_ROUTING, skb, NULL, dev,
			    ip_finish_output,
			    !(IPCB(skb)->flags & IPSKB_REROUTED));
}
int ip_queue_xmit(struct sk_buff *skb, int ipfragok)
{
	struct sock *sk = skb->sk;
	struct inet_sock *inet = inet_sk(sk);
	struct ip_options *opt = inet->opt;
	struct rtable *rt;
	struct iphdr *iph;

	/* Skip all of this if the packet is already routed,
	 * f.e. by something like SCTP.
	 */
	rt = (struct rtable *) skb->dst;
	if (rt != NULL)
		goto packet_routed;

	/* Make sure we can route this packet. */
	rt = (struct rtable *)__sk_dst_check(sk, 0);
	if (rt == NULL) {
		__be32 daddr;

		/* Use correct destination address if we have options. */
		daddr = inet->daddr;
		if (opt && opt->srr)
			daddr = opt->faddr;

		{
			struct flowi fl = { .oif = sk->sk_bound_dev_if,
					    .nl_u = { .ip4_u =
						      { .daddr = daddr,
							.saddr = inet->saddr,
							.tos = RT_CONN_FLAGS(sk) } },
					    .proto = sk->sk_protocol,
					    .uli_u = { .ports =
						       { .sport = inet->sport,
							 .dport = inet->dport } } };

			/* If this fails, the retransmit mechanism of the
			 * transport layer will keep trying until the route
			 * appears or the connection times itself out.
			 */
			security_sk_classify_flow(sk, &fl);
			if (ip_route_output_flow(&rt, &fl, sk, 0))
				goto no_route;
		}
		sk_setup_caps(sk, &rt->u.dst);
	}
	skb->dst = dst_clone(&rt->u.dst);

packet_routed:
	if (opt && opt->is_strictroute && rt->rt_dst != rt->rt_gateway)
		goto no_route;

	/* OK, we know where to send it, allocate and build IP header. */
	skb_push(skb, sizeof(struct iphdr) + (opt ? opt->optlen : 0));
	skb_reset_network_header(skb);
	iph = ip_hdr(skb);
	*((__be16 *)iph) = htons((4 << 12) | (5 << 8) | (inet->tos & 0xff));
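	/* Added note: the line above writes version, ihl and tos with one
	 * 16-bit store: the first byte on the wire is (4 << 4) | 5 == 0x45
	 * (IPv4, five 32-bit header words), the second is the TOS.  E.g.
	 * tos 0x10 yields the header bytes 45 10.
	 */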
	iph->tot_len = htons(skb->len);
	if (ip_dont_fragment(sk, &rt->u.dst) && !ipfragok)
		iph->frag_off = htons(IP_DF);
	else
		iph->frag_off = 0;
	iph->ttl      = ip_select_ttl(inet, &rt->u.dst);
	iph->protocol = sk->sk_protocol;
	iph->saddr    = rt->rt_src;
	iph->daddr    = rt->rt_dst;
	/* The transport layer has already set skb->h.foo itself. */

	if (opt && opt->optlen) {
		iph->ihl += opt->optlen >> 2;
		ip_options_build(skb, opt, inet->daddr, rt, 0);
	}

	ip_select_ident_more(iph, &rt->u.dst, sk,
			     (skb_shinfo(skb)->gso_segs ?: 1) - 1);

	/* Add an IP checksum. */
	ip_send_check(iph);

	skb->priority = sk->sk_priority;

	return NF_HOOK(PF_INET, NF_IP_LOCAL_OUT, skb, NULL, rt->u.dst.dev,
		       dst_output);

no_route:
	IP_INC_STATS(IPSTATS_MIB_OUTNOROUTES);
	kfree_skb(skb);
	return -EHOSTUNREACH;
}
static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from)
{
	to->pkt_type = from->pkt_type;
	to->priority = from->priority;
	to->protocol = from->protocol;
	dst_release(to->dst);
	to->dst = dst_clone(from->dst);
	to->dev = from->dev;
	to->mark = from->mark;

	/* Copy the flags to each fragment. */
	IPCB(to)->flags = IPCB(from)->flags;

#ifdef CONFIG_NET_SCHED
	to->tc_index = from->tc_index;
#endif
	nf_copy(to, from);
#if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE)
	to->ipvs_property = from->ipvs_property;
#endif
	skb_copy_secmark(to, from);
}
/*
 *	This IP datagram is too large to be sent in one piece.  Break it up
 *	into smaller pieces (each of size equal to the IP header plus a block
 *	of the data of the original IP data part) that will still fit in a
 *	single device frame, and queue such a frame for sending.
 */

int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
{
	struct iphdr *iph;
	int raw = 0;
	int ptr;
	struct net_device *dev;
	struct sk_buff *skb2;
	unsigned int mtu, hlen, left, len, ll_rs, pad;
	int offset;
	__be16 not_last_frag;
	struct rtable *rt = (struct rtable *)skb->dst;
	int err = 0;

	dev = rt->u.dst.dev;

	/*
	 *	Point into the IP datagram header.
	 */

	iph = ip_hdr(skb);

	if (unlikely((iph->frag_off & htons(IP_DF)) && !skb->local_df)) {
		IP_INC_STATS(IPSTATS_MIB_FRAGFAILS);
		icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
			  htonl(ip_skb_dst_mtu(skb)));
		kfree_skb(skb);
		return -EMSGSIZE;
	}

	/*
	 *	Setup starting values.
	 */

	hlen = iph->ihl * 4;
	mtu = dst_mtu(&rt->u.dst) - hlen;	/* Size of data space */
	IPCB(skb)->flags |= IPSKB_FRAG_COMPLETE;

	/* When frag_list is given, use it.  First, check its validity:
	 * some transformers could create a wrong frag_list or break an
	 * existing one; this is not prohibited.  In that case fall back
	 * to copying.
	 *
	 * LATER: this step can be merged into the real generation of
	 * fragments; we can switch to copying when we see the first bad
	 * fragment.
	 */
	if (skb_shinfo(skb)->frag_list) {
		struct sk_buff *frag;
		int first_len = skb_pagelen(skb);

		if (first_len - hlen > mtu ||
		    ((first_len - hlen) & 7) ||
		    (iph->frag_off & htons(IP_MF|IP_OFFSET)) ||
		    skb_cloned(skb))
			goto slow_path;

		for (frag = skb_shinfo(skb)->frag_list; frag; frag = frag->next) {
			/* Correct geometry. */
			if (frag->len > mtu ||
			    ((frag->len & 7) && frag->next) ||
			    skb_headroom(frag) < hlen)
				goto slow_path;

			/* Partially cloned skb? */
			if (skb_shared(frag))
				goto slow_path;

			BUG_ON(frag->sk);
			if (skb->sk) {
				sock_hold(skb->sk);
				frag->sk = skb->sk;
				frag->destructor = sock_wfree;
				skb->truesize -= frag->truesize;
			}
		}

		/* Everything is OK. Generate! */

		err = 0;
		offset = 0;
		frag = skb_shinfo(skb)->frag_list;
		skb_shinfo(skb)->frag_list = NULL;
		skb->data_len = first_len - skb_headlen(skb);
		skb->len = first_len;
		iph->tot_len = htons(first_len);
		iph->frag_off = htons(IP_MF);
		ip_send_check(iph);

		for (;;) {
			/* Prepare header of the next frame,
			 * before previous one went down. */
			if (frag) {
				frag->ip_summed = CHECKSUM_NONE;
				skb_reset_transport_header(frag);
				__skb_push(frag, hlen);
				skb_reset_network_header(frag);
				memcpy(skb_network_header(frag), iph, hlen);
				iph = ip_hdr(frag);
				iph->tot_len = htons(frag->len);
				ip_copy_metadata(frag, skb);
				if (offset == 0)
					ip_options_fragment(frag);
				offset += skb->len - hlen;
				iph->frag_off = htons(offset>>3);
				if (frag->next != NULL)
					iph->frag_off |= htons(IP_MF);
				/* Ready, complete checksum */
				ip_send_check(iph);
			}

			err = output(skb);

			if (!err)
				IP_INC_STATS(IPSTATS_MIB_FRAGCREATES);
			if (err || !frag)
				break;

			skb = frag;
			frag = skb->next;
			skb->next = NULL;
		}

		if (err == 0) {
			IP_INC_STATS(IPSTATS_MIB_FRAGOKS);
			return 0;
		}

		while (frag) {
			skb = frag->next;
			kfree_skb(frag);
			frag = skb;
		}
		IP_INC_STATS(IPSTATS_MIB_FRAGFAILS);
		return err;
	}

slow_path:
	left = skb->len - hlen;		/* Space per frame */
	ptr = raw + hlen;		/* Where to start from */

	/* for bridged IP traffic encapsulated inside f.e. a vlan header,
	 * we need to make room for the encapsulating header
	 */
	pad = nf_bridge_pad(skb);
	ll_rs = LL_RESERVED_SPACE_EXTRA(rt->u.dst.dev, pad);
	mtu -= pad;

	/*
	 *	Fragment the datagram.
	 */

	offset = (ntohs(iph->frag_off) & IP_OFFSET) << 3;
	not_last_frag = iph->frag_off & htons(IP_MF);

	/*
	 *	Keep copying data until we run out.
	 */

	while (left > 0) {
		len = left;
		/* IF: it doesn't fit, use 'mtu' - the data space left */
		if (len > mtu)
			len = mtu;
		/* IF: we are not sending up to and including the packet end
		   then align the next start on an eight byte boundary */
		if (len < left) {
			len &= ~7;
		}
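		/* Added example: if the remaining data space were 1006
		 * bytes, len would be trimmed to 1006 & ~7 == 1000, so
		 * every non-final fragment stays a multiple of 8 bytes and
		 * the 13-bit frag_off field (counted in 8-byte units) can
		 * express the next fragment's offset exactly.
		 */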
		/*
		 *	Allocate buffer.
		 */

		if ((skb2 = alloc_skb(len+hlen+ll_rs, GFP_ATOMIC)) == NULL) {
			NETDEBUG(KERN_INFO "IP: frag: no memory for new fragment!\n");
			err = -ENOMEM;
			goto fail;
		}

		/*
		 *	Set up data on packet
		 */

		ip_copy_metadata(skb2, skb);
		skb_reserve(skb2, ll_rs);
		skb_put(skb2, len + hlen);
		skb_reset_network_header(skb2);
		skb2->transport_header = skb2->network_header + hlen;

		/*
		 *	Charge the memory for the fragment to any owner
		 *	it might possess
		 */

		if (skb->sk)
			skb_set_owner_w(skb2, skb->sk);

		/*
		 *	Copy the packet header into the new buffer.
		 */

		skb_copy_from_linear_data(skb, skb_network_header(skb2), hlen);

		/*
		 *	Copy a block of the IP datagram.
		 */
		if (skb_copy_bits(skb, ptr, skb_transport_header(skb2), len))
			BUG();
		left -= len;

		/*
		 *	Fill in the new header fields.
		 */
		iph = ip_hdr(skb2);
		iph->frag_off = htons((offset >> 3));

		/* ANK: dirty, but effective trick. Upgrade options only if
		 * the segment to be fragmented was THE FIRST (otherwise,
		 * options are already fixed) and make it ONCE
		 * on the initial skb, so that all the following fragments
		 * will inherit fixed options.
		 */
		if (offset == 0)
			ip_options_fragment(skb);

		/*
		 *	Added AC: if we are fragmenting a fragment that's
		 *	not the last fragment then keep MF set on each one
		 */
		if (left > 0 || not_last_frag)
			iph->frag_off |= htons(IP_MF);
		ptr += len;
		offset += len;

		/*
		 *	Put this fragment into the sending queue.
		 */
		iph->tot_len = htons(len + hlen);

		ip_send_check(iph);

		err = output(skb2);
		if (err)
			goto fail;

		IP_INC_STATS(IPSTATS_MIB_FRAGCREATES);
	}
	kfree_skb(skb);
	IP_INC_STATS(IPSTATS_MIB_FRAGOKS);
	return err;

fail:
	kfree_skb(skb);
	IP_INC_STATS(IPSTATS_MIB_FRAGFAILS);
	return err;
}

EXPORT_SYMBOL(ip_fragment);
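
/*
 * Worked example (added): fragmenting a 4000-byte IP payload over a
 * 1500-byte MTU with a 20-byte header leaves 1480 bytes of data space
 * per fragment, so three fragments are emitted:
 *
 *	frag 1: tot_len 1500, frag_off   0,            MF set
 *	frag 2: tot_len 1500, frag_off 185 (1480/8),   MF set
 *	frag 3: tot_len 1060, frag_off 370 (2960/8),   MF clear
 */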
int
ip_generic_getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb)
{
	struct iovec *iov = from;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (memcpy_fromiovecend(to, iov, offset, len) < 0)
			return -EFAULT;
	} else {
		__wsum csum = 0;
		if (csum_partial_copy_fromiovecend(to, iov, offset, len, &csum) < 0)
			return -EFAULT;
		skb->csum = csum_block_add(skb->csum, csum, odd);
	}
	return 0;
}
static inline __wsum
csum_page(struct page *page, int offset, int copy)
{
	char *kaddr;
	__wsum csum;

	kaddr = kmap(page);
	csum = csum_partial(kaddr + offset, copy, 0);
	kunmap(page);
	return csum;
}
static inline int ip_ufo_append_data(struct sock *sk,
			int getfrag(void *from, char *to, int offset, int len,
				    int odd, struct sk_buff *skb),
			void *from, int length, int hh_len, int fragheaderlen,
			int transhdrlen, int mtu, unsigned int flags)
{
	struct sk_buff *skb;
	int err;

	/* There is support for UDP fragmentation offload by the network
	 * device, so create one single skb packet containing the complete
	 * udp datagram
	 */
	if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL) {
		skb = sock_alloc_send_skb(sk,
					  hh_len + fragheaderlen + transhdrlen + 20,
					  (flags & MSG_DONTWAIT), &err);

		if (skb == NULL)
			return err;

		/* reserve space for Hardware header */
		skb_reserve(skb, hh_len);

		/* create space for UDP/IP header */
		skb_put(skb, fragheaderlen + transhdrlen);

		/* initialize network header pointer */
		skb_reset_network_header(skb);

		/* initialize protocol header pointer */
		skb->transport_header = skb->network_header + fragheaderlen;

		skb->ip_summed = CHECKSUM_PARTIAL;
		skb->csum = 0;
		sk->sk_sndmsg_off = 0;
	}

	err = skb_append_datato_frags(sk, skb, getfrag, from,
				      (length - transhdrlen));
	if (!err) {
		/* specify the length of each IP datagram fragment */
		skb_shinfo(skb)->gso_size = mtu - fragheaderlen;
		skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
		__skb_queue_tail(&sk->sk_write_queue, skb);

		return 0;
	}
	/* There is not enough support to do UFO,
	 * so follow the normal path
	 */
	kfree_skb(skb);
	return err;
}
/*
 *	ip_append_data() and ip_append_page() can make one large IP datagram
 *	from many pieces of data.  Each piece is held on the socket until
 *	ip_push_pending_frames() is called.  Each piece can be a page or
 *	non-page data.
 *
 *	Not only UDP, but other transport protocols - e.g. raw sockets -
 *	can potentially use this interface.
 *
 *	LATER: length must be adjusted by pad at tail, when it is required.
 */
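
/*
 * Usage sketch (added for illustration): a corked UDP-style sender
 * queues data and later flushes the whole queue as one datagram,
 * roughly as in the UDP sendmsg path (names such as corkreq and
 * udp_push_pending_frames() follow that caller; treat this as an
 * approximation, not a verbatim copy):
 *
 *	err = ip_append_data(sk, ip_generic_getfrag, msg->msg_iov, ulen,
 *			     sizeof(struct udphdr), &ipc, rt,
 *			     corkreq ? msg->msg_flags | MSG_MORE
 *				     : msg->msg_flags);
 *	if (err)
 *		ip_flush_pending_frames(sk);
 *	else if (!corkreq)
 *		err = udp_push_pending_frames(sk);
 */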
int ip_append_data(struct sock *sk,
		   int getfrag(void *from, char *to, int offset, int len,
			       int odd, struct sk_buff *skb),
		   void *from, int length, int transhdrlen,
		   struct ipcm_cookie *ipc, struct rtable *rt,
		   unsigned int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	struct sk_buff *skb;

	struct ip_options *opt = NULL;
	int hh_len;
	int exthdrlen;
	int mtu;
	int copy;
	int err;
	int offset = 0;
	unsigned int maxfraglen, fragheaderlen;
	int csummode = CHECKSUM_NONE;

	if (flags&MSG_PROBE)
		return 0;

	if (skb_queue_empty(&sk->sk_write_queue)) {
		/*
		 * setup for corking.
		 */
		opt = ipc->opt;
		if (opt) {
			if (inet->cork.opt == NULL) {
				inet->cork.opt = kmalloc(sizeof(struct ip_options) + 40, sk->sk_allocation);
				if (unlikely(inet->cork.opt == NULL))
					return -ENOBUFS;
			}
			memcpy(inet->cork.opt, opt, sizeof(struct ip_options)+opt->optlen);
			inet->cork.flags |= IPCORK_OPT;
			inet->cork.addr = ipc->addr;
		}
		dst_hold(&rt->u.dst);
		inet->cork.fragsize = mtu = inet->pmtudisc == IP_PMTUDISC_PROBE ?
					    rt->u.dst.dev->mtu :
					    dst_mtu(rt->u.dst.path);
		inet->cork.rt = rt;
		inet->cork.length = 0;
		sk->sk_sndmsg_page = NULL;
		sk->sk_sndmsg_off = 0;
		if ((exthdrlen = rt->u.dst.header_len) != 0) {
			length += exthdrlen;
			transhdrlen += exthdrlen;
		}
	} else {
		rt = inet->cork.rt;
		if (inet->cork.flags & IPCORK_OPT)
			opt = inet->cork.opt;

		transhdrlen = 0;
		exthdrlen = 0;
		mtu = inet->cork.fragsize;
	}
	hh_len = LL_RESERVED_SPACE(rt->u.dst.dev);

	fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;
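	/* Added example: for an Ethernet mtu of 1500 with no IP options,
	 * fragheaderlen is 20 and maxfraglen = ((1500 - 20) & ~7) + 20 =
	 * 1500, since 1480 is already a multiple of 8.  With 4 bytes of
	 * options it becomes ((1500 - 24) & ~7) + 24 = 1496.
	 */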
	if (inet->cork.length + length > 0xFFFF - fragheaderlen) {
		ip_local_error(sk, EMSGSIZE, rt->rt_dst, inet->dport, mtu-exthdrlen);
		return -EMSGSIZE;
	}

	/*
	 * transhdrlen > 0 means that this is the first fragment and we wish
	 * it won't be fragmented in the future.
	 */
	if (transhdrlen &&
	    length + fragheaderlen <= mtu &&
	    rt->u.dst.dev->features & NETIF_F_ALL_CSUM &&
	    !exthdrlen)
		csummode = CHECKSUM_PARTIAL;

	inet->cork.length += length;
	if (((length > mtu) && (sk->sk_protocol == IPPROTO_UDP)) &&
	    (rt->u.dst.dev->features & NETIF_F_UFO)) {

		err = ip_ufo_append_data(sk, getfrag, from, length, hh_len,
					 fragheaderlen, transhdrlen, mtu,
					 flags);
		if (err)
			goto error;
		return 0;
	}

	/* So, what's going on in the loop below?
	 *
	 * We use the calculated fragment length to generate a chained skb;
	 * each segment is an IP fragment ready for sending to the network
	 * after adding the appropriate IP header.
	 */

	if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL)
		goto alloc_new_skb;

	while (length > 0) {
		/* Check if the remaining data fits into current packet. */
		copy = mtu - skb->len;
		if (copy < length)
			copy = maxfraglen - skb->len;
		if (copy <= 0) {
			char *data;
			unsigned int datalen;
			unsigned int fraglen;
			unsigned int fraggap;
			unsigned int alloclen;
			struct sk_buff *skb_prev;
alloc_new_skb:
			skb_prev = skb;
			if (skb_prev)
				fraggap = skb_prev->len - maxfraglen;
			else
				fraggap = 0;

			/*
			 * If remaining data exceeds the mtu,
			 * we know we need more fragment(s).
			 */
			datalen = length + fraggap;
			if (datalen > mtu - fragheaderlen)
				datalen = maxfraglen - fragheaderlen;
			fraglen = datalen + fragheaderlen;

			if ((flags & MSG_MORE) &&
			    !(rt->u.dst.dev->features&NETIF_F_SG))
				alloclen = mtu;
			else
				alloclen = datalen + fragheaderlen;

			/* The last fragment gets additional space at tail.
			 * Note, with MSG_MORE we overallocate on fragments,
			 * because we have no idea what fragment will be
			 * the last.
			 */
			if (datalen == length + fraggap)
				alloclen += rt->u.dst.trailer_len;

			if (transhdrlen) {
				skb = sock_alloc_send_skb(sk,
						alloclen + hh_len + 15,
						(flags & MSG_DONTWAIT), &err);
			} else {
				skb = NULL;
				if (atomic_read(&sk->sk_wmem_alloc) <=
				    2 * sk->sk_sndbuf)
					skb = sock_wmalloc(sk,
							   alloclen + hh_len + 15, 1,
							   sk->sk_allocation);
				if (unlikely(skb == NULL))
					err = -ENOBUFS;
			}
			if (skb == NULL)
				goto error;

			/*
			 *	Fill in the control structures
			 */
			skb->ip_summed = csummode;
			skb->csum = 0;
			skb_reserve(skb, hh_len);

			/*
			 *	Find where to start putting bytes.
			 */
			data = skb_put(skb, fraglen);
			skb_set_network_header(skb, exthdrlen);
			skb->transport_header = (skb->network_header +
						 fragheaderlen);
			data += fragheaderlen;

			if (fraggap) {
				skb->csum = skb_copy_and_csum_bits(
					skb_prev, maxfraglen,
					data + transhdrlen, fraggap, 0);
				skb_prev->csum = csum_sub(skb_prev->csum,
							  skb->csum);
				data += fraggap;
				pskb_trim_unique(skb_prev, maxfraglen);
			}

			copy = datalen - transhdrlen - fraggap;
			if (copy > 0 && getfrag(from, data + transhdrlen, offset, copy, fraggap, skb) < 0) {
				err = -EFAULT;
				kfree_skb(skb);
				goto error;
			}

			offset += copy;
			length -= datalen - fraggap;
			transhdrlen = 0;
			exthdrlen = 0;
			csummode = CHECKSUM_NONE;

			/*
			 * Put the packet on the pending queue.
			 */
			__skb_queue_tail(&sk->sk_write_queue, skb);
			continue;
		}

		if (copy > length)
			copy = length;

		if (!(rt->u.dst.dev->features&NETIF_F_SG)) {
			unsigned int off;

			off = skb->len;
			if (getfrag(from, skb_put(skb, copy),
					offset, copy, off, skb) < 0) {
				__skb_trim(skb, off);
				err = -EFAULT;
				goto error;
			}
		} else {
			int i = skb_shinfo(skb)->nr_frags;
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i-1];
			struct page *page = sk->sk_sndmsg_page;
			int off = sk->sk_sndmsg_off;
			unsigned int left;

			if (page && (left = PAGE_SIZE - off) > 0) {
				if (copy >= left)
					copy = left;
				if (page != frag->page) {
					if (i == MAX_SKB_FRAGS) {
						err = -EMSGSIZE;
						goto error;
					}
					get_page(page);
					skb_fill_page_desc(skb, i, page, sk->sk_sndmsg_off, 0);
					frag = &skb_shinfo(skb)->frags[i];
				}
			} else if (i < MAX_SKB_FRAGS) {
				if (copy > PAGE_SIZE)
					copy = PAGE_SIZE;
				page = alloc_pages(sk->sk_allocation, 0);
				if (page == NULL) {
					err = -ENOMEM;
					goto error;
				}
				sk->sk_sndmsg_page = page;
				sk->sk_sndmsg_off = 0;

				skb_fill_page_desc(skb, i, page, 0, 0);
				frag = &skb_shinfo(skb)->frags[i];
				skb->truesize += PAGE_SIZE;
				atomic_add(PAGE_SIZE, &sk->sk_wmem_alloc);
			} else {
				err = -EMSGSIZE;
				goto error;
			}
			if (getfrag(from, page_address(frag->page)+frag->page_offset+frag->size, offset, copy, skb->len, skb) < 0) {
				err = -EFAULT;
				goto error;
			}
			sk->sk_sndmsg_off += copy;
			frag->size += copy;
			skb->len += copy;
			skb->data_len += copy;
		}
		offset += copy;
		length -= copy;
	}

	return 0;

error:
	inet->cork.length -= length;
	IP_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
	return err;
}
ssize_t	ip_append_page(struct sock *sk, struct page *page,
		       int offset, size_t size, int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	struct sk_buff *skb;
	struct rtable *rt;
	struct ip_options *opt = NULL;
	int hh_len;
	int mtu;
	int len;
	int err;
	unsigned int maxfraglen, fragheaderlen, fraggap;

	if (inet->hdrincl)
		return -EPERM;

	if (flags&MSG_PROBE)
		return 0;

	if (skb_queue_empty(&sk->sk_write_queue))
		return -EINVAL;

	rt = inet->cork.rt;
	if (inet->cork.flags & IPCORK_OPT)
		opt = inet->cork.opt;

	if (!(rt->u.dst.dev->features&NETIF_F_SG))
		return -EOPNOTSUPP;

	hh_len = LL_RESERVED_SPACE(rt->u.dst.dev);
	mtu = inet->cork.fragsize;

	fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;

	if (inet->cork.length + size > 0xFFFF - fragheaderlen) {
		ip_local_error(sk, EMSGSIZE, rt->rt_dst, inet->dport, mtu);
		return -EMSGSIZE;
	}

	if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL)
		return -EINVAL;

	inet->cork.length += size;
	if ((sk->sk_protocol == IPPROTO_UDP) &&
	    (rt->u.dst.dev->features & NETIF_F_UFO)) {
		skb_shinfo(skb)->gso_size = mtu - fragheaderlen;
		skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
	}

	while (size > 0) {
		int i;

		if (skb_is_gso(skb))
			len = size;
		else {
			/* Check if the remaining data fits into the
			 * current packet. */
			len = mtu - skb->len;
			if (len < size)
				len = maxfraglen - skb->len;
		}
		if (len <= 0) {
			struct sk_buff *skb_prev;
			int alloclen;

			skb_prev = skb;
			fraggap = skb_prev->len - maxfraglen;

			alloclen = fragheaderlen + hh_len + fraggap + 15;
			skb = sock_wmalloc(sk, alloclen, 1, sk->sk_allocation);
			if (unlikely(!skb)) {
				err = -ENOBUFS;
				goto error;
			}

			/*
			 *	Fill in the control structures
			 */
			skb->ip_summed = CHECKSUM_NONE;
			skb->csum = 0;
			skb_reserve(skb, hh_len);

			/*
			 *	Find where to start putting bytes.
			 */
			skb_put(skb, fragheaderlen + fraggap);
			skb_reset_network_header(skb);
			skb->transport_header = (skb->network_header +
						 fragheaderlen);
			if (fraggap) {
				skb->csum = skb_copy_and_csum_bits(skb_prev,
								   maxfraglen,
						    skb_transport_header(skb),
								   fraggap, 0);
				skb_prev->csum = csum_sub(skb_prev->csum,
							  skb->csum);
				pskb_trim_unique(skb_prev, maxfraglen);
			}

			/*
			 * Put the packet on the pending queue.
			 */
			__skb_queue_tail(&sk->sk_write_queue, skb);
			continue;
		}

		i = skb_shinfo(skb)->nr_frags;
		if (len > size)
			len = size;
		if (skb_can_coalesce(skb, i, page, offset)) {
			skb_shinfo(skb)->frags[i-1].size += len;
		} else if (i < MAX_SKB_FRAGS) {
			get_page(page);
			skb_fill_page_desc(skb, i, page, offset, len);
		} else {
			err = -EMSGSIZE;
			goto error;
		}

		if (skb->ip_summed == CHECKSUM_NONE) {
			__wsum csum;
			csum = csum_page(page, offset, len);
			skb->csum = csum_block_add(skb->csum, csum, skb->len);
		}

		skb->len += len;
		skb->data_len += len;
		offset += len;
		size -= len;
	}
	return 0;

error:
	inet->cork.length -= size;
	IP_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
	return err;
}
/*
 *	Combine all pending IP fragments on the socket into one IP datagram
 *	and push them out.
 */
int ip_push_pending_frames(struct sock *sk)
{
	struct sk_buff *skb, *tmp_skb;
	struct sk_buff **tail_skb;
	struct inet_sock *inet = inet_sk(sk);
	struct ip_options *opt = NULL;
	struct rtable *rt = inet->cork.rt;
	struct iphdr *iph;
	__be16 df = 0;
	__u8 ttl;
	int err = 0;

	if ((skb = __skb_dequeue(&sk->sk_write_queue)) == NULL)
		goto out;
	tail_skb = &(skb_shinfo(skb)->frag_list);

	/* move skb->data to ip header from ext header */
	if (skb->data < skb_network_header(skb))
		__skb_pull(skb, skb_network_offset(skb));
	while ((tmp_skb = __skb_dequeue(&sk->sk_write_queue)) != NULL) {
		__skb_pull(tmp_skb, skb_network_header_len(skb));
		*tail_skb = tmp_skb;
		tail_skb = &(tmp_skb->next);
		skb->len += tmp_skb->len;
		skb->data_len += tmp_skb->len;
		skb->truesize += tmp_skb->truesize;
		__sock_put(tmp_skb->sk);
		tmp_skb->destructor = NULL;
		tmp_skb->sk = NULL;
	}

	/* Unless the user demanded real pmtu discovery (IP_PMTUDISC_DO), we
	 * allow fragmenting the frame generated here.  No matter how the
	 * transforms change the size of the packet, it will come out.
	 */
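	/* Added note: the net effect of the two tests below is, roughly:
	 *	IP_PMTUDISC_DO   - DF set, no local fragmentation
	 *	IP_PMTUDISC_WANT - DF set only while the frame fits the
	 *			   route MTU; local fragmentation allowed
	 *	IP_PMTUDISC_DONT - DF clear, local fragmentation allowed
	 */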
	if (inet->pmtudisc < IP_PMTUDISC_DO)
		skb->local_df = 1;

	/* The DF bit is set when we want to see DF on outgoing frames.
	 * If local_df is set too, we still allow this frame to be
	 * fragmented locally. */
	if (inet->pmtudisc >= IP_PMTUDISC_DO ||
	    (skb->len <= dst_mtu(&rt->u.dst) &&
	     ip_dont_fragment(sk, &rt->u.dst)))
		df = htons(IP_DF);

	if (inet->cork.flags & IPCORK_OPT)
		opt = inet->cork.opt;

	if (rt->rt_type == RTN_MULTICAST)
		ttl = inet->mc_ttl;
	else
		ttl = ip_select_ttl(inet, &rt->u.dst);

	iph = (struct iphdr *)skb->data;
	iph->version = 4;
	iph->ihl = 5;
	if (opt) {
		iph->ihl += opt->optlen>>2;
		ip_options_build(skb, opt, inet->cork.addr, rt, 0);
	}
	iph->tos = inet->tos;
	iph->tot_len = htons(skb->len);
	iph->frag_off = df;
	ip_select_ident(iph, &rt->u.dst, sk);
	iph->ttl = ttl;
	iph->protocol = sk->sk_protocol;
	iph->saddr = rt->rt_src;
	iph->daddr = rt->rt_dst;
	ip_send_check(iph);

	skb->priority = sk->sk_priority;
	skb->dst = dst_clone(&rt->u.dst);

	/* Netfilter gets the whole, not yet fragmented skb. */
	err = NF_HOOK(PF_INET, NF_IP_LOCAL_OUT, skb, NULL,
		      skb->dst->dev, dst_output);
	if (err) {
		if (err > 0)
			err = inet->recverr ? net_xmit_errno(err) : 0;
		if (err)
			goto error;
	}

out:
	inet->cork.flags &= ~IPCORK_OPT;
	kfree(inet->cork.opt);
	inet->cork.opt = NULL;
	if (inet->cork.rt) {
		ip_rt_put(inet->cork.rt);
		inet->cork.rt = NULL;
	}
	return err;

error:
	IP_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
	goto out;
}
/*
 *	Throw away all pending data on the socket.
 */
void ip_flush_pending_frames(struct sock *sk)
{
	struct inet_sock *inet = inet_sk(sk);
	struct sk_buff *skb;

	while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL)
		kfree_skb(skb);

	inet->cork.flags &= ~IPCORK_OPT;
	kfree(inet->cork.opt);
	inet->cork.opt = NULL;
	if (inet->cork.rt) {
		ip_rt_put(inet->cork.rt);
		inet->cork.rt = NULL;
	}
}
/*
 *	Fetch data from kernel space and fill in checksum if needed.
 */
static int ip_reply_glue_bits(void *dptr, char *to, int offset,
			      int len, int odd, struct sk_buff *skb)
{
	__wsum csum;

	csum = csum_partial_copy_nocheck(dptr+offset, to, len, 0);
	skb->csum = csum_block_add(skb->csum, csum, odd);
	return 0;
}
/*
 *	Generic function to send a packet as reply to another packet.
 *	Used to send TCP resets so far.  ICMP should use this function too.
 *
 *	Should run single threaded per socket because it uses the sock
 *	structure to pass arguments.
 *
 *	LATER: switch from ip_build_xmit to ip_append_*
 */
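
/*
 * Usage sketch (added for illustration): the TCP reset path replies
 * from a pre-cooked control socket with locally disabled BH, roughly
 * as follows (argument setup abridged; this mirrors tcp_v4_send_reset()
 * and is an approximation, not a verbatim copy):
 *
 *	struct ip_reply_arg arg;
 *
 *	memset(&arg, 0, sizeof(arg));
 *	arg.iov[0].iov_base = (unsigned char *)&rep;
 *	arg.iov[0].iov_len  = sizeof(rep.th);
 *	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
 *	ip_send_reply(tcp_socket->sk, skb, &arg, arg.iov[0].iov_len);
 */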
void ip_send_reply(struct sock *sk, struct sk_buff *skb, struct ip_reply_arg *arg,
		   unsigned int len)
{
	struct inet_sock *inet = inet_sk(sk);
	struct {
		struct ip_options	opt;
		char			data[40];
	} replyopts;
	struct ipcm_cookie ipc;
	__be32 daddr;
	struct rtable *rt = (struct rtable *)skb->dst;

	if (ip_options_echo(&replyopts.opt, skb))
		return;

	daddr = ipc.addr = rt->rt_src;
	ipc.opt = NULL;

	if (replyopts.opt.optlen) {
		ipc.opt = &replyopts.opt;

		if (ipc.opt->srr)
			daddr = replyopts.opt.faddr;
	}

	{
		struct flowi fl = { .nl_u = { .ip4_u =
					      { .daddr = daddr,
						.saddr = rt->rt_spec_dst,
						.tos = RT_TOS(ip_hdr(skb)->tos) } },
				    /* Not quite clean, but right. */
				    .uli_u = { .ports =
					       { .sport = tcp_hdr(skb)->dest,
						 .dport = tcp_hdr(skb)->source } },
				    .proto = sk->sk_protocol };
		security_skb_classify_flow(skb, &fl);
		if (ip_route_output_key(&rt, &fl))
			return;
	}

	/* And let IP do all the hard work.

	   This chunk is not reentrant, hence the spinlock.
	   Note that it uses the fact that this function is called
	   with locally disabled BH and that sk cannot be already
	   spinlocked.
	 */
	bh_lock_sock(sk);
	inet->tos = ip_hdr(skb)->tos;
	sk->sk_priority = skb->priority;
	sk->sk_protocol = ip_hdr(skb)->protocol;
	ip_append_data(sk, ip_reply_glue_bits, arg->iov->iov_base, len, 0,
		       &ipc, rt, MSG_DONTWAIT);
	if ((skb = skb_peek(&sk->sk_write_queue)) != NULL) {
		if (arg->csumoffset >= 0)
			*((__sum16 *)skb_transport_header(skb) +
			  arg->csumoffset) = csum_fold(csum_add(skb->csum,
								arg->csum));
		skb->ip_summed = CHECKSUM_NONE;
		ip_push_pending_frames(sk);
	}

	bh_unlock_sock(sk);

	ip_rt_put(rt);
}
void __init ip_init(void)
{
	ip_rt_init();
	inet_initpeers();

#if defined(CONFIG_IP_MULTICAST) && defined(CONFIG_PROC_FS)
	igmp_mc_proc_init();
#endif
}

EXPORT_SYMBOL(ip_generic_getfrag);
EXPORT_SYMBOL(ip_queue_xmit);
EXPORT_SYMBOL(ip_send_check);