/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		The Internet Protocol (IP) output module.
 *
 * Authors:	Ross Biro
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Donald Becker, <becker@super.org>
 *		Alan Cox, <Alan.Cox@linux.org>
 *		Richard Underwood
 *		Stefan Becker, <stefanb@yello.ping.de>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Hirokazu Takahashi, <taka@valinux.co.jp>
 *
 *	See ip_input.c for original log
 *
 *	Fixes:
 *		Alan Cox	:	Missing nonblock feature in ip_build_xmit.
 *		Mike Kilburn	:	htons() missing in ip_build_xmit.
 *		Bradford Johnson:	Fix faulty handling of some frames when
 *					no route is found.
 *		Alexander Demenshin:	Missing sk/skb free in ip_queue_xmit
 *					(in case if packet not accepted by
 *					output firewall rules)
 *		Mike McLagan	:	Routing by source
 *		Alexey Kuznetsov:	use new route cache
 *		Andi Kleen:		Fix broken PMTU recovery and remove
 *					some redundant tests.
 *	Vitaly E. Lavrov	:	Transparent proxy revived after year coma.
 *		Andi Kleen	:	Replace ip_reply with ip_send_reply.
 *		Andi Kleen	:	Split fast and slow ip_build_xmit path
 *					for decreased register pressure on x86
 *					and more readability.
 *		Marc Boucher	:	When call_out_firewall returns FW_QUEUE,
 *					silently drop skb instead of failing with -EPERM.
 *		Detlev Wengorz	:	Copy protocol for fragments.
 *		Hirokazu Takahashi:	HW checksumming for outgoing UDP
 *					datagrams.
 *		Hirokazu Takahashi:	sendfile() on UDP works now.
 */
#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/highmem.h>
#include <linux/slab.h>

#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/init.h>

#include <net/snmp.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/route.h>
#include <net/xfrm.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/arp.h>
#include <net/icmp.h>
#include <net/checksum.h>
#include <net/inetpeer.h>
#include <linux/igmp.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_bridge.h>
#include <linux/mroute.h>
#include <linux/netlink.h>
#include <linux/tcp.h>
int sysctl_ip_default_ttl __read_mostly = IPDEFTTL;
/* Generate a checksum for an outgoing IP datagram. */
__inline__ void ip_send_check(struct iphdr *iph)
{
	iph->check = 0;
	iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
}
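
/*
 * Illustrative sketch (added; not part of the original file): the
 * checksum covers the whole IP header, so any field change must be
 * followed by a recomputation. A hypothetical TTL rewrite looks like
 * this (the real forwarding path uses an incremental update instead,
 * see ip_decrease_ttl()):
 */
static inline void example_rewrite_ttl(struct iphdr *iph, u8 new_ttl)
{
	iph->ttl = new_ttl;	/* header changed ...		   */
	ip_send_check(iph);	/* ... so rebuild iph->check here. */
}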
int __ip_local_out(struct sk_buff *skb)
{
	struct iphdr *iph = ip_hdr(skb);

	iph->tot_len = htons(skb->len);
	ip_send_check(iph);
	return nf_hook(NFPROTO_IPV4, NF_INET_LOCAL_OUT, skb, NULL,
		       skb_dst(skb)->dev, dst_output);
}
int ip_local_out(struct sk_buff *skb)
{
	int err;

	err = __ip_local_out(skb);
	if (likely(err == 1))
		err = dst_output(skb);

	return err;
}
EXPORT_SYMBOL_GPL(ip_local_out);
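
/*
 * Illustrative sketch (added; not part of the original file): a caller
 * that has already built a complete IPv4 header, and whose skb already
 * carries a route (skb_dst), can hand the packet to the stack like
 * this. __ip_local_out() returns 1 when the LOCAL_OUT hook accepts the
 * packet, which is why ip_local_out() above only then calls
 * dst_output().
 */
static int example_xmit_prebuilt_skb(struct sk_buff *skb)
{
	/* tot_len and the header checksum are filled in by __ip_local_out() */
	return ip_local_out(skb);
}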
/* dev_loopback_xmit for use with netfilter. */
static int ip_dev_loopback_xmit(struct sk_buff *newskb)
{
	skb_reset_mac_header(newskb);
	__skb_pull(newskb, skb_network_offset(newskb));
	newskb->pkt_type = PACKET_LOOPBACK;
	newskb->ip_summed = CHECKSUM_UNNECESSARY;
	WARN_ON(!skb_dst(newskb));
	netif_rx_ni(newskb);
	return 0;
}
static inline int ip_select_ttl(struct inet_sock *inet, struct dst_entry *dst)
{
	int ttl = inet->uc_ttl;

	if (ttl < 0)
		ttl = dst_metric(dst, RTAX_HOPLIMIT);
	return ttl;
}
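
/*
 * Note (added): inet->uc_ttl stays at -1 unless the application set
 * IP_TTL explicitly, so the route metric (RTAX_HOPLIMIT, normally
 * sysctl_ip_default_ttl) is used as the fallback. For example, after
 * setsockopt(fd, IPPROTO_IP, IP_TTL, &val, sizeof(val)) with val = 5,
 * ip_select_ttl() returns 5 regardless of the route.
 */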
/*
 *		Add an ip header to a skbuff and send it out.
 *
 */
int ip_build_and_send_pkt(struct sk_buff *skb, struct sock *sk,
			  __be32 saddr, __be32 daddr, struct ip_options *opt)
{
	struct inet_sock *inet = inet_sk(sk);
	struct rtable *rt = skb_rtable(skb);
	struct iphdr *iph;

	/* Build the IP header. */
	skb_push(skb, sizeof(struct iphdr) + (opt ? opt->optlen : 0));
	skb_reset_network_header(skb);
	iph = ip_hdr(skb);
	iph->version  = 4;
	iph->ihl      = 5;
	iph->tos      = inet->tos;
	if (ip_dont_fragment(sk, &rt->u.dst))
		iph->frag_off = htons(IP_DF);
	else
		iph->frag_off = 0;
	iph->ttl      = ip_select_ttl(inet, &rt->u.dst);
	iph->daddr    = rt->rt_dst;
	iph->saddr    = rt->rt_src;
	iph->protocol = sk->sk_protocol;
	ip_select_ident(iph, &rt->u.dst, sk);

	if (opt && opt->optlen) {
		iph->ihl += opt->optlen >> 2;
		ip_options_build(skb, opt, daddr, rt, 0);
	}

	skb->priority = sk->sk_priority;
	skb->mark = sk->sk_mark;

	/* Send it out. */
	return ip_local_out(skb);
}
EXPORT_SYMBOL_GPL(ip_build_and_send_pkt);
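
/*
 * Illustrative sketch (added; assumption, not in the original file):
 * this helper is meant for packets that do not belong to a fully
 * established flow, e.g. a TCP SYN-ACK answered on behalf of a request
 * socket. A hypothetical caller looks like:
 *
 *	skb = build_transport_payload(...);	// hypothetical helper
 *	skb_dst_set(skb, &rt->u.dst);
 *	ip_build_and_send_pkt(skb, sk, saddr, daddr, opt);
 *
 * The routine fills in the complete IPv4 header itself, so the caller
 * only provides the transport payload and an attached route.
 */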
static inline int ip_finish_output2(struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct rtable *rt = (struct rtable *)dst;
	struct net_device *dev = dst->dev;
	unsigned int hh_len = LL_RESERVED_SPACE(dev);

	if (rt->rt_type == RTN_MULTICAST) {
		IP_UPD_PO_STATS(dev_net(dev), IPSTATS_MIB_OUTMCAST, skb->len);
	} else if (rt->rt_type == RTN_BROADCAST)
		IP_UPD_PO_STATS(dev_net(dev), IPSTATS_MIB_OUTBCAST, skb->len);

	/* Be paranoid, rather than too clever. */
	if (unlikely(skb_headroom(skb) < hh_len && dev->header_ops)) {
		struct sk_buff *skb2;

		skb2 = skb_realloc_headroom(skb, LL_RESERVED_SPACE(dev));
		if (skb2 == NULL) {
			kfree_skb(skb);
			return -ENOMEM;
		}
		if (skb->sk)
			skb_set_owner_w(skb2, skb->sk);
		kfree_skb(skb);
		skb = skb2;
	}

	if (dst->hh)
		return neigh_hh_output(dst->hh, skb);
	else if (dst->neighbour)
		return dst->neighbour->output(skb);

	if (net_ratelimit())
		printk(KERN_DEBUG "ip_finish_output2: No header cache and no neighbour!\n");
	kfree_skb(skb);
	return -EINVAL;
}
static inline int ip_skb_dst_mtu(struct sk_buff *skb)
{
	struct inet_sock *inet = skb->sk ? inet_sk(skb->sk) : NULL;

	return (inet && inet->pmtudisc == IP_PMTUDISC_PROBE) ?
	       skb_dst(skb)->dev->mtu : dst_mtu(skb_dst(skb));
}
static int ip_finish_output(struct sk_buff *skb)
{
#if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM)
	/* Policy lookup after SNAT yielded a new policy */
	if (skb_dst(skb)->xfrm != NULL) {
		IPCB(skb)->flags |= IPSKB_REROUTED;
		return dst_output(skb);
	}
#endif
	if (skb->len > ip_skb_dst_mtu(skb) && !skb_is_gso(skb))
		return ip_fragment(skb, ip_finish_output2);
	else
		return ip_finish_output2(skb);
}
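
/*
 * Note (added): the MTU test in ip_finish_output() means only non-GSO
 * packets that exceed the path MTU are fragmented here; GSO packets
 * are left intact because they are segmented into MTU-sized frames
 * later, either by the hardware or by software GSO at
 * dev_queue_xmit() time.
 */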
int ip_mc_output(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;
	struct rtable *rt = skb_rtable(skb);
	struct net_device *dev = rt->u.dst.dev;

	/*
	 *	If the indicated interface is up and running, send the packet.
	 */
	IP_UPD_PO_STATS(dev_net(dev), IPSTATS_MIB_OUT, skb->len);

	skb->dev = dev;
	skb->protocol = htons(ETH_P_IP);

	/*
	 *	Multicasts are looped back for other local users
	 */

	if (rt->rt_flags&RTCF_MULTICAST) {
		if (sk_mc_loop(sk)
#ifdef CONFIG_IP_MROUTE
		/* Small optimization: do not loopback not local frames,
		   which returned after forwarding; they will be  dropped
		   by ip_mr_input in any case.
		   Note, that local frames are looped back to be delivered
		   to local recipients.

		   This check is duplicated in ip_mr_input at the moment.
		 */
		    &&
		    ((rt->rt_flags & RTCF_LOCAL) ||
		     !(IPCB(skb)->flags & IPSKB_FORWARDED))
#endif
		   ) {
			struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
			if (newskb)
				NF_HOOK(NFPROTO_IPV4, NF_INET_POST_ROUTING,
					newskb, NULL, newskb->dev,
					ip_dev_loopback_xmit);
		}

		/* Multicasts with ttl 0 must not go beyond the host */

		if (ip_hdr(skb)->ttl == 0) {
			kfree_skb(skb);
			return 0;
		}
	}

	if (rt->rt_flags&RTCF_BROADCAST) {
		struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
		if (newskb)
			NF_HOOK(NFPROTO_IPV4, NF_INET_POST_ROUTING, newskb,
				NULL, newskb->dev, ip_dev_loopback_xmit);
	}

	return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING, skb, NULL,
			    skb->dev, ip_finish_output,
			    !(IPCB(skb)->flags & IPSKB_REROUTED));
}
int ip_output(struct sk_buff *skb)
{
	struct net_device *dev = skb_dst(skb)->dev;

	IP_UPD_PO_STATS(dev_net(dev), IPSTATS_MIB_OUT, skb->len);

	skb->dev = dev;
	skb->protocol = htons(ETH_P_IP);

	return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING, skb, NULL, dev,
			    ip_finish_output,
			    !(IPCB(skb)->flags & IPSKB_REROUTED));
}
int ip_queue_xmit(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;
	struct inet_sock *inet = inet_sk(sk);
	struct ip_options *opt = inet->opt;
	struct rtable *rt;
	struct iphdr *iph;
	int res;

	/* Skip all of this if the packet is already routed,
	 * f.e. by something like SCTP.
	 */
	rcu_read_lock();
	rt = skb_rtable(skb);
	if (rt != NULL)
		goto packet_routed;

	/* Make sure we can route this packet. */
	rt = (struct rtable *)__sk_dst_check(sk, 0);
	if (rt == NULL) {
		__be32 daddr;

		/* Use correct destination address if we have options. */
		daddr = inet->inet_daddr;
		if (opt && opt->srr)
			daddr = opt->faddr;

		{
			struct flowi fl = { .oif = sk->sk_bound_dev_if,
					    .mark = sk->sk_mark,
					    .nl_u = { .ip4_u =
						      { .daddr = daddr,
							.saddr = inet->inet_saddr,
							.tos = RT_CONN_FLAGS(sk) } },
					    .proto = sk->sk_protocol,
					    .flags = inet_sk_flowi_flags(sk),
					    .uli_u = { .ports =
						       { .sport = inet->inet_sport,
							 .dport = inet->inet_dport } } };

			/* If this fails, retransmit mechanism of transport layer will
			 * keep trying until route appears or the connection times
			 * itself out.
			 */
			security_sk_classify_flow(sk, &fl);
			if (ip_route_output_flow(sock_net(sk), &rt, &fl, sk, 0))
				goto no_route;
		}
		sk_setup_caps(sk, &rt->u.dst);
	}
	skb_dst_set_noref(skb, &rt->u.dst);

packet_routed:
	if (opt && opt->is_strictroute && rt->rt_dst != rt->rt_gateway)
		goto no_route;

	/* OK, we know where to send it, allocate and build IP header. */
	skb_push(skb, sizeof(struct iphdr) + (opt ? opt->optlen : 0));
	skb_reset_network_header(skb);
	iph = ip_hdr(skb);
	*((__be16 *)iph) = htons((4 << 12) | (5 << 8) | (inet->tos & 0xff));
	if (ip_dont_fragment(sk, &rt->u.dst) && !skb->local_df)
		iph->frag_off = htons(IP_DF);
	else
		iph->frag_off = 0;
	iph->ttl      = ip_select_ttl(inet, &rt->u.dst);
	iph->protocol = sk->sk_protocol;
	iph->saddr    = rt->rt_src;
	iph->daddr    = rt->rt_dst;
	/* Transport layer set skb->h.foo itself. */

	if (opt && opt->optlen) {
		iph->ihl += opt->optlen >> 2;
		ip_options_build(skb, opt, inet->inet_daddr, rt, 0);
	}

	ip_select_ident_more(iph, &rt->u.dst, sk,
			     (skb_shinfo(skb)->gso_segs ?: 1) - 1);

	skb->priority = sk->sk_priority;
	skb->mark = sk->sk_mark;

	res = ip_local_out(skb);
	rcu_read_unlock();
	return res;

no_route:
	rcu_read_unlock();
	IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
	kfree_skb(skb);
	return -EHOSTUNREACH;
}
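
/*
 * Worked example (added): the 16-bit store in ip_queue_xmit() fills
 * version, IHL and TOS in one write. For tos == 0x10,
 * htons((4 << 12) | (5 << 8) | 0x10) == htons(0x4510) lays the bytes
 * out as 0x45 0x10 on the wire: version 4 in the top nibble, header
 * length 5 (5 * 4 = 20 bytes) in the next nibble, then the TOS octet.
 */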
static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from)
{
	to->pkt_type = from->pkt_type;
	to->priority = from->priority;
	to->protocol = from->protocol;
	skb_dst_drop(to);
	skb_dst_set(to, dst_clone(skb_dst(from)));
	to->dev = from->dev;
	to->mark = from->mark;

	/* Copy the flags to each fragment. */
	IPCB(to)->flags = IPCB(from)->flags;

#ifdef CONFIG_NET_SCHED
	to->tc_index = from->tc_index;
#endif
	nf_copy(to, from);
#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
    defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
	to->nf_trace = from->nf_trace;
#endif
#if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE)
	to->ipvs_property = from->ipvs_property;
#endif
	skb_copy_secmark(to, from);
}
/*
 *	This IP datagram is too large to be sent in one piece.  Break it up into
 *	smaller pieces (each of size equal to IP header plus
 *	a block of the data of the original IP data part) that will yet fit in a
 *	single device frame, and queue such a frame for sending.
 */

int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
{
	struct iphdr *iph;
	int raw = 0;
	int ptr;
	struct net_device *dev;
	struct sk_buff *skb2;
	unsigned int mtu, hlen, left, len, ll_rs, pad;
	int offset;
	__be16 not_last_frag;
	struct rtable *rt = skb_rtable(skb);
	int err = 0;

	dev = rt->u.dst.dev;

	/*
	 *	Point into the IP datagram header.
	 */

	iph = ip_hdr(skb);

	if (unlikely((iph->frag_off & htons(IP_DF)) && !skb->local_df)) {
		IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGFAILS);
		icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
			  htonl(ip_skb_dst_mtu(skb)));
		kfree_skb(skb);
		return -EMSGSIZE;
	}

	/*
	 *	Setup starting values.
	 */

	hlen = iph->ihl * 4;
	mtu = dst_mtu(&rt->u.dst) - hlen;	/* Size of data space */
#ifdef CONFIG_BRIDGE_NETFILTER
	if (skb->nf_bridge)
		mtu -= nf_bridge_mtu_reduction(skb);
#endif
	IPCB(skb)->flags |= IPSKB_FRAG_COMPLETE;

	/* When frag_list is given, use it. First, check its validity:
	 * some transformers could create wrong frag_list or break existing
	 * one, it is not prohibited. In this case fall back to copying.
	 *
	 * LATER: this step can be merged to real generation of fragments,
	 * we can switch to copy when see the first bad fragment.
	 */
	if (skb_has_frags(skb)) {
		struct sk_buff *frag, *frag2;
		int first_len = skb_pagelen(skb);

		if (first_len - hlen > mtu ||
		    ((first_len - hlen) & 7) ||
		    (iph->frag_off & htons(IP_MF|IP_OFFSET)) ||
		    skb_cloned(skb))
			goto slow_path;

		skb_walk_frags(skb, frag) {
			/* Correct geometry. */
			if (frag->len > mtu ||
			    ((frag->len & 7) && frag->next) ||
			    skb_headroom(frag) < hlen)
				goto slow_path_clean;

			/* Partially cloned skb? */
			if (skb_shared(frag))
				goto slow_path_clean;

			BUG_ON(frag->sk);
			if (skb->sk) {
				frag->sk = skb->sk;
				frag->destructor = sock_wfree;
			}
			skb->truesize -= frag->truesize;
		}

		/* Everything is OK. Generate! */

		err = 0;
		offset = 0;
		frag = skb_shinfo(skb)->frag_list;
		skb_frag_list_init(skb);
		skb->data_len = first_len - skb_headlen(skb);
		skb->len = first_len;
		iph->tot_len = htons(first_len);
		iph->frag_off = htons(IP_MF);
		ip_send_check(iph);

		for (;;) {
			/* Prepare header of the next frame,
			 * before previous one went down. */
			if (frag) {
				frag->ip_summed = CHECKSUM_NONE;
				skb_reset_transport_header(frag);
				__skb_push(frag, hlen);
				skb_reset_network_header(frag);
				memcpy(skb_network_header(frag), iph, hlen);
				iph = ip_hdr(frag);
				iph->tot_len = htons(frag->len);
				ip_copy_metadata(frag, skb);
				if (offset == 0)
					ip_options_fragment(frag);
				offset += skb->len - hlen;
				iph->frag_off = htons(offset>>3);
				if (frag->next != NULL)
					iph->frag_off |= htons(IP_MF);
				/* Ready, complete checksum */
				ip_send_check(iph);
			}

			err = output(skb);

			if (!err)
				IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGCREATES);
			if (err || !frag)
				break;

			skb = frag;
			frag = skb->next;
			skb->next = NULL;
		}

		if (err == 0) {
			IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGOKS);
			return 0;
		}

		while (frag) {
			skb = frag->next;
			kfree_skb(frag);
			frag = skb;
		}
		IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGFAILS);
		return err;

slow_path_clean:
		skb_walk_frags(skb, frag2) {
			if (frag2 == frag)
				break;
			frag2->sk = NULL;
			frag2->destructor = NULL;
			skb->truesize += frag2->truesize;
		}
	}

slow_path:
	left = skb->len - hlen;		/* Space per frame */
	ptr = raw + hlen;		/* Where to start from */

	/* for bridged IP traffic encapsulated inside f.e. a vlan header,
	 * we need to make room for the encapsulating header
	 */
	pad = nf_bridge_pad(skb);
	ll_rs = LL_RESERVED_SPACE_EXTRA(rt->u.dst.dev, pad);
	mtu -= pad;

	/*
	 *	Fragment the datagram.
	 */

	offset = (ntohs(iph->frag_off) & IP_OFFSET) << 3;
	not_last_frag = iph->frag_off & htons(IP_MF);

	/*
	 *	Keep copying data until we run out.
	 */

	while (left > 0) {
		len = left;
		/* IF: it doesn't fit, use 'mtu' - the data space left */
		if (len > mtu)
			len = mtu;
		/* IF: we are not sending upto and including the packet end
		   then align the next start on an eight byte boundary */
		if (len < left)	{
			len &= ~7;
		}
		/*
		 *	Allocate buffer.
		 */

		if ((skb2 = alloc_skb(len+hlen+ll_rs, GFP_ATOMIC)) == NULL) {
			NETDEBUG(KERN_INFO "IP: frag: no memory for new fragment!\n");
			err = -ENOMEM;
			goto fail;
		}

		/*
		 *	Set up data on packet
		 */

		ip_copy_metadata(skb2, skb);
		skb_reserve(skb2, ll_rs);
		skb_put(skb2, len + hlen);
		skb_reset_network_header(skb2);
		skb2->transport_header = skb2->network_header + hlen;

		/*
		 *	Charge the memory for the fragment to any owner
		 *	it might possess
		 */

		if (skb->sk)
			skb_set_owner_w(skb2, skb->sk);

		/*
		 *	Copy the packet header into the new buffer.
		 */

		skb_copy_from_linear_data(skb, skb_network_header(skb2), hlen);

		/*
		 *	Copy a block of the IP datagram.
		 */
		if (skb_copy_bits(skb, ptr, skb_transport_header(skb2), len))
			BUG();
		left -= len;

		/*
		 *	Fill in the new header fields.
		 */
		iph = ip_hdr(skb2);
		iph->frag_off = htons((offset >> 3));

		/* ANK: dirty, but effective trick. Upgrade options only if
		 * the segment to be fragmented was THE FIRST (otherwise,
		 * options are already fixed) and make it ONCE
		 * on the initial skb, so that all the following fragments
		 * will inherit fixed options.
		 */
		if (offset == 0)
			ip_options_fragment(skb);

		/*
		 *	Added AC : If we are fragmenting a fragment that's not the
		 *		   last fragment then keep MF on each bit
		 */
		if (left > 0 || not_last_frag)
			iph->frag_off |= htons(IP_MF);
		ptr += len;
		offset += len;

		/*
		 *	Put this fragment into the sending queue.
		 */
		iph->tot_len = htons(len + hlen);

		ip_send_check(iph);

		err = output(skb2);
		if (err)
			goto fail;

		IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGCREATES);
	}
	kfree_skb(skb);
	IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGOKS);
	return err;

fail:
	kfree_skb(skb);
	IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGFAILS);
	return err;
}
EXPORT_SYMBOL(ip_fragment);
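
/*
 * Worked example (added): with a 1500-byte MTU and a 20-byte header,
 * the data space is 1480 bytes per fragment. Because the fragment
 * offset field counts 8-byte units, non-final fragments are trimmed
 * to a multiple of 8 (1480 already is), so a 4000-byte datagram
 * leaves the box as 1480 + 1480 + 1020 bytes of payload with offsets
 * 0, 185 and 370 stored in frag_off.
 */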
int
ip_generic_getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb)
{
	struct iovec *iov = from;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (memcpy_fromiovecend(to, iov, offset, len) < 0)
			return -EFAULT;
	} else {
		__wsum csum = 0;
		if (csum_partial_copy_fromiovecend(to, iov, offset, len, &csum) < 0)
			return -EFAULT;
		skb->csum = csum_block_add(skb->csum, csum, odd);
	}
	return 0;
}
static __wsum
csum_page(struct page *page, int offset, int copy)
{
	char *kaddr;
	__wsum csum;
	kaddr = kmap(page);
	csum = csum_partial(kaddr + offset, copy, 0);
	kunmap(page);
	return csum;
}
static inline int ip_ufo_append_data(struct sock *sk,
			int getfrag(void *from, char *to, int offset, int len,
			       int odd, struct sk_buff *skb),
			void *from, int length, int hh_len, int fragheaderlen,
			int transhdrlen, int mtu, unsigned int flags)
{
	struct sk_buff *skb;
	int err;

	/* There is support for UDP fragmentation offload by network
	 * device, so create one single skb packet containing complete
	 * udp datagram
	 */
	if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL) {
		skb = sock_alloc_send_skb(sk,
			hh_len + fragheaderlen + transhdrlen + 20,
			(flags & MSG_DONTWAIT), &err);

		if (skb == NULL)
			return err;

		/* reserve space for Hardware header */
		skb_reserve(skb, hh_len);

		/* create space for UDP/IP header */
		skb_put(skb, fragheaderlen + transhdrlen);

		/* initialize network header pointer */
		skb_reset_network_header(skb);

		/* initialize protocol header pointer */
		skb->transport_header = skb->network_header + fragheaderlen;

		skb->ip_summed = CHECKSUM_PARTIAL;
		skb->csum = 0;
		sk->sk_sndmsg_off = 0;

		/* specify the length of each IP datagram fragment */
		skb_shinfo(skb)->gso_size = mtu - fragheaderlen;
		skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
		__skb_queue_tail(&sk->sk_write_queue, skb);
	}

	return skb_append_datato_frags(sk, skb, getfrag, from,
				       (length - transhdrlen));
}
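
/*
 * Note (added): gso_size is set to the payload capacity of one IP
 * fragment (mtu - fragheaderlen), so the single oversized skb built
 * here is later cut, by the device or by software GSO, into fragments
 * that match exactly what ip_fragment() would have produced.
 */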
/*
 *	ip_append_data() and ip_append_page() can make one large IP datagram
 *	from many pieces of data. Each pieces will be holded on the socket
 *	until ip_push_pending_frames() is called. Each piece can be a page
 *	or non-page data.
 *
 *	Not only UDP, other transport protocols - e.g. raw sockets - can use
 *	this interface potentially.
 *
 *	LATER: length must be adjusted by pad at tail, when it is required.
 */
int ip_append_data(struct sock *sk,
		   int getfrag(void *from, char *to, int offset, int len,
			       int odd, struct sk_buff *skb),
		   void *from, int length, int transhdrlen,
		   struct ipcm_cookie *ipc, struct rtable **rtp,
		   unsigned int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	struct sk_buff *skb;

	struct ip_options *opt = NULL;
	int hh_len;
	int exthdrlen;
	int mtu;
	int copy;
	int err;
	int offset = 0;
	unsigned int maxfraglen, fragheaderlen;
	int csummode = CHECKSUM_NONE;
	struct rtable *rt;

	if (flags&MSG_PROBE)
		return 0;

	if (skb_queue_empty(&sk->sk_write_queue)) {
		/*
		 * setup for corking.
		 */
		opt = ipc->opt;
		if (opt) {
			if (inet->cork.opt == NULL) {
				inet->cork.opt = kmalloc(sizeof(struct ip_options) + 40, sk->sk_allocation);
				if (unlikely(inet->cork.opt == NULL))
					return -ENOBUFS;
			}
			memcpy(inet->cork.opt, opt, sizeof(struct ip_options)+opt->optlen);
			inet->cork.flags |= IPCORK_OPT;
			inet->cork.addr = ipc->addr;
		}
		rt = *rtp;
		if (unlikely(!rt))
			return -EFAULT;
		/*
		 * We steal reference to this route, caller should not release it
		 */
		*rtp = NULL;
		inet->cork.fragsize = mtu = inet->pmtudisc == IP_PMTUDISC_PROBE ?
					    rt->u.dst.dev->mtu :
					    dst_mtu(rt->u.dst.path);
		inet->cork.dst = &rt->u.dst;
		inet->cork.length = 0;
		sk->sk_sndmsg_page = NULL;
		sk->sk_sndmsg_off = 0;
		if ((exthdrlen = rt->u.dst.header_len) != 0) {
			length += exthdrlen;
			transhdrlen += exthdrlen;
		}
	} else {
		rt = (struct rtable *)inet->cork.dst;
		if (inet->cork.flags & IPCORK_OPT)
			opt = inet->cork.opt;

		transhdrlen = 0;
		exthdrlen = 0;
		mtu = inet->cork.fragsize;
	}
	hh_len = LL_RESERVED_SPACE(rt->u.dst.dev);

	fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;

	if (inet->cork.length + length > 0xFFFF - fragheaderlen) {
		ip_local_error(sk, EMSGSIZE, rt->rt_dst, inet->inet_dport,
			       mtu-exthdrlen);
		return -EMSGSIZE;
	}

	/*
	 * transhdrlen > 0 means that this is the first fragment and we wish
	 * it won't be fragmented in the future.
	 */
	if (transhdrlen &&
	    length + fragheaderlen <= mtu &&
	    rt->u.dst.dev->features & NETIF_F_V4_CSUM &&
	    !exthdrlen)
		csummode = CHECKSUM_PARTIAL;

	skb = skb_peek_tail(&sk->sk_write_queue);

	inet->cork.length += length;
	if (((length > mtu) || (skb && skb_is_gso(skb))) &&
	    (sk->sk_protocol == IPPROTO_UDP) &&
	    (rt->u.dst.dev->features & NETIF_F_UFO)) {
		err = ip_ufo_append_data(sk, getfrag, from, length, hh_len,
					 fragheaderlen, transhdrlen, mtu,
					 flags);
		if (err)
			goto error;
		return 0;
	}

	/* So, what's going on in the loop below?
	 *
	 * We use calculated fragment length to generate chained skb,
	 * each of segments is IP fragment ready for sending to network after
	 * adding appropriate IP header.
	 */

	if (!skb)
		goto alloc_new_skb;

	while (length > 0) {
		/* Check if the remaining data fits into current packet. */
		copy = mtu - skb->len;
		if (copy < length)
			copy = maxfraglen - skb->len;
		if (copy <= 0) {
			char *data;
			unsigned int datalen;
			unsigned int fraglen;
			unsigned int fraggap;
			unsigned int alloclen;
			struct sk_buff *skb_prev;
alloc_new_skb:
			skb_prev = skb;
			if (skb_prev)
				fraggap = skb_prev->len - maxfraglen;
			else
				fraggap = 0;

			/*
			 * If remaining data exceeds the mtu,
			 * we know we need more fragment(s).
			 */
			datalen = length + fraggap;
			if (datalen > mtu - fragheaderlen)
				datalen = maxfraglen - fragheaderlen;
			fraglen = datalen + fragheaderlen;

			if ((flags & MSG_MORE) &&
			    !(rt->u.dst.dev->features&NETIF_F_SG))
				alloclen = mtu;
			else
				alloclen = datalen + fragheaderlen;

			/* The last fragment gets additional space at tail.
			 * Note, with MSG_MORE we overallocate on fragments,
			 * because we have no idea what fragment will be
			 * the last.
			 */
			if (datalen == length + fraggap)
				alloclen += rt->u.dst.trailer_len;

			if (transhdrlen) {
				skb = sock_alloc_send_skb(sk,
						alloclen + hh_len + 15,
						(flags & MSG_DONTWAIT), &err);
			} else {
				skb = NULL;
				if (atomic_read(&sk->sk_wmem_alloc) <=
				    2 * sk->sk_sndbuf)
					skb = sock_wmalloc(sk,
							   alloclen + hh_len + 15, 1,
							   sk->sk_allocation);
				if (unlikely(skb == NULL))
					err = -ENOBUFS;
				else
					/* only the initial fragment is
					   time stamped */
					ipc->shtx.flags = 0;
			}
			if (skb == NULL)
				goto error;

			/*
			 *	Fill in the control structures
			 */
			skb->ip_summed = csummode;
			skb->csum = 0;
			skb_reserve(skb, hh_len);
			*skb_tx(skb) = ipc->shtx;

			/*
			 *	Find where to start putting bytes.
			 */
			data = skb_put(skb, fraglen);
			skb_set_network_header(skb, exthdrlen);
			skb->transport_header = (skb->network_header +
						 fragheaderlen);
			data += fragheaderlen;

			if (fraggap) {
				skb->csum = skb_copy_and_csum_bits(
					skb_prev, maxfraglen,
					data + transhdrlen, fraggap, 0);
				skb_prev->csum = csum_sub(skb_prev->csum,
							  skb->csum);
				data += fraggap;
				pskb_trim_unique(skb_prev, maxfraglen);
			}

			copy = datalen - transhdrlen - fraggap;
			if (copy > 0 && getfrag(from, data + transhdrlen, offset, copy, fraggap, skb) < 0) {
				err = -EFAULT;
				kfree_skb(skb);
				goto error;
			}

			offset += copy;
			length -= datalen - fraggap;
			transhdrlen = 0;
			exthdrlen = 0;
			csummode = CHECKSUM_NONE;

			/*
			 * Put the packet on the pending queue.
			 */
			__skb_queue_tail(&sk->sk_write_queue, skb);
			continue;
		}

		if (copy > length)
			copy = length;

		if (!(rt->u.dst.dev->features&NETIF_F_SG)) {
			unsigned int off;

			off = skb->len;
			if (getfrag(from, skb_put(skb, copy),
					offset, copy, off, skb) < 0) {
				__skb_trim(skb, off);
				err = -EFAULT;
				goto error;
			}
		} else {
			int i = skb_shinfo(skb)->nr_frags;
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i-1];
			struct page *page = sk->sk_sndmsg_page;
			int off = sk->sk_sndmsg_off;
			unsigned int left;

			if (page && (left = PAGE_SIZE - off) > 0) {
				if (copy >= left)
					copy = left;
				if (page != frag->page) {
					if (i == MAX_SKB_FRAGS) {
						err = -EMSGSIZE;
						goto error;
					}
					get_page(page);
					skb_fill_page_desc(skb, i, page, sk->sk_sndmsg_off, 0);
					frag = &skb_shinfo(skb)->frags[i];
				}
			} else if (i < MAX_SKB_FRAGS) {
				if (copy > PAGE_SIZE)
					copy = PAGE_SIZE;
				page = alloc_pages(sk->sk_allocation, 0);
				if (page == NULL) {
					err = -ENOMEM;
					goto error;
				}
				sk->sk_sndmsg_page = page;
				sk->sk_sndmsg_off = 0;

				skb_fill_page_desc(skb, i, page, 0, 0);
				frag = &skb_shinfo(skb)->frags[i];
			} else {
				err = -EMSGSIZE;
				goto error;
			}
			if (getfrag(from, page_address(frag->page)+frag->page_offset+frag->size, offset, copy, skb->len, skb) < 0) {
				err = -EFAULT;
				goto error;
			}
			sk->sk_sndmsg_off += copy;
			frag->size += copy;
			skb->len += copy;
			skb->data_len += copy;
			skb->truesize += copy;
			atomic_add(copy, &sk->sk_wmem_alloc);
		}
		offset += copy;
		length -= copy;
	}

	return 0;

error:
	inet->cork.length -= length;
	IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTDISCARDS);
	return err;
}
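
/*
 * Illustrative sketch (added; assumption, not in the original file):
 * the typical corking sequence a datagram transport runs per message.
 * Error handling is abbreviated, and the route and cookie are assumed
 * to have been prepared by ip_route_output_flow() and an ipcm_cookie
 * setup, as a real sendmsg() path would do.
 */
static int example_send_one_datagram(struct sock *sk, struct ipcm_cookie *ipc,
				     struct rtable *rt, struct iovec *iov,
				     int len)
{
	int err;

	lock_sock(sk);
	/* queue the data; note that ip_append_data() steals the rt ref */
	err = ip_append_data(sk, ip_generic_getfrag, iov, len, 0,
			     ipc, &rt, MSG_DONTWAIT);
	if (err)
		ip_flush_pending_frames(sk);	  /* drop the partial datagram */
	else
		err = ip_push_pending_frames(sk); /* add IP header and send */
	release_sock(sk);
	return err;
}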
ssize_t	ip_append_page(struct sock *sk, struct page *page,
		       int offset, size_t size, int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	struct sk_buff *skb;
	struct rtable *rt;
	struct ip_options *opt = NULL;
	int hh_len;
	int mtu;
	int len;
	int err;
	unsigned int maxfraglen, fragheaderlen, fraggap;

	if (inet->hdrincl)
		return -EPERM;

	if (flags&MSG_PROBE)
		return 0;

	if (skb_queue_empty(&sk->sk_write_queue))
		return -EINVAL;

	rt = (struct rtable *)inet->cork.dst;
	if (inet->cork.flags & IPCORK_OPT)
		opt = inet->cork.opt;

	if (!(rt->u.dst.dev->features&NETIF_F_SG))
		return -EOPNOTSUPP;

	hh_len = LL_RESERVED_SPACE(rt->u.dst.dev);
	mtu = inet->cork.fragsize;

	fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;

	if (inet->cork.length + size > 0xFFFF - fragheaderlen) {
		ip_local_error(sk, EMSGSIZE, rt->rt_dst, inet->inet_dport, mtu);
		return -EMSGSIZE;
	}

	if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL)
		return -EINVAL;

	inet->cork.length += size;
	if ((size + skb->len > mtu) &&
	    (sk->sk_protocol == IPPROTO_UDP) &&
	    (rt->u.dst.dev->features & NETIF_F_UFO)) {
		skb_shinfo(skb)->gso_size = mtu - fragheaderlen;
		skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
	}

	while (size > 0) {
		int i;

		if (skb_is_gso(skb))
			len = size;
		else {

			/* Check if the remaining data fits into current packet. */
			len = mtu - skb->len;
			if (len < size)
				len = maxfraglen - skb->len;
		}
		if (len <= 0) {
			struct sk_buff *skb_prev;
			int alloclen;

			skb_prev = skb;
			fraggap = skb_prev->len - maxfraglen;

			alloclen = fragheaderlen + hh_len + fraggap + 15;
			skb = sock_wmalloc(sk, alloclen, 1, sk->sk_allocation);
			if (unlikely(!skb)) {
				err = -ENOBUFS;
				goto error;
			}

			/*
			 *	Fill in the control structures
			 */
			skb->ip_summed = CHECKSUM_NONE;
			skb->csum = 0;
			skb_reserve(skb, hh_len);

			/*
			 *	Find where to start putting bytes.
			 */
			skb_put(skb, fragheaderlen + fraggap);
			skb_reset_network_header(skb);
			skb->transport_header = (skb->network_header +
						 fragheaderlen);
			if (fraggap) {
				skb->csum = skb_copy_and_csum_bits(skb_prev,
								   maxfraglen,
						    skb_transport_header(skb),
								   fraggap, 0);
				skb_prev->csum = csum_sub(skb_prev->csum,
							  skb->csum);
				pskb_trim_unique(skb_prev, maxfraglen);
			}

			/*
			 * Put the packet on the pending queue.
			 */
			__skb_queue_tail(&sk->sk_write_queue, skb);
			continue;
		}

		i = skb_shinfo(skb)->nr_frags;
		if (len > size)
			len = size;
		if (skb_can_coalesce(skb, i, page, offset)) {
			skb_shinfo(skb)->frags[i-1].size += len;
		} else if (i < MAX_SKB_FRAGS) {
			get_page(page);
			skb_fill_page_desc(skb, i, page, offset, len);
		} else {
			err = -EMSGSIZE;
			goto error;
		}

		if (skb->ip_summed == CHECKSUM_NONE) {
			__wsum csum;
			csum = csum_page(page, offset, len);
			skb->csum = csum_block_add(skb->csum, csum, skb->len);
		}

		skb->len += len;
		skb->data_len += len;
		skb->truesize += len;
		atomic_add(len, &sk->sk_wmem_alloc);
		offset += len;
		size -= len;
	}
	return 0;

error:
	inet->cork.length -= size;
	IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTDISCARDS);
	return err;
}
static void ip_cork_release(struct inet_sock *inet)
{
	inet->cork.flags &= ~IPCORK_OPT;
	kfree(inet->cork.opt);
	inet->cork.opt = NULL;
	dst_release(inet->cork.dst);
	inet->cork.dst = NULL;
}
/*
 *	Combined all pending IP fragments on the socket as one IP datagram
 *	and push them out.
 */
int ip_push_pending_frames(struct sock *sk)
{
	struct sk_buff *skb, *tmp_skb;
	struct sk_buff **tail_skb;
	struct inet_sock *inet = inet_sk(sk);
	struct net *net = sock_net(sk);
	struct ip_options *opt = NULL;
	struct rtable *rt = (struct rtable *)inet->cork.dst;
	struct iphdr *iph;
	__be16 df = 0;
	__u8 ttl;
	int err = 0;

	if ((skb = __skb_dequeue(&sk->sk_write_queue)) == NULL)
		goto out;
	tail_skb = &(skb_shinfo(skb)->frag_list);

	/* move skb->data to ip header from ext header */
	if (skb->data < skb_network_header(skb))
		__skb_pull(skb, skb_network_offset(skb));
	while ((tmp_skb = __skb_dequeue(&sk->sk_write_queue)) != NULL) {
		__skb_pull(tmp_skb, skb_network_header_len(skb));
		*tail_skb = tmp_skb;
		tail_skb = &(tmp_skb->next);
		skb->len += tmp_skb->len;
		skb->data_len += tmp_skb->len;
		skb->truesize += tmp_skb->truesize;
		tmp_skb->destructor = NULL;
		tmp_skb->sk = NULL;
	}

	/* Unless user demanded real pmtu discovery (IP_PMTUDISC_DO), we allow
	 * to fragment the frame generated here. No matter, what transforms
	 * how transforms change size of the packet, it will come out.
	 */
	if (inet->pmtudisc < IP_PMTUDISC_DO)
		skb->local_df = 1;

	/* DF bit is set when we want to see DF on outgoing frames.
	 * If local_df is set too, we still allow to fragment this frame
	 * locally. */
	if (inet->pmtudisc >= IP_PMTUDISC_DO ||
	    (skb->len <= dst_mtu(&rt->u.dst) &&
	     ip_dont_fragment(sk, &rt->u.dst)))
		df = htons(IP_DF);

	if (inet->cork.flags & IPCORK_OPT)
		opt = inet->cork.opt;

	if (rt->rt_type == RTN_MULTICAST)
		ttl = inet->mc_ttl;
	else
		ttl = ip_select_ttl(inet, &rt->u.dst);

	iph = (struct iphdr *)skb->data;
	iph->version = 4;
	iph->ihl = 5;
	if (opt) {
		iph->ihl += opt->optlen >> 2;
		ip_options_build(skb, opt, inet->cork.addr, rt, 0);
	}
	iph->tos = inet->tos;
	iph->frag_off = df;
	ip_select_ident(iph, &rt->u.dst, sk);
	iph->ttl = ttl;
	iph->protocol = sk->sk_protocol;
	iph->saddr = rt->rt_src;
	iph->daddr = rt->rt_dst;

	skb->priority = sk->sk_priority;
	skb->mark = sk->sk_mark;
	/*
	 * Steal rt from cork.dst to avoid a pair of atomic_inc/atomic_dec
	 * on dst refcount
	 */
	inet->cork.dst = NULL;
	skb_dst_set(skb, &rt->u.dst);

	if (iph->protocol == IPPROTO_ICMP)
		icmp_out_count(net, ((struct icmphdr *)
			skb_transport_header(skb))->type);

	/* Netfilter gets whole the not fragmented skb. */
	err = ip_local_out(skb);
	if (err) {
		if (err > 0)
			err = net_xmit_errno(err);
		if (err)
			goto error;
	}

out:
	ip_cork_release(inet);
	return err;

error:
	IP_INC_STATS(net, IPSTATS_MIB_OUTDISCARDS);
	goto out;
}
/*
 *	Throw away all pending data on the socket.
 */
void ip_flush_pending_frames(struct sock *sk)
{
	struct sk_buff *skb;

	while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL)
		kfree_skb(skb);

	ip_cork_release(inet_sk(sk));
}
/*
 *	Fetch data from kernel space and fill in checksum if needed.
 */
static int ip_reply_glue_bits(void *dptr, char *to, int offset,
			      int len, int odd, struct sk_buff *skb)
{
	__wsum csum;

	csum = csum_partial_copy_nocheck(dptr+offset, to, len, 0);
	skb->csum = csum_block_add(skb->csum, csum, odd);
	return 0;
}
/*
 *	Generic function to send a packet as reply to another packet.
 *	Used to send TCP resets so far. ICMP should use this function too.
 *
 *	Should run single threaded per socket because it uses the sock
 *     	structure to pass arguments.
 */
void ip_send_reply(struct sock *sk, struct sk_buff *skb, struct ip_reply_arg *arg,
		   unsigned int len)
{
	struct inet_sock *inet = inet_sk(sk);
	struct {
		struct ip_options	opt;
		char			data[40];
	} replyopts;
	struct ipcm_cookie ipc;
	__be32 daddr;
	struct rtable *rt = skb_rtable(skb);

	if (ip_options_echo(&replyopts.opt, skb))
		return;

	daddr = ipc.addr = rt->rt_src;
	ipc.opt = NULL;
	ipc.shtx.flags = 0;

	if (replyopts.opt.optlen) {
		ipc.opt = &replyopts.opt;

		if (ipc.opt->srr)
			daddr = replyopts.opt.faddr;
	}

	{
		struct flowi fl = { .oif = arg->bound_dev_if,
				    .nl_u = { .ip4_u =
					      { .daddr = daddr,
						.saddr = rt->rt_spec_dst,
						.tos = RT_TOS(ip_hdr(skb)->tos) } },
				    /* Not quite clean, but right. */
				    .uli_u = { .ports =
					       { .sport = tcp_hdr(skb)->dest,
						 .dport = tcp_hdr(skb)->source } },
				    .proto = sk->sk_protocol,
				    .flags = ip_reply_arg_flowi_flags(arg) };
		security_skb_classify_flow(skb, &fl);
		if (ip_route_output_key(sock_net(sk), &rt, &fl))
			return;
	}

	/* And let IP do all the hard work.

	   This chunk is not reenterable, hence spinlock.
	   Note that it uses the fact, that this function is called
	   with locally disabled BH and that sk cannot be already spinlocked.
	 */
	bh_lock_sock(sk);
	inet->tos = ip_hdr(skb)->tos;
	sk->sk_priority = skb->priority;
	sk->sk_protocol = ip_hdr(skb)->protocol;
	sk->sk_bound_dev_if = arg->bound_dev_if;
	ip_append_data(sk, ip_reply_glue_bits, arg->iov->iov_base, len, 0,
		       &ipc, &rt, MSG_DONTWAIT);
	if ((skb = skb_peek(&sk->sk_write_queue)) != NULL) {
		if (arg->csumoffset >= 0)
			*((__sum16 *)skb_transport_header(skb) +
			  arg->csumoffset) = csum_fold(csum_add(skb->csum,
								arg->csum));
		skb->ip_summed = CHECKSUM_NONE;
		ip_push_pending_frames(sk);
	}

	bh_unlock_sock(sk);

	ip_rt_put(rt);
}
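
/*
 * Illustrative sketch (added; assumption, not in the original file):
 * roughly how a TCP reset path would drive ip_send_reply(). The
 * prebuilt transport header goes in as a single kvec, arg->csum seeds
 * the pseudo-header sum, and the final checksum is patched in at
 * arg->csumoffset (counted in 16-bit words). Names and values here
 * are only indicative.
 */
static void example_send_reply(struct sock *ctl_sk, struct sk_buff *rcv_skb,
			       struct tcphdr *rep, unsigned int len)
{
	struct iphdr *iph = ip_hdr(rcv_skb);
	struct ip_reply_arg arg;

	memset(&arg, 0, sizeof(arg));
	arg.iov[0].iov_base = rep;		/* prebuilt TCP header	*/
	arg.iov[0].iov_len  = len;
	/* pseudo-header sum; the payload sum is accumulated by
	 * ip_reply_glue_bits() and folded in at csumoffset */
	arg.csum = csum_tcpudp_nofold(iph->daddr, iph->saddr, len,
				      IPPROTO_TCP, 0);
	arg.csumoffset = offsetof(struct tcphdr, check) / 2;
	ip_send_reply(ctl_sk, rcv_skb, &arg, len);
}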
void __init ip_init(void)
{
	ip_rt_init();
	inet_initpeers();

#if defined(CONFIG_IP_MULTICAST) && defined(CONFIG_PROC_FS)
	igmp_mc_proc_init();
#endif
}
EXPORT_SYMBOL(ip_generic_getfrag);
EXPORT_SYMBOL(ip_queue_xmit);
EXPORT_SYMBOL(ip_send_check);