net/ipv4/ip_output.c
1 /*
2 * INET An implementation of the TCP/IP protocol suite for the LINUX
3 * operating system. INET is implemented using the BSD Socket
4 * interface as the means of communication with the user level.
6 * The Internet Protocol (IP) output module.
8 * Authors: Ross Biro
9 * Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
10 * Donald Becker, <becker@super.org>
11 * Alan Cox, <Alan.Cox@linux.org>
12 * Richard Underwood
13 * Stefan Becker, <stefanb@yello.ping.de>
14 * Jorge Cwik, <jorge@laser.satlink.net>
15 * Arnt Gulbrandsen, <agulbra@nvg.unit.no>
16 * Hirokazu Takahashi, <taka@valinux.co.jp>
18 * See ip_input.c for original log
20 * Fixes:
21 * Alan Cox : Missing nonblock feature in ip_build_xmit.
22 * Mike Kilburn : htons() missing in ip_build_xmit.
23 * Bradford Johnson: Fix faulty handling of some frames when
24 * no route is found.
25 * Alexander Demenshin: Missing sk/skb free in ip_queue_xmit
26 * (in case the packet is not accepted
27 * by the output firewall rules)
28 * Mike McLagan : Routing by source
29 * Alexey Kuznetsov: use new route cache
30 * Andi Kleen: Fix broken PMTU recovery and remove
31 * some redundant tests.
32 * Vitaly E. Lavrov : Transparent proxy revived after year coma.
33 * Andi Kleen : Replace ip_reply with ip_send_reply.
34 * Andi Kleen : Split fast and slow ip_build_xmit path
35 * for decreased register pressure on x86
36 * and more readability.
37 * Marc Boucher : When call_out_firewall returns FW_QUEUE,
38 * silently drop skb instead of failing with -EPERM.
39 * Detlev Wengorz : Copy protocol for fragments.
40 * Hirokazu Takahashi: HW checksumming for outgoing UDP
41 * datagrams.
42 * Hirokazu Takahashi: sendfile() on UDP works now.
45 #include <asm/uaccess.h>
46 #include <asm/system.h>
47 #include <linux/module.h>
48 #include <linux/types.h>
49 #include <linux/kernel.h>
50 #include <linux/mm.h>
51 #include <linux/string.h>
52 #include <linux/errno.h>
53 #include <linux/highmem.h>
55 #include <linux/socket.h>
56 #include <linux/sockios.h>
57 #include <linux/in.h>
58 #include <linux/inet.h>
59 #include <linux/netdevice.h>
60 #include <linux/etherdevice.h>
61 #include <linux/proc_fs.h>
62 #include <linux/stat.h>
63 #include <linux/init.h>
65 #include <net/snmp.h>
66 #include <net/ip.h>
67 #include <net/protocol.h>
68 #include <net/route.h>
69 #include <net/xfrm.h>
70 #include <linux/skbuff.h>
71 #include <net/sock.h>
72 #include <net/arp.h>
73 #include <net/icmp.h>
74 #include <net/checksum.h>
75 #include <net/inetpeer.h>
76 #include <linux/igmp.h>
77 #include <linux/netfilter_ipv4.h>
78 #include <linux/netfilter_bridge.h>
79 #include <linux/mroute.h>
80 #include <linux/netlink.h>
81 #include <linux/tcp.h>
83 int sysctl_ip_default_ttl __read_mostly = IPDEFTTL;
85 /* Generate a checksum for an outgoing IP datagram. */
86 __inline__ void ip_send_check(struct iphdr *iph)
88 iph->check = 0;
89 iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
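
Editor's note: ip_send_check() zeroes iph->check and lets ip_fast_csum() recompute the RFC 791 one's-complement checksum over the header words. A minimal, hypothetical userspace sketch of that arithmetic follows (it is not the kernel's optimized implementation, and the sample header bytes are invented for illustration):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* One's-complement checksum over the IPv4 header, as ip_fast_csum() computes
 * it; 'ihl' is the header length in 32-bit words and the check field must be
 * zero before calling. */
static uint16_t ip_header_checksum(const void *iph, unsigned int ihl)
{
	const uint16_t *word = iph;
	uint32_t sum = 0;
	unsigned int i;

	for (i = 0; i < ihl * 2; i++)	/* sum the header as 16-bit words */
		sum += word[i];
	while (sum >> 16)		/* fold the carries back in */
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;		/* one's complement */
}

int main(void)
{
	/* Illustrative 20-byte header: v4/ihl 5, tot_len 40, DF, ttl 64,
	 * protocol UDP, 10.0.0.1 -> 10.0.0.2, check field zeroed. */
	uint8_t hdr[20] = {
		0x45, 0x00, 0x00, 0x28, 0x00, 0x00, 0x40, 0x00,
		0x40, 0x11, 0x00, 0x00, 0x0a, 0x00, 0x00, 0x01,
		0x0a, 0x00, 0x00, 0x02
	};
	uint16_t check = ip_header_checksum(hdr, 5);

	memcpy(&hdr[10], &check, sizeof(check));
	printf("checksum bytes: %02x %02x\n", hdr[10], hdr[11]);
	return 0;
}
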
92 int __ip_local_out(struct sk_buff *skb)
94 struct iphdr *iph = ip_hdr(skb);
96 iph->tot_len = htons(skb->len);
97 ip_send_check(iph);
98 return nf_hook(PF_INET, NF_INET_LOCAL_OUT, skb, NULL, skb_dst(skb)->dev,
99 dst_output);
102 int ip_local_out(struct sk_buff *skb)
104 int err;
106 err = __ip_local_out(skb);
107 if (likely(err == 1))
108 err = dst_output(skb);
110 return err;
112 EXPORT_SYMBOL_GPL(ip_local_out);
114 /* dev_loopback_xmit for use with netfilter. */
115 static int ip_dev_loopback_xmit(struct sk_buff *newskb)
117 skb_reset_mac_header(newskb);
118 __skb_pull(newskb, skb_network_offset(newskb));
119 newskb->pkt_type = PACKET_LOOPBACK;
120 newskb->ip_summed = CHECKSUM_UNNECESSARY;
121 WARN_ON(!skb_dst(newskb));
122 netif_rx(newskb);
123 return 0;
126 static inline int ip_select_ttl(struct inet_sock *inet, struct dst_entry *dst)
128 int ttl = inet->uc_ttl;
130 if (ttl < 0)
131 ttl = dst_metric(dst, RTAX_HOPLIMIT);
132 return ttl;
136 * Add an ip header to a skbuff and send it out.
139 int ip_build_and_send_pkt(struct sk_buff *skb, struct sock *sk,
140 __be32 saddr, __be32 daddr, struct ip_options *opt)
142 struct inet_sock *inet = inet_sk(sk);
143 struct rtable *rt = skb_rtable(skb);
144 struct iphdr *iph;
146 /* Build the IP header. */
147 skb_push(skb, sizeof(struct iphdr) + (opt ? opt->optlen : 0));
148 skb_reset_network_header(skb);
149 iph = ip_hdr(skb);
150 iph->version = 4;
151 iph->ihl = 5;
152 iph->tos = inet->tos;
153 if (ip_dont_fragment(sk, &rt->u.dst))
154 iph->frag_off = htons(IP_DF);
155 else
156 iph->frag_off = 0;
157 iph->ttl = ip_select_ttl(inet, &rt->u.dst);
158 iph->daddr = rt->rt_dst;
159 iph->saddr = rt->rt_src;
160 iph->protocol = sk->sk_protocol;
161 ip_select_ident(iph, &rt->u.dst, sk);
163 if (opt && opt->optlen) {
164 iph->ihl += opt->optlen>>2;
165 ip_options_build(skb, opt, daddr, rt, 0);
168 skb->priority = sk->sk_priority;
169 skb->mark = sk->sk_mark;
171 /* Send it out. */
172 return ip_local_out(skb);
175 EXPORT_SYMBOL_GPL(ip_build_and_send_pkt);
177 static inline int ip_finish_output2(struct sk_buff *skb)
179 struct dst_entry *dst = skb_dst(skb);
180 struct rtable *rt = (struct rtable *)dst;
181 struct net_device *dev = dst->dev;
182 unsigned int hh_len = LL_RESERVED_SPACE(dev);
184 if (rt->rt_type == RTN_MULTICAST) {
185 IP_UPD_PO_STATS(dev_net(dev), IPSTATS_MIB_OUTMCAST, skb->len);
186 } else if (rt->rt_type == RTN_BROADCAST)
187 IP_UPD_PO_STATS(dev_net(dev), IPSTATS_MIB_OUTBCAST, skb->len);
189 /* Be paranoid, rather than too clever. */
190 if (unlikely(skb_headroom(skb) < hh_len && dev->header_ops)) {
191 struct sk_buff *skb2;
193 skb2 = skb_realloc_headroom(skb, LL_RESERVED_SPACE(dev));
194 if (skb2 == NULL) {
195 kfree_skb(skb);
196 return -ENOMEM;
198 if (skb->sk)
199 skb_set_owner_w(skb2, skb->sk);
200 kfree_skb(skb);
201 skb = skb2;
204 if (dst->hh)
205 return neigh_hh_output(dst->hh, skb);
206 else if (dst->neighbour)
207 return dst->neighbour->output(skb);
209 if (net_ratelimit())
210 printk(KERN_DEBUG "ip_finish_output2: No header cache and no neighbour!\n");
211 kfree_skb(skb);
212 return -EINVAL;
215 static inline int ip_skb_dst_mtu(struct sk_buff *skb)
217 struct inet_sock *inet = skb->sk ? inet_sk(skb->sk) : NULL;
219 return (inet && inet->pmtudisc == IP_PMTUDISC_PROBE) ?
220 skb_dst(skb)->dev->mtu : dst_mtu(skb_dst(skb));
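
Editor's note: ip_skb_dst_mtu() implements IP_PMTUDISC_PROBE by sizing against the raw device MTU instead of the cached path MTU; together with the ip_dont_fragment()/local_df handling later in this file it decides whether DF is set and whether local fragmentation is allowed. A small userspace sketch, assuming a reasonably recent glibc (the IP_PMTUDISC_PROBE fallback value is taken from linux/in.h), shows how the per-socket mode is selected:

#include <netinet/in.h>
#include <sys/socket.h>
#include <stdio.h>

#ifndef IP_PMTUDISC_PROBE
#define IP_PMTUDISC_PROBE 3	/* assumed value from linux/in.h on older headers */
#endif

int main(void)
{
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	int mode = IP_PMTUDISC_PROBE;	/* or IP_PMTUDISC_DO / IP_PMTUDISC_DONT */

	if (fd < 0)
		return 1;
	/* DO: set DF, never fragment locally.  DONT: allow local
	 * fragmentation (local_df).  PROBE: set DF but size against the
	 * device MTU rather than the cached path MTU, as above. */
	if (setsockopt(fd, IPPROTO_IP, IP_MTU_DISCOVER, &mode, sizeof(mode)) < 0)
		perror("setsockopt(IP_MTU_DISCOVER)");
	return 0;
}
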
223 static int ip_finish_output(struct sk_buff *skb)
225 #if defined(CONFIG_NETFILTER) && defined(CONFIG_XFRM)
226 /* Policy lookup after SNAT yielded a new policy */
227 if (skb_dst(skb)->xfrm != NULL) {
228 IPCB(skb)->flags |= IPSKB_REROUTED;
229 return dst_output(skb);
231 #endif
232 if (skb->len > ip_skb_dst_mtu(skb) && !skb_is_gso(skb))
233 return ip_fragment(skb, ip_finish_output2);
234 else
235 return ip_finish_output2(skb);
238 int ip_mc_output(struct sk_buff *skb)
240 struct sock *sk = skb->sk;
241 struct rtable *rt = skb_rtable(skb);
242 struct net_device *dev = rt->u.dst.dev;
245 * If the indicated interface is up and running, send the packet.
247 IP_UPD_PO_STATS(dev_net(dev), IPSTATS_MIB_OUT, skb->len);
249 skb->dev = dev;
250 skb->protocol = htons(ETH_P_IP);
253 * Multicasts are looped back for other local users
256 if (rt->rt_flags&RTCF_MULTICAST) {
257 if (sk_mc_loop(sk)
258 #ifdef CONFIG_IP_MROUTE
259 /* Small optimization: do not loop back non-local frames
260 that have come back after forwarding; they will be dropped
261 by ip_mr_input in any case.
262 Note that local frames are looped back to be delivered
263 to local recipients.
265 This check is duplicated in ip_mr_input at the moment.
268 ((rt->rt_flags & RTCF_LOCAL) ||
269 !(IPCB(skb)->flags & IPSKB_FORWARDED))
270 #endif
272 struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
273 if (newskb)
274 NF_HOOK(PF_INET, NF_INET_POST_ROUTING, newskb,
275 NULL, newskb->dev,
276 ip_dev_loopback_xmit);
279 /* Multicasts with ttl 0 must not go beyond the host */
281 if (ip_hdr(skb)->ttl == 0) {
282 kfree_skb(skb);
283 return 0;
287 if (rt->rt_flags&RTCF_BROADCAST) {
288 struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
289 if (newskb)
290 NF_HOOK(PF_INET, NF_INET_POST_ROUTING, newskb, NULL,
291 newskb->dev, ip_dev_loopback_xmit);
294 return NF_HOOK_COND(PF_INET, NF_INET_POST_ROUTING, skb, NULL, skb->dev,
295 ip_finish_output,
296 !(IPCB(skb)->flags & IPSKB_REROUTED));
299 int ip_output(struct sk_buff *skb)
301 struct net_device *dev = skb_dst(skb)->dev;
303 IP_UPD_PO_STATS(dev_net(dev), IPSTATS_MIB_OUT, skb->len);
305 skb->dev = dev;
306 skb->protocol = htons(ETH_P_IP);
308 return NF_HOOK_COND(PF_INET, NF_INET_POST_ROUTING, skb, NULL, dev,
309 ip_finish_output,
310 !(IPCB(skb)->flags & IPSKB_REROUTED));
313 int ip_queue_xmit(struct sk_buff *skb, int ipfragok)
315 struct sock *sk = skb->sk;
316 struct inet_sock *inet = inet_sk(sk);
317 struct ip_options *opt = inet->opt;
318 struct rtable *rt;
319 struct iphdr *iph;
321 /* Skip all of this if the packet is already routed,
322 * e.g. by something like SCTP.
324 rt = skb_rtable(skb);
325 if (rt != NULL)
326 goto packet_routed;
328 /* Make sure we can route this packet. */
329 rt = (struct rtable *)__sk_dst_check(sk, 0);
330 if (rt == NULL) {
331 __be32 daddr;
333 /* Use correct destination address if we have options. */
334 daddr = inet->inet_daddr;
335 if(opt && opt->srr)
336 daddr = opt->faddr;
339 struct flowi fl = { .oif = sk->sk_bound_dev_if,
340 .mark = sk->sk_mark,
341 .nl_u = { .ip4_u =
342 { .daddr = daddr,
343 .saddr = inet->inet_saddr,
344 .tos = RT_CONN_FLAGS(sk) } },
345 .proto = sk->sk_protocol,
346 .flags = inet_sk_flowi_flags(sk),
347 .uli_u = { .ports =
348 { .sport = inet->inet_sport,
349 .dport = inet->inet_dport } } };
351 /* If this fails, the retransmit mechanism of the transport layer
352 * will keep trying until the route appears or the connection
353 * times out.
355 security_sk_classify_flow(sk, &fl);
356 if (ip_route_output_flow(sock_net(sk), &rt, &fl, sk, 0))
357 goto no_route;
359 sk_setup_caps(sk, &rt->u.dst);
361 skb_dst_set(skb, dst_clone(&rt->u.dst));
363 packet_routed:
364 if (opt && opt->is_strictroute && rt->rt_dst != rt->rt_gateway)
365 goto no_route;
367 /* OK, we know where to send it, allocate and build IP header. */
368 skb_push(skb, sizeof(struct iphdr) + (opt ? opt->optlen : 0));
369 skb_reset_network_header(skb);
370 iph = ip_hdr(skb);
371 *((__be16 *)iph) = htons((4 << 12) | (5 << 8) | (inet->tos & 0xff));
372 if (ip_dont_fragment(sk, &rt->u.dst) && !ipfragok)
373 iph->frag_off = htons(IP_DF);
374 else
375 iph->frag_off = 0;
376 iph->ttl = ip_select_ttl(inet, &rt->u.dst);
377 iph->protocol = sk->sk_protocol;
378 iph->saddr = rt->rt_src;
379 iph->daddr = rt->rt_dst;
380 /* Transport layer set skb->h.foo itself. */
382 if (opt && opt->optlen) {
383 iph->ihl += opt->optlen >> 2;
384 ip_options_build(skb, opt, inet->inet_daddr, rt, 0);
387 ip_select_ident_more(iph, &rt->u.dst, sk,
388 (skb_shinfo(skb)->gso_segs ?: 1) - 1);
390 skb->priority = sk->sk_priority;
391 skb->mark = sk->sk_mark;
393 return ip_local_out(skb);
395 no_route:
396 IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTNOROUTES);
397 kfree_skb(skb);
398 return -EHOSTUNREACH;
402 static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from)
404 to->pkt_type = from->pkt_type;
405 to->priority = from->priority;
406 to->protocol = from->protocol;
407 skb_dst_drop(to);
408 skb_dst_set(to, dst_clone(skb_dst(from)));
409 to->dev = from->dev;
410 to->mark = from->mark;
412 /* Copy the flags to each fragment. */
413 IPCB(to)->flags = IPCB(from)->flags;
415 #ifdef CONFIG_NET_SCHED
416 to->tc_index = from->tc_index;
417 #endif
418 nf_copy(to, from);
419 #if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
420 defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
421 to->nf_trace = from->nf_trace;
422 #endif
423 #if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE)
424 to->ipvs_property = from->ipvs_property;
425 #endif
426 skb_copy_secmark(to, from);
430 * This IP datagram is too large to be sent in one piece. Break it up into
431 * smaller pieces (each one the size of the IP header plus a block of
432 * data from the original IP payload) so that each piece still fits in a
433 * single device frame, and queue each such frame for sending.
436 int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
438 struct iphdr *iph;
439 int raw = 0;
440 int ptr;
441 struct net_device *dev;
442 struct sk_buff *skb2;
443 unsigned int mtu, hlen, left, len, ll_rs, pad;
444 int offset;
445 __be16 not_last_frag;
446 struct rtable *rt = skb_rtable(skb);
447 int err = 0;
449 dev = rt->u.dst.dev;
452 * Point into the IP datagram header.
455 iph = ip_hdr(skb);
457 if (unlikely((iph->frag_off & htons(IP_DF)) && !skb->local_df)) {
458 IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGFAILS);
459 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
460 htonl(ip_skb_dst_mtu(skb)));
461 kfree_skb(skb);
462 return -EMSGSIZE;
466 * Setup starting values.
469 hlen = iph->ihl * 4;
470 mtu = dst_mtu(&rt->u.dst) - hlen; /* Size of data space */
471 IPCB(skb)->flags |= IPSKB_FRAG_COMPLETE;
473 /* When frag_list is given, use it. First, check its validity:
474 * some transformers could create a wrong frag_list or break an
475 * existing one; that is not prohibited. In this case fall back to copying.
477 * LATER: this step can be merged to real generation of fragments,
478 * we can switch to copy when see the first bad fragment.
480 if (skb_has_frags(skb)) {
481 struct sk_buff *frag, *frag2;
482 int first_len = skb_pagelen(skb);
484 if (first_len - hlen > mtu ||
485 ((first_len - hlen) & 7) ||
486 (iph->frag_off & htons(IP_MF|IP_OFFSET)) ||
487 skb_cloned(skb))
488 goto slow_path;
490 skb_walk_frags(skb, frag) {
491 /* Correct geometry. */
492 if (frag->len > mtu ||
493 ((frag->len & 7) && frag->next) ||
494 skb_headroom(frag) < hlen)
495 goto slow_path_clean;
497 /* Partially cloned skb? */
498 if (skb_shared(frag))
499 goto slow_path_clean;
501 BUG_ON(frag->sk);
502 if (skb->sk) {
503 frag->sk = skb->sk;
504 frag->destructor = sock_wfree;
506 skb->truesize -= frag->truesize;
509 /* Everything is OK. Generate! */
511 err = 0;
512 offset = 0;
513 frag = skb_shinfo(skb)->frag_list;
514 skb_frag_list_init(skb);
515 skb->data_len = first_len - skb_headlen(skb);
516 skb->len = first_len;
517 iph->tot_len = htons(first_len);
518 iph->frag_off = htons(IP_MF);
519 ip_send_check(iph);
521 for (;;) {
522 /* Prepare the header of the next frame
523 * before the previous one goes down. */
524 if (frag) {
525 frag->ip_summed = CHECKSUM_NONE;
526 skb_reset_transport_header(frag);
527 __skb_push(frag, hlen);
528 skb_reset_network_header(frag);
529 memcpy(skb_network_header(frag), iph, hlen);
530 iph = ip_hdr(frag);
531 iph->tot_len = htons(frag->len);
532 ip_copy_metadata(frag, skb);
533 if (offset == 0)
534 ip_options_fragment(frag);
535 offset += skb->len - hlen;
536 iph->frag_off = htons(offset>>3);
537 if (frag->next != NULL)
538 iph->frag_off |= htons(IP_MF);
539 /* Ready, complete checksum */
540 ip_send_check(iph);
543 err = output(skb);
545 if (!err)
546 IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGCREATES);
547 if (err || !frag)
548 break;
550 skb = frag;
551 frag = skb->next;
552 skb->next = NULL;
555 if (err == 0) {
556 IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGOKS);
557 return 0;
560 while (frag) {
561 skb = frag->next;
562 kfree_skb(frag);
563 frag = skb;
565 IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGFAILS);
566 return err;
568 slow_path_clean:
569 skb_walk_frags(skb, frag2) {
570 if (frag2 == frag)
571 break;
572 frag2->sk = NULL;
573 frag2->destructor = NULL;
574 skb->truesize += frag2->truesize;
578 slow_path:
579 left = skb->len - hlen; /* Space per frame */
580 ptr = raw + hlen; /* Where to start from */
582 /* for bridged IP traffic encapsulated inside e.g. a VLAN header,
583 * we need to make room for the encapsulating header
585 pad = nf_bridge_pad(skb);
586 ll_rs = LL_RESERVED_SPACE_EXTRA(rt->u.dst.dev, pad);
587 mtu -= pad;
590 * Fragment the datagram.
593 offset = (ntohs(iph->frag_off) & IP_OFFSET) << 3;
594 not_last_frag = iph->frag_off & htons(IP_MF);
597 * Keep copying data until we run out.
600 while (left > 0) {
601 len = left;
602 /* IF: it doesn't fit, use 'mtu' - the data space left */
603 if (len > mtu)
604 len = mtu;
605 /* IF: we are not sending up to and including the packet end
606 then align the next start on an eight byte boundary */
607 if (len < left) {
608 len &= ~7;
611 * Allocate buffer.
614 if ((skb2 = alloc_skb(len+hlen+ll_rs, GFP_ATOMIC)) == NULL) {
615 NETDEBUG(KERN_INFO "IP: frag: no memory for new fragment!\n");
616 err = -ENOMEM;
617 goto fail;
621 * Set up data on packet
624 ip_copy_metadata(skb2, skb);
625 skb_reserve(skb2, ll_rs);
626 skb_put(skb2, len + hlen);
627 skb_reset_network_header(skb2);
628 skb2->transport_header = skb2->network_header + hlen;
631 * Charge the memory for the fragment to any owner
632 * it might possess
635 if (skb->sk)
636 skb_set_owner_w(skb2, skb->sk);
639 * Copy the packet header into the new buffer.
642 skb_copy_from_linear_data(skb, skb_network_header(skb2), hlen);
645 * Copy a block of the IP datagram.
647 if (skb_copy_bits(skb, ptr, skb_transport_header(skb2), len))
648 BUG();
649 left -= len;
652 * Fill in the new header fields.
654 iph = ip_hdr(skb2);
655 iph->frag_off = htons((offset >> 3));
657 /* ANK: dirty, but effective trick. Upgrade options only if
658 * the segment to be fragmented was THE FIRST (otherwise,
659 * options are already fixed) and make it ONCE
660 * on the initial skb, so that all the following fragments
661 * will inherit fixed options.
663 if (offset == 0)
664 ip_options_fragment(skb);
667 * Added AC : If we are fragmenting a fragment that's not the
668 * last fragment then keep the MF bit set on each of them
670 if (left > 0 || not_last_frag)
671 iph->frag_off |= htons(IP_MF);
672 ptr += len;
673 offset += len;
676 * Put this fragment into the sending queue.
678 iph->tot_len = htons(len + hlen);
680 ip_send_check(iph);
682 err = output(skb2);
683 if (err)
684 goto fail;
686 IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGCREATES);
688 kfree_skb(skb);
689 IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGOKS);
690 return err;
692 fail:
693 kfree_skb(skb);
694 IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGFAILS);
695 return err;
698 EXPORT_SYMBOL(ip_fragment);
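
Editor's note: the slow path above caps each piece at the data MTU, rounds every non-final piece down to a multiple of 8 (len &= ~7), stores the offset in 8-byte units (offset >> 3) and keeps IP_MF set on everything but the final piece. A standalone sketch of just that arithmetic, with made-up payload and MTU values, prints the fragment plan it would produce:

#include <stdio.h>

int main(void)
{
	unsigned int payload = 4000;		/* made-up datagram payload */
	unsigned int hlen = 20;			/* IPv4 header, no options */
	unsigned int mtu = 1500 - hlen;		/* data space per fragment */
	unsigned int left = payload, offset = 0;

	while (left > 0) {
		unsigned int len = left > mtu ? mtu : left;

		if (len < left)			/* not the last: 8-byte align */
			len &= ~7U;
		printf("fragment: data %u, tot_len %u, frag_off field %u, MF %u\n",
		       len, len + hlen, offset >> 3, (unsigned int)(left > len));
		offset += len;
		left -= len;
	}
	return 0;
}
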
701 ip_generic_getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb)
703 struct iovec *iov = from;
705 if (skb->ip_summed == CHECKSUM_PARTIAL) {
706 if (memcpy_fromiovecend(to, iov, offset, len) < 0)
707 return -EFAULT;
708 } else {
709 __wsum csum = 0;
710 if (csum_partial_copy_fromiovecend(to, iov, offset, len, &csum) < 0)
711 return -EFAULT;
712 skb->csum = csum_block_add(skb->csum, csum, odd);
714 return 0;
717 static inline __wsum
718 csum_page(struct page *page, int offset, int copy)
720 char *kaddr;
721 __wsum csum;
722 kaddr = kmap(page);
723 csum = csum_partial(kaddr + offset, copy, 0);
724 kunmap(page);
725 return csum;
728 static inline int ip_ufo_append_data(struct sock *sk,
729 int getfrag(void *from, char *to, int offset, int len,
730 int odd, struct sk_buff *skb),
731 void *from, int length, int hh_len, int fragheaderlen,
732 int transhdrlen, int mtu, unsigned int flags)
734 struct sk_buff *skb;
735 int err;
737 /* There is support for UDP fragmentation offload by the network
738 * device, so create a single skb containing the complete
739 * UDP datagram.
741 if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL) {
742 skb = sock_alloc_send_skb(sk,
743 hh_len + fragheaderlen + transhdrlen + 20,
744 (flags & MSG_DONTWAIT), &err);
746 if (skb == NULL)
747 return err;
749 /* reserve space for Hardware header */
750 skb_reserve(skb, hh_len);
752 /* create space for UDP/IP header */
753 skb_put(skb, fragheaderlen + transhdrlen);
755 /* initialize network header pointer */
756 skb_reset_network_header(skb);
758 /* initialize protocol header pointer */
759 skb->transport_header = skb->network_header + fragheaderlen;
761 skb->ip_summed = CHECKSUM_PARTIAL;
762 skb->csum = 0;
763 sk->sk_sndmsg_off = 0;
765 /* specify the length of each IP datagram fragment */
766 skb_shinfo(skb)->gso_size = mtu - fragheaderlen;
767 skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
768 __skb_queue_tail(&sk->sk_write_queue, skb);
771 return skb_append_datato_frags(sk, skb, getfrag, from,
772 (length - transhdrlen));
776 * ip_append_data() and ip_append_page() can make one large IP datagram
777 * from many pieces of data. Each piece will be held on the socket
778 * until ip_push_pending_frames() is called. Each piece can be a page
779 * or non-page data.
781 * Transport protocols other than UDP - e.g. raw sockets - can
782 * potentially use this interface as well.
784 * LATER: length must be adjusted by pad at tail, when it is required.
786 int ip_append_data(struct sock *sk,
787 int getfrag(void *from, char *to, int offset, int len,
788 int odd, struct sk_buff *skb),
789 void *from, int length, int transhdrlen,
790 struct ipcm_cookie *ipc, struct rtable **rtp,
791 unsigned int flags)
793 struct inet_sock *inet = inet_sk(sk);
794 struct sk_buff *skb;
796 struct ip_options *opt = NULL;
797 int hh_len;
798 int exthdrlen;
799 int mtu;
800 int copy;
801 int err;
802 int offset = 0;
803 unsigned int maxfraglen, fragheaderlen;
804 int csummode = CHECKSUM_NONE;
805 struct rtable *rt;
807 if (flags&MSG_PROBE)
808 return 0;
810 if (skb_queue_empty(&sk->sk_write_queue)) {
812 * setup for corking.
814 opt = ipc->opt;
815 if (opt) {
816 if (inet->cork.opt == NULL) {
817 inet->cork.opt = kmalloc(sizeof(struct ip_options) + 40, sk->sk_allocation);
818 if (unlikely(inet->cork.opt == NULL))
819 return -ENOBUFS;
821 memcpy(inet->cork.opt, opt, sizeof(struct ip_options)+opt->optlen);
822 inet->cork.flags |= IPCORK_OPT;
823 inet->cork.addr = ipc->addr;
825 rt = *rtp;
826 if (unlikely(!rt))
827 return -EFAULT;
829 * We steal a reference to this route; the caller should not release it
831 *rtp = NULL;
832 inet->cork.fragsize = mtu = inet->pmtudisc == IP_PMTUDISC_PROBE ?
833 rt->u.dst.dev->mtu :
834 dst_mtu(rt->u.dst.path);
835 inet->cork.dst = &rt->u.dst;
836 inet->cork.length = 0;
837 sk->sk_sndmsg_page = NULL;
838 sk->sk_sndmsg_off = 0;
839 if ((exthdrlen = rt->u.dst.header_len) != 0) {
840 length += exthdrlen;
841 transhdrlen += exthdrlen;
843 } else {
844 rt = (struct rtable *)inet->cork.dst;
845 if (inet->cork.flags & IPCORK_OPT)
846 opt = inet->cork.opt;
848 transhdrlen = 0;
849 exthdrlen = 0;
850 mtu = inet->cork.fragsize;
852 hh_len = LL_RESERVED_SPACE(rt->u.dst.dev);
854 fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
855 maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;
857 if (inet->cork.length + length > 0xFFFF - fragheaderlen) {
858 ip_local_error(sk, EMSGSIZE, rt->rt_dst, inet->inet_dport,
859 mtu-exthdrlen);
860 return -EMSGSIZE;
864 * transhdrlen > 0 means that this is the first fragment and we wish
865 * it won't be fragmented in the future.
867 if (transhdrlen &&
868 length + fragheaderlen <= mtu &&
869 rt->u.dst.dev->features & NETIF_F_V4_CSUM &&
870 !exthdrlen)
871 csummode = CHECKSUM_PARTIAL;
873 inet->cork.length += length;
874 if (((length> mtu) || !skb_queue_empty(&sk->sk_write_queue)) &&
875 (sk->sk_protocol == IPPROTO_UDP) &&
876 (rt->u.dst.dev->features & NETIF_F_UFO)) {
877 err = ip_ufo_append_data(sk, getfrag, from, length, hh_len,
878 fragheaderlen, transhdrlen, mtu,
879 flags);
880 if (err)
881 goto error;
882 return 0;
885 /* So, what's going on in the loop below?
887 * We use the calculated fragment length to generate a chained skb;
888 * each of its segments is an IP fragment ready to be sent to the
889 * network once the appropriate IP header has been added.
892 if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL)
893 goto alloc_new_skb;
895 while (length > 0) {
896 /* Check if the remaining data fits into current packet. */
897 copy = mtu - skb->len;
898 if (copy < length)
899 copy = maxfraglen - skb->len;
900 if (copy <= 0) {
901 char *data;
902 unsigned int datalen;
903 unsigned int fraglen;
904 unsigned int fraggap;
905 unsigned int alloclen;
906 struct sk_buff *skb_prev;
907 alloc_new_skb:
908 skb_prev = skb;
909 if (skb_prev)
910 fraggap = skb_prev->len - maxfraglen;
911 else
912 fraggap = 0;
915 * If remaining data exceeds the mtu,
916 * we know we need more fragment(s).
918 datalen = length + fraggap;
919 if (datalen > mtu - fragheaderlen)
920 datalen = maxfraglen - fragheaderlen;
921 fraglen = datalen + fragheaderlen;
923 if ((flags & MSG_MORE) &&
924 !(rt->u.dst.dev->features&NETIF_F_SG))
925 alloclen = mtu;
926 else
927 alloclen = datalen + fragheaderlen;
929 /* The last fragment gets additional space at tail.
930 * Note that with MSG_MORE we overallocate on fragments,
931 * because we have no idea which fragment will be
932 * the last.
934 if (datalen == length + fraggap)
935 alloclen += rt->u.dst.trailer_len;
937 if (transhdrlen) {
938 skb = sock_alloc_send_skb(sk,
939 alloclen + hh_len + 15,
940 (flags & MSG_DONTWAIT), &err);
941 } else {
942 skb = NULL;
943 if (atomic_read(&sk->sk_wmem_alloc) <=
944 2 * sk->sk_sndbuf)
945 skb = sock_wmalloc(sk,
946 alloclen + hh_len + 15, 1,
947 sk->sk_allocation);
948 if (unlikely(skb == NULL))
949 err = -ENOBUFS;
950 else
951 /* only the initial fragment is
952 time stamped */
953 ipc->shtx.flags = 0;
955 if (skb == NULL)
956 goto error;
959 * Fill in the control structures
961 skb->ip_summed = csummode;
962 skb->csum = 0;
963 skb_reserve(skb, hh_len);
964 *skb_tx(skb) = ipc->shtx;
967 * Find where to start putting bytes.
969 data = skb_put(skb, fraglen);
970 skb_set_network_header(skb, exthdrlen);
971 skb->transport_header = (skb->network_header +
972 fragheaderlen);
973 data += fragheaderlen;
975 if (fraggap) {
976 skb->csum = skb_copy_and_csum_bits(
977 skb_prev, maxfraglen,
978 data + transhdrlen, fraggap, 0);
979 skb_prev->csum = csum_sub(skb_prev->csum,
980 skb->csum);
981 data += fraggap;
982 pskb_trim_unique(skb_prev, maxfraglen);
985 copy = datalen - transhdrlen - fraggap;
986 if (copy > 0 && getfrag(from, data + transhdrlen, offset, copy, fraggap, skb) < 0) {
987 err = -EFAULT;
988 kfree_skb(skb);
989 goto error;
992 offset += copy;
993 length -= datalen - fraggap;
994 transhdrlen = 0;
995 exthdrlen = 0;
996 csummode = CHECKSUM_NONE;
999 * Put the packet on the pending queue.
1001 __skb_queue_tail(&sk->sk_write_queue, skb);
1002 continue;
1005 if (copy > length)
1006 copy = length;
1008 if (!(rt->u.dst.dev->features&NETIF_F_SG)) {
1009 unsigned int off;
1011 off = skb->len;
1012 if (getfrag(from, skb_put(skb, copy),
1013 offset, copy, off, skb) < 0) {
1014 __skb_trim(skb, off);
1015 err = -EFAULT;
1016 goto error;
1018 } else {
1019 int i = skb_shinfo(skb)->nr_frags;
1020 skb_frag_t *frag = &skb_shinfo(skb)->frags[i-1];
1021 struct page *page = sk->sk_sndmsg_page;
1022 int off = sk->sk_sndmsg_off;
1023 unsigned int left;
1025 if (page && (left = PAGE_SIZE - off) > 0) {
1026 if (copy >= left)
1027 copy = left;
1028 if (page != frag->page) {
1029 if (i == MAX_SKB_FRAGS) {
1030 err = -EMSGSIZE;
1031 goto error;
1033 get_page(page);
1034 skb_fill_page_desc(skb, i, page, sk->sk_sndmsg_off, 0);
1035 frag = &skb_shinfo(skb)->frags[i];
1037 } else if (i < MAX_SKB_FRAGS) {
1038 if (copy > PAGE_SIZE)
1039 copy = PAGE_SIZE;
1040 page = alloc_pages(sk->sk_allocation, 0);
1041 if (page == NULL) {
1042 err = -ENOMEM;
1043 goto error;
1045 sk->sk_sndmsg_page = page;
1046 sk->sk_sndmsg_off = 0;
1048 skb_fill_page_desc(skb, i, page, 0, 0);
1049 frag = &skb_shinfo(skb)->frags[i];
1050 } else {
1051 err = -EMSGSIZE;
1052 goto error;
1054 if (getfrag(from, page_address(frag->page)+frag->page_offset+frag->size, offset, copy, skb->len, skb) < 0) {
1055 err = -EFAULT;
1056 goto error;
1058 sk->sk_sndmsg_off += copy;
1059 frag->size += copy;
1060 skb->len += copy;
1061 skb->data_len += copy;
1062 skb->truesize += copy;
1063 atomic_add(copy, &sk->sk_wmem_alloc);
1065 offset += copy;
1066 length -= copy;
1069 return 0;
1071 error:
1072 inet->cork.length -= length;
1073 IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTDISCARDS);
1074 return err;
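
Editor's note: ip_append_data() is what queues those partial pieces on the socket until ip_push_pending_frames() emits them. The easiest way to exercise that path from userspace is UDP socket corking; a rough sketch follows (the port, address and payloads are placeholders, and error handling is trimmed):

#include <netinet/in.h>
#include <netinet/udp.h>
#include <sys/socket.h>
#include <arpa/inet.h>
#include <unistd.h>

#ifndef UDP_CORK
#define UDP_CORK 1		/* from linux/udp.h */
#endif

int main(void)
{
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	struct sockaddr_in dst = { .sin_family = AF_INET,
				   .sin_port = htons(9999) };
	int on = 1, off = 0;

	if (fd < 0)
		return 1;
	inet_pton(AF_INET, "127.0.0.1", &dst.sin_addr);
	connect(fd, (struct sockaddr *)&dst, sizeof(dst));

	/* While corked, each send() runs ip_append_data() and the data is
	 * held on sk_write_queue instead of being transmitted. */
	setsockopt(fd, IPPROTO_UDP, UDP_CORK, &on, sizeof(on));
	send(fd, "hello ", 6, 0);
	send(fd, "world", 5, 0);
	/* Uncorking pushes the queued data out, ending up in
	 * ip_push_pending_frames(): one UDP datagram goes on the wire. */
	setsockopt(fd, IPPROTO_UDP, UDP_CORK, &off, sizeof(off));

	close(fd);
	return 0;
}
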
1077 ssize_t ip_append_page(struct sock *sk, struct page *page,
1078 int offset, size_t size, int flags)
1080 struct inet_sock *inet = inet_sk(sk);
1081 struct sk_buff *skb;
1082 struct rtable *rt;
1083 struct ip_options *opt = NULL;
1084 int hh_len;
1085 int mtu;
1086 int len;
1087 int err;
1088 unsigned int maxfraglen, fragheaderlen, fraggap;
1090 if (inet->hdrincl)
1091 return -EPERM;
1093 if (flags&MSG_PROBE)
1094 return 0;
1096 if (skb_queue_empty(&sk->sk_write_queue))
1097 return -EINVAL;
1099 rt = (struct rtable *)inet->cork.dst;
1100 if (inet->cork.flags & IPCORK_OPT)
1101 opt = inet->cork.opt;
1103 if (!(rt->u.dst.dev->features&NETIF_F_SG))
1104 return -EOPNOTSUPP;
1106 hh_len = LL_RESERVED_SPACE(rt->u.dst.dev);
1107 mtu = inet->cork.fragsize;
1109 fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
1110 maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;
1112 if (inet->cork.length + size > 0xFFFF - fragheaderlen) {
1113 ip_local_error(sk, EMSGSIZE, rt->rt_dst, inet->inet_dport, mtu);
1114 return -EMSGSIZE;
1117 if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL)
1118 return -EINVAL;
1120 inet->cork.length += size;
1121 if ((sk->sk_protocol == IPPROTO_UDP) &&
1122 (rt->u.dst.dev->features & NETIF_F_UFO)) {
1123 skb_shinfo(skb)->gso_size = mtu - fragheaderlen;
1124 skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
1128 while (size > 0) {
1129 int i;
1131 if (skb_is_gso(skb))
1132 len = size;
1133 else {
1135 /* Check if the remaining data fits into current packet. */
1136 len = mtu - skb->len;
1137 if (len < size)
1138 len = maxfraglen - skb->len;
1140 if (len <= 0) {
1141 struct sk_buff *skb_prev;
1142 int alloclen;
1144 skb_prev = skb;
1145 fraggap = skb_prev->len - maxfraglen;
1147 alloclen = fragheaderlen + hh_len + fraggap + 15;
1148 skb = sock_wmalloc(sk, alloclen, 1, sk->sk_allocation);
1149 if (unlikely(!skb)) {
1150 err = -ENOBUFS;
1151 goto error;
1155 * Fill in the control structures
1157 skb->ip_summed = CHECKSUM_NONE;
1158 skb->csum = 0;
1159 skb_reserve(skb, hh_len);
1162 * Find where to start putting bytes.
1164 skb_put(skb, fragheaderlen + fraggap);
1165 skb_reset_network_header(skb);
1166 skb->transport_header = (skb->network_header +
1167 fragheaderlen);
1168 if (fraggap) {
1169 skb->csum = skb_copy_and_csum_bits(skb_prev,
1170 maxfraglen,
1171 skb_transport_header(skb),
1172 fraggap, 0);
1173 skb_prev->csum = csum_sub(skb_prev->csum,
1174 skb->csum);
1175 pskb_trim_unique(skb_prev, maxfraglen);
1179 * Put the packet on the pending queue.
1181 __skb_queue_tail(&sk->sk_write_queue, skb);
1182 continue;
1185 i = skb_shinfo(skb)->nr_frags;
1186 if (len > size)
1187 len = size;
1188 if (skb_can_coalesce(skb, i, page, offset)) {
1189 skb_shinfo(skb)->frags[i-1].size += len;
1190 } else if (i < MAX_SKB_FRAGS) {
1191 get_page(page);
1192 skb_fill_page_desc(skb, i, page, offset, len);
1193 } else {
1194 err = -EMSGSIZE;
1195 goto error;
1198 if (skb->ip_summed == CHECKSUM_NONE) {
1199 __wsum csum;
1200 csum = csum_page(page, offset, len);
1201 skb->csum = csum_block_add(skb->csum, csum, skb->len);
1204 skb->len += len;
1205 skb->data_len += len;
1206 skb->truesize += len;
1207 atomic_add(len, &sk->sk_wmem_alloc);
1208 offset += len;
1209 size -= len;
1211 return 0;
1213 error:
1214 inet->cork.length -= size;
1215 IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTDISCARDS);
1216 return err;
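
Editor's note: ip_append_page() is the zero-copy path behind the "sendfile() on UDP works now" entry in the header comment: file pages are attached to the pending skb as fragments rather than copied, provided the route's device advertises NETIF_F_SG. A rough userspace sketch that reaches this path on a connected UDP socket (file name, address and sizes are placeholders; whether the zero-copy path is actually taken depends on the device):

#include <sys/types.h>
#include <sys/sendfile.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	int sock = socket(AF_INET, SOCK_DGRAM, 0);
	int file = open("/etc/hostname", O_RDONLY);	/* placeholder file */
	struct sockaddr_in dst = { .sin_family = AF_INET,
				   .sin_port = htons(9999) };
	off_t off = 0;

	if (sock < 0 || file < 0)
		return 1;
	inet_pton(AF_INET, "127.0.0.1", &dst.sin_addr);
	connect(sock, (struct sockaddr *)&dst, sizeof(dst));

	/* sendfile() on a connected UDP socket hands file pages to the
	 * socket layer; on scatter-gather capable devices the pages end up
	 * attached via ip_append_page() instead of being copied. */
	if (sendfile(sock, file, &off, 1024) < 0)
		return 1;

	close(file);
	close(sock);
	return 0;
}
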
1219 static void ip_cork_release(struct inet_sock *inet)
1221 inet->cork.flags &= ~IPCORK_OPT;
1222 kfree(inet->cork.opt);
1223 inet->cork.opt = NULL;
1224 dst_release(inet->cork.dst);
1225 inet->cork.dst = NULL;
1229 * Combine all pending IP fragments on the socket into one IP datagram
1230 * and push them out.
1232 int ip_push_pending_frames(struct sock *sk)
1234 struct sk_buff *skb, *tmp_skb;
1235 struct sk_buff **tail_skb;
1236 struct inet_sock *inet = inet_sk(sk);
1237 struct net *net = sock_net(sk);
1238 struct ip_options *opt = NULL;
1239 struct rtable *rt = (struct rtable *)inet->cork.dst;
1240 struct iphdr *iph;
1241 __be16 df = 0;
1242 __u8 ttl;
1243 int err = 0;
1245 if ((skb = __skb_dequeue(&sk->sk_write_queue)) == NULL)
1246 goto out;
1247 tail_skb = &(skb_shinfo(skb)->frag_list);
1249 /* move skb->data to ip header from ext header */
1250 if (skb->data < skb_network_header(skb))
1251 __skb_pull(skb, skb_network_offset(skb));
1252 while ((tmp_skb = __skb_dequeue(&sk->sk_write_queue)) != NULL) {
1253 __skb_pull(tmp_skb, skb_network_header_len(skb));
1254 *tail_skb = tmp_skb;
1255 tail_skb = &(tmp_skb->next);
1256 skb->len += tmp_skb->len;
1257 skb->data_len += tmp_skb->len;
1258 skb->truesize += tmp_skb->truesize;
1259 tmp_skb->destructor = NULL;
1260 tmp_skb->sk = NULL;
1263 /* Unless the user demanded real PMTU discovery (IP_PMTUDISC_DO),
1264 * we allow the frame generated here to be fragmented.
1265 * No matter how transforms change the size of the packet, it will go out.
1267 if (inet->pmtudisc < IP_PMTUDISC_DO)
1268 skb->local_df = 1;
1270 /* DF bit is set when we want to see DF on outgoing frames.
1271 * If local_df is also set, we still allow this frame to be
1272 * fragmented locally. */
1273 if (inet->pmtudisc >= IP_PMTUDISC_DO ||
1274 (skb->len <= dst_mtu(&rt->u.dst) &&
1275 ip_dont_fragment(sk, &rt->u.dst)))
1276 df = htons(IP_DF);
1278 if (inet->cork.flags & IPCORK_OPT)
1279 opt = inet->cork.opt;
1281 if (rt->rt_type == RTN_MULTICAST)
1282 ttl = inet->mc_ttl;
1283 else
1284 ttl = ip_select_ttl(inet, &rt->u.dst);
1286 iph = (struct iphdr *)skb->data;
1287 iph->version = 4;
1288 iph->ihl = 5;
1289 if (opt) {
1290 iph->ihl += opt->optlen>>2;
1291 ip_options_build(skb, opt, inet->cork.addr, rt, 0);
1293 iph->tos = inet->tos;
1294 iph->frag_off = df;
1295 ip_select_ident(iph, &rt->u.dst, sk);
1296 iph->ttl = ttl;
1297 iph->protocol = sk->sk_protocol;
1298 iph->saddr = rt->rt_src;
1299 iph->daddr = rt->rt_dst;
1301 skb->priority = sk->sk_priority;
1302 skb->mark = sk->sk_mark;
1304 * Steal rt from cork.dst to avoid a pair of atomic_inc/atomic_dec
1305 * on dst refcount
1307 inet->cork.dst = NULL;
1308 skb_dst_set(skb, &rt->u.dst);
1310 if (iph->protocol == IPPROTO_ICMP)
1311 icmp_out_count(net, ((struct icmphdr *)
1312 skb_transport_header(skb))->type);
1314 /* Netfilter gets the whole, not yet fragmented skb. */
1315 err = ip_local_out(skb);
1316 if (err) {
1317 if (err > 0)
1318 err = net_xmit_errno(err);
1319 if (err)
1320 goto error;
1323 out:
1324 ip_cork_release(inet);
1325 return err;
1327 error:
1328 IP_INC_STATS(net, IPSTATS_MIB_OUTDISCARDS);
1329 goto out;
1333 * Throw away all pending data on the socket.
1335 void ip_flush_pending_frames(struct sock *sk)
1337 struct sk_buff *skb;
1339 while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL)
1340 kfree_skb(skb);
1342 ip_cork_release(inet_sk(sk));
1347 * Fetch data from kernel space and fill in checksum if needed.
1349 static int ip_reply_glue_bits(void *dptr, char *to, int offset,
1350 int len, int odd, struct sk_buff *skb)
1352 __wsum csum;
1354 csum = csum_partial_copy_nocheck(dptr+offset, to, len, 0);
1355 skb->csum = csum_block_add(skb->csum, csum, odd);
1356 return 0;
1360 * Generic function to send a packet as reply to another packet.
1361 * Used so far to send TCP resets. ICMP should use this function too.
1363 * Must run single-threaded per socket because it uses the sock
1364 * structure to pass arguments.
1366 void ip_send_reply(struct sock *sk, struct sk_buff *skb, struct ip_reply_arg *arg,
1367 unsigned int len)
1369 struct inet_sock *inet = inet_sk(sk);
1370 struct {
1371 struct ip_options opt;
1372 char data[40];
1373 } replyopts;
1374 struct ipcm_cookie ipc;
1375 __be32 daddr;
1376 struct rtable *rt = skb_rtable(skb);
1378 if (ip_options_echo(&replyopts.opt, skb))
1379 return;
1381 daddr = ipc.addr = rt->rt_src;
1382 ipc.opt = NULL;
1383 ipc.shtx.flags = 0;
1385 if (replyopts.opt.optlen) {
1386 ipc.opt = &replyopts.opt;
1388 if (ipc.opt->srr)
1389 daddr = replyopts.opt.faddr;
1393 struct flowi fl = { .oif = arg->bound_dev_if,
1394 .nl_u = { .ip4_u =
1395 { .daddr = daddr,
1396 .saddr = rt->rt_spec_dst,
1397 .tos = RT_TOS(ip_hdr(skb)->tos) } },
1398 /* Not quite clean, but right. */
1399 .uli_u = { .ports =
1400 { .sport = tcp_hdr(skb)->dest,
1401 .dport = tcp_hdr(skb)->source } },
1402 .proto = sk->sk_protocol,
1403 .flags = ip_reply_arg_flowi_flags(arg) };
1404 security_skb_classify_flow(skb, &fl);
1405 if (ip_route_output_key(sock_net(sk), &rt, &fl))
1406 return;
1409 /* And let IP do all the hard work.
1411 This chunk is not reentrant, hence the spinlock.
1412 Note that it relies on the fact that this function is called
1413 with BH disabled locally and that sk cannot already be spinlocked.
1415 bh_lock_sock(sk);
1416 inet->tos = ip_hdr(skb)->tos;
1417 sk->sk_priority = skb->priority;
1418 sk->sk_protocol = ip_hdr(skb)->protocol;
1419 sk->sk_bound_dev_if = arg->bound_dev_if;
1420 ip_append_data(sk, ip_reply_glue_bits, arg->iov->iov_base, len, 0,
1421 &ipc, &rt, MSG_DONTWAIT);
1422 if ((skb = skb_peek(&sk->sk_write_queue)) != NULL) {
1423 if (arg->csumoffset >= 0)
1424 *((__sum16 *)skb_transport_header(skb) +
1425 arg->csumoffset) = csum_fold(csum_add(skb->csum,
1426 arg->csum));
1427 skb->ip_summed = CHECKSUM_NONE;
1428 ip_push_pending_frames(sk);
1431 bh_unlock_sock(sk);
1433 ip_rt_put(rt);
1436 void __init ip_init(void)
1438 ip_rt_init();
1439 inet_initpeers();
1441 #if defined(CONFIG_IP_MULTICAST) && defined(CONFIG_PROC_FS)
1442 igmp_mc_proc_init();
1443 #endif
1446 EXPORT_SYMBOL(ip_generic_getfrag);
1447 EXPORT_SYMBOL(ip_queue_xmit);
1448 EXPORT_SYMBOL(ip_send_check);