net/ipv4/ip_output.c
/*
 * INET		An implementation of the TCP/IP protocol suite for the LINUX
 *		operating system.  INET is implemented using the  BSD Socket
 *		interface as the means of communication with the user level.
 *
 *		The Internet Protocol (IP) output module.
 *
 * Version:	$Id: ip_output.c,v 1.100 2002/02/01 22:01:03 davem Exp $
 *
 * Authors:	Ross Biro, <bir7@leland.Stanford.Edu>
 *		Fred N. van Kempen, <waltje@uWalt.NL.Mugnet.ORG>
 *		Donald Becker, <becker@super.org>
 *		Alan Cox, <Alan.Cox@linux.org>
 *		Richard Underwood
 *		Stefan Becker, <stefanb@yello.ping.de>
 *		Jorge Cwik, <jorge@laser.satlink.net>
 *		Arnt Gulbrandsen, <agulbra@nvg.unit.no>
 *		Hirokazu Takahashi, <taka@valinux.co.jp>
 *
 *	See ip_input.c for original log
 *
 *	Fixes:
 *		Alan Cox	:	Missing nonblock feature in ip_build_xmit.
 *		Mike Kilburn	:	htons() missing in ip_build_xmit.
 *		Bradford Johnson:	Fix faulty handling of some frames when
 *					no route is found.
 *		Alexander Demenshin:	Missing sk/skb free in ip_queue_xmit
 *					(in case the packet is not accepted by
 *					output firewall rules)
 *		Mike McLagan	:	Routing by source
 *		Alexey Kuznetsov:	use new route cache
 *		Andi Kleen	:	Fix broken PMTU recovery and remove
 *					some redundant tests.
 *	Vitaly E. Lavrov	:	Transparent proxy revived after year coma.
 *		Andi Kleen	:	Replace ip_reply with ip_send_reply.
 *		Andi Kleen	:	Split fast and slow ip_build_xmit path
 *					for decreased register pressure on x86
 *					and more readability.
 *		Marc Boucher	:	When call_out_firewall returns FW_QUEUE,
 *					silently drop skb instead of failing with -EPERM.
 *		Detlev Wengorz	:	Copy protocol for fragments.
 *		Hirokazu Takahashi:	HW checksumming for outgoing UDP
 *					datagrams.
 *		Hirokazu Takahashi:	sendfile() on UDP works now.
 */
#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/config.h>

#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/in.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/proc_fs.h>
#include <linux/stat.h>
#include <linux/init.h>

#include <net/snmp.h>
#include <net/ip.h>
#include <net/protocol.h>
#include <net/route.h>
#include <net/tcp.h>
#include <net/udp.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/arp.h>
#include <net/icmp.h>
#include <net/raw.h>
#include <net/checksum.h>
#include <net/inetpeer.h>
#include <linux/igmp.h>
#include <linux/netfilter_ipv4.h>
#include <linux/mroute.h>
#include <linux/netlink.h>
/*
 *	Shall we try to damage output packets if routing dev changes?
 */

int sysctl_ip_dynaddr;
int sysctl_ip_default_ttl = IPDEFTTL;
/* Generate a checksum for an outgoing IP datagram. */
__inline__ void ip_send_check(struct iphdr *iph)
{
	iph->check = 0;
	iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
}
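/*
 * Illustration (not part of the original file): ip_fast_csum() above is an
 * architecture-optimized ones'-complement sum over the IP header, taking the
 * header length in 32-bit words (iph->ihl).  A portable sketch of the same
 * RFC 1071 arithmetic, under a hypothetical name, could look like this:
 */
static u16 example_ip_header_csum(const void *hdr, unsigned int ihl)
{
	const u16 *p = hdr;
	u32 sum = 0;
	unsigned int i;

	/* ihl counts 32-bit words, so the header holds ihl * 2 16-bit words;
	 * the caller is expected to have zeroed iph->check beforehand, as
	 * ip_send_check() does. */
	for (i = 0; i < ihl * 2; i++)
		sum += p[i];

	/* Fold carries back in: ones'-complement addition is end-around carry. */
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);

	/* The header checksum is the ones' complement of the folded sum. */
	return (u16)~sum;
}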
/* dev_loopback_xmit for use with netfilter. */
static int ip_dev_loopback_xmit(struct sk_buff *newskb)
{
	newskb->mac.raw = newskb->data;
	__skb_pull(newskb, newskb->nh.raw - newskb->data);
	newskb->pkt_type = PACKET_LOOPBACK;
	newskb->ip_summed = CHECKSUM_UNNECESSARY;
	BUG_TRAP(newskb->dst);

#ifdef CONFIG_NETFILTER_DEBUG
	nf_debug_ip_loopback_xmit(newskb);
#endif
	netif_rx(newskb);
	return 0;
}
static inline int ip_select_ttl(struct inet_opt *inet, struct dst_entry *dst)
{
	int ttl = inet->uc_ttl;

	if (ttl < 0)
		ttl = dst_metric(dst, RTAX_HOPLIMIT);
	return ttl;
}
/*
 *	Add an ip header to a skbuff and send it out.
 */
int ip_build_and_send_pkt(struct sk_buff *skb, struct sock *sk,
			  u32 saddr, u32 daddr, struct ip_options *opt)
{
	struct inet_opt *inet = inet_sk(sk);
	struct rtable *rt = (struct rtable *)skb->dst;
	struct iphdr *iph;

	/* Build the IP header. */
	if (opt)
		iph = (struct iphdr *)skb_push(skb, sizeof(struct iphdr) + opt->optlen);
	else
		iph = (struct iphdr *)skb_push(skb, sizeof(struct iphdr));

	iph->version  = 4;
	iph->ihl      = 5;
	iph->tos      = inet->tos;
	if (ip_dont_fragment(sk, &rt->u.dst))
		iph->frag_off = htons(IP_DF);
	else
		iph->frag_off = 0;
	iph->ttl      = ip_select_ttl(inet, &rt->u.dst);
	iph->daddr    = rt->rt_dst;
	iph->saddr    = rt->rt_src;
	iph->protocol = sk->sk_protocol;
	iph->tot_len  = htons(skb->len);
	ip_select_ident(iph, &rt->u.dst, sk);
	skb->nh.iph   = iph;

	if (opt && opt->optlen) {
		iph->ihl += opt->optlen >> 2;
		ip_options_build(skb, opt, daddr, rt, 0);
	}
	ip_send_check(iph);

	skb->priority = sk->sk_priority;

	/* Send it out. */
	return NF_HOOK(PF_INET, NF_IP_LOCAL_OUT, skb, NULL, rt->u.dst.dev,
		       dst_output);
}
static inline int ip_finish_output2(struct sk_buff *skb)
{
	struct dst_entry *dst = skb->dst;
	struct hh_cache *hh = dst->hh;
	struct net_device *dev = dst->dev;
	int hh_len = LL_RESERVED_SPACE(dev);

	/* Be paranoid, rather than too clever. */
	if (unlikely(skb_headroom(skb) < hh_len && dev->hard_header)) {
		struct sk_buff *skb2;

		skb2 = skb_realloc_headroom(skb, LL_RESERVED_SPACE(dev));
		if (skb2 == NULL) {
			kfree_skb(skb);
			return -ENOMEM;
		}
		if (skb->sk)
			skb_set_owner_w(skb2, skb->sk);
		kfree_skb(skb);
		skb = skb2;
	}

#ifdef CONFIG_NETFILTER_DEBUG
	nf_debug_ip_finish_output2(skb);
#endif /* CONFIG_NETFILTER_DEBUG */

	if (hh) {
		int hh_alen;

		read_lock_bh(&hh->hh_lock);
		hh_alen = HH_DATA_ALIGN(hh->hh_len);
		memcpy(skb->data - hh_alen, hh->hh_data, hh_alen);
		read_unlock_bh(&hh->hh_lock);
		skb_push(skb, hh->hh_len);
		return hh->hh_output(skb);
	} else if (dst->neighbour)
		return dst->neighbour->output(skb);

	if (net_ratelimit())
		printk(KERN_DEBUG "ip_finish_output2: No header cache and no neighbour!\n");
	kfree_skb(skb);
	return -EINVAL;
}
int ip_finish_output(struct sk_buff *skb)
{
	struct net_device *dev = skb->dst->dev;

	skb->dev = dev;
	skb->protocol = htons(ETH_P_IP);

	return NF_HOOK(PF_INET, NF_IP_POST_ROUTING, skb, NULL, dev,
		       ip_finish_output2);
}
int ip_mc_output(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;
	struct rtable *rt = (struct rtable *)skb->dst;
	struct net_device *dev = rt->u.dst.dev;

	/*
	 *	If the indicated interface is up and running, send the packet.
	 */
	IP_INC_STATS(IpOutRequests);

	skb->dev = dev;
	skb->protocol = htons(ETH_P_IP);

	/*
	 *	Multicasts are looped back for other local users
	 */

	if (rt->rt_flags&RTCF_MULTICAST) {
		if ((!sk || inet_sk(sk)->mc_loop)
#ifdef CONFIG_IP_MROUTE
		/* Small optimization: do not loop back non-local frames
		   that came back after forwarding; they will be dropped
		   by ip_mr_input in any case.
		   Note that local frames are looped back to be delivered
		   to local recipients.

		   This check is duplicated in ip_mr_input at the moment.
		 */
		    && ((rt->rt_flags&RTCF_LOCAL) || !(IPCB(skb)->flags&IPSKB_FORWARDED))
#endif
		) {
			struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
			if (newskb)
				NF_HOOK(PF_INET, NF_IP_POST_ROUTING, newskb, NULL,
					newskb->dev,
					ip_dev_loopback_xmit);
		}

		/* Multicasts with ttl 0 must not go beyond the host */

		if (skb->nh.iph->ttl == 0) {
			kfree_skb(skb);
			return 0;
		}
	}

	if (rt->rt_flags&RTCF_BROADCAST) {
		struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);
		if (newskb)
			NF_HOOK(PF_INET, NF_IP_POST_ROUTING, newskb, NULL,
				newskb->dev, ip_dev_loopback_xmit);
	}

	if (skb->len > dst_pmtu(&rt->u.dst) || skb_shinfo(skb)->frag_list)
		return ip_fragment(skb, ip_finish_output);
	else
		return ip_finish_output(skb);
}
int ip_output(struct sk_buff *skb)
{
	IP_INC_STATS(IpOutRequests);

	if ((skb->len > dst_pmtu(skb->dst) || skb_shinfo(skb)->frag_list) &&
	    !skb_shinfo(skb)->tso_size)
		return ip_fragment(skb, ip_finish_output);
	else
		return ip_finish_output(skb);
}
int ip_queue_xmit(struct sk_buff *skb, int ipfragok)
{
	struct sock *sk = skb->sk;
	struct inet_opt *inet = inet_sk(sk);
	struct ip_options *opt = inet->opt;
	struct rtable *rt;
	struct iphdr *iph;
	u32 mtu;

	/* Skip all of this if the packet is already routed,
	 * f.e. by something like SCTP.
	 */
	rt = (struct rtable *)skb->dst;
	if (rt != NULL)
		goto packet_routed;

	/* Make sure we can route this packet. */
	rt = (struct rtable *)__sk_dst_check(sk, 0);
	if (rt == NULL) {
		u32 daddr;

		/* Use correct destination address if we have options. */
		daddr = inet->daddr;
		if (opt && opt->srr)
			daddr = opt->faddr;

		{
			struct flowi fl = { .oif = sk->sk_bound_dev_if,
					    .nl_u = { .ip4_u =
						      { .daddr = daddr,
							.saddr = inet->saddr,
							.tos = RT_CONN_FLAGS(sk) } },
					    .proto = sk->sk_protocol,
					    .uli_u = { .ports =
						       { .sport = inet->sport,
							 .dport = inet->dport } } };

			/* If this fails, retransmit mechanism of transport layer will
			 * keep trying until route appears or the connection times
			 * itself out.
			 */
			if (ip_route_output_flow(&rt, &fl, sk, 0))
				goto no_route;
		}
		__sk_dst_set(sk, &rt->u.dst);
		tcp_v4_setup_caps(sk, &rt->u.dst);
	}
	skb->dst = dst_clone(&rt->u.dst);

packet_routed:
	if (opt && opt->is_strictroute && rt->rt_dst != rt->rt_gateway)
		goto no_route;

	/* OK, we know where to send it, allocate and build IP header. */
	iph = (struct iphdr *)skb_push(skb, sizeof(struct iphdr) + (opt ? opt->optlen : 0));
	*((__u16 *)iph) = htons((4 << 12) | (5 << 8) | (inet->tos & 0xff));
	iph->tot_len = htons(skb->len);
	if (ip_dont_fragment(sk, &rt->u.dst) && !ipfragok)
		iph->frag_off = htons(IP_DF);
	else
		iph->frag_off = 0;
	iph->ttl      = ip_select_ttl(inet, &rt->u.dst);
	iph->protocol = sk->sk_protocol;
	iph->saddr    = rt->rt_src;
	iph->daddr    = rt->rt_dst;
	skb->nh.iph   = iph;
	/* Transport layer set skb->h.foo itself. */

	if (opt && opt->optlen) {
		iph->ihl += opt->optlen >> 2;
		ip_options_build(skb, opt, inet->daddr, rt, 0);
	}

	mtu = dst_pmtu(&rt->u.dst);
	if (skb->len > mtu && (sk->sk_route_caps & NETIF_F_TSO)) {
		unsigned int hlen;

		/* Hack zone: all this must be done by TCP. */
		hlen = ((skb->h.raw - skb->data) + (skb->h.th->doff << 2));
		skb_shinfo(skb)->tso_size = mtu - hlen;
		skb_shinfo(skb)->tso_segs =
			(skb->len - hlen + skb_shinfo(skb)->tso_size - 1) /
				skb_shinfo(skb)->tso_size - 1;
	}

	ip_select_ident_more(iph, &rt->u.dst, sk, skb_shinfo(skb)->tso_segs);

	/* Add an IP checksum. */
	ip_send_check(iph);

	skb->priority = sk->sk_priority;

	return NF_HOOK(PF_INET, NF_IP_LOCAL_OUT, skb, NULL, rt->u.dst.dev,
		       dst_output);

no_route:
	IP_INC_STATS(IpOutNoRoutes);
	kfree_skb(skb);
	return -EHOSTUNREACH;
}
static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from)
{
	to->pkt_type = from->pkt_type;
	to->priority = from->priority;
	to->protocol = from->protocol;
	to->security = from->security;
	to->dst = dst_clone(from->dst);
	to->dev = from->dev;

	/* Copy the flags to each fragment. */
	IPCB(to)->flags = IPCB(from)->flags;

#ifdef CONFIG_NET_SCHED
	to->tc_index = from->tc_index;
#endif
#ifdef CONFIG_NETFILTER
	to->nfmark = from->nfmark;
	/* Connection association is same as pre-frag packet */
	to->nfct = from->nfct;
	nf_conntrack_get(to->nfct);
#if defined(CONFIG_BRIDGE) || defined(CONFIG_BRIDGE_MODULE)
	to->nf_bridge = from->nf_bridge;
	nf_bridge_get(to->nf_bridge);
#endif
#ifdef CONFIG_NETFILTER_DEBUG
	to->nf_debug = from->nf_debug;
#endif
#endif
}
/*
 *	This IP datagram is too large to be sent in one piece.  Break it up into
 *	smaller pieces (each of size equal to IP header plus
 *	a block of the data of the original IP data part) that will yet fit in a
 *	single device frame, and queue such a frame for sending.
 */
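/*
 *	Worked example (illustrative numbers, not taken from this file): a
 *	4020-byte datagram (20-byte header plus 4000 bytes of data) sent over
 *	a 1500-byte MTU is split into fragments carrying 1480, 1480 and 1040
 *	data bytes.  Each fragment gets its own 20-byte IP header; frag_off
 *	holds 0, 185 and 370 (byte offsets 0, 1480 and 2960 divided by 8), and
 *	IP_MF is set on every fragment except the last.
 */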
int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff*))
{
	struct iphdr *iph;
	int raw = 0;
	int ptr;
	struct net_device *dev;
	struct sk_buff *skb2;
	unsigned int mtu, hlen, left, len;
	int offset;
	int not_last_frag;
	struct rtable *rt = (struct rtable *)skb->dst;
	int err = 0;

	dev = rt->u.dst.dev;

	/*
	 *	Point into the IP datagram header.
	 */

	iph = skb->nh.iph;

	if (unlikely((iph->frag_off & htons(IP_DF)) && !skb->local_df)) {
		icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
			  htonl(dst_pmtu(&rt->u.dst)));
		kfree_skb(skb);
		return -EMSGSIZE;
	}

	/*
	 *	Setup starting values.
	 */

	hlen = iph->ihl * 4;
	mtu = dst_pmtu(&rt->u.dst) - hlen;	/* Size of data space */
	/* When frag_list is given, use it.  First, check its validity:
	 * some transformers could create a wrong frag_list or break an
	 * existing one; that is not prohibited.  In this case fall back
	 * to copying.
	 *
	 * LATER: this step can be merged into the real generation of
	 * fragments; we can switch to copying when we see the first bad
	 * fragment.
	 */
	if (skb_shinfo(skb)->frag_list) {
		struct sk_buff *frag;
		int first_len = skb_pagelen(skb);

		if (first_len - hlen > mtu ||
		    ((first_len - hlen) & 7) ||
		    (iph->frag_off & htons(IP_MF|IP_OFFSET)) ||
		    skb_cloned(skb))
			goto slow_path;

		for (frag = skb_shinfo(skb)->frag_list; frag; frag = frag->next) {
			/* Correct geometry. */
			if (frag->len > mtu ||
			    ((frag->len & 7) && frag->next) ||
			    skb_headroom(frag) < hlen)
				goto slow_path;

			/* Correct socket ownership. */
			if (frag->sk == NULL)
				goto slow_path;

			/* Partially cloned skb? */
			if (skb_shared(frag))
				goto slow_path;
		}

		/* Everything is OK. Generate! */

		err = 0;
		offset = 0;
		frag = skb_shinfo(skb)->frag_list;
		skb_shinfo(skb)->frag_list = 0;
		skb->data_len = first_len - skb_headlen(skb);
		skb->len = first_len;
		iph->tot_len = htons(first_len);
		iph->frag_off |= htons(IP_MF);
		ip_send_check(iph);

		for (;;) {
			/* Prepare header of the next frame,
			 * before previous one went down. */
			if (frag) {
				frag->h.raw = frag->data;
				frag->nh.raw = __skb_push(frag, hlen);
				memcpy(frag->nh.raw, iph, hlen);
				iph = frag->nh.iph;
				iph->tot_len = htons(frag->len);
				ip_copy_metadata(frag, skb);
				if (offset == 0)
					ip_options_fragment(frag);
				offset += skb->len - hlen;
				iph->frag_off = htons(offset>>3);
				if (frag->next != NULL)
					iph->frag_off |= htons(IP_MF);
				/* Ready, complete checksum */
				ip_send_check(iph);
			}

			err = output(skb);

			if (err || !frag)
				break;

			skb = frag;
			frag = skb->next;
			skb->next = NULL;
		}

		if (err == 0) {
			IP_INC_STATS(IpFragOKs);
			return 0;
		}

		while (frag) {
			skb = frag->next;
			kfree_skb(frag);
			frag = skb;
		}
		IP_INC_STATS(IpFragFails);
		return err;
	}
slow_path:
	left = skb->len - hlen;		/* Space per frame */
	ptr = raw + hlen;		/* Where to start from */

	/*
	 *	Fragment the datagram.
	 */

	offset = (ntohs(iph->frag_off) & IP_OFFSET) << 3;
	not_last_frag = iph->frag_off & htons(IP_MF);

	/*
	 *	Keep copying data until we run out.
	 */

	while (left > 0) {
		len = left;
		/* IF: it doesn't fit, use 'mtu' - the data space left */
		if (len > mtu)
			len = mtu;
		/* IF: we are not sending up to and including the packet end
		   then align the next start on an eight byte boundary */
		if (len < left) {
			len &= ~7;
		}
		/*
		 *	Allocate buffer.
		 */

		if ((skb2 = alloc_skb(len+hlen+LL_RESERVED_SPACE(rt->u.dst.dev), GFP_ATOMIC)) == NULL) {
			NETDEBUG(printk(KERN_INFO "IP: frag: no memory for new fragment!\n"));
			err = -ENOMEM;
			goto fail;
		}

		/*
		 *	Set up data on packet
		 */

		ip_copy_metadata(skb2, skb);
		skb_reserve(skb2, LL_RESERVED_SPACE(rt->u.dst.dev));
		skb_put(skb2, len + hlen);
		skb2->nh.raw = skb2->data;
		skb2->h.raw = skb2->data + hlen;

		/*
		 *	Charge the memory for the fragment to any owner
		 *	it might possess
		 */

		if (skb->sk)
			skb_set_owner_w(skb2, skb->sk);

		/*
		 *	Copy the packet header into the new buffer.
		 */

		memcpy(skb2->nh.raw, skb->data, hlen);

		/*
		 *	Copy a block of the IP datagram.
		 */
		if (skb_copy_bits(skb, ptr, skb2->h.raw, len))
			BUG();
		left -= len;

		/*
		 *	Fill in the new header fields.
		 */
		iph = skb2->nh.iph;
		iph->frag_off = htons((offset >> 3));

		/* ANK: dirty, but effective trick. Upgrade options only if
		 * the segment to be fragmented was THE FIRST (otherwise,
		 * options are already fixed) and do it ONCE
		 * on the initial skb, so that all the following fragments
		 * will inherit fixed options.
		 */
		if (offset == 0)
			ip_options_fragment(skb);

		/*
		 *	Added AC : If we are fragmenting a fragment that's not the
		 *		   last fragment then keep the MF bit set on each fragment
		 */
		if (left > 0 || not_last_frag)
			iph->frag_off |= htons(IP_MF);
		ptr += len;
		offset += len;

		/*
		 *	Put this fragment into the sending queue.
		 */

		IP_INC_STATS(IpFragCreates);

		iph->tot_len = htons(len + hlen);

		ip_send_check(iph);

		err = output(skb2);
		if (err)
			goto fail;
	}
	kfree_skb(skb);
	IP_INC_STATS(IpFragOKs);
	return err;

fail:
	kfree_skb(skb);
	IP_INC_STATS(IpFragFails);
	return err;
}
int
ip_generic_getfrag(void *from, char *to, int offset, int len, int odd, struct sk_buff *skb)
{
	struct iovec *iov = from;

	if (skb->ip_summed == CHECKSUM_HW) {
		if (memcpy_fromiovecend(to, iov, offset, len) < 0)
			return -EFAULT;
	} else {
		unsigned int csum = 0;
		if (csum_partial_copy_fromiovecend(to, iov, offset, len, &csum) < 0)
			return -EFAULT;
		skb->csum = csum_block_add(skb->csum, csum, odd);
	}
	return 0;
}
static inline int
skb_can_coalesce(struct sk_buff *skb, int i, struct page *page, int off)
{
	if (i) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i-1];
		return page == frag->page &&
			off == frag->page_offset + frag->size;
	}
	return 0;
}
static inline unsigned int
csum_page(struct page *page, int offset, int copy)
{
	char *kaddr;
	unsigned int csum;
	kaddr = kmap(page);
	csum = csum_partial(kaddr + offset, copy, 0);
	kunmap(page);
	return csum;
}
/*
 *	ip_append_data() and ip_append_page() can make one large IP datagram
 *	from many pieces of data.  Each piece is held on the socket
 *	until ip_push_pending_frames() is called.  Each piece can be a page
 *	or non-page data.
 *
 *	Not only UDP; other transport protocols, e.g. raw sockets, can
 *	potentially use this interface.
 *
 *	LATER: length must be adjusted by pad at tail, when it is required.
 */
int ip_append_data(struct sock *sk,
		   int getfrag(void *from, char *to, int offset, int len,
			       int odd, struct sk_buff *skb),
		   void *from, int length, int transhdrlen,
		   struct ipcm_cookie *ipc, struct rtable *rt,
		   unsigned int flags)
{
	struct inet_opt *inet = inet_sk(sk);
	struct sk_buff *skb;

	struct ip_options *opt = NULL;
	int hh_len;
	int exthdrlen;
	int mtu;
	int copy;
	int err;
	int offset = 0;
	unsigned int maxfraglen, fragheaderlen;
	int csummode = CHECKSUM_NONE;

	if (flags&MSG_PROBE)
		return 0;

	if (skb_queue_empty(&sk->sk_write_queue)) {
		/*
		 * setup for corking.
		 */
		opt = ipc->opt;
		if (opt) {
			if (inet->cork.opt == NULL)
				inet->cork.opt = kmalloc(sizeof(struct ip_options) + 40, sk->sk_allocation);
			memcpy(inet->cork.opt, opt, sizeof(struct ip_options) + opt->optlen);
			inet->cork.flags |= IPCORK_OPT;
			inet->cork.addr = ipc->addr;
		}
		dst_hold(&rt->u.dst);
		inet->cork.fragsize = mtu = dst_pmtu(&rt->u.dst);
		inet->cork.rt = rt;
		inet->cork.length = 0;
		inet->sndmsg_page = NULL;
		inet->sndmsg_off = 0;
		if ((exthdrlen = rt->u.dst.header_len) != 0) {
			length += exthdrlen;
			transhdrlen += exthdrlen;
		}
	} else {
		rt = inet->cork.rt;
		if (inet->cork.flags & IPCORK_OPT)
			opt = inet->cork.opt;

		transhdrlen = 0;
		exthdrlen = 0;
		mtu = inet->cork.fragsize;
	}
	hh_len = LL_RESERVED_SPACE(rt->u.dst.dev);

	fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;

	if (inet->cork.length + length > 0xFFFF - fragheaderlen) {
		ip_local_error(sk, EMSGSIZE, rt->rt_dst, inet->dport, mtu - exthdrlen);
		return -EMSGSIZE;
	}

	/*
	 * transhdrlen > 0 means that this is the first fragment and we expect
	 * it not to be fragmented in the future.
	 */
	if (transhdrlen &&
	    length + fragheaderlen <= maxfraglen &&
	    rt->u.dst.dev->features&(NETIF_F_IP_CSUM|NETIF_F_NO_CSUM|NETIF_F_HW_CSUM) &&
	    !exthdrlen)
		csummode = CHECKSUM_HW;

	inet->cork.length += length;
	/* So, what's going on in the loop below?
	 *
	 * We use the calculated fragment length to generate a chained skb;
	 * each of its segments is an IP fragment ready to be sent to the
	 * network once an appropriate IP header is added.
	 *
	 * The mistake is:
	 *
	 * If mtu-fragheaderlen is not 0 modulo 8, we generate an additional
	 * small fragment of length (mtu-fragheaderlen)%8, even though
	 * it is not necessary.  Not a big bug, but it needs a fix.
	 */
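	/* Worked example (illustrative numbers, not from this file): with
	 * mtu = 1500 and fragheaderlen = 20, maxfraglen = (1480 & ~7) + 20
	 * = 1500, so every fragment is filled completely.  With mtu = 1006,
	 * mtu - fragheaderlen = 986 is not a multiple of 8; maxfraglen
	 * becomes 984 + 20 = 1004, and the 986 % 8 = 2 leftover bytes per MTU
	 * are what can end up as the unnecessary trailing fragment described
	 * above.
	 */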
	if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL)
		goto alloc_new_skb;
	while (length > 0) {
		if ((copy = maxfraglen - skb->len) <= 0) {
			char *data;
			unsigned int datalen;
			unsigned int fraglen;
			unsigned int alloclen;
			BUG_TRAP(copy == 0);

alloc_new_skb:
			datalen = maxfraglen - fragheaderlen;
			if (datalen > length)
				datalen = length;

			fraglen = datalen + fragheaderlen;
			if ((flags & MSG_MORE) &&
			    !(rt->u.dst.dev->features&NETIF_F_SG))
				alloclen = maxfraglen;
			else
				alloclen = datalen + fragheaderlen;

			/* The last fragment gets additional space at tail.
			 * Note, with MSG_MORE we overallocate on fragments,
			 * because we have no idea what fragment will be
			 * the last.
			 */
			if (datalen == length)
				alloclen += rt->u.dst.trailer_len;

			if (transhdrlen) {
				skb = sock_alloc_send_skb(sk,
						alloclen + hh_len + 15,
						(flags & MSG_DONTWAIT), &err);
			} else {
				skb = NULL;
				if (atomic_read(&sk->sk_wmem_alloc) <=
				    2 * sk->sk_sndbuf)
					skb = sock_wmalloc(sk,
							   alloclen + hh_len + 15, 1,
							   sk->sk_allocation);
				if (unlikely(skb == NULL))
					err = -ENOBUFS;
			}
			if (skb == NULL)
				goto error;

			/*
			 *	Fill in the control structures
			 */
			skb->ip_summed = csummode;
			skb->csum = 0;
			skb_reserve(skb, hh_len);

			/*
			 *	Find where to start putting bytes.
			 */
			data = skb_put(skb, fraglen);
			skb->nh.raw = data + exthdrlen;
			data += fragheaderlen;
			skb->h.raw = data + exthdrlen;

			copy = datalen - transhdrlen;
			if (copy > 0 && getfrag(from, data + transhdrlen, offset, copy, 0, skb) < 0) {
				err = -EFAULT;
				kfree_skb(skb);
				goto error;
			}

			offset += copy;
			length -= datalen;
			transhdrlen = 0;
			exthdrlen = 0;
			csummode = CHECKSUM_NONE;

			/*
			 * Put the packet on the pending queue.
			 */
			__skb_queue_tail(&sk->sk_write_queue, skb);
			continue;
		}

		if (copy > length)
			copy = length;

		if (!(rt->u.dst.dev->features&NETIF_F_SG)) {
			unsigned int off;

			off = skb->len;
			if (getfrag(from, skb_put(skb, copy),
					offset, copy, off, skb) < 0) {
				__skb_trim(skb, off);
				err = -EFAULT;
				goto error;
			}
		} else {
			int i = skb_shinfo(skb)->nr_frags;
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i-1];
			struct page *page = inet->sndmsg_page;
			int off = inet->sndmsg_off;
			unsigned int left;

			if (page && (left = PAGE_SIZE - off) > 0) {
				if (copy >= left)
					copy = left;
				if (page != frag->page) {
					if (i == MAX_SKB_FRAGS) {
						err = -EMSGSIZE;
						goto error;
					}
					get_page(page);
					skb_fill_page_desc(skb, i, page, inet->sndmsg_off, 0);
					frag = &skb_shinfo(skb)->frags[i];
				}
			} else if (i < MAX_SKB_FRAGS) {
				if (copy > PAGE_SIZE)
					copy = PAGE_SIZE;
				page = alloc_pages(sk->sk_allocation, 0);
				if (page == NULL) {
					err = -ENOMEM;
					goto error;
				}
				inet->sndmsg_page = page;
				inet->sndmsg_off = 0;

				skb_fill_page_desc(skb, i, page, 0, 0);
				frag = &skb_shinfo(skb)->frags[i];
				skb->truesize += PAGE_SIZE;
				atomic_add(PAGE_SIZE, &sk->sk_wmem_alloc);
			} else {
				err = -EMSGSIZE;
				goto error;
			}
			if (getfrag(from, page_address(frag->page)+frag->page_offset+frag->size, offset, copy, skb->len, skb) < 0) {
				err = -EFAULT;
				goto error;
			}
			inet->sndmsg_off += copy;
			frag->size += copy;
			skb->len += copy;
			skb->data_len += copy;
		}
		offset += copy;
		length -= copy;
	}

	return 0;

error:
	inet->cork.length -= length;
	IP_INC_STATS(IpOutDiscards);
	return err;
}
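/*
 * Illustrative sketch (not part of this file): the intended calling pattern
 * for the corking interface above, roughly as a transport protocol such as
 * UDP might use it.  Routing setup and most error handling are elided and the
 * function name is hypothetical; this only shows the append/push/flush
 * sequence under those assumptions.
 */
static int example_corked_send(struct sock *sk, struct iovec *iov, int length,
			       struct ipcm_cookie *ipc, struct rtable *rt,
			       unsigned int flags)
{
	int err;

	lock_sock(sk);

	/* Queue the payload; ip_generic_getfrag() copies from the iovec and
	 * checksums it unless the device can checksum in hardware. */
	err = ip_append_data(sk, ip_generic_getfrag, iov, length, 0,
			     ipc, rt, flags);
	if (err)
		ip_flush_pending_frames(sk);	/* drop anything already queued */
	else if (!(flags & MSG_MORE))
		/* Not corked any further: build the IP header(s) and transmit. */
		err = ip_push_pending_frames(sk);

	release_sock(sk);
	return err;
}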
ssize_t	ip_append_page(struct sock *sk, struct page *page,
		       int offset, size_t size, int flags)
{
	struct inet_opt *inet = inet_sk(sk);
	struct sk_buff *skb;
	struct rtable *rt;
	struct ip_options *opt = NULL;
	int hh_len;
	int mtu;
	int len;
	int err;
	unsigned int maxfraglen, fragheaderlen;

	if (inet->hdrincl)
		return -EPERM;

	if (flags&MSG_PROBE)
		return 0;

	if (skb_queue_empty(&sk->sk_write_queue))
		return -EINVAL;

	rt = inet->cork.rt;
	if (inet->cork.flags & IPCORK_OPT)
		opt = inet->cork.opt;

	if (!(rt->u.dst.dev->features&NETIF_F_SG))
		return -EOPNOTSUPP;

	hh_len = LL_RESERVED_SPACE(rt->u.dst.dev);
	mtu = inet->cork.fragsize;

	fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;

	if (inet->cork.length + size > 0xFFFF - fragheaderlen) {
		ip_local_error(sk, EMSGSIZE, rt->rt_dst, inet->dport, mtu);
		return -EMSGSIZE;
	}

	if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL)
		return -EINVAL;

	inet->cork.length += size;

	while (size > 0) {
		int i;
		if ((len = maxfraglen - skb->len) <= 0) {
			char *data;
			struct iphdr *iph;
			BUG_TRAP(len == 0);

			skb = sock_wmalloc(sk, fragheaderlen + hh_len + 15, 1,
					   sk->sk_allocation);
			if (unlikely(!skb)) {
				err = -ENOBUFS;
				goto error;
			}

			/*
			 *	Fill in the control structures
			 */
			skb->ip_summed = CHECKSUM_NONE;
			skb->csum = 0;
			skb_reserve(skb, hh_len);

			/*
			 *	Find where to start putting bytes.
			 */
			data = skb_put(skb, fragheaderlen);
			skb->nh.iph = iph = (struct iphdr *)data;
			data += fragheaderlen;
			skb->h.raw = data;

			/*
			 * Put the packet on the pending queue.
			 */
			__skb_queue_tail(&sk->sk_write_queue, skb);
			continue;
		}

		i = skb_shinfo(skb)->nr_frags;
		if (len > size)
			len = size;
		if (skb_can_coalesce(skb, i, page, offset)) {
			skb_shinfo(skb)->frags[i-1].size += len;
		} else if (i < MAX_SKB_FRAGS) {
			get_page(page);
			skb_fill_page_desc(skb, i, page, offset, len);
		} else {
			err = -EMSGSIZE;
			goto error;
		}

		if (skb->ip_summed == CHECKSUM_NONE) {
			unsigned int csum;
			csum = csum_page(page, offset, len);
			skb->csum = csum_block_add(skb->csum, csum, skb->len);
		}

		skb->len += len;
		skb->data_len += len;
		offset += len;
		size -= len;
	}
	return 0;

error:
	inet->cork.length -= size;
	IP_INC_STATS(IpOutDiscards);
	return err;
}
/*
 *	Combine all pending IP fragments on the socket as one IP datagram
 *	and push them out.
 */
int ip_push_pending_frames(struct sock *sk)
{
	struct sk_buff *skb, *tmp_skb;
	struct sk_buff **tail_skb;
	struct inet_opt *inet = inet_sk(sk);
	struct ip_options *opt = NULL;
	struct rtable *rt = inet->cork.rt;
	struct iphdr *iph;
	int df = 0;
	__u8 ttl;
	int err = 0;

	if ((skb = __skb_dequeue(&sk->sk_write_queue)) == NULL)
		goto out;
	tail_skb = &(skb_shinfo(skb)->frag_list);

	/* move skb->data to ip header from ext header */
	if (skb->data < skb->nh.raw)
		__skb_pull(skb, skb->nh.raw - skb->data);
	while ((tmp_skb = __skb_dequeue(&sk->sk_write_queue)) != NULL) {
		__skb_pull(tmp_skb, skb->h.raw - skb->nh.raw);
		*tail_skb = tmp_skb;
		tail_skb = &(tmp_skb->next);
		skb->len += tmp_skb->len;
		skb->data_len += tmp_skb->len;
#if 0 /* Logically correct, but useless work, ip_fragment() will have to undo */
		skb->truesize += tmp_skb->truesize;
		__sock_put(tmp_skb->sk);
		tmp_skb->destructor = NULL;
		tmp_skb->sk = NULL;
#endif
	}

	/* Unless the user demanded real pmtu discovery (IP_PMTUDISC_DO), we
	 * allow the frame generated here to be fragmented.  No matter how
	 * transforms change the size of the packet, it will come out.
	 */
	if (inet->pmtudisc != IP_PMTUDISC_DO)
		skb->local_df = 1;

	/* DF bit is set when we want to see DF on outgoing frames.
	 * If local_df is set too, we still allow this frame to be
	 * fragmented locally. */
	if (inet->pmtudisc == IP_PMTUDISC_DO ||
	    (!skb_shinfo(skb)->frag_list && ip_dont_fragment(sk, &rt->u.dst)))
		df = htons(IP_DF);

	if (inet->cork.flags & IPCORK_OPT)
		opt = inet->cork.opt;

	if (rt->rt_type == RTN_MULTICAST)
		ttl = inet->mc_ttl;
	else
		ttl = ip_select_ttl(inet, &rt->u.dst);

	iph = (struct iphdr *)skb->data;
	iph->version = 4;
	iph->ihl = 5;
	if (opt) {
		iph->ihl += opt->optlen>>2;
		ip_options_build(skb, opt, inet->cork.addr, rt, 0);
	}
	iph->tos = inet->tos;
	iph->tot_len = htons(skb->len);
	iph->frag_off = df;
	if (!df) {
		__ip_select_ident(iph, &rt->u.dst, 0);
	} else {
		iph->id = htons(inet->id++);
	}
	iph->ttl = ttl;
	iph->protocol = sk->sk_protocol;
	iph->saddr = rt->rt_src;
	iph->daddr = rt->rt_dst;
	ip_send_check(iph);

	skb->priority = sk->sk_priority;
	skb->dst = dst_clone(&rt->u.dst);

	/* Netfilter gets the whole, not yet fragmented skb. */
	err = NF_HOOK(PF_INET, NF_IP_LOCAL_OUT, skb, NULL,
		      skb->dst->dev, dst_output);
	if (err) {
		if (err > 0)
			err = inet->recverr ? net_xmit_errno(err) : 0;
		if (err)
			goto error;
	}

out:
	inet->cork.flags &= ~IPCORK_OPT;
	if (inet->cork.rt) {
		ip_rt_put(inet->cork.rt);
		inet->cork.rt = NULL;
	}
	return err;

error:
	IP_INC_STATS(IpOutDiscards);
	goto out;
}
/*
 *	Throw away all pending data on the socket.
 */
void ip_flush_pending_frames(struct sock *sk)
{
	struct inet_opt *inet = inet_sk(sk);
	struct sk_buff *skb;

	while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL)
		kfree_skb(skb);

	inet->cork.flags &= ~IPCORK_OPT;
	if (inet->cork.opt) {
		kfree(inet->cork.opt);
		inet->cork.opt = NULL;
	}
	if (inet->cork.rt) {
		ip_rt_put(inet->cork.rt);
		inet->cork.rt = NULL;
	}
}
/*
 *	Fetch data from kernel space and fill in checksum if needed.
 */
static int ip_reply_glue_bits(void *dptr, char *to, int offset,
			      int len, int odd, struct sk_buff *skb)
{
	unsigned int csum;

	csum = csum_partial_copy_nocheck(dptr+offset, to, len, 0);
	skb->csum = csum_block_add(skb->csum, csum, odd);
	return 0;
}
/*
 *	Generic function to send a packet as reply to another packet.
 *	Used to send TCP resets so far.  ICMP should use this function too.
 *
 *	Should run single threaded per socket because it uses the sock
 *	structure to pass arguments.
 *
 *	LATER: switch from ip_build_xmit to ip_append_*
 */
void ip_send_reply(struct sock *sk, struct sk_buff *skb, struct ip_reply_arg *arg,
		   unsigned int len)
{
	struct inet_opt *inet = inet_sk(sk);
	struct {
		struct ip_options	opt;
		char			data[40];
	} replyopts;
	struct ipcm_cookie ipc;
	u32 daddr;
	struct rtable *rt = (struct rtable *)skb->dst;

	if (ip_options_echo(&replyopts.opt, skb))
		return;

	daddr = ipc.addr = rt->rt_src;
	ipc.opt = NULL;

	if (replyopts.opt.optlen) {
		ipc.opt = &replyopts.opt;

		if (ipc.opt->srr)
			daddr = replyopts.opt.faddr;
	}

	{
		struct flowi fl = { .nl_u = { .ip4_u =
					      { .daddr = daddr,
						.saddr = rt->rt_spec_dst,
						.tos = RT_TOS(skb->nh.iph->tos) } },
				    /* Not quite clean, but right. */
				    .uli_u = { .ports =
					       { .sport = skb->h.th->dest,
						 .dport = skb->h.th->source } },
				    .proto = sk->sk_protocol };
		if (ip_route_output_key(&rt, &fl))
			return;
	}

	/* And let IP do all the hard work.

	   This chunk is not reentrant, hence the spinlock.
	   Note that it relies on the fact that this function is called
	   with BH disabled locally and that sk cannot already be spinlocked.
	 */
	bh_lock_sock(sk);
	inet->tos = skb->nh.iph->tos;
	sk->sk_priority = skb->priority;
	sk->sk_protocol = skb->nh.iph->protocol;
	ip_append_data(sk, ip_reply_glue_bits, arg->iov->iov_base, len, 0,
		       &ipc, rt, MSG_DONTWAIT);
	if ((skb = skb_peek(&sk->sk_write_queue)) != NULL) {
		if (arg->csumoffset >= 0)
			*((u16 *)skb->h.raw + arg->csumoffset) = csum_fold(csum_add(skb->csum, arg->csum));
		skb->ip_summed = CHECKSUM_NONE;
		ip_push_pending_frames(sk);
	}

	bh_unlock_sock(sk);

	ip_rt_put(rt);
}
/*
 *	IP protocol layer initialiser
 */

static struct packet_type ip_packet_type =
{
	.type = __constant_htons(ETH_P_IP),
	.dev = NULL,	/* All devices */
	.func = ip_rcv,
	.data = (void*)1,
};

/*
 *	IP registers the packet type and then calls the subprotocol initialisers
 */

void __init ip_init(void)
{
	dev_add_pack(&ip_packet_type);

	ip_rt_init();
	inet_initpeers();

#ifdef CONFIG_IP_MULTICAST
	igmp_mc_proc_init();
#endif
}