/*
 *	IPv6 output functions
 *	Linux INET6 implementation
 *
 *	Authors:
 *	Pedro Roque		<roque@di.fc.ul.pt>
 *
 *	Based on linux/net/ipv4/ip_output.c
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 *	Changes:
 *	A.N.Kuznetsov	:	arithmetic in fragmentation.
 *				extension headers are implemented.
 *				route changes now work.
 *				ip6_forward does not confuse sniffers.
 *
 *	H. von Brand	:	Added missing #include <linux/string.h>
 *	Imran Patel	:	frag id should be in NBO
 *	Kazunori MIYAZAWA @USAGI
 *			:	add ip6_append_data and related functions
 */
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/socket.h>
#include <linux/net.h>
#include <linux/netdevice.h>
#include <linux/if_arp.h>
#include <linux/in6.h>
#include <linux/tcp.h>
#include <linux/route.h>
#include <linux/module.h>
#include <linux/slab.h>

#include <linux/netfilter.h>
#include <linux/netfilter_ipv6.h>

#include <net/ndisc.h>
#include <net/protocol.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>
#include <net/rawv6.h>

#include <net/checksum.h>
#include <linux/mroute6.h>
static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *));

int __ip6_local_out(struct sk_buff *skb)
{
	int len;

	len = skb->len - sizeof(struct ipv6hdr);
	if (len > IPV6_MAXPLEN)
		len = 0;
	ipv6_hdr(skb)->payload_len = htons(len);

	return nf_hook(NFPROTO_IPV6, NF_INET_LOCAL_OUT, skb, NULL,
		       skb_dst(skb)->dev, dst_output);
}
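/*
 * Note: payload_len is a 16-bit field, so payloads longer than
 * IPV6_MAXPLEN (65535) are encoded as 0 above, the value used by
 * jumbograms (RFC 2675).
 */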
int ip6_local_out(struct sk_buff *skb)
{
	int err;

	err = __ip6_local_out(skb);
	if (likely(err == 1))
		err = dst_output(skb);

	return err;
}
EXPORT_SYMBOL_GPL(ip6_local_out);
/* dev_loopback_xmit for use with netfilter. */
static int ip6_dev_loopback_xmit(struct sk_buff *newskb)
{
	skb_reset_mac_header(newskb);
	__skb_pull(newskb, skb_network_offset(newskb));
	newskb->pkt_type = PACKET_LOOPBACK;
	newskb->ip_summed = CHECKSUM_UNNECESSARY;
	WARN_ON(!skb_dst(newskb));

	netif_rx_ni(newskb);
	return 0;
}
static int ip6_finish_output2(struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct net_device *dev = dst->dev;

	skb->protocol = htons(ETH_P_IPV6);
	skb->dev = dev;

	if (ipv6_addr_is_multicast(&ipv6_hdr(skb)->daddr)) {
		struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));

		if (!(dev->flags & IFF_LOOPBACK) && sk_mc_loop(skb->sk) &&
		    ((mroute6_socket(dev_net(dev), skb) &&
		     !(IP6CB(skb)->flags & IP6SKB_FORWARDED)) ||
		     ipv6_chk_mcast_addr(dev, &ipv6_hdr(skb)->daddr,
					 &ipv6_hdr(skb)->saddr))) {
			struct sk_buff *newskb = skb_clone(skb, GFP_ATOMIC);

			/* Do not check for IFF_ALLMULTI; multicast routing
			   is not supported in any case.
			 */
			if (newskb)
				NF_HOOK(NFPROTO_IPV6, NF_INET_POST_ROUTING,
					newskb, NULL, newskb->dev,
					ip6_dev_loopback_xmit);

			if (ipv6_hdr(skb)->hop_limit == 0) {
				IP6_INC_STATS(dev_net(dev), idev,
					      IPSTATS_MIB_OUTDISCARDS);
				kfree_skb(skb);
				return 0;
			}
		}

		IP6_UPD_PO_STATS(dev_net(dev), idev, IPSTATS_MIB_OUTMCAST,
				 skb->len);
	}

	/* Use the cached link-layer header if the route has one; otherwise
	 * hand the packet to the neighbour entry for resolution. */
	if (dst->hh)
		return neigh_hh_output(dst->hh, skb);
	else if (dst->neighbour)
		return dst->neighbour->output(skb);

	IP6_INC_STATS_BH(dev_net(dst->dev),
			 ip6_dst_idev(dst), IPSTATS_MIB_OUTNOROUTES);
	kfree_skb(skb);
	return -EINVAL;
}
static inline int ip6_skb_dst_mtu(struct sk_buff *skb)
{
	struct ipv6_pinfo *np = skb->sk ? inet6_sk(skb->sk) : NULL;

	return (np && np->pmtudisc == IPV6_PMTUDISC_PROBE) ?
	       skb_dst(skb)->dev->mtu : dst_mtu(skb_dst(skb));
}
static int ip6_finish_output(struct sk_buff *skb)
{
	if ((skb->len > ip6_skb_dst_mtu(skb) && !skb_is_gso(skb)) ||
	    dst_allfrag(skb_dst(skb)))
		return ip6_fragment(skb, ip6_finish_output2);
	else
		return ip6_finish_output2(skb);
}
int ip6_output(struct sk_buff *skb)
{
	struct net_device *dev = skb_dst(skb)->dev;
	struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));

	if (unlikely(idev->cnf.disable_ipv6)) {
		IP6_INC_STATS(dev_net(dev), idev,
			      IPSTATS_MIB_OUTDISCARDS);
		kfree_skb(skb);
		return 0;
	}

	return NF_HOOK_COND(NFPROTO_IPV6, NF_INET_POST_ROUTING, skb, NULL, dev,
			    ip6_finish_output,
			    !(IP6CB(skb)->flags & IP6SKB_REROUTED));
}
/*
 *	xmit an sk_buff (used by TCP, SCTP and DCCP)
 */

int ip6_xmit(struct sock *sk, struct sk_buff *skb, struct flowi *fl,
	     struct ipv6_txoptions *opt)
{
	struct net *net = sock_net(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct in6_addr *first_hop = &fl->fl6_dst;
	struct dst_entry *dst = skb_dst(skb);
	struct ipv6hdr *hdr;
	u8  proto = fl->proto;
	int seg_len = skb->len;
	int hlimit = -1;
	int tclass = 0;
	u32 mtu;

	if (opt) {
		unsigned int head_room;

		/* First: exthdrs may take lots of space (~8K for now)
		   MAX_HEADER is not enough.
		 */
		head_room = opt->opt_nflen + opt->opt_flen;
		seg_len += head_room;
		head_room += sizeof(struct ipv6hdr) + LL_RESERVED_SPACE(dst->dev);

		if (skb_headroom(skb) < head_room) {
			struct sk_buff *skb2 = skb_realloc_headroom(skb, head_room);
			if (skb2 == NULL) {
				IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
					      IPSTATS_MIB_OUTDISCARDS);
				kfree_skb(skb);
				return -ENOBUFS;
			}
			kfree_skb(skb);
			skb = skb2;
			skb_set_owner_w(skb, sk);
		}
		if (opt->opt_flen)
			ipv6_push_frag_opts(skb, opt, &proto);
		if (opt->opt_nflen)
			ipv6_push_nfrag_opts(skb, opt, &proto, &first_hop);
	}

	skb_push(skb, sizeof(struct ipv6hdr));
	skb_reset_network_header(skb);
	hdr = ipv6_hdr(skb);

	/*
	 *	Fill in the IPv6 header
	 */
	if (np) {
		tclass = np->tclass;
		hlimit = np->hop_limit;
	}
	if (hlimit < 0)
		hlimit = ip6_dst_hoplimit(dst);

	*(__be32 *)hdr = htonl(0x60000000 | (tclass << 20)) | fl->fl6_flowlabel;

	hdr->payload_len = htons(seg_len);
	hdr->nexthdr = proto;
	hdr->hop_limit = hlimit;

	ipv6_addr_copy(&hdr->saddr, &fl->fl6_src);
	ipv6_addr_copy(&hdr->daddr, first_hop);

	skb->priority = sk->sk_priority;
	skb->mark = sk->sk_mark;

	mtu = dst_mtu(dst);
	if ((skb->len <= mtu) || skb->local_df || skb_is_gso(skb)) {
		IP6_UPD_PO_STATS(net, ip6_dst_idev(skb_dst(skb)),
			      IPSTATS_MIB_OUT, skb->len);
		return NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT, skb, NULL,
			       dst->dev, dst_output);
	}

	if (net_ratelimit())
		printk(KERN_DEBUG "IPv6: sending pkt_too_big to self\n");
	skb->dev = dst->dev;
	icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
	IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_FRAGFAILS);
	kfree_skb(skb);
	return -EMSGSIZE;
}

EXPORT_SYMBOL(ip6_xmit);
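/*
 * The first 32 bits of the IPv6 header are built in one word, both above in
 * ip6_xmit() and later in ip6_push_pending_frames(): 0x60000000 supplies
 * the version (6) in the top four bits, the traffic class is shifted into
 * bits 27-20, and the 20-bit flow label (already in network byte order)
 * fills the rest.
 */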
/*
 *	To avoid extra problems ND packets are sent through this
 *	routine. It's code duplication but I really want to avoid
 *	extra checks since ipv6_build_header is used by TCP (which
 *	is for us performance critical).
 */

int ip6_nd_hdr(struct sock *sk, struct sk_buff *skb, struct net_device *dev,
	       const struct in6_addr *saddr, const struct in6_addr *daddr,
	       int proto, int len)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct ipv6hdr *hdr;
	int totlen;

	skb->protocol = htons(ETH_P_IPV6);
	skb->dev = dev;

	totlen = len + sizeof(struct ipv6hdr);

	skb_reset_network_header(skb);
	skb_put(skb, sizeof(struct ipv6hdr));
	hdr = ipv6_hdr(skb);

	*(__be32 *)hdr = htonl(0x60000000);

	hdr->payload_len = htons(len);
	hdr->nexthdr = proto;
	hdr->hop_limit = np->hop_limit;

	ipv6_addr_copy(&hdr->saddr, saddr);
	ipv6_addr_copy(&hdr->daddr, daddr);

	return 0;
}
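/*
 * Packets carrying an IPv6 Router Alert hop-by-hop option are handed to the
 * raw sockets registered on ip6_ra_chain; the 16-bit option value (for
 * example 0 for MLD) is used as the selector 'sel' below.
 */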
static int ip6_call_ra_chain(struct sk_buff *skb, int sel)
{
	struct ip6_ra_chain *ra;
	struct sock *last = NULL;

	read_lock(&ip6_ra_lock);
	for (ra = ip6_ra_chain; ra; ra = ra->next) {
		struct sock *sk = ra->sk;
		if (sk && ra->sel == sel &&
		    (!sk->sk_bound_dev_if ||
		     sk->sk_bound_dev_if == skb->dev->ifindex)) {
			if (last) {
				struct sk_buff *skb2 = skb_clone(skb, GFP_ATOMIC);
				if (skb2)
					rawv6_rcv(last, skb2);
			}
			last = sk;
		}
	}

	if (last) {
		rawv6_rcv(last, skb);
		read_unlock(&ip6_ra_lock);
		return 1;
	}
	read_unlock(&ip6_ra_lock);
	return 0;
}
static int ip6_forward_proxy_check(struct sk_buff *skb)
{
	struct ipv6hdr *hdr = ipv6_hdr(skb);
	u8 nexthdr = hdr->nexthdr;
	int offset;

	if (ipv6_ext_hdr(nexthdr)) {
		offset = ipv6_skip_exthdr(skb, sizeof(*hdr), &nexthdr);
		if (offset < 0)
			return 0;
	} else
		offset = sizeof(struct ipv6hdr);

	if (nexthdr == IPPROTO_ICMPV6) {
		struct icmp6hdr *icmp6;

		if (!pskb_may_pull(skb, (skb_network_header(skb) +
					 offset + 1 - skb->data)))
			return 0;

		icmp6 = (struct icmp6hdr *)(skb_network_header(skb) + offset);

		switch (icmp6->icmp6_type) {
		case NDISC_ROUTER_SOLICITATION:
		case NDISC_ROUTER_ADVERTISEMENT:
		case NDISC_NEIGHBOUR_SOLICITATION:
		case NDISC_NEIGHBOUR_ADVERTISEMENT:
			/* For reaction involving unicast neighbor discovery
			 * message destined to the proxied address, pass it to
			 * the input function.
			 */
			return 1;
		}
	}

	/*
	 * The proxying router can't forward traffic sent to a link-local
	 * address, so signal the sender and discard the packet. This
	 * behavior is clarified by the MIPv6 specification.
	 */
	if (ipv6_addr_type(&hdr->daddr) & IPV6_ADDR_LINKLOCAL) {
		dst_link_failure(skb);
		return -1;
	}

	return 0;
}
static inline int ip6_forward_finish(struct sk_buff *skb)
{
	return dst_output(skb);
}
int ip6_forward(struct sk_buff *skb)
{
	struct dst_entry *dst = skb_dst(skb);
	struct ipv6hdr *hdr = ipv6_hdr(skb);
	struct inet6_skb_parm *opt = IP6CB(skb);
	struct net *net = dev_net(dst->dev);
	u32 mtu;

	if (net->ipv6.devconf_all->forwarding == 0)
		goto error;

	if (skb_warn_if_lro(skb))
		goto drop;

	if (!xfrm6_policy_check(NULL, XFRM_POLICY_FWD, skb)) {
		IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_INDISCARDS);
		goto drop;
	}

	skb_forward_csum(skb);

	/*
	 *	We DO NOT make any processing on
	 *	RA packets, pushing them to user level AS IS
	 *	without any warranty that the application will be able
	 *	to interpret them. The reason is that we
	 *	cannot make anything clever here.
	 *
	 *	We are not end-node, so that if packet contains
	 *	AH/ESP, we cannot make anything.
	 *	Defragmentation also would be a mistake; RA packets
	 *	cannot be fragmented, because there is no warranty
	 *	that different fragments will go along one path. --ANK
	 */
	if (opt->ra) {
		u8 *ptr = skb_network_header(skb) + opt->ra;
		if (ip6_call_ra_chain(skb, (ptr[2]<<8) + ptr[3]))
			return 0;
	}

	/*
	 *	check and decrement ttl
	 */
	if (hdr->hop_limit <= 1) {
		/* Force OUTPUT device used as source address */
		skb->dev = dst->dev;
		icmpv6_send(skb, ICMPV6_TIME_EXCEED, ICMPV6_EXC_HOPLIMIT, 0);
		IP6_INC_STATS_BH(net,
				 ip6_dst_idev(dst), IPSTATS_MIB_INHDRERRORS);

		kfree_skb(skb);
		return -ETIMEDOUT;
	}

	/* XXX: idev->cnf.proxy_ndp? */
	if (net->ipv6.devconf_all->proxy_ndp &&
	    pneigh_lookup(&nd_tbl, net, &hdr->daddr, skb->dev, 0)) {
		int proxied = ip6_forward_proxy_check(skb);
		if (proxied > 0)
			return ip6_input(skb);
		else if (proxied < 0) {
			IP6_INC_STATS(net, ip6_dst_idev(dst),
				      IPSTATS_MIB_INDISCARDS);
			goto drop;
		}
	}

	if (!xfrm6_route_forward(skb)) {
		IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_INDISCARDS);
		goto drop;
	}
	dst = skb_dst(skb);

	/* IPv6 specs say nothing about it, but it is clear that we cannot
	   send redirects to source routed frames.
	   We don't send redirects to frames decapsulated from IPsec.
	 */
	if (skb->dev == dst->dev && dst->neighbour && opt->srcrt == 0 &&
	    !skb_sec_path(skb)) {
		struct in6_addr *target = NULL;
		struct rt6_info *rt;
		struct neighbour *n = dst->neighbour;

		/*
		 *	incoming and outgoing devices are the same
		 *	send a redirect.
		 */

		rt = (struct rt6_info *) dst;
		if ((rt->rt6i_flags & RTF_GATEWAY))
			target = (struct in6_addr *)&n->primary_key;
		else
			target = &hdr->daddr;

		/* Limit redirects both by destination (here)
		   and by source (inside ndisc_send_redirect)
		 */
		if (xrlim_allow(dst, 1*HZ))
			ndisc_send_redirect(skb, n, target);
	} else {
		int addrtype = ipv6_addr_type(&hdr->saddr);

		/* This check is security critical. */
		if (addrtype == IPV6_ADDR_ANY ||
		    addrtype & (IPV6_ADDR_MULTICAST | IPV6_ADDR_LOOPBACK))
			goto error;
		if (addrtype & IPV6_ADDR_LINKLOCAL) {
			icmpv6_send(skb, ICMPV6_DEST_UNREACH,
				    ICMPV6_NOT_NEIGHBOUR, 0);
			goto error;
		}
	}

	mtu = dst_mtu(dst);
	if (mtu < IPV6_MIN_MTU)
		mtu = IPV6_MIN_MTU;

	if (skb->len > mtu && !skb_is_gso(skb)) {
		/* Again, force OUTPUT device used as source address */
		skb->dev = dst->dev;
		icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
		IP6_INC_STATS_BH(net,
				 ip6_dst_idev(dst), IPSTATS_MIB_INTOOBIGERRORS);
		IP6_INC_STATS_BH(net,
				 ip6_dst_idev(dst), IPSTATS_MIB_FRAGFAILS);
		kfree_skb(skb);
		return -EMSGSIZE;
	}

	if (skb_cow(skb, dst->dev->hard_header_len)) {
		IP6_INC_STATS(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTDISCARDS);
		goto drop;
	}

	hdr = ipv6_hdr(skb);

	/* Mangling hops number delayed to point after skb COW */

	hdr->hop_limit--;

	IP6_INC_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTFORWDATAGRAMS);
	return NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD, skb, skb->dev, dst->dev,
		       ip6_forward_finish);

error:
	IP6_INC_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_INADDRERRORS);
drop:
	kfree_skb(skb);
	return -EINVAL;
}
static void ip6_copy_metadata(struct sk_buff *to, struct sk_buff *from)
{
	to->pkt_type = from->pkt_type;
	to->priority = from->priority;
	to->protocol = from->protocol;
	skb_dst_drop(to);
	skb_dst_set(to, dst_clone(skb_dst(from)));
	to->dev = from->dev;
	to->mark = from->mark;

#ifdef CONFIG_NET_SCHED
	to->tc_index = from->tc_index;
#endif
	nf_copy(to, from);
#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \
    defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE)
	to->nf_trace = from->nf_trace;
#endif
	skb_copy_secmark(to, from);
}
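/*
 * ip6_find_1stfragopt() walks the extension header chain to find where a
 * fragment header must be inserted.  Per RFC 2460 only the per-fragment
 * headers stay in the unfragmentable part: hop-by-hop options, a routing
 * header, and destination options that precede a routing header.  The
 * returned offset is the length of that unfragmentable part; *nexthdr
 * points at the "next header" byte to be rewritten to NEXTHDR_FRAGMENT.
 */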
int ip6_find_1stfragopt(struct sk_buff *skb, u8 **nexthdr)
{
	u16 offset = sizeof(struct ipv6hdr);
	struct ipv6_opt_hdr *exthdr =
				(struct ipv6_opt_hdr *)(ipv6_hdr(skb) + 1);
	unsigned int packet_len = skb->tail - skb->network_header;
	int found_rhdr = 0;

	*nexthdr = &ipv6_hdr(skb)->nexthdr;

	while (offset + 1 <= packet_len) {

		switch (**nexthdr) {

		case NEXTHDR_HOP:
			break;
		case NEXTHDR_ROUTING:
			found_rhdr = 1;
			break;
		case NEXTHDR_DEST:
#if defined(CONFIG_IPV6_MIP6) || defined(CONFIG_IPV6_MIP6_MODULE)
			if (ipv6_find_tlv(skb, offset, IPV6_TLV_HAO) >= 0)
				break;
#endif
			if (found_rhdr)
				return offset;
			break;
		default:
			return offset;
		}

		offset += ipv6_optlen(exthdr);
		*nexthdr = &exthdr->nexthdr;
		exthdr = (struct ipv6_opt_hdr *)(skb_network_header(skb) +
						 offset);
	}

	return offset;
}
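/*
 * ip6_fragment() below has two paths.  The fast path reuses the skbs
 * already chained on frag_list when every fragment but the last is a
 * multiple of 8 bytes, fits the MTU and has room for the headers; the
 * slow path allocates fresh skbs and copies the payload.  In both cases
 * every non-final fragment carries a payload that is a multiple of 8
 * bytes (e.g. 1448 bytes of a 1452-byte budget), so fragment offsets
 * stay valid.
 */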
static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
{
	struct sk_buff *frag;
	struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);
	struct ipv6_pinfo *np = skb->sk ? inet6_sk(skb->sk) : NULL;
	struct ipv6hdr *tmp_hdr;
	struct frag_hdr *fh;
	unsigned int mtu, hlen, left, len;
	__be32 frag_id = 0;
	int ptr, offset = 0, err = 0;
	u8 *prevhdr, nexthdr = 0;
	struct net *net = dev_net(skb_dst(skb)->dev);

	hlen = ip6_find_1stfragopt(skb, &prevhdr);
	nexthdr = *prevhdr;

	mtu = ip6_skb_dst_mtu(skb);

	/* We must not fragment if the socket is set to force MTU discovery
	 * or if the skb is not generated by a local socket.
	 */
	if (!skb->local_df && skb->len > mtu) {
		skb->dev = skb_dst(skb)->dev;
		icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
		IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
			      IPSTATS_MIB_FRAGFAILS);
		kfree_skb(skb);
		return -EMSGSIZE;
	}

	if (np && np->frag_size < mtu) {
		if (np->frag_size)
			mtu = np->frag_size;
	}
	/* mtu is now the payload space left per fragment. */
	mtu -= hlen + sizeof(struct frag_hdr);

	if (skb_has_frag_list(skb)) {
		int first_len = skb_pagelen(skb);
		struct sk_buff *frag2;

		if (first_len - hlen > mtu ||
		    ((first_len - hlen) & 7) ||
		    skb_cloned(skb))
			goto slow_path;

		skb_walk_frags(skb, frag) {
			/* Correct geometry. */
			if (frag->len > mtu ||
			    ((frag->len & 7) && frag->next) ||
			    skb_headroom(frag) < hlen)
				goto slow_path_clean;

			/* Partially cloned skb? */
			if (skb_shared(frag))
				goto slow_path_clean;

			if (skb->sk) {
				frag->sk = skb->sk;
				frag->destructor = sock_wfree;
			}
			skb->truesize -= frag->truesize;
		}

		err = 0;
		offset = 0;
		frag = skb_shinfo(skb)->frag_list;
		skb_frag_list_init(skb);
		/* BUILD HEADER */

		*prevhdr = NEXTHDR_FRAGMENT;
		tmp_hdr = kmemdup(skb_network_header(skb), hlen, GFP_ATOMIC);
		if (!tmp_hdr) {
			IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
				      IPSTATS_MIB_FRAGFAILS);
			return -ENOMEM;
		}

		__skb_pull(skb, hlen);
		fh = (struct frag_hdr *)__skb_push(skb, sizeof(struct frag_hdr));
		__skb_push(skb, hlen);
		skb_reset_network_header(skb);
		memcpy(skb_network_header(skb), tmp_hdr, hlen);

		ipv6_select_ident(fh);
		fh->nexthdr = nexthdr;
		fh->reserved = 0;
		fh->frag_off = htons(IP6_MF);
		frag_id = fh->identification;

		first_len = skb_pagelen(skb);
		skb->data_len = first_len - skb_headlen(skb);
		skb->len = first_len;
		ipv6_hdr(skb)->payload_len = htons(first_len -
						   sizeof(struct ipv6hdr));

		dst_hold(&rt->dst);

		for (;;) {
			/* Prepare header of the next frame,
			 * before previous one went down. */
			if (frag) {
				frag->ip_summed = CHECKSUM_NONE;
				skb_reset_transport_header(frag);
				fh = (struct frag_hdr *)__skb_push(frag, sizeof(struct frag_hdr));
				__skb_push(frag, hlen);
				skb_reset_network_header(frag);
				memcpy(skb_network_header(frag), tmp_hdr,
				       hlen);
				offset += skb->len - hlen - sizeof(struct frag_hdr);
				fh->nexthdr = nexthdr;
				fh->reserved = 0;
				fh->frag_off = htons(offset);
				if (frag->next != NULL)
					fh->frag_off |= htons(IP6_MF);
				fh->identification = frag_id;
				ipv6_hdr(frag)->payload_len =
						htons(frag->len -
						      sizeof(struct ipv6hdr));
				ip6_copy_metadata(frag, skb);
			}

			err = output(skb);
			if (!err)
				IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
					      IPSTATS_MIB_FRAGCREATES);

			if (err || !frag)
				break;

			skb = frag;
			frag = skb->next;
			skb->next = NULL;
		}

		kfree(tmp_hdr);

		if (err == 0) {
			IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
				      IPSTATS_MIB_FRAGOKS);
			dst_release(&rt->dst);
			return 0;
		}

		while (frag) {
			skb = frag->next;
			kfree_skb(frag);
			frag = skb;
		}

		IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
			      IPSTATS_MIB_FRAGFAILS);
		dst_release(&rt->dst);
		return err;

slow_path_clean:
		skb_walk_frags(skb, frag2) {
			if (frag2 == frag)
				break;
			frag2->sk = NULL;
			frag2->destructor = NULL;
			skb->truesize += frag2->truesize;
		}
	}

slow_path:
	left = skb->len - hlen;		/* Space per frame */
	ptr = hlen;			/* Where to start from */

	/*
	 *	Fragment the datagram.
	 */

	*prevhdr = NEXTHDR_FRAGMENT;

	/*
	 *	Keep copying data until we run out.
	 */
	while (left > 0) {
		len = left;
		/* IF: it doesn't fit, use 'mtu' - the data space left */
		if (len > mtu)
			len = mtu;
		/* IF: we are not sending up to and including the packet end
		   then align the next start on an eight byte boundary */
		if (len < left)
			len &= ~7;

		/*
		 *	Allocate buffer.
		 */

		if ((frag = alloc_skb(len + hlen + sizeof(struct frag_hdr) +
				      LL_ALLOCATED_SPACE(rt->dst.dev),
				      GFP_ATOMIC)) == NULL) {
			NETDEBUG(KERN_INFO "IPv6: frag: no memory for new fragment!\n");
			IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
				      IPSTATS_MIB_FRAGFAILS);
			err = -ENOMEM;
			goto fail;
		}

		/*
		 *	Set up data on packet
		 */

		ip6_copy_metadata(frag, skb);
		skb_reserve(frag, LL_RESERVED_SPACE(rt->dst.dev));
		skb_put(frag, len + hlen + sizeof(struct frag_hdr));
		skb_reset_network_header(frag);
		fh = (struct frag_hdr *)(skb_network_header(frag) + hlen);
		frag->transport_header = (frag->network_header + hlen +
					  sizeof(struct frag_hdr));

		/*
		 *	Charge the memory for the fragment to any owner
		 *	it might possess
		 */
		if (skb->sk)
			skb_set_owner_w(frag, skb->sk);

		/*
		 *	Copy the packet header into the new buffer.
		 */
		skb_copy_from_linear_data(skb, skb_network_header(frag), hlen);

		/*
		 *	Build fragment header.
		 */
		fh->nexthdr = nexthdr;
		fh->reserved = 0;
		if (!frag_id) {
			ipv6_select_ident(fh);
			frag_id = fh->identification;
		} else
			fh->identification = frag_id;

		/*
		 *	Copy a block of the IP datagram.
		 */
		if (skb_copy_bits(skb, ptr, skb_transport_header(frag), len))
			BUG();
		left -= len;

		fh->frag_off = htons(offset);
		if (left > 0)
			fh->frag_off |= htons(IP6_MF);
		ipv6_hdr(frag)->payload_len = htons(frag->len -
						    sizeof(struct ipv6hdr));

		ptr += len;
		offset += len;

		/*
		 *	Put this fragment into the sending queue.
		 */
		err = output(frag);
		if (err)
			goto fail;

		IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
			      IPSTATS_MIB_FRAGCREATES);
	}
	IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
		      IPSTATS_MIB_FRAGOKS);
	kfree_skb(skb);
	return err;

fail:
	IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
		      IPSTATS_MIB_FRAGFAILS);
	kfree_skb(skb);
	return err;
}
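/*
 * Note that all fragments of one packet share the Fragment Identification
 * value: the first fragment obtains it from ipv6_select_ident() and the
 * paths above propagate it to every later fragment via frag_id.
 */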
static inline int ip6_rt_check(struct rt6key *rt_key,
			       struct in6_addr *fl_addr,
			       struct in6_addr *addr_cache)
{
	return (rt_key->plen != 128 || !ipv6_addr_equal(fl_addr, &rt_key->addr)) &&
		(addr_cache == NULL || !ipv6_addr_equal(fl_addr, addr_cache));
}
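/*
 * ip6_rt_check() above returns true ("stale") unless the cached route is a
 * host route (plen == 128) to exactly fl_addr, or the saved address cache
 * still matches the flow's address; ip6_sk_dst_check() below uses it to
 * decide whether the socket's cached dst can be reused.
 */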
static struct dst_entry *ip6_sk_dst_check(struct sock *sk,
					  struct dst_entry *dst,
					  struct flowi *fl)
{
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct rt6_info *rt = (struct rt6_info *)dst;

	if (!dst)
		goto out;

	/* Yes, checking route validity in the unconnected
	 * case is not very simple. Take into account
	 * that we do not support routing by source, TOS,
	 * and MSG_DONTROUTE		--ANK (980726)
	 *
	 * 1. ip6_rt_check(): If route was host route,
	 *    check that cached destination is current.
	 *    If it is network route, we still may
	 *    check its validity using saved pointer
	 *    to the last used address: daddr_cache.
	 *    We do not want to save whole address now,
	 *    (because main consumer of this service
	 *    is tcp, which does not have this problem),
	 *    so that the last trick works only on connected
	 *    sockets.
	 * 2. oif also should be the same.
	 */
	if (ip6_rt_check(&rt->rt6i_dst, &fl->fl6_dst, np->daddr_cache) ||
#ifdef CONFIG_IPV6_SUBTREES
	    ip6_rt_check(&rt->rt6i_src, &fl->fl6_src, np->saddr_cache) ||
#endif
	    (fl->oif && fl->oif != dst->dev->ifindex)) {
		dst_release(dst);
		dst = NULL;
	}

out:
	return dst;
}
static int ip6_dst_lookup_tail(struct sock *sk,
			       struct dst_entry **dst, struct flowi *fl)
{
	int err;
	struct net *net = sock_net(sk);

	if (*dst == NULL)
		*dst = ip6_route_output(net, sk, fl);

	if ((err = (*dst)->error))
		goto out_err_release;

	if (ipv6_addr_any(&fl->fl6_src)) {
		err = ipv6_dev_get_saddr(net, ip6_dst_idev(*dst)->dev,
					 &fl->fl6_dst,
					 sk ? inet6_sk(sk)->srcprefs : 0,
					 &fl->fl6_src);
		if (err)
			goto out_err_release;
	}

#ifdef CONFIG_IPV6_OPTIMISTIC_DAD
	/*
	 * Here if the dst entry we've looked up
	 * has a neighbour entry that is in the INCOMPLETE
	 * state and the src address from the flow is
	 * marked as OPTIMISTIC, we release the found
	 * dst entry and replace it instead with the
	 * dst entry of the nexthop router
	 */
	if ((*dst)->neighbour && !((*dst)->neighbour->nud_state & NUD_VALID)) {
		struct inet6_ifaddr *ifp;
		struct flowi fl_gw;
		int redirect;

		ifp = ipv6_get_ifaddr(net, &fl->fl6_src,
				      (*dst)->dev, 1);

		redirect = (ifp && ifp->flags & IFA_F_OPTIMISTIC);
		if (ifp)
			in6_ifa_put(ifp);

		if (redirect) {
			/*
			 * We need to get the dst entry for the
			 * default router instead
			 */
			dst_release(*dst);
			memcpy(&fl_gw, fl, sizeof(struct flowi));
			memset(&fl_gw.fl6_dst, 0, sizeof(struct in6_addr));
			*dst = ip6_route_output(net, sk, &fl_gw);
			if ((err = (*dst)->error))
				goto out_err_release;
		}
	}
#endif

	return 0;

out_err_release:
	if (err == -ENETUNREACH)
		IP6_INC_STATS_BH(net, NULL, IPSTATS_MIB_OUTNOROUTES);
	dst_release(*dst);
	*dst = NULL;
	return err;
}
/**
 *	ip6_dst_lookup - perform route lookup on flow
 *	@sk: socket which provides route info
 *	@dst: pointer to dst_entry * for result
 *	@fl: flow to lookup
 *
 *	This function performs a route lookup on the given flow.
 *
 *	It returns zero on success, or a standard errno code on error.
 */
int ip6_dst_lookup(struct sock *sk, struct dst_entry **dst, struct flowi *fl)
{
	*dst = NULL;
	return ip6_dst_lookup_tail(sk, dst, fl);
}
EXPORT_SYMBOL_GPL(ip6_dst_lookup);
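/*
 * Usage sketch (error handling elided): callers fill a struct flowi with
 * proto, fl6_dst/fl6_src and oif, call ip6_dst_lookup(sk, &dst, &fl), and
 * on success attach the route to the outgoing skb with skb_dst_set().
 */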
/**
 *	ip6_sk_dst_lookup - perform socket cached route lookup on flow
 *	@sk: socket which provides the dst cache and route info
 *	@dst: pointer to dst_entry * for result
 *	@fl: flow to lookup
 *
 *	This function performs a route lookup on the given flow with the
 *	possibility of using the cached route in the socket if it is valid.
 *	It will take the socket dst lock when operating on the dst cache.
 *	As a result, this function can only be used in process context.
 *
 *	It returns zero on success, or a standard errno code on error.
 */
int ip6_sk_dst_lookup(struct sock *sk, struct dst_entry **dst, struct flowi *fl)
{
	*dst = NULL;
	if (sk) {
		*dst = sk_dst_check(sk, inet6_sk(sk)->dst_cookie);
		*dst = ip6_sk_dst_check(sk, *dst, fl);
	}

	return ip6_dst_lookup_tail(sk, dst, fl);
}
EXPORT_SYMBOL_GPL(ip6_sk_dst_lookup);
static inline int ip6_ufo_append_data(struct sock *sk,
			int getfrag(void *from, char *to, int offset, int len,
			int odd, struct sk_buff *skb),
			void *from, int length, int hh_len, int fragheaderlen,
			int transhdrlen, int mtu, unsigned int flags)

{
	struct sk_buff *skb;
	int err;

	/* There is support for UDP large send offload by network
	 * device, so create one single skb packet containing complete
	 * udp datagram
	 */
	if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL) {
		skb = sock_alloc_send_skb(sk,
			hh_len + fragheaderlen + transhdrlen + 20,
			(flags & MSG_DONTWAIT), &err);
		if (skb == NULL)
			return err;

		/* reserve space for Hardware header */
		skb_reserve(skb, hh_len);

		/* create space for UDP/IP header */
		skb_put(skb, fragheaderlen + transhdrlen);

		/* initialize network header pointer */
		skb_reset_network_header(skb);

		/* initialize protocol header pointer */
		skb->transport_header = skb->network_header + fragheaderlen;

		skb->ip_summed = CHECKSUM_PARTIAL;
		skb->csum = 0;
		sk->sk_sndmsg_off = 0;
	}

	err = skb_append_datato_frags(sk, skb, getfrag, from,
				      (length - transhdrlen));
	if (!err) {
		struct frag_hdr fhdr;

		/* Specify the length of each IPv6 datagram fragment.
		 * It has to be a multiple of 8.
		 */
		skb_shinfo(skb)->gso_size = (mtu - fragheaderlen -
					     sizeof(struct frag_hdr)) & ~7;
		skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
		ipv6_select_ident(&fhdr);
		skb_shinfo(skb)->ip6_frag_id = fhdr.identification;
		__skb_queue_tail(&sk->sk_write_queue, skb);

		return 0;
	}
	/* There is not enough support to do UDP LSO,
	 * so follow the normal path
	 */
	kfree_skb(skb);

	return err;
}
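/*
 * In ip6_ufo_append_data() above the device later segments the single
 * large skb; with a 1500-byte MTU and a 40-byte IPv6 header the chosen
 * gso_size is (1500 - 40 - 8) & ~7 = 1448 payload bytes per fragment.
 */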
static inline struct ipv6_opt_hdr *ip6_opt_dup(struct ipv6_opt_hdr *src,
					       gfp_t gfp)
{
	return src ? kmemdup(src, (src->hdrlen + 1) * 8, gfp) : NULL;
}

static inline struct ipv6_rt_hdr *ip6_rthdr_dup(struct ipv6_rt_hdr *src,
						gfp_t gfp)
{
	return src ? kmemdup(src, (src->hdrlen + 1) * 8, gfp) : NULL;
}
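/*
 * ip6_append_data() below queues payload on sk->sk_write_queue; the caller
 * later either builds and sends the pending packet with
 * ip6_push_pending_frames() or discards it with ip6_flush_pending_frames().
 * UDP, raw and ICMPv6 sockets follow this cork/append/push pattern.
 */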
int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
	int offset, int len, int odd, struct sk_buff *skb),
	void *from, int length, int transhdrlen,
	int hlimit, int tclass, struct ipv6_txoptions *opt, struct flowi *fl,
	struct rt6_info *rt, unsigned int flags, int dontfrag)
{
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct sk_buff *skb;
	unsigned int maxfraglen, fragheaderlen;
	int exthdrlen;
	int hh_len;
	int mtu;
	int copy;
	int err;
	int offset = 0;
	int csummode = CHECKSUM_NONE;

	if (flags&MSG_PROBE)
		return 0;
	if (skb_queue_empty(&sk->sk_write_queue)) {
		/*
		 * setup for corking
		 */
		if (opt) {
			if (WARN_ON(np->cork.opt))
				return -EINVAL;

			np->cork.opt = kmalloc(opt->tot_len, sk->sk_allocation);
			if (unlikely(np->cork.opt == NULL))
				return -ENOBUFS;

			np->cork.opt->tot_len = opt->tot_len;
			np->cork.opt->opt_flen = opt->opt_flen;
			np->cork.opt->opt_nflen = opt->opt_nflen;

			np->cork.opt->dst0opt = ip6_opt_dup(opt->dst0opt,
							    sk->sk_allocation);
			if (opt->dst0opt && !np->cork.opt->dst0opt)
				return -ENOBUFS;

			np->cork.opt->dst1opt = ip6_opt_dup(opt->dst1opt,
							    sk->sk_allocation);
			if (opt->dst1opt && !np->cork.opt->dst1opt)
				return -ENOBUFS;

			np->cork.opt->hopopt = ip6_opt_dup(opt->hopopt,
							   sk->sk_allocation);
			if (opt->hopopt && !np->cork.opt->hopopt)
				return -ENOBUFS;

			np->cork.opt->srcrt = ip6_rthdr_dup(opt->srcrt,
							    sk->sk_allocation);
			if (opt->srcrt && !np->cork.opt->srcrt)
				return -ENOBUFS;

			/* need source address above miyazawa*/
		}
		dst_hold(&rt->dst);
		inet->cork.dst = &rt->dst;
		inet->cork.fl = *fl;
		np->cork.hop_limit = hlimit;
		np->cork.tclass = tclass;
		mtu = np->pmtudisc == IPV6_PMTUDISC_PROBE ?
		      rt->dst.dev->mtu : dst_mtu(rt->dst.path);
		if (np->frag_size < mtu) {
			if (np->frag_size)
				mtu = np->frag_size;
		}
		inet->cork.fragsize = mtu;
		if (dst_allfrag(rt->dst.path))
			inet->cork.flags |= IPCORK_ALLFRAG;
		inet->cork.length = 0;
		sk->sk_sndmsg_page = NULL;
		sk->sk_sndmsg_off = 0;
		exthdrlen = rt->dst.header_len + (opt ? opt->opt_flen : 0) -
			    rt->rt6i_nfheader_len;
		length += exthdrlen;
		transhdrlen += exthdrlen;
	} else {
		rt = (struct rt6_info *)inet->cork.dst;
		fl = &inet->cork.fl;
		opt = np->cork.opt;
		transhdrlen = 0;
		exthdrlen = 0;
		mtu = inet->cork.fragsize;
	}

	hh_len = LL_RESERVED_SPACE(rt->dst.dev);

	fragheaderlen = sizeof(struct ipv6hdr) + rt->rt6i_nfheader_len +
			(opt ? opt->opt_nflen : 0);
	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen - sizeof(struct frag_hdr);

	if (mtu <= sizeof(struct ipv6hdr) + IPV6_MAXPLEN) {
		if (inet->cork.length + length > sizeof(struct ipv6hdr) + IPV6_MAXPLEN - fragheaderlen) {
			ipv6_local_error(sk, EMSGSIZE, fl, mtu-exthdrlen);
			return -EMSGSIZE;
		}
	}

	/*
	 * Let's try using as much space as possible.
	 * Use MTU if total length of the message fits into the MTU.
	 * Otherwise, we need to reserve fragment header and
	 * fragment alignment (= 8-15 octets, in total).
	 *
	 * Note that we may need to "move" the data from the tail
	 * of the buffer to the new fragment when we split
	 * the message.
	 *
	 * FIXME: It may be fragmented into multiple chunks
	 *        at once if non-fragmentable extension headers
	 *        are too large.
	 */

	inet->cork.length += length;
	if (length > mtu) {
		int proto = sk->sk_protocol;
		if (dontfrag && (proto == IPPROTO_UDP || proto == IPPROTO_RAW)){
			ipv6_local_rxpmtu(sk, fl, mtu-exthdrlen);
			return -EMSGSIZE;
		}

		if (proto == IPPROTO_UDP &&
		    (rt->dst.dev->features & NETIF_F_UFO)) {

			err = ip6_ufo_append_data(sk, getfrag, from, length,
						  hh_len, fragheaderlen,
						  transhdrlen, mtu, flags);
			if (err)
				goto error;
			return 0;
		}
	}

	if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL)
		goto alloc_new_skb;

	while (length > 0) {
		/* Check if the remaining data fits into current packet. */
		copy = (inet->cork.length <= mtu && !(inet->cork.flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - skb->len;
		if (copy < length)
			copy = maxfraglen - skb->len;

		if (copy <= 0) {
			char *data;
			unsigned int datalen;
			unsigned int fraglen;
			unsigned int fraggap;
			unsigned int alloclen;
			struct sk_buff *skb_prev;
alloc_new_skb:
			skb_prev = skb;

			/* There's no room in the current skb */
			if (skb_prev)
				fraggap = skb_prev->len - maxfraglen;
			else
				fraggap = 0;

			/*
			 * If remaining data exceeds the mtu,
			 * we know we need more fragment(s).
			 */
			datalen = length + fraggap;
			if (datalen > (inet->cork.length <= mtu && !(inet->cork.flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - fragheaderlen)
				datalen = maxfraglen - fragheaderlen;

			fraglen = datalen + fragheaderlen;
			if ((flags & MSG_MORE) &&
			    !(rt->dst.dev->features&NETIF_F_SG))
				alloclen = mtu;
			else
				alloclen = datalen + fragheaderlen;

			/*
			 * The last fragment gets additional space at tail.
			 * Note: we overallocate on fragments with MSG_MORE
			 * because we have no idea if we're the last one.
			 */
			if (datalen == length + fraggap)
				alloclen += rt->dst.trailer_len;

			/*
			 * We just reserve space for fragment header.
			 * Note: this may be overallocation if the message
			 * (without MSG_MORE) fits into the MTU.
			 */
			alloclen += sizeof(struct frag_hdr);

			if (transhdrlen) {
				skb = sock_alloc_send_skb(sk,
						alloclen + hh_len,
						(flags & MSG_DONTWAIT), &err);
			} else {
				skb = NULL;
				if (atomic_read(&sk->sk_wmem_alloc) <=
				    2 * sk->sk_sndbuf)
					skb = sock_wmalloc(sk,
							   alloclen + hh_len, 1,
							   sk->sk_allocation);
				if (unlikely(skb == NULL))
					err = -ENOBUFS;
			}
			if (skb == NULL)
				goto error;
			/*
			 *	Fill in the control structures
			 */
			skb->ip_summed = csummode;
			skb->csum = 0;
			/* reserve for fragmentation */
			skb_reserve(skb, hh_len+sizeof(struct frag_hdr));

			/*
			 *	Find where to start putting bytes
			 */
			data = skb_put(skb, fraglen);
			skb_set_network_header(skb, exthdrlen);
			data += fragheaderlen;
			skb->transport_header = (skb->network_header +
						 fragheaderlen);
			if (fraggap) {
				skb->csum = skb_copy_and_csum_bits(
					skb_prev, maxfraglen,
					data + transhdrlen, fraggap, 0);
				skb_prev->csum = csum_sub(skb_prev->csum,
							  skb->csum);
				data += fraggap;
				pskb_trim_unique(skb_prev, maxfraglen);
			}
			copy = datalen - transhdrlen - fraggap;
			if (copy < 0) {
				err = -EINVAL;
				kfree_skb(skb);
				goto error;
			} else if (copy > 0 && getfrag(from, data + transhdrlen, offset, copy, fraggap, skb) < 0) {
				err = -EFAULT;
				kfree_skb(skb);
				goto error;
			}

			offset += copy;
			length -= datalen - fraggap;
			transhdrlen = 0;
			exthdrlen = 0;
			csummode = CHECKSUM_NONE;

			/*
			 * Put the packet on the pending queue
			 */
			__skb_queue_tail(&sk->sk_write_queue, skb);
			continue;
		}

		if (copy > length)
			copy = length;

		if (!(rt->dst.dev->features&NETIF_F_SG)) {
			unsigned int off;

			off = skb->len;
			if (getfrag(from, skb_put(skb, copy),
						offset, copy, off, skb) < 0) {
				__skb_trim(skb, off);
				err = -EFAULT;
				goto error;
			}
		} else {
			int i = skb_shinfo(skb)->nr_frags;
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i-1];
			struct page *page = sk->sk_sndmsg_page;
			int off = sk->sk_sndmsg_off;
			unsigned int left;

			if (page && (left = PAGE_SIZE - off) > 0) {
				if (copy >= left)
					copy = left;
				if (page != frag->page) {
					if (i == MAX_SKB_FRAGS) {
						err = -EMSGSIZE;
						goto error;
					}
					get_page(page);
					skb_fill_page_desc(skb, i, page, sk->sk_sndmsg_off, 0);
					frag = &skb_shinfo(skb)->frags[i];
				}
			} else if (i < MAX_SKB_FRAGS) {
				if (copy > PAGE_SIZE)
					copy = PAGE_SIZE;
				page = alloc_pages(sk->sk_allocation, 0);
				if (page == NULL) {
					err = -ENOMEM;
					goto error;
				}
				sk->sk_sndmsg_page = page;
				sk->sk_sndmsg_off = 0;

				skb_fill_page_desc(skb, i, page, 0, 0);
				frag = &skb_shinfo(skb)->frags[i];
			} else {
				err = -EMSGSIZE;
				goto error;
			}
			if (getfrag(from, page_address(frag->page)+frag->page_offset+frag->size, offset, copy, skb->len, skb) < 0) {
				err = -EFAULT;
				goto error;
			}
			sk->sk_sndmsg_off += copy;
			frag->size += copy;
			skb->len += copy;
			skb->data_len += copy;
			skb->truesize += copy;
			atomic_add(copy, &sk->sk_wmem_alloc);
		}
		offset += copy;
		length -= copy;
	}
	return 0;

error:
	inet->cork.length -= length;
	IP6_INC_STATS(sock_net(sk), rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS);
	return err;
}
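/*
 * A worked example for the fragment bookkeeping in ip6_append_data():
 * with mtu = 1500 and fragheaderlen = 40,
 * maxfraglen = ((1500 - 40) & ~7) + 40 - 8 = 1488, so each queued skb
 * keeps its payload a multiple of 8 bytes and leaves room for the 8-byte
 * fragment header that ip6_fragment() may add on output.
 */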
static void ip6_cork_release(struct inet_sock *inet, struct ipv6_pinfo *np)
{
	if (np->cork.opt) {
		kfree(np->cork.opt->dst0opt);
		kfree(np->cork.opt->dst1opt);
		kfree(np->cork.opt->hopopt);
		kfree(np->cork.opt->srcrt);
		kfree(np->cork.opt);
		np->cork.opt = NULL;
	}

	if (inet->cork.dst) {
		dst_release(inet->cork.dst);
		inet->cork.dst = NULL;
		inet->cork.flags &= ~IPCORK_ALLFRAG;
	}
	memset(&inet->cork.fl, 0, sizeof(inet->cork.fl));
}
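/*
 * ip6_push_pending_frames() below chains the remaining queued skbs onto
 * frag_list of the first one, producing a single packet; if that packet
 * exceeds the path MTU, ip6_fragment() can then use its frag_list fast
 * path instead of copying.
 */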
int ip6_push_pending_frames(struct sock *sk)
{
	struct sk_buff *skb, *tmp_skb;
	struct sk_buff **tail_skb;
	struct in6_addr final_dst_buf, *final_dst = &final_dst_buf;
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct net *net = sock_net(sk);
	struct ipv6hdr *hdr;
	struct ipv6_txoptions *opt = np->cork.opt;
	struct rt6_info *rt = (struct rt6_info *)inet->cork.dst;
	struct flowi *fl = &inet->cork.fl;
	unsigned char proto = fl->proto;
	int err = 0;

	if ((skb = __skb_dequeue(&sk->sk_write_queue)) == NULL)
		goto out;
	tail_skb = &(skb_shinfo(skb)->frag_list);

	/* move skb->data to ip header from ext header */
	if (skb->data < skb_network_header(skb))
		__skb_pull(skb, skb_network_offset(skb));
	while ((tmp_skb = __skb_dequeue(&sk->sk_write_queue)) != NULL) {
		__skb_pull(tmp_skb, skb_network_header_len(skb));
		*tail_skb = tmp_skb;
		tail_skb = &(tmp_skb->next);
		skb->len += tmp_skb->len;
		skb->data_len += tmp_skb->len;
		skb->truesize += tmp_skb->truesize;
		tmp_skb->destructor = NULL;
		tmp_skb->sk = NULL;
	}

	/* Allow local fragmentation. */
	if (np->pmtudisc < IPV6_PMTUDISC_DO)
		skb->local_df = 1;

	ipv6_addr_copy(final_dst, &fl->fl6_dst);
	__skb_pull(skb, skb_network_header_len(skb));
	if (opt && opt->opt_flen)
		ipv6_push_frag_opts(skb, opt, &proto);
	if (opt && opt->opt_nflen)
		ipv6_push_nfrag_opts(skb, opt, &proto, &final_dst);

	skb_push(skb, sizeof(struct ipv6hdr));
	skb_reset_network_header(skb);
	hdr = ipv6_hdr(skb);

	*(__be32 *)hdr = fl->fl6_flowlabel |
		     htonl(0x60000000 | ((int)np->cork.tclass << 20));

	hdr->hop_limit = np->cork.hop_limit;
	hdr->nexthdr = proto;
	ipv6_addr_copy(&hdr->saddr, &fl->fl6_src);
	ipv6_addr_copy(&hdr->daddr, final_dst);

	skb->priority = sk->sk_priority;
	skb->mark = sk->sk_mark;

	skb_dst_set(skb, dst_clone(&rt->dst));
	IP6_UPD_PO_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUT, skb->len);
	if (proto == IPPROTO_ICMPV6) {
		struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));

		ICMP6MSGOUT_INC_STATS_BH(net, idev, icmp6_hdr(skb)->icmp6_type);
		ICMP6_INC_STATS_BH(net, idev, ICMP6_MIB_OUTMSGS);
	}

	err = ip6_local_out(skb);
	if (err) {
		if (err > 0)
			err = net_xmit_errno(err);
		if (err)
			goto error;
	}

out:
	ip6_cork_release(inet, np);
	return err;
error:
	IP6_INC_STATS(net, rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS);
	goto out;
}
void ip6_flush_pending_frames(struct sock *sk)
{
	struct sk_buff *skb;

	while ((skb = __skb_dequeue_tail(&sk->sk_write_queue)) != NULL) {
		if (skb_dst(skb))
			IP6_INC_STATS(sock_net(sk), ip6_dst_idev(skb_dst(skb)),
				      IPSTATS_MIB_OUTDISCARDS);
		kfree_skb(skb);
	}

	ip6_cork_release(inet_sk(sk), inet6_sk(sk));
}