/*
 * ip_vs_xmit.c: various packet transmitters for IPVS
 *
 * Authors:     Wensong Zhang <wensong@linuxvirtualserver.org>
 *              Julian Anastasov <ja@ssi.bg>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Changes:
 *
 * Description of forwarding methods:
 * - all transmitters are called from LOCAL_IN (remote clients) and
 *   LOCAL_OUT (local clients) but for ICMP can be called from FORWARD
 * - not all connections have destination server, for example,
 *   connections in backup server when fwmark is used
 * - bypass connections use daddr from packet
 *
 * LOCAL_OUT rules:
 * - skb->dev is NULL, skb->protocol is not set (both are set in POST_ROUTING)
 * - skb->pkt_type is not set yet
 * - the only place where we can see skb->sk != NULL
 */
#define KMSG_COMPONENT "IPVS"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/tcp.h>			/* for tcphdr */
#include <net/ip.h>
#include <net/tcp.h>			/* for csum_tcpudp_magic */
#include <net/udp.h>
#include <net/icmp.h>			/* for icmp_send */
#include <net/route.h>			/* for ip_route_output */
#include <net/ipv6.h>
#include <net/ip6_route.h>
#include <net/addrconf.h>
#include <linux/icmpv6.h>
#include <linux/netfilter.h>
#include <linux/netfilter_ipv4.h>

#include <net/ip_vs.h>
/*
 *	Destination cache to speed up outgoing route lookup
 */
static inline void
__ip_vs_dst_set(struct ip_vs_dest *dest, u32 rtos, struct dst_entry *dst,
		u32 dst_cookie)
{
	struct dst_entry *old_dst;

	old_dst = dest->dst_cache;
	dest->dst_cache = dst;
	dest->dst_rtos = rtos;
	dest->dst_cookie = dst_cookie;
	dst_release(old_dst);
}
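
/*
 *	Return the cached route if it is still usable for this TOS value.
 *	Obsolete or TOS-mismatched entries are revalidated via the dst's
 *	->check() callback; if that fails the cache entry is dropped and
 *	NULL is returned, otherwise a reference is taken on the route.
 */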
static inline struct dst_entry *
__ip_vs_dst_check(struct ip_vs_dest *dest, u32 rtos)
{
	struct dst_entry *dst = dest->dst_cache;

	if (!dst)
		return NULL;
	if ((dst->obsolete || rtos != dest->dst_rtos) &&
	    dst->ops->check(dst, dest->dst_cookie) == NULL) {
		dest->dst_cache = NULL;
		dst_release(dst);
		return NULL;
	}
	dst_hold(dst);
	return dst;
}
/*
 * Get route to destination or remote server
 * rt_mode: flags, &1=Allow local dest, &2=Allow non-local dest,
 *	    &4=Allow redirect from remote daddr to local
 */
static struct rtable *
__ip_vs_get_out_rt(struct sk_buff *skb, struct ip_vs_dest *dest,
		   __be32 daddr, u32 rtos, int rt_mode)
{
	struct net *net = dev_net(skb_dst(skb)->dev);
	struct rtable *rt;			/* Route to the other host */
	struct rtable *ort;			/* Original route */
	int local;

	if (dest) {
		spin_lock(&dest->dst_lock);
		if (!(rt = (struct rtable *)
		      __ip_vs_dst_check(dest, rtos))) {
			struct flowi fl = {
				.fl4_dst = dest->addr.ip,
				.fl4_tos = rtos,
			};

			if (ip_route_output_key(net, &rt, &fl)) {
				spin_unlock(&dest->dst_lock);
				IP_VS_DBG_RL("ip_route_output error, dest: %pI4\n",
					     &dest->addr.ip);
				return NULL;
			}
			__ip_vs_dst_set(dest, rtos, dst_clone(&rt->dst), 0);
			IP_VS_DBG(10, "new dst %pI4, refcnt=%d, rtos=%X\n",
				  &dest->addr.ip,
				  atomic_read(&rt->dst.__refcnt), rtos);
		}
		spin_unlock(&dest->dst_lock);
	} else {
		struct flowi fl = {
			.fl4_dst = daddr,
			.fl4_tos = rtos,
		};

		if (ip_route_output_key(net, &rt, &fl)) {
			IP_VS_DBG_RL("ip_route_output error, dest: %pI4\n",
				     &daddr);
			return NULL;
		}
	}

	local = rt->rt_flags & RTCF_LOCAL;
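	/* Enforce rt_mode below: bit 0 permits a local destination, bit 1 a
	 * non-local one, and bit 2 allows redirecting a remote daddr to a
	 * local address (i.e. NAT to a local real server). */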
	if (!((local ? 1 : 2) & rt_mode)) {
		IP_VS_DBG_RL("Stopping traffic to %s address, dest: %pI4\n",
			     (rt->rt_flags & RTCF_LOCAL) ?
			     "local":"non-local", &rt->rt_dst);
		ip_rt_put(rt);
		return NULL;
	}
	if (local && !(rt_mode & 4) && !((ort = skb_rtable(skb)) &&
					 ort->rt_flags & RTCF_LOCAL)) {
		IP_VS_DBG_RL("Redirect from non-local address %pI4 to local "
			     "requires NAT method, dest: %pI4\n",
			     &ip_hdr(skb)->daddr, &rt->rt_dst);
		ip_rt_put(rt);
		return NULL;
	}
	if (unlikely(!local && ipv4_is_loopback(ip_hdr(skb)->saddr))) {
		IP_VS_DBG_RL("Stopping traffic from loopback address %pI4 "
			     "to non-local address, dest: %pI4\n",
			     &ip_hdr(skb)->saddr, &rt->rt_dst);
		ip_rt_put(rt);
		return NULL;
	}

	return rt;
}
/* Reroute packet to local IPv4 stack after DNAT */
static int
__ip_vs_reroute_locally(struct sk_buff *skb)
{
	struct rtable *rt = skb_rtable(skb);
	struct net_device *dev = rt->dst.dev;
	struct net *net = dev_net(dev);
	struct iphdr *iph = ip_hdr(skb);
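
	/* Packets that arrived on an input route get a fresh input lookup
	 * for the DNATed daddr; locally generated packets get an output
	 * route instead, which must resolve to a local address. */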
	if (rt_is_input_route(rt)) {
		unsigned long orefdst = skb->_skb_refdst;

		if (ip_route_input(skb, iph->daddr, iph->saddr,
				   iph->tos, skb->dev))
			return 0;
		refdst_drop(orefdst);
	} else {
		struct flowi fl = {
			.fl4_dst = iph->daddr,
			.fl4_src = iph->saddr,
			.fl4_tos = RT_TOS(iph->tos),
			.mark = skb->mark,
		};

		if (ip_route_output_key(net, &rt, &fl))
			return 0;
		if (!(rt->rt_flags & RTCF_LOCAL)) {
			ip_rt_put(rt);
			return 0;
		}
		/* Drop old route. */
		skb_dst_drop(skb);
		skb_dst_set(skb, &rt->dst);
	}
	return 1;
}
#ifdef CONFIG_IP_VS_IPV6
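
/* An IPv6 route counts as local when it is bound to the loopback device. */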
static inline int __ip_vs_is_local_route6(struct rt6_info *rt)
{
	return rt->rt6i_dev && rt->rt6i_dev->flags & IFF_LOOPBACK;
}
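
/*
 *	Look up an IPv6 route to daddr, optionally selecting a source address
 *	(returned in ret_saddr) and resolving xfrm policy when do_xfrm is set.
 */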
static struct dst_entry *
__ip_vs_route_output_v6(struct net *net, struct in6_addr *daddr,
			struct in6_addr *ret_saddr, int do_xfrm)
{
	struct dst_entry *dst;
	struct flowi fl = {
		.fl6_dst = *daddr,
	};

	dst = ip6_route_output(net, NULL, &fl);
	if (dst->error)
		goto out_err;
	if (!ret_saddr)
		return dst;
	if (ipv6_addr_any(&fl.fl6_src) &&
	    ipv6_dev_get_saddr(net, ip6_dst_idev(dst)->dev,
			       &fl.fl6_dst, 0, &fl.fl6_src) < 0)
		goto out_err;
	if (do_xfrm && xfrm_lookup(net, &dst, &fl, NULL, 0) < 0)
		goto out_err;
	ipv6_addr_copy(ret_saddr, &fl.fl6_src);
	return dst;

out_err:
	dst_release(dst);
	IP_VS_DBG_RL("ip6_route_output error, dest: %pI6\n", daddr);
	return NULL;
}
/*
 * Get route to destination or remote server
 * rt_mode: flags, &1=Allow local dest, &2=Allow non-local dest,
 *	    &4=Allow redirect from remote daddr to local
 */
static struct rt6_info *
__ip_vs_get_out_rt_v6(struct sk_buff *skb, struct ip_vs_dest *dest,
		      struct in6_addr *daddr, struct in6_addr *ret_saddr,
		      int do_xfrm, int rt_mode)
{
	struct net *net = dev_net(skb_dst(skb)->dev);
	struct rt6_info *rt;			/* Route to the other host */
	struct rt6_info *ort;			/* Original route */
	struct dst_entry *dst;
	int local;

	if (dest) {
		spin_lock(&dest->dst_lock);
		rt = (struct rt6_info *)__ip_vs_dst_check(dest, 0);
		if (!rt) {
			u32 cookie;

			dst = __ip_vs_route_output_v6(net, &dest->addr.in6,
						      &dest->dst_saddr,
						      do_xfrm);
			if (!dst) {
				spin_unlock(&dest->dst_lock);
				return NULL;
			}
			rt = (struct rt6_info *) dst;
			cookie = rt->rt6i_node ? rt->rt6i_node->fn_sernum : 0;
			__ip_vs_dst_set(dest, 0, dst_clone(&rt->dst), cookie);
			IP_VS_DBG(10, "new dst %pI6, src %pI6, refcnt=%d\n",
				  &dest->addr.in6, &dest->dst_saddr,
				  atomic_read(&rt->dst.__refcnt));
		}
		if (ret_saddr)
			ipv6_addr_copy(ret_saddr, &dest->dst_saddr);
		spin_unlock(&dest->dst_lock);
	} else {
		dst = __ip_vs_route_output_v6(net, daddr, ret_saddr, do_xfrm);
		if (!dst)
			return NULL;
		rt = (struct rt6_info *) dst;
	}

	local = __ip_vs_is_local_route6(rt);
	if (!((local ? 1 : 2) & rt_mode)) {
		IP_VS_DBG_RL("Stopping traffic to %s address, dest: %pI6\n",
			     local ? "local":"non-local", daddr);
		dst_release(&rt->dst);
		return NULL;
	}
	if (local && !(rt_mode & 4) &&
	    !((ort = (struct rt6_info *) skb_dst(skb)) &&
	      __ip_vs_is_local_route6(ort))) {
		IP_VS_DBG_RL("Redirect from non-local address %pI6 to local "
			     "requires NAT method, dest: %pI6\n",
			     &ipv6_hdr(skb)->daddr, daddr);
		dst_release(&rt->dst);
		return NULL;
	}
	if (unlikely(!local && (!skb->dev || skb->dev->flags & IFF_LOOPBACK) &&
		     ipv6_addr_type(&ipv6_hdr(skb)->saddr) &
				    IPV6_ADDR_LOOPBACK)) {
		IP_VS_DBG_RL("Stopping traffic from loopback address %pI6 "
			     "to non-local address, dest: %pI6\n",
			     &ipv6_hdr(skb)->saddr, daddr);
		dst_release(&rt->dst);
		return NULL;
	}

	return rt;
}
#endif
/*
 *	Release dest->dst_cache before a dest is removed
 */
void
ip_vs_dst_reset(struct ip_vs_dest *dest)
{
	struct dst_entry *old_dst;

	old_dst = dest->dst_cache;
	dest->dst_cache = NULL;
	dst_release(old_dst);
}
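
/* Mark the skb as owned by IPVS and, for conntrack-aware connections,
 * confirm the conntrack entry before the tunnelled packet is sent.
 * Evaluates to a netfilter verdict so the caller can drop on failure. */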
#define IP_VS_XMIT_TUNNEL(skb, cp)				\
({								\
	int __ret = NF_ACCEPT;					\
								\
	(skb)->ipvs_property = 1;				\
	if (unlikely((cp)->flags & IP_VS_CONN_F_NFCT))		\
		__ret = ip_vs_confirm_conntrack(skb, cp);	\
	if (__ret == NF_ACCEPT) {				\
		nf_reset(skb);					\
		skb_forward_csum(skb);				\
	}							\
	__ret;							\
})
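
/* Transmit a NATed packet: keep or update conntrack state depending on
 * IP_VS_CONN_F_NFCT, return NF_ACCEPT for local destinations, otherwise
 * hand the packet to LOCAL_OUT for output via dst_output(). */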
#define IP_VS_XMIT_NAT(pf, skb, cp, local)		\
do {							\
	(skb)->ipvs_property = 1;			\
	if (likely(!((cp)->flags & IP_VS_CONN_F_NFCT)))	\
		ip_vs_notrack(skb);			\
	else						\
		ip_vs_update_conntrack(skb, cp, 1);	\
	if (local)					\
		return NF_ACCEPT;			\
	skb_forward_csum(skb);				\
	NF_HOOK(pf, NF_INET_LOCAL_OUT, (skb), NULL,	\
		skb_dst(skb)->dev, dst_output);		\
} while (0)
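
/* Same as IP_VS_XMIT_NAT but without conntrack mangling; used by the
 * non-NAT transmitters (NULL, bypass, DR and the local-destination
 * cases of tunnelling). */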
#define IP_VS_XMIT(pf, skb, cp, local)			\
do {							\
	(skb)->ipvs_property = 1;			\
	if (likely(!((cp)->flags & IP_VS_CONN_F_NFCT)))	\
		ip_vs_notrack(skb);			\
	if (local)					\
		return NF_ACCEPT;			\
	skb_forward_csum(skb);				\
	NF_HOOK(pf, NF_INET_LOCAL_OUT, (skb), NULL,	\
		skb_dst(skb)->dev, dst_output);		\
} while (0)
/*
 *      NULL transmitter (do nothing except return NF_ACCEPT)
 */
int
ip_vs_null_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
		struct ip_vs_protocol *pp)
{
	/* we do not touch skb and do not need pskb ptr */
	IP_VS_XMIT(NFPROTO_IPV4, skb, cp, 1);
}
/*
 *      Bypass transmitter
 *      Let packets bypass the destination when the destination is not
 *      available; it may only be used in a transparent cache cluster.
 */
int
ip_vs_bypass_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
		  struct ip_vs_protocol *pp)
{
	struct rtable *rt;			/* Route to the other host */
	struct iphdr  *iph = ip_hdr(skb);
	int    mtu;

	EnterFunction(10);

	if (!(rt = __ip_vs_get_out_rt(skb, NULL, iph->daddr,
				      RT_TOS(iph->tos), 2)))
		goto tx_error_icmp;

	/* MTU checking */
	mtu = dst_mtu(&rt->dst);
	if ((skb->len > mtu) && (iph->frag_off & htons(IP_DF)) &&
	    !skb_is_gso(skb)) {
		ip_rt_put(rt);
		icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
		IP_VS_DBG_RL("%s(): frag needed\n", __func__);
		goto tx_error;
	}

	/*
	 * Call ip_send_check because we are not sure it is called
	 * after ip_defrag. Is copy-on-write needed?
	 */
	if (unlikely((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL)) {
		ip_rt_put(rt);
		return NF_STOLEN;
	}
	ip_send_check(ip_hdr(skb));

	/* drop old route */
	skb_dst_drop(skb);
	skb_dst_set(skb, &rt->dst);

	/* Another hack: avoid icmp_send in ip_fragment */
	skb->local_df = 1;

	IP_VS_XMIT(NFPROTO_IPV4, skb, cp, 0);

	LeaveFunction(10);
	return NF_STOLEN;

 tx_error_icmp:
	dst_link_failure(skb);
 tx_error:
	kfree_skb(skb);
	LeaveFunction(10);
	return NF_STOLEN;
}
#ifdef CONFIG_IP_VS_IPV6
int
ip_vs_bypass_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
		     struct ip_vs_protocol *pp)
{
	struct rt6_info *rt;			/* Route to the other host */
	struct ipv6hdr  *iph = ipv6_hdr(skb);
	int    mtu;

	EnterFunction(10);

	if (!(rt = __ip_vs_get_out_rt_v6(skb, NULL, &iph->daddr, NULL, 0, 2)))
		goto tx_error_icmp;

	/* MTU checking */
	mtu = dst_mtu(&rt->dst);
	if (skb->len > mtu && !skb_is_gso(skb)) {
		if (!skb->dev) {
			struct net *net = dev_net(skb_dst(skb)->dev);

			skb->dev = net->loopback_dev;
		}
		icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
		dst_release(&rt->dst);
		IP_VS_DBG_RL("%s(): frag needed\n", __func__);
		goto tx_error;
	}

	/*
	 * Call ip_send_check because we are not sure it is called
	 * after ip_defrag. Is copy-on-write needed?
	 */
	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(skb == NULL)) {
		dst_release(&rt->dst);
		return NF_STOLEN;
	}

	/* drop old route */
	skb_dst_drop(skb);
	skb_dst_set(skb, &rt->dst);

	/* Another hack: avoid icmp_send in ip_fragment */
	skb->local_df = 1;

	IP_VS_XMIT(NFPROTO_IPV6, skb, cp, 0);

	LeaveFunction(10);
	return NF_STOLEN;

 tx_error_icmp:
	dst_link_failure(skb);
 tx_error:
	kfree_skb(skb);
	LeaveFunction(10);
	return NF_STOLEN;
}
#endif
/*
 *      NAT transmitter (only for outside-to-inside nat forwarding)
 *      Not used for related ICMP
 */
int
ip_vs_nat_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
	       struct ip_vs_protocol *pp)
{
	struct rtable *rt;		/* Route to the other host */
	int mtu;
	struct iphdr *iph = ip_hdr(skb);
	int local;

	EnterFunction(10);

	/* check if it is a connection of no-client-port */
	if (unlikely(cp->flags & IP_VS_CONN_F_NO_CPORT)) {
		__be16 _pt, *p;
		p = skb_header_pointer(skb, iph->ihl*4, sizeof(_pt), &_pt);
		if (p == NULL)
			goto tx_error;
		ip_vs_conn_fill_cport(cp, *p);
		IP_VS_DBG(10, "filled cport=%d\n", ntohs(*p));
	}

	if (!(rt = __ip_vs_get_out_rt(skb, cp->dest, cp->daddr.ip,
				      RT_TOS(iph->tos), 1|2|4)))
		goto tx_error_icmp;
	local = rt->rt_flags & RTCF_LOCAL;
	/*
	 * Avoid duplicate tuple in reply direction for NAT traffic
	 * to local address when connection is sync-ed
	 */
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	if (cp->flags & IP_VS_CONN_F_SYNC && local) {
		enum ip_conntrack_info ctinfo;
		struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
		if (ct && !nf_ct_is_untracked(ct)) {
			IP_VS_DBG_RL_PKT(10, AF_INET, pp, skb, 0,
					 "ip_vs_nat_xmit(): "
					 "stopping DNAT to local address");
			goto tx_error_put;
		}
	}
#endif

	/* From world but DNAT to loopback address? */
	if (local && ipv4_is_loopback(rt->rt_dst) &&
	    rt_is_input_route(skb_rtable(skb))) {
		IP_VS_DBG_RL_PKT(1, AF_INET, pp, skb, 0, "ip_vs_nat_xmit(): "
				 "stopping DNAT to loopback address");
		goto tx_error_put;
	}

	/* MTU checking */
	mtu = dst_mtu(&rt->dst);
	if ((skb->len > mtu) && (iph->frag_off & htons(IP_DF)) &&
	    !skb_is_gso(skb)) {
		icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
		IP_VS_DBG_RL_PKT(0, AF_INET, pp, skb, 0,
				 "ip_vs_nat_xmit(): frag needed for");
		goto tx_error_put;
	}

	/* copy-on-write the packet before mangling it */
	if (!skb_make_writable(skb, sizeof(struct iphdr)))
		goto tx_error_put;

	if (skb_cow(skb, rt->dst.dev->hard_header_len))
		goto tx_error_put;

	/* mangle the packet */
	if (pp->dnat_handler && !pp->dnat_handler(skb, pp, cp))
		goto tx_error_put;
	ip_hdr(skb)->daddr = cp->daddr.ip;
	ip_send_check(ip_hdr(skb));

	if (!local) {
		/* drop old route */
		skb_dst_drop(skb);
		skb_dst_set(skb, &rt->dst);
	} else {
		ip_rt_put(rt);
		/*
		 * Some IPv4 replies get local address from routes,
		 * not from iph, so while we DNAT after routing
		 * we need this second input/output route.
		 */
		if (!__ip_vs_reroute_locally(skb))
			goto tx_error;
	}

	IP_VS_DBG_PKT(10, AF_INET, pp, skb, 0, "After DNAT");

	/* FIXME: when the application helper enlarges the packet and the
	   length is larger than the MTU of the outgoing device, there will
	   still be an MTU problem. */

	/* Another hack: avoid icmp_send in ip_fragment */
	skb->local_df = 1;

	IP_VS_XMIT_NAT(NFPROTO_IPV4, skb, cp, local);

	LeaveFunction(10);
	return NF_STOLEN;

 tx_error_icmp:
	dst_link_failure(skb);
 tx_error:
	kfree_skb(skb);
	LeaveFunction(10);
	return NF_STOLEN;
 tx_error_put:
	ip_rt_put(rt);
	goto tx_error;
}
#ifdef CONFIG_IP_VS_IPV6
int
ip_vs_nat_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
		  struct ip_vs_protocol *pp)
{
	struct rt6_info *rt;		/* Route to the other host */
	int mtu;
	int local;

	EnterFunction(10);

	/* check if it is a connection of no-client-port */
	if (unlikely(cp->flags & IP_VS_CONN_F_NO_CPORT)) {
		__be16 _pt, *p;
		p = skb_header_pointer(skb, sizeof(struct ipv6hdr),
				       sizeof(_pt), &_pt);
		if (p == NULL)
			goto tx_error;
		ip_vs_conn_fill_cport(cp, *p);
		IP_VS_DBG(10, "filled cport=%d\n", ntohs(*p));
	}

	if (!(rt = __ip_vs_get_out_rt_v6(skb, cp->dest, &cp->daddr.in6, NULL,
					 0, 1|2|4)))
		goto tx_error_icmp;
	local = __ip_vs_is_local_route6(rt);
	/*
	 * Avoid duplicate tuple in reply direction for NAT traffic
	 * to local address when connection is sync-ed
	 */
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	if (cp->flags & IP_VS_CONN_F_SYNC && local) {
		enum ip_conntrack_info ctinfo;
		struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
		if (ct && !nf_ct_is_untracked(ct)) {
			IP_VS_DBG_RL_PKT(10, AF_INET6, pp, skb, 0,
					 "ip_vs_nat_xmit_v6(): "
					 "stopping DNAT to local address");
			goto tx_error_put;
		}
	}
#endif

	/* From world but DNAT to loopback address? */
	if (local && skb->dev && !(skb->dev->flags & IFF_LOOPBACK) &&
	    ipv6_addr_type(&rt->rt6i_dst.addr) & IPV6_ADDR_LOOPBACK) {
		IP_VS_DBG_RL_PKT(1, AF_INET6, pp, skb, 0,
				 "ip_vs_nat_xmit_v6(): "
				 "stopping DNAT to loopback address");
		goto tx_error_put;
	}

	/* MTU checking */
	mtu = dst_mtu(&rt->dst);
	if (skb->len > mtu && !skb_is_gso(skb)) {
		if (!skb->dev) {
			struct net *net = dev_net(skb_dst(skb)->dev);

			skb->dev = net->loopback_dev;
		}
		icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
		IP_VS_DBG_RL_PKT(0, AF_INET6, pp, skb, 0,
				 "ip_vs_nat_xmit_v6(): frag needed for");
		goto tx_error_put;
	}

	/* copy-on-write the packet before mangling it */
	if (!skb_make_writable(skb, sizeof(struct ipv6hdr)))
		goto tx_error_put;

	if (skb_cow(skb, rt->dst.dev->hard_header_len))
		goto tx_error_put;

	/* mangle the packet */
	if (pp->dnat_handler && !pp->dnat_handler(skb, pp, cp))
		goto tx_error;
	ipv6_addr_copy(&ipv6_hdr(skb)->daddr, &cp->daddr.in6);

	if (!local || !skb->dev) {
		/* drop the old route when skb is not shared */
		skb_dst_drop(skb);
		skb_dst_set(skb, &rt->dst);
	} else {
		/* destined to loopback, do we need to change route? */
		dst_release(&rt->dst);
	}

	IP_VS_DBG_PKT(10, AF_INET6, pp, skb, 0, "After DNAT");

	/* FIXME: when the application helper enlarges the packet and the
	   length is larger than the MTU of the outgoing device, there will
	   still be an MTU problem. */

	/* Another hack: avoid icmp_send in ip_fragment */
	skb->local_df = 1;

	IP_VS_XMIT_NAT(NFPROTO_IPV6, skb, cp, local);

	LeaveFunction(10);
	return NF_STOLEN;

tx_error_icmp:
	dst_link_failure(skb);
tx_error:
	LeaveFunction(10);
	kfree_skb(skb);
	return NF_STOLEN;
tx_error_put:
	dst_release(&rt->dst);
	goto tx_error;
}
#endif
/*
 *   IP Tunneling transmitter
 *
 *   This function encapsulates the packet in a new IP packet whose
 *   destination is set to cp->daddr. Most code of this function
 *   is taken from ipip.c.
 *
 *   It is used in VS/TUN cluster. The load balancer selects a real
 *   server from a cluster based on a scheduling algorithm,
 *   encapsulates the request packet and forwards it to the selected
 *   server. For example, all real servers are configured with
 *   "ifconfig tunl0 <Virtual IP Address> up". When the server receives
 *   the encapsulated packet, it will decapsulate the packet, process
 *   the request and return the response packets directly to the client
 *   without passing through the load balancer. This can greatly increase
 *   the scalability of the virtual server.
 *
 *   Used for ANY protocol
 */
int
ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
		  struct ip_vs_protocol *pp)
{
	struct rtable *rt;			/* Route to the other host */
	struct net_device *tdev;		/* Device to other host */
	struct iphdr  *old_iph = ip_hdr(skb);
	u8     tos = old_iph->tos;
	__be16 df = old_iph->frag_off;
	struct iphdr  *iph;			/* Our new IP header */
	unsigned int max_headroom;		/* The extra header space needed */
	int    mtu;
	int ret;

	EnterFunction(10);

	if (!(rt = __ip_vs_get_out_rt(skb, cp->dest, cp->daddr.ip,
				      RT_TOS(tos), 1|2)))
		goto tx_error_icmp;
	if (rt->rt_flags & RTCF_LOCAL) {
		ip_rt_put(rt);
		IP_VS_XMIT(NFPROTO_IPV4, skb, cp, 1);
	}

	tdev = rt->dst.dev;

	mtu = dst_mtu(&rt->dst) - sizeof(struct iphdr);
	if (mtu < 68) {
		IP_VS_DBG_RL("%s(): mtu less than 68\n", __func__);
		goto tx_error_put;
	}
	if (skb_dst(skb))
		skb_dst(skb)->ops->update_pmtu(skb_dst(skb), mtu);
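	/* The outer IP header shrinks the usable path MTU, so propagate it
	 * to the original route before the DF/fragmentation check below. */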

	df |= (old_iph->frag_off & htons(IP_DF));

	if ((old_iph->frag_off & htons(IP_DF) &&
	    mtu < ntohs(old_iph->tot_len) && !skb_is_gso(skb))) {
		icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
		IP_VS_DBG_RL("%s(): frag needed\n", __func__);
		goto tx_error_put;
	}

	/*
	 * Okay, now see if we can stuff it in the buffer as-is.
	 */
	max_headroom = LL_RESERVED_SPACE(tdev) + sizeof(struct iphdr);

	if (skb_headroom(skb) < max_headroom
	    || skb_cloned(skb) || skb_shared(skb)) {
		struct sk_buff *new_skb =
			skb_realloc_headroom(skb, max_headroom);
		if (!new_skb) {
			ip_rt_put(rt);
			kfree_skb(skb);
			IP_VS_ERR_RL("%s(): no memory\n", __func__);
			return NF_STOLEN;
		}
		kfree_skb(skb);
		skb = new_skb;
		old_iph = ip_hdr(skb);
	}

	skb->transport_header = skb->network_header;
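	/* The original IP header becomes the inner header; the new outer
	 * IPIP header is pushed in front of it below. */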

	/* fix old IP header checksum */
	ip_send_check(old_iph);

	skb_push(skb, sizeof(struct iphdr));
	skb_reset_network_header(skb);
	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));

	/* drop old route */
	skb_dst_drop(skb);
	skb_dst_set(skb, &rt->dst);

	/*
	 *	Push down and install the IPIP header.
	 */
	iph = ip_hdr(skb);
	iph->version = 4;
	iph->ihl = sizeof(struct iphdr)>>2;
	iph->frag_off = df;
	iph->protocol = IPPROTO_IPIP;
	iph->tos = tos;
	iph->daddr = rt->rt_dst;
	iph->saddr = rt->rt_src;
	iph->ttl = old_iph->ttl;
	ip_select_ident(iph, &rt->dst, NULL);

	/* Another hack: avoid icmp_send in ip_fragment */
	skb->local_df = 1;

	ret = IP_VS_XMIT_TUNNEL(skb, cp);
	if (ret == NF_ACCEPT)
		ip_local_out(skb);
	else if (ret == NF_DROP)
		kfree_skb(skb);

	LeaveFunction(10);

	return NF_STOLEN;

 tx_error_icmp:
	dst_link_failure(skb);
 tx_error:
	kfree_skb(skb);
	LeaveFunction(10);
	return NF_STOLEN;
tx_error_put:
	ip_rt_put(rt);
	goto tx_error;
}
#ifdef CONFIG_IP_VS_IPV6
int
ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
		     struct ip_vs_protocol *pp)
{
	struct rt6_info *rt;		/* Route to the other host */
	struct in6_addr saddr;		/* Source for tunnel */
	struct net_device *tdev;	/* Device to other host */
	struct ipv6hdr  *old_iph = ipv6_hdr(skb);
	struct ipv6hdr  *iph;		/* Our new IP header */
	unsigned int max_headroom;	/* The extra header space needed */
	int    mtu;
	int ret;

	EnterFunction(10);

	if (!(rt = __ip_vs_get_out_rt_v6(skb, cp->dest, &cp->daddr.in6,
					 &saddr, 1, 1|2)))
		goto tx_error_icmp;
	if (__ip_vs_is_local_route6(rt)) {
		dst_release(&rt->dst);
		IP_VS_XMIT(NFPROTO_IPV6, skb, cp, 1);
	}

	tdev = rt->dst.dev;

	mtu = dst_mtu(&rt->dst) - sizeof(struct ipv6hdr);
	if (mtu < IPV6_MIN_MTU) {
		IP_VS_DBG_RL("%s(): mtu less than %d\n", __func__,
			     IPV6_MIN_MTU);
		goto tx_error_put;
	}
	if (skb_dst(skb))
		skb_dst(skb)->ops->update_pmtu(skb_dst(skb), mtu);

	if (mtu < ntohs(old_iph->payload_len) + sizeof(struct ipv6hdr) &&
	    !skb_is_gso(skb)) {
		if (!skb->dev) {
			struct net *net = dev_net(skb_dst(skb)->dev);

			skb->dev = net->loopback_dev;
		}
		icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
		IP_VS_DBG_RL("%s(): frag needed\n", __func__);
		goto tx_error_put;
	}

	/*
	 * Okay, now see if we can stuff it in the buffer as-is.
	 */
	max_headroom = LL_RESERVED_SPACE(tdev) + sizeof(struct ipv6hdr);

	if (skb_headroom(skb) < max_headroom
	    || skb_cloned(skb) || skb_shared(skb)) {
		struct sk_buff *new_skb =
			skb_realloc_headroom(skb, max_headroom);
		if (!new_skb) {
			dst_release(&rt->dst);
			kfree_skb(skb);
			IP_VS_ERR_RL("%s(): no memory\n", __func__);
			return NF_STOLEN;
		}
		kfree_skb(skb);
		skb = new_skb;
		old_iph = ipv6_hdr(skb);
	}

	skb->transport_header = skb->network_header;

	skb_push(skb, sizeof(struct ipv6hdr));
	skb_reset_network_header(skb);
	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));

	/* drop old route */
	skb_dst_drop(skb);
	skb_dst_set(skb, &rt->dst);

	/*
	 *	Push down and install the IPIP header.
	 */
	iph = ipv6_hdr(skb);
	iph->version = 6;
	iph->nexthdr = IPPROTO_IPV6;
	iph->payload_len = old_iph->payload_len;
	be16_add_cpu(&iph->payload_len, sizeof(*old_iph));
	iph->priority = old_iph->priority;
	memset(&iph->flow_lbl, 0, sizeof(iph->flow_lbl));
	ipv6_addr_copy(&iph->daddr, &cp->daddr.in6);
	ipv6_addr_copy(&iph->saddr, &saddr);
	iph->hop_limit = old_iph->hop_limit;

	/* Another hack: avoid icmp_send in ip_fragment */
	skb->local_df = 1;

	ret = IP_VS_XMIT_TUNNEL(skb, cp);
	if (ret == NF_ACCEPT)
		ip6_local_out(skb);
	else if (ret == NF_DROP)
		kfree_skb(skb);

	LeaveFunction(10);

	return NF_STOLEN;

tx_error_icmp:
	dst_link_failure(skb);
tx_error:
	kfree_skb(skb);
	LeaveFunction(10);
	return NF_STOLEN;
tx_error_put:
	dst_release(&rt->dst);
	goto tx_error;
}
#endif
/*
 *      Direct Routing transmitter
 *      Used for ANY protocol
 */
int
ip_vs_dr_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
	      struct ip_vs_protocol *pp)
{
	struct rtable *rt;			/* Route to the other host */
	struct iphdr  *iph = ip_hdr(skb);
	int    mtu;

	EnterFunction(10);

	if (!(rt = __ip_vs_get_out_rt(skb, cp->dest, cp->daddr.ip,
				      RT_TOS(iph->tos), 1|2)))
		goto tx_error_icmp;
	if (rt->rt_flags & RTCF_LOCAL) {
		ip_rt_put(rt);
		IP_VS_XMIT(NFPROTO_IPV4, skb, cp, 1);
	}

	/* MTU checking */
	mtu = dst_mtu(&rt->dst);
	if ((iph->frag_off & htons(IP_DF)) && skb->len > mtu &&
	    !skb_is_gso(skb)) {
		icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
		ip_rt_put(rt);
		IP_VS_DBG_RL("%s(): frag needed\n", __func__);
		goto tx_error;
	}

	/*
	 * Call ip_send_check because we are not sure it is called
	 * after ip_defrag. Is copy-on-write needed?
	 */
	if (unlikely((skb = skb_share_check(skb, GFP_ATOMIC)) == NULL)) {
		ip_rt_put(rt);
		return NF_STOLEN;
	}
	ip_send_check(ip_hdr(skb));

	/* drop old route */
	skb_dst_drop(skb);
	skb_dst_set(skb, &rt->dst);

	/* Another hack: avoid icmp_send in ip_fragment */
	skb->local_df = 1;

	IP_VS_XMIT(NFPROTO_IPV4, skb, cp, 0);

	LeaveFunction(10);
	return NF_STOLEN;

 tx_error_icmp:
	dst_link_failure(skb);
 tx_error:
	kfree_skb(skb);
	LeaveFunction(10);
	return NF_STOLEN;
}
#ifdef CONFIG_IP_VS_IPV6
int
ip_vs_dr_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
		 struct ip_vs_protocol *pp)
{
	struct rt6_info *rt;			/* Route to the other host */
	int    mtu;

	EnterFunction(10);

	if (!(rt = __ip_vs_get_out_rt_v6(skb, cp->dest, &cp->daddr.in6, NULL,
					 0, 1|2)))
		goto tx_error_icmp;
	if (__ip_vs_is_local_route6(rt)) {
		dst_release(&rt->dst);
		IP_VS_XMIT(NFPROTO_IPV6, skb, cp, 1);
	}

	/* MTU checking */
	mtu = dst_mtu(&rt->dst);
	if (skb->len > mtu) {
		if (!skb->dev) {
			struct net *net = dev_net(skb_dst(skb)->dev);

			skb->dev = net->loopback_dev;
		}
		icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
		dst_release(&rt->dst);
		IP_VS_DBG_RL("%s(): frag needed\n", __func__);
		goto tx_error;
	}

	/*
	 * Call ip_send_check because we are not sure it is called
	 * after ip_defrag. Is copy-on-write needed?
	 */
	skb = skb_share_check(skb, GFP_ATOMIC);
	if (unlikely(skb == NULL)) {
		dst_release(&rt->dst);
		return NF_STOLEN;
	}

	/* drop old route */
	skb_dst_drop(skb);
	skb_dst_set(skb, &rt->dst);

	/* Another hack: avoid icmp_send in ip_fragment */
	skb->local_df = 1;

	IP_VS_XMIT(NFPROTO_IPV6, skb, cp, 0);

	LeaveFunction(10);
	return NF_STOLEN;

tx_error_icmp:
	dst_link_failure(skb);
tx_error:
	kfree_skb(skb);
	LeaveFunction(10);
	return NF_STOLEN;
}
#endif
/*
 *	ICMP packet transmitter
 *	called by the ip_vs_in_icmp
 */
int
ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
		struct ip_vs_protocol *pp, int offset)
{
	struct rtable	*rt;	/* Route to the other host */
	int mtu;
	int rc;
	int local;

	EnterFunction(10);

	/* The ICMP packet for VS/TUN, VS/DR and LOCALNODE will be
	   forwarded directly here, because there is no need to
	   translate address/port back */
	if (IP_VS_FWD_METHOD(cp) != IP_VS_CONN_F_MASQ) {
		if (cp->packet_xmit)
			rc = cp->packet_xmit(skb, cp, pp);
		else
			rc = NF_ACCEPT;
		/* do not touch skb anymore */
		atomic_inc(&cp->in_pkts);
		goto out;
	}

	/*
	 * mangle and send the packet here (only for VS/NAT)
	 */

	if (!(rt = __ip_vs_get_out_rt(skb, cp->dest, cp->daddr.ip,
				      RT_TOS(ip_hdr(skb)->tos), 1|2|4)))
		goto tx_error_icmp;
	local = rt->rt_flags & RTCF_LOCAL;

	/*
	 * Avoid duplicate tuple in reply direction for NAT traffic
	 * to local address when connection is sync-ed
	 */
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	if (cp->flags & IP_VS_CONN_F_SYNC && local) {
		enum ip_conntrack_info ctinfo;
		struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
		if (ct && !nf_ct_is_untracked(ct)) {
			IP_VS_DBG(10, "%s(): "
				  "stopping DNAT to local address %pI4\n",
				  __func__, &cp->daddr.ip);
			goto tx_error_put;
		}
	}
#endif

	/* From world but DNAT to loopback address? */
	if (local && ipv4_is_loopback(rt->rt_dst) &&
	    rt_is_input_route(skb_rtable(skb))) {
		IP_VS_DBG(1, "%s(): "
			  "stopping DNAT to loopback %pI4\n",
			  __func__, &cp->daddr.ip);
		goto tx_error_put;
	}

	/* MTU checking */
	mtu = dst_mtu(&rt->dst);
	if ((skb->len > mtu) && (ip_hdr(skb)->frag_off & htons(IP_DF)) &&
	    !skb_is_gso(skb)) {
		icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED, htonl(mtu));
		IP_VS_DBG_RL("%s(): frag needed\n", __func__);
		goto tx_error_put;
	}

	/* copy-on-write the packet before mangling it */
	if (!skb_make_writable(skb, offset))
		goto tx_error_put;

	if (skb_cow(skb, rt->dst.dev->hard_header_len))
		goto tx_error_put;

	ip_vs_nat_icmp(skb, pp, cp, 0);

	if (!local) {
		/* drop the old route when skb is not shared */
		skb_dst_drop(skb);
		skb_dst_set(skb, &rt->dst);
	} else {
		ip_rt_put(rt);
		/*
		 * Some IPv4 replies get local address from routes,
		 * not from iph, so while we DNAT after routing
		 * we need this second input/output route.
		 */
		if (!__ip_vs_reroute_locally(skb))
			goto tx_error;
	}

	/* Another hack: avoid icmp_send in ip_fragment */
	skb->local_df = 1;

	IP_VS_XMIT_NAT(NFPROTO_IPV4, skb, cp, local);

	rc = NF_STOLEN;
	goto out;

 tx_error_icmp:
	dst_link_failure(skb);
 tx_error:
	dev_kfree_skb(skb);
	rc = NF_STOLEN;
out:
	LeaveFunction(10);
	return rc;
 tx_error_put:
	ip_rt_put(rt);
	goto tx_error;
}
#ifdef CONFIG_IP_VS_IPV6
int
ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
		   struct ip_vs_protocol *pp, int offset)
{
	struct rt6_info	*rt;	/* Route to the other host */
	int mtu;
	int rc;
	int local;

	EnterFunction(10);

	/* The ICMP packet for VS/TUN, VS/DR and LOCALNODE will be
	   forwarded directly here, because there is no need to
	   translate address/port back */
	if (IP_VS_FWD_METHOD(cp) != IP_VS_CONN_F_MASQ) {
		if (cp->packet_xmit)
			rc = cp->packet_xmit(skb, cp, pp);
		else
			rc = NF_ACCEPT;
		/* do not touch skb anymore */
		atomic_inc(&cp->in_pkts);
		goto out;
	}

	/*
	 * mangle and send the packet here (only for VS/NAT)
	 */

	if (!(rt = __ip_vs_get_out_rt_v6(skb, cp->dest, &cp->daddr.in6, NULL,
					 0, 1|2|4)))
		goto tx_error_icmp;

	local = __ip_vs_is_local_route6(rt);
	/*
	 * Avoid duplicate tuple in reply direction for NAT traffic
	 * to local address when connection is sync-ed
	 */
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	if (cp->flags & IP_VS_CONN_F_SYNC && local) {
		enum ip_conntrack_info ctinfo;
		struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
		if (ct && !nf_ct_is_untracked(ct)) {
			IP_VS_DBG(10, "%s(): "
				  "stopping DNAT to local address %pI6\n",
				  __func__, &cp->daddr.in6);
			goto tx_error_put;
		}
	}
#endif

	/* From world but DNAT to loopback address? */
	if (local && skb->dev && !(skb->dev->flags & IFF_LOOPBACK) &&
	    ipv6_addr_type(&rt->rt6i_dst.addr) & IPV6_ADDR_LOOPBACK) {
		IP_VS_DBG(1, "%s(): "
			  "stopping DNAT to loopback %pI6\n",
			  __func__, &cp->daddr.in6);
		goto tx_error_put;
	}

	/* MTU checking */
	mtu = dst_mtu(&rt->dst);
	if (skb->len > mtu && !skb_is_gso(skb)) {
		if (!skb->dev) {
			struct net *net = dev_net(skb_dst(skb)->dev);

			skb->dev = net->loopback_dev;
		}
		icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
		IP_VS_DBG_RL("%s(): frag needed\n", __func__);
		goto tx_error_put;
	}

	/* copy-on-write the packet before mangling it */
	if (!skb_make_writable(skb, offset))
		goto tx_error_put;

	if (skb_cow(skb, rt->dst.dev->hard_header_len))
		goto tx_error_put;

	ip_vs_nat_icmp_v6(skb, pp, cp, 0);

	if (!local || !skb->dev) {
		/* drop the old route when skb is not shared */
		skb_dst_drop(skb);
		skb_dst_set(skb, &rt->dst);
	} else {
		/* destined to loopback, do we need to change route? */
		dst_release(&rt->dst);
	}

	/* Another hack: avoid icmp_send in ip_fragment */
	skb->local_df = 1;

	IP_VS_XMIT_NAT(NFPROTO_IPV6, skb, cp, local);

	rc = NF_STOLEN;
	goto out;

tx_error_icmp:
	dst_link_failure(skb);
tx_error:
	dev_kfree_skb(skb);
	rc = NF_STOLEN;
out:
	LeaveFunction(10);
	return rc;
tx_error_put:
	dst_release(&rt->dst);
	goto tx_error;
}
#endif