/*
 * net/sched/act_nat.c	Stateless NAT actions
 *
 * Copyright (c) 2007 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 */
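/*
 * Example usage (assumed iproute2 syntax for the "nat" action; check
 * tc(8) / tc-nat(8) for your version):
 *
 *   # On egress, rewrite source 10.0.0.0/24 to 192.168.1.0/24:
 *   tc filter add dev eth0 parent 1: protocol ip u32 \
 *           match ip src 10.0.0.0/24 \
 *           action nat egress 10.0.0.0/24 192.168.1.0
 */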
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netfilter.h>
#include <linux/rtnetlink.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/tc_act/tc_nat.h>
#include <net/act_api.h>
#include <net/icmp.h>
#include <net/ip.h>
#include <net/netlink.h>
#include <net/tc_act/tc_nat.h>
#include <net/tcp.h>
#include <net/udp.h>
#define NAT_TAB_MASK	15
static struct tcf_common *tcf_nat_ht[NAT_TAB_MASK + 1];
static u32 nat_idx_gen;
static DEFINE_RWLOCK(nat_lock);

static struct tcf_hashinfo nat_hash_info = {
	.htab	=	tcf_nat_ht,
	.hmask	=	NAT_TAB_MASK,
	.lock	=	&nat_lock,
};

static const struct nla_policy nat_policy[TCA_NAT_MAX + 1] = {
	[TCA_NAT_PARMS]	= { .len = sizeof(struct tc_nat) },
};
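/*
 * Create or update a NAT action instance from netlink attributes.
 * TCA_NAT_PARMS (struct tc_nat) carries the old/new addresses, the
 * mask selecting the bits to rewrite, and the egress/ingress flag.
 */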
static int tcf_nat_init(struct nlattr *nla, struct nlattr *est,
			struct tc_action *a, int ovr, int bind)
{
	struct nlattr *tb[TCA_NAT_MAX + 1];
	struct tc_nat *parm;
	int ret = 0, err;
	struct tcf_nat *p;
	struct tcf_common *pc;

	if (nla == NULL)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_NAT_MAX, nla, nat_policy);
	if (err < 0)
		return err;

	if (tb[TCA_NAT_PARMS] == NULL)
		return -EINVAL;
	parm = nla_data(tb[TCA_NAT_PARMS]);

	pc = tcf_hash_check(parm->index, a, bind, &nat_hash_info);
	if (!pc) {
		pc = tcf_hash_create(parm->index, est, a, sizeof(*p), bind,
				     &nat_idx_gen, &nat_hash_info);
		if (unlikely(!pc))
			return -ENOMEM;
		p = to_tcf_nat(pc);
		ret = ACT_P_CREATED;
	} else {
		p = to_tcf_nat(pc);
		if (!ovr) {
			tcf_hash_release(pc, bind, &nat_hash_info);
			return -EEXIST;
		}
	}

	spin_lock_bh(&p->tcf_lock);
	p->old_addr = parm->old_addr;
	p->new_addr = parm->new_addr;
	p->mask = parm->mask;
	p->flags = parm->flags;

	p->tcf_action = parm->action;
	spin_unlock_bh(&p->tcf_lock);

	if (ret == ACT_P_CREATED)
		tcf_hash_insert(pc, &nat_hash_info);

	return ret;
}
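/*
 * Release the caller's reference; tcf_hash_release() frees the
 * instance once the last reference and binding are gone.
 */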
static int tcf_nat_cleanup(struct tc_action *a, int bind)
{
	struct tcf_nat *p = a->priv;

	return tcf_hash_release(&p->common, bind, &nat_hash_info);
}
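/*
 * Packet path.  Rewrites the masked bits of the source (egress) or
 * destination (ingress) address and fixes up the IP, TCP, UDP and
 * ICMP checksums incrementally.  The rewrite preserves the unmasked
 * host bits:
 *
 *   result = (new_addr & mask) | (addr & ~mask)
 *
 * so e.g. old 10.0.0.0/24 -> new 192.168.1.0/24 maps 10.0.0.5 to
 * 192.168.1.5.
 */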
static int tcf_nat(struct sk_buff *skb, struct tc_action *a,
		   struct tcf_result *res)
{
	struct tcf_nat *p = a->priv;
	struct iphdr *iph;
	__be32 old_addr;
	__be32 new_addr;
	__be32 mask;
	__be32 addr;
	int egress;
	int action;
	int ihl;

	spin_lock(&p->tcf_lock);

	p->tcf_tm.lastuse = jiffies;
	old_addr = p->old_addr;
	new_addr = p->new_addr;
	mask = p->mask;
	egress = p->flags & TCA_NAT_FLAG_EGRESS;
	action = p->tcf_action;

	p->tcf_bstats.bytes += qdisc_pkt_len(skb);
	p->tcf_bstats.packets++;

	spin_unlock(&p->tcf_lock);

	if (unlikely(action == TC_ACT_SHOT))
		goto drop;

	if (!pskb_may_pull(skb, sizeof(*iph)))
		goto drop;

	iph = ip_hdr(skb);

	if (egress)
		addr = iph->saddr;
	else
		addr = iph->daddr;

	if (!((old_addr ^ addr) & mask)) {
		if (skb_cloned(skb) &&
		    !skb_clone_writable(skb, sizeof(*iph)) &&
		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
			goto drop;

		new_addr &= mask;
		new_addr |= addr & ~mask;

		/* Rewrite IP header */
		iph = ip_hdr(skb);
		if (egress)
			iph->saddr = new_addr;
		else
			iph->daddr = new_addr;

		csum_replace4(&iph->check, addr, new_addr);
	}

	ihl = iph->ihl * 4;

	/* It would be nice to share code with stateful NAT. */
	switch (iph->frag_off & htons(IP_OFFSET) ? 0 : iph->protocol) {
	case IPPROTO_TCP:
	{
		struct tcphdr *tcph;

		if (!pskb_may_pull(skb, ihl + sizeof(*tcph)) ||
		    (skb_cloned(skb) &&
		     !skb_clone_writable(skb, ihl + sizeof(*tcph)) &&
		     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
			goto drop;

		tcph = (void *)(skb_network_header(skb) + ihl);
		inet_proto_csum_replace4(&tcph->check, skb, addr, new_addr, 1);
		break;
	}
	case IPPROTO_UDP:
	{
		struct udphdr *udph;

		if (!pskb_may_pull(skb, ihl + sizeof(*udph)) ||
		    (skb_cloned(skb) &&
		     !skb_clone_writable(skb, ihl + sizeof(*udph)) &&
		     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
			goto drop;

		udph = (void *)(skb_network_header(skb) + ihl);
		if (udph->check || skb->ip_summed == CHECKSUM_PARTIAL) {
			inet_proto_csum_replace4(&udph->check, skb, addr,
						 new_addr, 1);
			if (!udph->check)
				udph->check = CSUM_MANGLED_0;
		}
		break;
	}
	case IPPROTO_ICMP:
	{
		struct icmphdr *icmph;

		if (!pskb_may_pull(skb, ihl + sizeof(*icmph) + sizeof(*iph)))
			goto drop;

		icmph = (void *)(skb_network_header(skb) + ihl);

		if ((icmph->type != ICMP_DEST_UNREACH) &&
		    (icmph->type != ICMP_TIME_EXCEEDED) &&
		    (icmph->type != ICMP_PARAMETERPROB))
			break;

		iph = (void *)(icmph + 1);
		if (egress)
			addr = iph->daddr;
		else
			addr = iph->saddr;

		if ((old_addr ^ addr) & mask)
			break;

		if (skb_cloned(skb) &&
		    !skb_clone_writable(skb,
					ihl + sizeof(*icmph) + sizeof(*iph)) &&
		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
			goto drop;

		icmph = (void *)(skb_network_header(skb) + ihl);
		iph = (void *)(icmph + 1);

		new_addr &= mask;
		new_addr |= addr & ~mask;

		/* XXX Fix up the inner checksums. */
		if (egress)
			iph->daddr = new_addr;
		else
			iph->saddr = new_addr;

		inet_proto_csum_replace4(&icmph->checksum, skb, addr, new_addr,
					 0);
		break;
	}
	default:
		break;
	}

	return action;

drop:
	spin_lock(&p->tcf_lock);
	p->tcf_qstats.drops++;
	spin_unlock(&p->tcf_lock);
	return TC_ACT_SHOT;
}
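/* Dump the action's parameters and timing info back to user space. */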
static int tcf_nat_dump(struct sk_buff *skb, struct tc_action *a,
			int bind, int ref)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tcf_nat *p = a->priv;
	struct tc_nat *opt;
	struct tcf_t t;
	int s;

	s = sizeof(*opt);

	/* netlink spinlocks held above us - must use ATOMIC */
	opt = kzalloc(s, GFP_ATOMIC);
	if (unlikely(!opt))
		return -ENOBUFS;

	opt->old_addr = p->old_addr;
	opt->new_addr = p->new_addr;
	opt->mask = p->mask;
	opt->flags = p->flags;

	opt->index = p->tcf_index;
	opt->action = p->tcf_action;
	opt->refcnt = p->tcf_refcnt - ref;
	opt->bindcnt = p->tcf_bindcnt - bind;

	NLA_PUT(skb, TCA_NAT_PARMS, s, opt);
	t.install = jiffies_to_clock_t(jiffies - p->tcf_tm.install);
	t.lastuse = jiffies_to_clock_t(jiffies - p->tcf_tm.lastuse);
	t.expires = jiffies_to_clock_t(p->tcf_tm.expires);
	NLA_PUT(skb, TCA_NAT_TM, sizeof(t), &t);

	kfree(opt);

	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	kfree(opt);
	return -1;
}
static struct tc_action_ops act_nat_ops = {
	.kind		=	"nat",
	.hinfo		=	&nat_hash_info,
	.type		=	TCA_ACT_NAT,
	.capab		=	TCA_CAP_NONE,
	.owner		=	THIS_MODULE,
	.act		=	tcf_nat,
	.dump		=	tcf_nat_dump,
	.cleanup	=	tcf_nat_cleanup,
	.lookup		=	tcf_hash_search,
	.init		=	tcf_nat_init,
	.walk		=	tcf_generic_walker
};
MODULE_DESCRIPTION("Stateless NAT actions");
MODULE_LICENSE("GPL");

static int __init nat_init_module(void)
{
	return tcf_register_action(&act_nat_ops);
}

static void __exit nat_cleanup_module(void)
{
	tcf_unregister_action(&act_nat_ops);
}

module_init(nat_init_module);
module_exit(nat_cleanup_module);