/*
 * net/sched/act_nat.c	Stateless NAT actions
 *
 * Copyright (c) 2007 Herbert Xu <herbert@gondor.apana.org.au>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 */
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/netfilter.h>
#include <linux/rtnetlink.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>
#include <linux/tc_act/tc_nat.h>
#include <net/act_api.h>
#include <net/icmp.h>
#include <net/ip.h>
#include <net/netlink.h>
#include <net/tc_act/tc_nat.h>
#include <net/tcp.h>
#include <net/udp.h>
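
/*
 * All nat action instances live in one shared hash table keyed by action
 * index; nat_idx_gen hands out indices for autogenerated actions and
 * nat_lock protects the table itself.
 */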
#define NAT_TAB_MASK	15
static struct tcf_common *tcf_nat_ht[NAT_TAB_MASK + 1];
static u32 nat_idx_gen;
static DEFINE_RWLOCK(nat_lock);

static struct tcf_hashinfo nat_hash_info = {
	.htab	=	tcf_nat_ht,
	.hmask	=	NAT_TAB_MASK,
	.lock	=	&nat_lock,
};

static const struct nla_policy nat_policy[TCA_NAT_MAX + 1] = {
	[TCA_NAT_PARMS]	= { .len = sizeof(struct tc_nat) },
};
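
/*
 * Parse the TCA_NAT_PARMS attribute and either create a new action
 * instance, or, when 'ovr' is set, overwrite the parameters of an
 * existing one under its lock.
 */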
static int tcf_nat_init(struct nlattr *nla, struct nlattr *est,
			struct tc_action *a, int ovr, int bind)
{
	struct nlattr *tb[TCA_NAT_MAX + 1];
	struct tc_nat *parm;
	int ret = 0, err;
	struct tcf_nat *p;
	struct tcf_common *pc;

	if (nla == NULL)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_NAT_MAX, nla, nat_policy);
	if (err < 0)
		return err;

	if (tb[TCA_NAT_PARMS] == NULL)
		return -EINVAL;
	parm = nla_data(tb[TCA_NAT_PARMS]);

	pc = tcf_hash_check(parm->index, a, bind, &nat_hash_info);
	if (!pc) {
		pc = tcf_hash_create(parm->index, est, a, sizeof(*p), bind,
				     &nat_idx_gen, &nat_hash_info);
		if (IS_ERR(pc))
			return PTR_ERR(pc);
		p = to_tcf_nat(pc);
		ret = ACT_P_CREATED;
	} else {
		p = to_tcf_nat(pc);
		if (!ovr) {
			tcf_hash_release(pc, bind, &nat_hash_info);
			return -EEXIST;
		}
	}

	spin_lock_bh(&p->tcf_lock);
	p->old_addr = parm->old_addr;
	p->new_addr = parm->new_addr;
	p->mask = parm->mask;
	p->flags = parm->flags;

	p->tcf_action = parm->action;
	spin_unlock_bh(&p->tcf_lock);

	if (ret == ACT_P_CREATED)
		tcf_hash_insert(pc, &nat_hash_info);

	return ret;
}
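
/* Release the caller's reference (and bind count) on the action. */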
static int tcf_nat_cleanup(struct tc_action *a, int bind)
{
	struct tcf_nat *p = a->priv;

	return tcf_hash_release(&p->common, bind, &nat_hash_info);
}
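
/*
 * Packet path.  An address matches when it agrees with old_addr on the
 * bits set in mask; it is then rewritten to (new_addr & mask) combined
 * with its own bits outside the mask.  For example, with
 * old_addr = 10.0.0.0, new_addr = 192.168.1.0 and mask = 255.255.255.0,
 * source address 10.0.0.7 becomes 192.168.1.7 on egress.  IP and
 * transport checksums are patched incrementally, not recomputed.
 */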
static int tcf_nat(struct sk_buff *skb, struct tc_action *a,
		   struct tcf_result *res)
{
	struct tcf_nat *p = a->priv;
	struct iphdr *iph;
	__be32 old_addr;
	__be32 new_addr;
	__be32 mask;
	__be32 addr;
	int egress;
	int action;
	int ihl;

	spin_lock(&p->tcf_lock);

	p->tcf_tm.lastuse = jiffies;
	old_addr = p->old_addr;
	new_addr = p->new_addr;
	mask = p->mask;
	egress = p->flags & TCA_NAT_FLAG_EGRESS;
	action = p->tcf_action;

	p->tcf_bstats.bytes += qdisc_pkt_len(skb);
	p->tcf_bstats.packets++;

	spin_unlock(&p->tcf_lock);

	if (unlikely(action == TC_ACT_SHOT))
		goto drop;

	if (!pskb_may_pull(skb, sizeof(*iph)))
		goto drop;

	iph = ip_hdr(skb);

	if (egress)
		addr = iph->saddr;
	else
		addr = iph->daddr;

	if (!((old_addr ^ addr) & mask)) {
		if (skb_cloned(skb) &&
		    !skb_clone_writable(skb, sizeof(*iph)) &&
		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
			goto drop;

		new_addr &= mask;
		new_addr |= addr & ~mask;

		/* Rewrite IP header */
		iph = ip_hdr(skb);
		if (egress)
			iph->saddr = new_addr;
		else
			iph->daddr = new_addr;

		csum_replace4(&iph->check, addr, new_addr);
	} else if ((iph->frag_off & htons(IP_OFFSET)) ||
		   iph->protocol != IPPROTO_ICMP) {
		goto out;
	}

	ihl = iph->ihl * 4;

	/* It would be nice to share code with stateful NAT. */
	switch (iph->frag_off & htons(IP_OFFSET) ? 0 : iph->protocol) {
	case IPPROTO_TCP:
	{
		struct tcphdr *tcph;

		if (!pskb_may_pull(skb, ihl + sizeof(*tcph)) ||
		    (skb_cloned(skb) &&
		     !skb_clone_writable(skb, ihl + sizeof(*tcph)) &&
		     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
			goto drop;

		tcph = (void *)(skb_network_header(skb) + ihl);
		inet_proto_csum_replace4(&tcph->check, skb, addr, new_addr, 1);
		break;
	}
	case IPPROTO_UDP:
	{
		struct udphdr *udph;

		if (!pskb_may_pull(skb, ihl + sizeof(*udph)) ||
		    (skb_cloned(skb) &&
		     !skb_clone_writable(skb, ihl + sizeof(*udph)) &&
		     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)))
			goto drop;

		udph = (void *)(skb_network_header(skb) + ihl);
		if (udph->check || skb->ip_summed == CHECKSUM_PARTIAL) {
			inet_proto_csum_replace4(&udph->check, skb, addr,
						 new_addr, 1);
			if (!udph->check)
				udph->check = CSUM_MANGLED_0;
		}
		break;
	}
	case IPPROTO_ICMP:
	{
		struct icmphdr *icmph;

		if (!pskb_may_pull(skb, ihl + sizeof(*icmph)))
			goto drop;

		icmph = (void *)(skb_network_header(skb) + ihl);

		if ((icmph->type != ICMP_DEST_UNREACH) &&
		    (icmph->type != ICMP_TIME_EXCEEDED) &&
		    (icmph->type != ICMP_PARAMETERPROB))
			break;

		if (!pskb_may_pull(skb, ihl + sizeof(*icmph) + sizeof(*iph)))
			goto drop;

		iph = (void *)(icmph + 1);
		if (egress)
			addr = iph->daddr;
		else
			addr = iph->saddr;

		if ((old_addr ^ addr) & mask)
			break;

		if (skb_cloned(skb) &&
		    !skb_clone_writable(skb,
					ihl + sizeof(*icmph) + sizeof(*iph)) &&
		    pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
			goto drop;

		icmph = (void *)(skb_network_header(skb) + ihl);
		iph = (void *)(icmph + 1);

		new_addr &= mask;
		new_addr |= addr & ~mask;

		/* XXX Fix up the inner checksums. */
		if (egress)
			iph->daddr = new_addr;
		else
			iph->saddr = new_addr;

		/* ICMP has no pseudo-header, so pass 0 for the last arg. */
		inet_proto_csum_replace4(&icmph->checksum, skb, addr, new_addr,
					 0);
		break;
	}
	default:
		break;
	}

out:
	return action;

drop:
	spin_lock(&p->tcf_lock);
	p->tcf_qstats.drops++;
	spin_unlock(&p->tcf_lock);
	return TC_ACT_SHOT;
}
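
/*
 * Report the current parameters and timestamps back to userspace as
 * TCA_NAT_PARMS and TCA_NAT_TM attributes.
 */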
static int tcf_nat_dump(struct sk_buff *skb, struct tc_action *a,
			int bind, int ref)
{
	unsigned char *b = skb_tail_pointer(skb);
	struct tcf_nat *p = a->priv;
	struct tc_nat *opt;
	struct tcf_t t;
	int s;

	s = sizeof(*opt);

	/* netlink spinlocks held above us - must use ATOMIC */
	opt = kzalloc(s, GFP_ATOMIC);
	if (unlikely(!opt))
		return -ENOBUFS;

	opt->old_addr = p->old_addr;
	opt->new_addr = p->new_addr;
	opt->mask = p->mask;
	opt->flags = p->flags;

	opt->index = p->tcf_index;
	opt->action = p->tcf_action;
	opt->refcnt = p->tcf_refcnt - ref;
	opt->bindcnt = p->tcf_bindcnt - bind;

	NLA_PUT(skb, TCA_NAT_PARMS, s, opt);
	t.install = jiffies_to_clock_t(jiffies - p->tcf_tm.install);
	t.lastuse = jiffies_to_clock_t(jiffies - p->tcf_tm.lastuse);
	t.expires = jiffies_to_clock_t(p->tcf_tm.expires);
	NLA_PUT(skb, TCA_NAT_TM, sizeof(t), &t);

	kfree(opt);

	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	kfree(opt);
	return -1;
}

static struct tc_action_ops act_nat_ops = {
	.kind		=	"nat",
	.hinfo		=	&nat_hash_info,
	.type		=	TCA_ACT_NAT,
	.capab		=	TCA_CAP_NONE,
	.owner		=	THIS_MODULE,
	.act		=	tcf_nat,
	.dump		=	tcf_nat_dump,
	.cleanup	=	tcf_nat_cleanup,
	.lookup		=	tcf_hash_search,
	.init		=	tcf_nat_init,
	.walk		=	tcf_generic_walker
};

MODULE_DESCRIPTION("Stateless NAT actions");
MODULE_LICENSE("GPL");

static int __init nat_init_module(void)
{
	return tcf_register_action(&act_nat_ops);
}

static void __exit nat_cleanup_module(void)
{
	tcf_unregister_action(&act_nat_ops);
}

module_init(nat_init_module);
module_exit(nat_cleanup_module);
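
/*
 * Illustrative userspace usage (iproute2 syntax; the device and
 * addresses below are made-up examples):
 *
 *	tc qdisc add dev eth0 ingress
 *	tc filter add dev eth0 parent ffff: protocol ip prio 10 u32 \
 *		match ip dst 192.0.2.1/32 \
 *		action nat ingress 192.0.2.1/32 10.0.0.1
 *
 * This rewrites the destination of packets arriving for 192.0.2.1 to
 * 10.0.0.1; a matching "action nat egress" rule on the reverse path
 * rewrites the source back.
 */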