/*
 * net/sched/cls_fw.c	Classifier mapping ipchains' fwmark to traffic class.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Changes:
 * Karlis Peisenieks <karlis@mt.lv> : 990415 : fw_walk off by one
 * Karlis Peisenieks <karlis@mt.lv> : 990415 : fw_delete killed all the filter (and kernel).
 * Alex <alex@pilotsoft.com> : 2004xxyy: Added Action extension
 *
 * JHS: We should remove the CONFIG_NET_CLS_IND from here
 *	eventually when the meta match extension is made available
 *
 */
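
/*
 * Illustrative usage (not part of this file; exact syntax depends on the
 * iproute2 and iptables versions in use):
 *
 *	# mark outgoing HTTP traffic with fwmark 6
 *	iptables -t mangle -A OUTPUT -p tcp --dport 80 -j MARK --set-mark 6
 *
 *	# map fwmark 6 to class 1:6 with the fw classifier
 *	tc filter add dev eth0 parent 1: protocol ip prio 1 handle 6 fw classid 1:6
 */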

#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/netlink.h>
#include <net/act_api.h>
#include <net/pkt_cls.h>

#define HTSIZE (PAGE_SIZE/sizeof(struct fw_filter *))

struct fw_head {
	struct fw_filter *ht[HTSIZE];
	u32 mask;
};

struct fw_filter {
	struct fw_filter *next;
	u32 id;
	struct tcf_result res;
#ifdef CONFIG_NET_CLS_IND
	char indev[IFNAMSIZ];
#endif /* CONFIG_NET_CLS_IND */
	struct tcf_exts exts;
};

static const struct tcf_ext_map fw_ext_map = {
	.action = TCA_FW_ACT,
	.police = TCA_FW_POLICE
};
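
/*
 * Fold a 32-bit fwmark handle into one of the HTSIZE hash buckets.
 * HTSIZE depends on PAGE_SIZE, so the common table sizes are handled
 * explicitly by XOR-ing equal-width slices of the handle; any other
 * size falls back to simple masking.
 */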

static inline int fw_hash(u32 handle)
{
	if (HTSIZE == 4096)
		return ((handle >> 24) & 0xFFF) ^
			((handle >> 12) & 0xFFF) ^
			(handle & 0xFFF);
	else if (HTSIZE == 2048)
		return ((handle >> 22) & 0x7FF) ^
			((handle >> 11) & 0x7FF) ^
			(handle & 0x7FF);
	else if (HTSIZE == 1024)
		return ((handle >> 20) & 0x3FF) ^
			((handle >> 10) & 0x3FF) ^
			(handle & 0x3FF);
	else if (HTSIZE == 512)
		return (handle >> 27) ^
			((handle >> 18) & 0x1FF) ^
			((handle >> 9) & 0x1FF) ^
			(handle & 0x1FF);
	else if (HTSIZE == 256) {
		u8 *t = (u8 *) &handle;
		return t[0] ^ t[1] ^ t[2] ^ t[3];
	} else
		return handle & (HTSIZE - 1);
}
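
/*
 * Classify a packet by looking up skb->mark (masked with head->mask) in
 * the filter hash table.  When no filters have been configured yet, fall
 * back to the old behaviour of interpreting the mark directly as a class
 * id on this qdisc.
 */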

static int fw_classify(struct sk_buff *skb, const struct tcf_proto *tp,
		       struct tcf_result *res)
{
	struct fw_head *head = (struct fw_head *)tp->root;
	struct fw_filter *f;
	int r;
	u32 id = skb->mark;

	if (head != NULL) {
		id &= head->mask;
		for (f = head->ht[fw_hash(id)]; f; f = f->next) {
			if (f->id == id) {
				*res = f->res;
#ifdef CONFIG_NET_CLS_IND
				if (!tcf_match_indev(skb, f->indev))
					continue;
#endif /* CONFIG_NET_CLS_IND */
				r = tcf_exts_exec(skb, &f->exts, res);
				if (r < 0)
					continue;

				return r;
			}
		}
	} else {
		/* old method */
		if (id && (TC_H_MAJ(id) == 0 ||
			   !(TC_H_MAJ(id ^ tp->q->handle)))) {
			res->classid = id;
			res->class = 0;
			return 0;
		}
	}

	return -1;
}

static unsigned long fw_get(struct tcf_proto *tp, u32 handle)
{
	struct fw_head *head = (struct fw_head *)tp->root;
	struct fw_filter *f;

	if (head == NULL)
		return 0;

	for (f = head->ht[fw_hash(handle)]; f; f = f->next) {
		if (f->id == handle)
			return (unsigned long)f;
	}
	return 0;
}

static void fw_put(struct tcf_proto *tp, unsigned long f)
{
}

static int fw_init(struct tcf_proto *tp)
{
	return 0;
}

static void fw_delete_filter(struct tcf_proto *tp, struct fw_filter *f)
{
	tcf_unbind_filter(tp, &f->res);
	tcf_exts_destroy(tp, &f->exts);
	kfree(f);
}
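
/*
 * Tear down the whole classifier instance: unlink and free every filter
 * in every hash bucket, then free the head itself.
 */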

static void fw_destroy(struct tcf_proto *tp)
{
	struct fw_head *head = tp->root;
	struct fw_filter *f;
	int h;

	if (head == NULL)
		return;

	for (h = 0; h < HTSIZE; h++) {
		while ((f = head->ht[h]) != NULL) {
			head->ht[h] = f->next;
			fw_delete_filter(tp, f);
		}
	}
	kfree(head);
}

static int fw_delete(struct tcf_proto *tp, unsigned long arg)
{
	struct fw_head *head = (struct fw_head *)tp->root;
	struct fw_filter *f = (struct fw_filter *)arg;
	struct fw_filter **fp;

	if (head == NULL || f == NULL)
		goto out;

	for (fp = &head->ht[fw_hash(f->id)]; *fp; fp = &(*fp)->next) {
		if (*fp == f) {
			tcf_tree_lock(tp);
			*fp = f->next;
			tcf_tree_unlock(tp);
			fw_delete_filter(tp, f);
			return 0;
		}
	}
out:
	return -EINVAL;
}
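
/*
 * Netlink attribute policy for the TCA_FW_* options, followed by the
 * shared helper that applies those attributes (classid, optional incoming
 * device and mask, plus actions/policers) to a filter.
 */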

static const struct nla_policy fw_policy[TCA_FW_MAX + 1] = {
	[TCA_FW_CLASSID]	= { .type = NLA_U32 },
	[TCA_FW_INDEV]		= { .type = NLA_STRING, .len = IFNAMSIZ },
	[TCA_FW_MASK]		= { .type = NLA_U32 },
};

static int
fw_change_attrs(struct tcf_proto *tp, struct fw_filter *f,
		struct nlattr **tb, struct nlattr **tca, unsigned long base)
{
	struct fw_head *head = (struct fw_head *)tp->root;
	struct tcf_exts e;
	u32 mask;
	int err;

	err = tcf_exts_validate(tp, tb, tca[TCA_RATE], &e, &fw_ext_map);
	if (err < 0)
		return err;

	err = -EINVAL;
	if (tb[TCA_FW_CLASSID]) {
		f->res.classid = nla_get_u32(tb[TCA_FW_CLASSID]);
		tcf_bind_filter(tp, &f->res, base);
	}

#ifdef CONFIG_NET_CLS_IND
	if (tb[TCA_FW_INDEV]) {
		err = tcf_change_indev(tp, f->indev, tb[TCA_FW_INDEV]);
		if (err < 0)
			goto errout;
	}
#endif /* CONFIG_NET_CLS_IND */

	if (tb[TCA_FW_MASK]) {
		mask = nla_get_u32(tb[TCA_FW_MASK]);
		if (mask != head->mask)
			goto errout;
	} else if (head->mask != 0xFFFFFFFF)
		goto errout;

	tcf_exts_change(tp, &f->exts, &e);

	return 0;
errout:
	tcf_exts_destroy(tp, &e);
	return err;
}
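
/*
 * Create a new filter or update an existing one.  The first filter to be
 * added also allocates the hash table head and fixes the mark mask; later
 * changes must keep the same mask.
 */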

static int fw_change(struct sk_buff *in_skb,
		     struct tcf_proto *tp, unsigned long base,
		     u32 handle,
		     struct nlattr **tca,
		     unsigned long *arg)
{
	struct fw_head *head = (struct fw_head *)tp->root;
	struct fw_filter *f = (struct fw_filter *) *arg;
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_FW_MAX + 1];
	int err;

	if (!opt)
		return handle ? -EINVAL : 0;

	err = nla_parse_nested(tb, TCA_FW_MAX, opt, fw_policy);
	if (err < 0)
		return err;

	if (f != NULL) {
		if (f->id != handle && handle)
			return -EINVAL;
		return fw_change_attrs(tp, f, tb, tca, base);
	}

	if (!handle)
		return -EINVAL;

	if (head == NULL) {
		u32 mask = 0xFFFFFFFF;
		if (tb[TCA_FW_MASK])
			mask = nla_get_u32(tb[TCA_FW_MASK]);

		head = kzalloc(sizeof(struct fw_head), GFP_KERNEL);
		if (head == NULL)
			return -ENOBUFS;
		head->mask = mask;

		tcf_tree_lock(tp);
		tp->root = head;
		tcf_tree_unlock(tp);
	}

	f = kzalloc(sizeof(struct fw_filter), GFP_KERNEL);
	if (f == NULL)
		return -ENOBUFS;

	f->id = handle;

	err = fw_change_attrs(tp, f, tb, tca, base);
	if (err < 0)
		goto errout;

	f->next = head->ht[fw_hash(handle)];
	tcf_tree_lock(tp);
	head->ht[fw_hash(handle)] = f;
	tcf_tree_unlock(tp);

	*arg = (unsigned long)f;
	return 0;

errout:
	kfree(f);
	return err;
}

static void fw_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
	struct fw_head *head = (struct fw_head *)tp->root;
	int h;

	if (head == NULL)
		arg->stop = 1;

	if (arg->stop)
		return;

	for (h = 0; h < HTSIZE; h++) {
		struct fw_filter *f;

		for (f = head->ht[h]; f; f = f->next) {
			if (arg->count < arg->skip) {
				arg->count++;
				continue;
			}
			if (arg->fn(tp, (unsigned long)f, arg) < 0) {
				arg->stop = 1;
				return;
			}
			arg->count++;
		}
	}
}
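
/*
 * Dump one filter back to user space over netlink: handle, classid,
 * optional indev and mask, then the attached extensions and their stats.
 */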

static int fw_dump(struct tcf_proto *tp, unsigned long fh,
		   struct sk_buff *skb, struct tcmsg *t)
{
	struct fw_head *head = (struct fw_head *)tp->root;
	struct fw_filter *f = (struct fw_filter *)fh;
	unsigned char *b = skb_tail_pointer(skb);
	struct nlattr *nest;

	if (f == NULL)
		return skb->len;

	t->tcm_handle = f->id;

	if (!f->res.classid && !tcf_exts_is_available(&f->exts))
		return skb->len;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (f->res.classid &&
	    nla_put_u32(skb, TCA_FW_CLASSID, f->res.classid))
		goto nla_put_failure;
#ifdef CONFIG_NET_CLS_IND
	if (strlen(f->indev) &&
	    nla_put_string(skb, TCA_FW_INDEV, f->indev))
		goto nla_put_failure;
#endif /* CONFIG_NET_CLS_IND */
	if (head->mask != 0xFFFFFFFF &&
	    nla_put_u32(skb, TCA_FW_MASK, head->mask))
		goto nla_put_failure;

	if (tcf_exts_dump(skb, &f->exts, &fw_ext_map) < 0)
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &f->exts, &fw_ext_map) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}

static struct tcf_proto_ops cls_fw_ops __read_mostly = {
	.kind		=	"fw",
	.classify	=	fw_classify,
	.init		=	fw_init,
	.destroy	=	fw_destroy,
	.get		=	fw_get,
	.put		=	fw_put,
	.change		=	fw_change,
	.delete		=	fw_delete,
	.walk		=	fw_walk,
	.dump		=	fw_dump,
	.owner		=	THIS_MODULE,
};

static int __init init_fw(void)
{
	return register_tcf_proto_ops(&cls_fw_ops);
}

static void __exit exit_fw(void)
{
	unregister_tcf_proto_ops(&cls_fw_ops);
}

module_init(init_fw)
module_exit(exit_fw)
MODULE_LICENSE("GPL");