sysfs: Remove double free sysfs_get_sb
[linux-2.6/kvm.git] / net / sched / cls_route.c
blob694dcd85dec83bda586f8a96843a1f8fac6a4259
/*
 * net/sched/cls_route.c	ROUTE4 classifier.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 */
12 #include <linux/module.h>
13 #include <linux/slab.h>
14 #include <linux/types.h>
15 #include <linux/kernel.h>
16 #include <linux/string.h>
17 #include <linux/errno.h>
18 #include <linux/skbuff.h>
19 #include <net/dst.h>
20 #include <net/route.h>
21 #include <net/netlink.h>
22 #include <net/act_api.h>
23 #include <net/pkt_cls.h>
/*
 * 1. For now we assume that route tags < 256.
 *    It allows to use direct table lookups, instead of hash tables.
 * 2. For now we assume that "from TAG" and "fromdev DEV" statements
 *    are mutually exclusive.
 * 3. "to TAG from ANY" has higher priority, than "to ANY from XXX"
 */
/* One slot of the per-head result cache: remembers the last filter
 * matched (or ROUTE4_FAILURE) for a given (route tag, input iface) pair. */
struct route4_fastmap
{
	struct route4_filter	*filter;	/* cached result, or ROUTE4_FAILURE */
	u32			id;		/* dst->tclassid this slot caches */
	int			iif;		/* input interface of the cached flow */
};
/* Per-tcf_proto root: a 16-slot lookup cache plus the bucket table.
 * table[0..255] are indexed by the low byte of the "to" tag;
 * table[256] holds filters reached on the wildcard retry pass. */
struct route4_head
{
	struct route4_fastmap	fastmap[16];
	struct route4_bucket	*table[256+1];
};
/* Second-level table inside one "to" slot; see from_hash() for the
 * index layout. */
struct route4_bucket
{
	/* 16 FROM buckets + 16 IIF buckets + 1 wildcard bucket */
	struct route4_filter	*ht[16+16+1];
};
/* One classifier entry, chained within its bucket in handle order. */
struct route4_filter
{
	struct route4_filter	*next;	/* next filter in the same ht[] chain */
	u32			id;	/* packed tag: to | (from << 16) */
	int			iif;	/* input interface, when IIF-keyed */

	struct tcf_result	res;	/* class returned on match */
	struct tcf_exts		exts;	/* attached actions/policers */
	u32			handle;	/* user-visible packed handle */
	struct route4_bucket	*bkt;	/* owning bucket, for fast delete */
};
/* Sentinel stored in the fastmap to cache a negative lookup result. */
#define ROUTE4_FAILURE ((struct route4_filter*)(-1L))
/* Maps the generic tcf_exts slots onto this classifier's netlink
 * attribute types. */
static const struct tcf_ext_map route_ext_map = {
	.police = TCA_ROUTE4_POLICE,
	.action = TCA_ROUTE4_ACT
};
71 static __inline__ int route4_fastmap_hash(u32 id, int iif)
73 return id&0xF;
/*
 * Invalidate the whole result cache.  Taken under the qdisc root lock
 * so it cannot race with route4_classify() running on the tx/rx paths;
 * the id argument is currently unused (the entire map is wiped).
 */
static inline
void route4_reset_fastmap(struct Qdisc *q, struct route4_head *head, u32 id)
{
	spinlock_t *root_lock = qdisc_root_sleeping_lock(q);

	spin_lock_bh(root_lock);
	memset(head->fastmap, 0, sizeof(head->fastmap));
	spin_unlock_bh(root_lock);
}
/*
 * Cache a classification result (a filter, or ROUTE4_FAILURE) for the
 * (id, iif) pair.  NOTE(review): writes three fields without a lock —
 * presumably relies on the classify path re-checking id and iif before
 * trusting the cached filter pointer; confirm against callers.
 */
static inline void
route4_set_fastmap(struct route4_head *head, u32 id, int iif,
		   struct route4_filter *f)
{
	int h = route4_fastmap_hash(id, iif);
	head->fastmap[h].id = id;
	head->fastmap[h].iif = iif;
	head->fastmap[h].filter = f;
}
96 static __inline__ int route4_hash_to(u32 id)
98 return id&0xFF;
101 static __inline__ int route4_hash_from(u32 id)
103 return (id>>16)&0xF;
/* IIF-bucket index (16..31): keyed on bits 16..19 of the interface
 * index, offset past the 16 FROM buckets. */
static __inline__ int route4_hash_iif(int iif)
{
	int nibble = (iif >> 16) & 0xF;

	return 16 + nibble;
}
/* Index of the single wildcard bucket, placed after the
 * 16 FROM + 16 IIF buckets. */
static __inline__ int route4_hash_wild(void)
{
	return 16 + 16;
}
/*
 * Apply filter f to the skb being classified.  Expands inside the
 * bucket-scan loops of route4_classify(): `continue` skips to the next
 * filter in the chain when the actions ask for it (r < 0), and a plain
 * `return` leaves route4_classify() itself.  On a plain match (no
 * extensions) the result is also cached in the fastmap unless an
 * earlier action vetoed caching via dont_cache.
 */
#define ROUTE4_APPLY_RESULT()			\
{						\
	*res = f->res;				\
	if (tcf_exts_is_available(&f->exts)) {	\
		int r = tcf_exts_exec(skb, &f->exts, res); \
		if (r < 0) {			\
			dont_cache = 1;		\
			continue;		\
		}				\
		return r;			\
	} else if (!dont_cache)			\
		route4_set_fastmap(head, id, iif, f); \
	return 0;				\
}
/*
 * Classify a packet by its routing decision.
 *
 * Lookup order: (1) fastmap cache; (2) FROM-keyed chain, IIF-keyed
 * chain, then wildcard chain of the bucket selected by the "to" tag;
 * (3) one retry against table[256] with the FROM half of the id
 * masked off.  Misses are cached as ROUTE4_FAILURE unless an action
 * set dont_cache.  Returns 0 (or the action verdict) on match, -1 on
 * no match.  ROUTE4_APPLY_RESULT() may return from this function.
 */
static int route4_classify(struct sk_buff *skb, struct tcf_proto *tp,
			   struct tcf_result *res)
{
	struct route4_head *head = (struct route4_head*)tp->root;
	struct dst_entry *dst;
	struct route4_bucket *b;
	struct route4_filter *f;
	u32 id, h;
	int iif, dont_cache = 0;

	/* No route decision attached -> nothing to key on. */
	if ((dst = skb_dst(skb)) == NULL)
		goto failure;

	id = dst->tclassid;
	/* No filters configured yet: fall back to interpreting tclassid
	 * directly as a class id of this qdisc. */
	if (head == NULL)
		goto old_method;

	iif = ((struct rtable*)dst)->fl.iif;

	/* Fast path: cached result, validated against both id and iif. */
	h = route4_fastmap_hash(id, iif);
	if (id == head->fastmap[h].id &&
	    iif == head->fastmap[h].iif &&
	    (f = head->fastmap[h].filter) != NULL) {
		if (f == ROUTE4_FAILURE)
			goto failure;

		*res = f->res;
		return 0;
	}

	h = route4_hash_to(id);

restart:
	if ((b = head->table[h]) != NULL) {
		/* Exact (to, from) match. */
		for (f = b->ht[route4_hash_from(id)]; f; f = f->next)
			if (f->id == id)
				ROUTE4_APPLY_RESULT();

		/* Input-interface match. */
		for (f = b->ht[route4_hash_iif(iif)]; f; f = f->next)
			if (f->iif == iif)
				ROUTE4_APPLY_RESULT();

		/* "from any" wildcard. */
		for (f = b->ht[route4_hash_wild()]; f; f = f->next)
			ROUTE4_APPLY_RESULT();

	}
	/* Retry once against the wildcard "to" slot with FROM stripped. */
	if (h < 256) {
		h = 256;
		id &= ~0xFFFF;
		goto restart;
	}

	if (!dont_cache)
		route4_set_fastmap(head, id, iif, ROUTE4_FAILURE);
failure:
	return -1;

old_method:
	/* tclassid with matching (or absent) major handle is used verbatim. */
	if (id && (TC_H_MAJ(id) == 0 ||
		   !(TC_H_MAJ(id^tp->q->handle)))) {
		res->classid = id;
		res->class = 0;
		return 0;
	}
	return -1;
}
198 static inline u32 to_hash(u32 id)
200 u32 h = id&0xFF;
201 if (id&0x8000)
202 h += 256;
203 return h;
206 static inline u32 from_hash(u32 id)
208 id &= 0xFFFF;
209 if (id == 0xFFFF)
210 return 32;
211 if (!(id & 0x8000)) {
212 if (id > 255)
213 return 256;
214 return id&0xF;
216 return 16 + (id&0xF);
/*
 * Look a filter up by its user handle.  Both halves of the handle are
 * range-checked before they are used as table indices (to_hash() can
 * yield up to 511 and from_hash() returns 256 for malformed input).
 * Returns the filter as an opaque unsigned long, or 0 if not found.
 */
static unsigned long route4_get(struct tcf_proto *tp, u32 handle)
{
	struct route4_head *head = (struct route4_head*)tp->root;
	struct route4_bucket *b;
	struct route4_filter *f;
	unsigned h1, h2;

	if (!head)
		return 0;

	h1 = to_hash(handle);
	if (h1 > 256)
		return 0;

	h2 = from_hash(handle>>16);
	if (h2 > 32)
		return 0;

	if ((b = head->table[h1]) != NULL) {
		for (f = b->ht[h2]; f; f = f->next)
			if (f->handle == handle)
				return (unsigned long)f;
	}
	return 0;
}
/* Counterpart of route4_get(); route4 keeps no per-reference state,
 * so there is nothing to release. */
static void route4_put(struct tcf_proto *tp, unsigned long f)
{
}
/* Classifier instance init: the root table is allocated lazily on the
 * first route4_change(), so nothing to do here. */
static int route4_init(struct tcf_proto *tp)
{
	return 0;
}
/*
 * Free one filter: drop its class binding, destroy attached
 * actions/policers, then release the memory.  Caller must already
 * have unlinked f from its bucket chain.
 */
static inline void
route4_delete_filter(struct tcf_proto *tp, struct route4_filter *f)
{
	tcf_unbind_filter(tp, &f->res);
	tcf_exts_destroy(tp, &f->exts);
	kfree(f);
}
/*
 * Tear down the whole classifier: every filter in every chain of every
 * bucket (all 257 table slots, all 33 sub-buckets), then the buckets,
 * then the head itself.  No locking here — runs when the tcf_proto is
 * being destroyed and no packets can reach it.
 */
static void route4_destroy(struct tcf_proto *tp)
{
	struct route4_head *head = tp->root;
	int h1, h2;

	if (head == NULL)
		return;

	for (h1=0; h1<=256; h1++) {
		struct route4_bucket *b;

		if ((b = head->table[h1]) != NULL) {
			for (h2=0; h2<=32; h2++) {
				struct route4_filter *f;

				while ((f = b->ht[h2]) != NULL) {
					b->ht[h2] = f->next;
					route4_delete_filter(tp, f);
				}
			}
			kfree(b);
		}
	}
	kfree(head);
}
/*
 * Remove one filter (passed as an opaque handle in arg).  The filter is
 * unlinked from its bucket chain under tcf_tree_lock, the fastmap is
 * flushed, and the filter freed.  If its bucket is then completely
 * empty, the bucket itself is unlinked from the head table and freed.
 * Returns 0 whether or not the filter was found in the chain; -EINVAL
 * only for a NULL head or filter.
 */
static int route4_delete(struct tcf_proto *tp, unsigned long arg)
{
	struct route4_head *head = (struct route4_head*)tp->root;
	struct route4_filter **fp, *f = (struct route4_filter*)arg;
	unsigned h = 0;
	struct route4_bucket *b;
	int i;

	if (!head || !f)
		return -EINVAL;

	h = f->handle;
	b = f->bkt;

	for (fp = &b->ht[from_hash(h>>16)]; *fp; fp = &(*fp)->next) {
		if (*fp == f) {
			/* Unlink under the tree lock so readers never see
			 * a half-removed chain. */
			tcf_tree_lock(tp);
			*fp = f->next;
			tcf_tree_unlock(tp);

			route4_reset_fastmap(tp->q, head, f->id);
			route4_delete_filter(tp, f);

			/* Strip tree */

			for (i=0; i<=32; i++)
				if (b->ht[i])
					return 0;

			/* OK, session has no flows */

			tcf_tree_lock(tp);
			head->table[to_hash(h)] = NULL;
			tcf_tree_unlock(tp);

			kfree(b);
			return 0;
		}
	}
	return 0;
}
/* Netlink attribute validation policy: all four attributes are u32. */
static const struct nla_policy route4_policy[TCA_ROUTE4_MAX + 1] = {
	[TCA_ROUTE4_CLASSID]	= { .type = NLA_U32 },
	[TCA_ROUTE4_TO]		= { .type = NLA_U32 },
	[TCA_ROUTE4_FROM]	= { .type = NLA_U32 },
	[TCA_ROUTE4_IIF]	= { .type = NLA_U32 },
};
/*
 * Validate netlink parameters and apply them to filter f.
 *
 * Builds the packed handle nhandle from TO (low byte), and FROM or IIF
 * (high half; FROM and IIF are mutually exclusive, absence of both
 * packs the 0xFFFF wildcard).  For a new filter with an explicit
 * handle, nhandle must reproduce that handle exactly.  Allocates the
 * destination bucket on first use.  Field updates happen under
 * tcf_tree_lock; extension changes go through tcf_exts_change which
 * does its own locking.  Returns 0 or a negative errno; on error the
 * validated extensions in e are destroyed.
 */
static int route4_set_parms(struct tcf_proto *tp, unsigned long base,
	struct route4_filter *f, u32 handle, struct route4_head *head,
	struct nlattr **tb, struct nlattr *est, int new)
{
	int err;
	u32 id = 0, to = 0, nhandle = 0x8000;
	struct route4_filter *fp;
	unsigned int h1;
	struct route4_bucket *b;
	struct tcf_exts e;

	err = tcf_exts_validate(tp, tb, est, &e, &route_ext_map);
	if (err < 0)
		return err;

	err = -EINVAL;
	if (tb[TCA_ROUTE4_TO]) {
		/* Bit 15 of the handle means "no TO tag"; it cannot be set
		 * when a TO attribute is supplied. */
		if (new && handle & 0x8000)
			goto errout;
		to = nla_get_u32(tb[TCA_ROUTE4_TO]);
		if (to > 0xFF)
			goto errout;
		nhandle = to;
	}

	if (tb[TCA_ROUTE4_FROM]) {
		if (tb[TCA_ROUTE4_IIF])
			goto errout;
		id = nla_get_u32(tb[TCA_ROUTE4_FROM]);
		if (id > 0xFF)
			goto errout;
		nhandle |= id << 16;
	} else if (tb[TCA_ROUTE4_IIF]) {
		id = nla_get_u32(tb[TCA_ROUTE4_IIF]);
		if (id > 0x7FFF)
			goto errout;
		nhandle |= (id | 0x8000) << 16;
	} else
		nhandle |= 0xFFFF << 16;

	if (handle && new) {
		nhandle |= handle & 0x7F00;
		if (nhandle != handle)
			goto errout;
	}

	h1 = to_hash(nhandle);
	if ((b = head->table[h1]) == NULL) {
		err = -ENOBUFS;
		b = kzalloc(sizeof(struct route4_bucket), GFP_KERNEL);
		if (b == NULL)
			goto errout;

		tcf_tree_lock(tp);
		head->table[h1] = b;
		tcf_tree_unlock(tp);
	} else {
		unsigned int h2 = from_hash(nhandle >> 16);
		err = -EEXIST;
		/* NOTE(review): on the new-filter path f comes from kzalloc
		 * in route4_change, so f->handle is still 0 here — this
		 * duplicate check presumably should compare against nhandle;
		 * verify against upstream history before changing (the
		 * change path would then match f itself). */
		for (fp = b->ht[h2]; fp; fp = fp->next)
			if (fp->handle == f->handle)
				goto errout;
	}

	/* Commit the new keying fields atomically w.r.t. readers. */
	tcf_tree_lock(tp);
	if (tb[TCA_ROUTE4_TO])
		f->id = to;

	if (tb[TCA_ROUTE4_FROM])
		f->id = to | id<<16;
	else if (tb[TCA_ROUTE4_IIF])
		f->iif = id;

	f->handle = nhandle;
	f->bkt = b;
	tcf_tree_unlock(tp);

	if (tb[TCA_ROUTE4_CLASSID]) {
		f->res.classid = nla_get_u32(tb[TCA_ROUTE4_CLASSID]);
		tcf_bind_filter(tp, &f->res, base);
	}

	tcf_exts_change(tp, &f->exts, &e);

	return 0;
errout:
	tcf_exts_destroy(tp, &e);
	return err;
}
/*
 * Create a new filter or change an existing one (*arg non-NULL).
 *
 * For a change, the existing filter's old handle is remembered so it
 * can be unlinked from its previous chain after being re-inserted at
 * the position dictated by its (possibly new) handle.  The root head
 * is allocated lazily on the first insert.  Chains are kept sorted by
 * ascending handle; insert and old-position removal both happen inside
 * one tcf_tree_lock section.  On success *arg is set to the filter.
 * The errout path only runs for the new-filter path, where f is either
 * NULL or the freshly allocated (not yet linked) filter, so kfree(f)
 * is safe.
 */
static int route4_change(struct tcf_proto *tp, unsigned long base,
		       u32 handle,
		       struct nlattr **tca,
		       unsigned long *arg)
{
	struct route4_head *head = tp->root;
	struct route4_filter *f, *f1, **fp;
	struct route4_bucket *b;
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_ROUTE4_MAX + 1];
	unsigned int h, th;
	u32 old_handle = 0;
	int err;

	if (opt == NULL)
		return handle ? -EINVAL : 0;

	err = nla_parse_nested(tb, TCA_ROUTE4_MAX, opt, route4_policy);
	if (err < 0)
		return err;

	if ((f = (struct route4_filter*)*arg) != NULL) {
		/* Changing an existing filter: handle must match if given. */
		if (f->handle != handle && handle)
			return -EINVAL;

		if (f->bkt)
			old_handle = f->handle;

		err = route4_set_parms(tp, base, f, handle, head, tb,
			tca[TCA_RATE], 0);
		if (err < 0)
			return err;

		goto reinsert;
	}

	err = -ENOBUFS;
	if (head == NULL) {
		head = kzalloc(sizeof(struct route4_head), GFP_KERNEL);
		if (head == NULL)
			goto errout;

		tcf_tree_lock(tp);
		tp->root = head;
		tcf_tree_unlock(tp);
	}

	f = kzalloc(sizeof(struct route4_filter), GFP_KERNEL);
	if (f == NULL)
		goto errout;

	err = route4_set_parms(tp, base, f, handle, head, tb,
		tca[TCA_RATE], 1);
	if (err < 0)
		goto errout;

reinsert:
	/* Insert in handle order into the chain chosen by the new handle. */
	h = from_hash(f->handle >> 16);
	for (fp = &f->bkt->ht[h]; (f1=*fp) != NULL; fp = &f1->next)
		if (f->handle < f1->handle)
			break;

	f->next = f1;
	tcf_tree_lock(tp);
	*fp = f;

	/* If the handle changed, remove the stale link at the old spot. */
	if (old_handle && f->handle != old_handle) {
		th = to_hash(old_handle);
		h = from_hash(old_handle >> 16);
		if ((b = head->table[th]) != NULL) {
			for (fp = &b->ht[h]; *fp; fp = &(*fp)->next) {
				if (*fp == f) {
					*fp = f->next;
					break;
				}
			}
		}
	}
	tcf_tree_unlock(tp);

	route4_reset_fastmap(tp->q, head, f->id);
	*arg = (unsigned long)f;
	return 0;

errout:
	kfree(f);
	return err;
}
/*
 * Iterate over every filter, invoking arg->fn for each one past the
 * arg->skip threshold.  Iteration stops when fn returns negative
 * (arg->stop is raised) and counts visits in arg->count, as required
 * by the tcf_walker contract.
 */
static void route4_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
	struct route4_head *head = tp->root;
	unsigned h, h1;

	if (head == NULL)
		arg->stop = 1;

	if (arg->stop)
		return;

	for (h = 0; h <= 256; h++) {
		struct route4_bucket *b = head->table[h];

		if (b) {
			for (h1 = 0; h1 <= 32; h1++) {
				struct route4_filter *f;

				for (f = b->ht[h1]; f; f = f->next) {
					if (arg->count < arg->skip) {
						arg->count++;
						continue;
					}
					if (arg->fn(tp, (unsigned long)f, arg) < 0) {
						arg->stop = 1;
						return;
					}
					arg->count++;
				}
			}
		}
	}
}
/*
 * Dump one filter to a netlink message.  Reconstructs the user-visible
 * attributes from the packed handle: bit 15 clear -> a TO tag exists;
 * bit 31 set -> the high half encodes an IIF (unless it is the 0xFFFF
 * wildcard), otherwise a FROM tag.  The NLA_PUT_U32 macros jump to
 * nla_put_failure on overflow, where the partial message is trimmed
 * back to the saved tail pointer.  Returns skb->len or -1.
 */
static int route4_dump(struct tcf_proto *tp, unsigned long fh,
		       struct sk_buff *skb, struct tcmsg *t)
{
	struct route4_filter *f = (struct route4_filter*)fh;
	unsigned char *b = skb_tail_pointer(skb);
	struct nlattr *nest;
	u32 id;

	if (f == NULL)
		return skb->len;

	t->tcm_handle = f->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (!(f->handle&0x8000)) {
		id = f->id&0xFF;
		NLA_PUT_U32(skb, TCA_ROUTE4_TO, id);
	}
	if (f->handle&0x80000000) {
		if ((f->handle>>16) != 0xFFFF)
			NLA_PUT_U32(skb, TCA_ROUTE4_IIF, f->iif);
	} else {
		id = f->id>>16;
		NLA_PUT_U32(skb, TCA_ROUTE4_FROM, id);
	}
	if (f->res.classid)
		NLA_PUT_U32(skb, TCA_ROUTE4_CLASSID, f->res.classid);

	if (tcf_exts_dump(skb, &f->exts, &route_ext_map) < 0)
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &f->exts, &route_ext_map) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}
/* Classifier operations registered with the tc core under kind "route". */
static struct tcf_proto_ops cls_route4_ops __read_mostly = {
	.kind		=	"route",
	.classify	=	route4_classify,
	.init		=	route4_init,
	.destroy	=	route4_destroy,
	.get		=	route4_get,
	.put		=	route4_put,
	.change		=	route4_change,
	.delete		=	route4_delete,
	.walk		=	route4_walk,
	.dump		=	route4_dump,
	.owner		=	THIS_MODULE,
};
/* Module entry: register the "route" classifier with the tc core. */
static int __init init_route4(void)
{
	return register_tcf_proto_ops(&cls_route4_ops);
}
/* Module exit: unregister the classifier. */
static void __exit exit_route4(void)
{
	unregister_tcf_proto_ops(&cls_route4_ops);
}
/* Module hookup and license declaration. */
module_init(init_route4)
module_exit(exit_route4)
MODULE_LICENSE("GPL");