/*
 * net/sched/cls_u32.c	Ugly (or Universal) 32bit key Packet Classifier.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 *	The filters are packed to hash tables of key nodes
 *	with a set of 32bit key/mask pairs at every node.
 *	Nodes reference next level hash tables etc.
 *
 *	This scheme is the best universal classifier I managed to
 *	invent; it is not super-fast, but it is not slow (provided you
 *	program it correctly), and general enough.  And its relative
 *	speed grows as the number of rules becomes larger.
 *
 *	It seems that it represents the best middle point between
 *	speed and manageability both by human and by machine.
 *
 *	It is especially useful for link sharing combined with QoS;
 *	pure RSVP doesn't need such a general approach and can use
 *	much simpler (and faster) schemes, sort of cls_rsvp.c.
 */

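/*
 * For orientation: filters built on this classifier are normally
 * configured from user space via tc(8).  An illustrative example
 * (device and class ids are placeholders), matching a destination
 * address and mapping it to class 1:2:
 *
 *	tc filter add dev eth0 parent 1:0 protocol ip prio 1 u32 \
 *		match ip dst 192.168.0.1 flowid 1:2
 *
 * tc translates the match into the tc_u32_sel/tc_u32_key structures
 * consumed by u32_change() below.
 */
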
#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/bitops.h>
#include <linux/config.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/notifier.h>
#include <linux/rtnetlink.h>
#include <net/ip.h>
#include <net/route.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/pkt_sched.h>

struct tc_u_knode
{
	struct tc_u_knode	*next;
	u32			handle;
	struct tc_u_hnode	*ht_up;
#ifdef CONFIG_NET_CLS_POLICE
	struct tcf_police	*police;
#endif
	struct tcf_result	res;
	struct tc_u_hnode	*ht_down;
	struct tc_u32_sel	sel;
};

struct tc_u_hnode
{
	struct tc_u_hnode	*next;
	u32			handle;
	struct tc_u_common	*tp_c;
	int			refcnt;
	unsigned		divisor;
	u32			hgenerator;
	struct tc_u_knode	*ht[1];
};

struct tc_u_common
{
	struct tc_u_common	*next;
	struct tc_u_hnode	*hlist;
	struct Qdisc		*q;
	int			refcnt;
	u32			hgenerator;
};

static struct tc_u_common *u32_list;

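/*
 * How the pieces fit together: a tc_u_common is shared by all u32
 * tcf_proto instances attached to the same qdisc.  It chains tc_u_hnode
 * hash tables; each hnode holds divisor+1 bucket heads (the trailing
 * ht[1] member is the old variable-length-array idiom), and each bucket
 * is a linked list of tc_u_knode match nodes.  A knode whose ht_down is
 * set links to a lower-level hnode, which is what makes the classifier
 * hierarchical.
 */
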
static __inline__ unsigned u32_hash_fold(u32 key, struct tc_u32_sel *sel)
{
	unsigned h = key & sel->hmask;

	h ^= h>>16;
	h ^= h>>8;
	return h;
}

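/*
 * Worked example (illustrative values): with sel->hmask == 0x000000FF
 * and key == 0xC0A80001 (192.168.0.1), h starts as 0x00000001; the two
 * folding XORs leave it unchanged, and the caller then picks the bucket
 * with "ht->divisor & h", i.e. bucket 1 for any divisor mask >= 1.
 * The shifts only matter when hmask selects bits above the low byte.
 */
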
static int u32_classify(struct sk_buff *skb, struct tcf_proto *tp, struct tcf_result *res)
{
	struct {
		struct tc_u_knode *knode;
		u8		  *ptr;
	} stack[TC_U32_MAXDEPTH];

	struct tc_u_hnode *ht = (struct tc_u_hnode*)tp->root;
	u8 *ptr = skb->nh.raw;
	struct tc_u_knode *n;
	int sdepth = 0;
	int off2 = 0;
	int sel = 0;
	int i;

#if !defined(__i386__) && !defined(__mc68000__)
	if ((unsigned long)ptr & 3)
		return -1;
#endif

next_ht:
	n = ht->ht[sel];

next_knode:
	if (n) {
		struct tc_u32_key *key = n->sel.keys;

		for (i = n->sel.nkeys; i>0; i--, key++) {
			if ((*(u32*)(ptr+key->off+(off2&key->offmask))^key->val)&key->mask) {
				n = n->next;
				goto next_knode;
			}
		}
		if (n->ht_down == NULL) {
check_terminal:
			if (n->sel.flags&TC_U32_TERMINAL) {
				*res = n->res;
#ifdef CONFIG_NET_CLS_POLICE
				if (n->police) {
					int pol_res = tcf_police(skb, n->police);
					if (pol_res >= 0)
						return pol_res;
				} else
#endif
					return 0;
			}
			n = n->next;
			goto next_knode;
		}

		/* PUSH: descend into the linked hash table */
		if (sdepth >= TC_U32_MAXDEPTH)
			goto deadloop;
		stack[sdepth].knode = n;
		stack[sdepth].ptr = ptr;
		sdepth++;

		ht = n->ht_down;
		sel = 0;
		if (ht->divisor)
			sel = ht->divisor&u32_hash_fold(*(u32*)(ptr+n->sel.hoff), &n->sel);

		if (!(n->sel.flags&(TC_U32_VAROFFSET|TC_U32_OFFSET|TC_U32_EAT)))
			goto next_ht;

		if (n->sel.flags&(TC_U32_OFFSET|TC_U32_VAROFFSET)) {
			off2 = n->sel.off + 3;
			if (n->sel.flags&TC_U32_VAROFFSET)
				off2 += ntohs(n->sel.offmask & *(u16*)(ptr+n->sel.offoff)) >>n->sel.offshift;
			off2 &= ~3;
		}
		if (n->sel.flags&TC_U32_EAT) {
			ptr += off2;
			off2 = 0;
		}

		if (ptr < skb->tail)
			goto next_ht;
	}

	/* POP: back up one level and resume behind the linking node */
	if (sdepth--) {
		n = stack[sdepth].knode;
		ht = n->ht_up;
		ptr = stack[sdepth].ptr;
		goto check_terminal;
	}
	return -1;

deadloop:
	if (net_ratelimit())
		printk("cls_u32: dead loop\n");
	return -1;
}

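/*
 * A sketch of how the walk above is typically used (the flag names are
 * real; the scenario is illustrative): a root table matches the IP
 * header and links (ht_down) to a second table for TCP.  The linking
 * node carries TC_U32_VAROFFSET with an offmask selecting the ihl bits
 * and offshift 6 (tc's "offset at 0 mask 0f00 shift 6"), so off2
 * becomes the IP header length in bytes, and TC_U32_EAT advances ptr
 * past the IP header before the lower table's TCP keys are tested at
 * their now header-relative offsets.
 */
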
static __inline__ struct tc_u_hnode *
u32_lookup_ht(struct tc_u_common *tp_c, u32 handle)
{
	struct tc_u_hnode *ht;

	for (ht = tp_c->hlist; ht; ht = ht->next)
		if (ht->handle == handle)
			break;

	return ht;
}

static __inline__ struct tc_u_knode *
u32_lookup_key(struct tc_u_hnode *ht, u32 handle)
{
	unsigned sel;
	struct tc_u_knode *n;

	sel = TC_U32_HASH(handle);
	if (sel > ht->divisor)
		return NULL;

	for (n = ht->ht[sel]; n; n = n->next)
		if (n->handle == handle)
			return n;

	return NULL;
}

static unsigned long u32_get(struct tcf_proto *tp, u32 handle)
{
	struct tc_u_hnode *ht;
	struct tc_u_common *tp_c = tp->data;

	if (TC_U32_HTID(handle) == TC_U32_ROOT)
		ht = tp->root;
	else
		ht = u32_lookup_ht(tp_c, TC_U32_HTID(handle));

	if (!ht)
		return 0;

	if (TC_U32_KEY(handle) == 0)
		return (unsigned long)ht;

	return (unsigned long)u32_lookup_key(ht, handle);
}

static void u32_put(struct tcf_proto *tp, unsigned long f)
{
}

static u32 gen_new_htid(struct tc_u_common *tp_c)
{
	int i = 0x800;

	do {
		if (++tp_c->hgenerator == 0x7FF)
			tp_c->hgenerator = 1;
	} while (--i>0 && u32_lookup_ht(tp_c, (tp_c->hgenerator|0x800)<<20));

	return i > 0 ? (tp_c->hgenerator|0x800)<<20 : 0;
}

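/*
 * Handle layout, for reference (TC_U32_* macros in <linux/pkt_cls.h>):
 * bits 31-20 are the hash-table id, bits 19-12 the bucket, bits 11-0
 * the node id.  Example: the first generated htid has hgenerator == 1,
 * so (1|0x800)<<20 == 0x80100000; node 0x7FF in bucket 0 of that table
 * would carry the full handle 0x801007FF.
 */
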
static int u32_init(struct tcf_proto *tp)
{
	struct tc_u_hnode *root_ht;
	struct tc_u_common *tp_c;

	MOD_INC_USE_COUNT;

	for (tp_c = u32_list; tp_c; tp_c = tp_c->next)
		if (tp_c->q == tp->q)
			break;

	root_ht = kmalloc(sizeof(*root_ht), GFP_KERNEL);
	if (root_ht == NULL) {
		MOD_DEC_USE_COUNT;
		return -ENOBUFS;
	}
	memset(root_ht, 0, sizeof(*root_ht));
	root_ht->divisor = 0;
	root_ht->refcnt++;
	root_ht->handle = tp_c ? gen_new_htid(tp_c) : 0x80000000;

	if (tp_c == NULL) {
		tp_c = kmalloc(sizeof(*tp_c), GFP_KERNEL);
		if (tp_c == NULL) {
			kfree(root_ht);
			MOD_DEC_USE_COUNT;
			return -ENOBUFS;
		}
		memset(tp_c, 0, sizeof(*tp_c));
		tp_c->q = tp->q;
		tp_c->next = u32_list;
		u32_list = tp_c;
	}

	tp_c->refcnt++;
	root_ht->next = tp_c->hlist;
	tp_c->hlist = root_ht;
	root_ht->tp_c = tp_c;

	tp->root = root_ht;
	tp->data = tp_c;
	return 0;
}

static int u32_destroy_key(struct tcf_proto *tp, struct tc_u_knode *n)
{
	unsigned long cl;

	if ((cl = __cls_set_class(&n->res.class, 0)) != 0)
		tp->q->ops->cl_ops->unbind_tcf(tp->q, cl);
#ifdef CONFIG_NET_CLS_POLICE
	tcf_police_release(n->police);
#endif
	if (n->ht_down)
		n->ht_down->refcnt--;
	kfree(n);
	return 0;
}

static int u32_delete_key(struct tcf_proto *tp, struct tc_u_knode* key)
{
	struct tc_u_knode **kp;
	struct tc_u_hnode *ht = key->ht_up;

	if (ht) {
		for (kp = &ht->ht[TC_U32_HASH(key->handle)]; *kp; kp = &(*kp)->next) {
			if (*kp == key) {
				tcf_tree_lock(tp);
				*kp = key->next;
				tcf_tree_unlock(tp);

				u32_destroy_key(tp, key);
				return 0;
			}
		}
	}
	BUG_TRAP(0);
	return 0;
}

static void u32_clear_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht)
{
	struct tc_u_knode *n;
	unsigned h;

	for (h=0; h<=ht->divisor; h++) {
		while ((n = ht->ht[h]) != NULL) {
			ht->ht[h] = n->next;

			u32_destroy_key(tp, n);
		}
	}
}

static int u32_destroy_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht)
{
	struct tc_u_common *tp_c = tp->data;
	struct tc_u_hnode **hn;

	BUG_TRAP(!ht->refcnt);

	u32_clear_hnode(tp, ht);

	for (hn = &tp_c->hlist; *hn; hn = &(*hn)->next) {
		if (*hn == ht) {
			*hn = ht->next;
			kfree(ht);
			return 0;
		}
	}

	BUG_TRAP(0);
	return -ENOENT;
}

static void u32_destroy(struct tcf_proto *tp)
{
	struct tc_u_common *tp_c = tp->data;
	struct tc_u_hnode *root_ht = xchg(&tp->root, NULL);

	BUG_TRAP(root_ht != NULL);

	if (root_ht && --root_ht->refcnt == 0)
		u32_destroy_hnode(tp, root_ht);

	if (--tp_c->refcnt == 0) {
		struct tc_u_hnode *ht;
		struct tc_u_common **tp_cp;

		for (tp_cp = &u32_list; *tp_cp; tp_cp = &(*tp_cp)->next) {
			if (*tp_cp == tp_c) {
				*tp_cp = tp_c->next;
				break;
			}
		}

		for (ht=tp_c->hlist; ht; ht = ht->next)
			u32_clear_hnode(tp, ht);

		while ((ht = tp_c->hlist) != NULL) {
			tp_c->hlist = ht->next;

			BUG_TRAP(ht->refcnt == 0);

			kfree(ht);
		}

		kfree(tp_c);
	}

	MOD_DEC_USE_COUNT;
	tp->data = NULL;
}

static int u32_delete(struct tcf_proto *tp, unsigned long arg)
{
	struct tc_u_hnode *ht = (struct tc_u_hnode*)arg;

	if (ht == NULL)
		return 0;

	if (TC_U32_KEY(ht->handle))
		return u32_delete_key(tp, (struct tc_u_knode*)ht);

	if (tp->root == ht)
		return -EINVAL;

	if (--ht->refcnt == 0)
		u32_destroy_hnode(tp, ht);

	return 0;
}

static u32 gen_new_kid(struct tc_u_hnode *ht, u32 handle)
{
	struct tc_u_knode *n;
	unsigned i = 0x7FF;

	for (n=ht->ht[TC_U32_HASH(handle)]; n; n = n->next)
		if (i < TC_U32_NODE(n->handle))
			i = TC_U32_NODE(n->handle);
	i++;

	return handle|(i>0xFFF ? 0xFFF : i);
}

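/*
 * Example of the allocation above (illustrative): if the bucket already
 * holds nodes 0x800 and 0x801, i ends up 0x802 and the new handle is
 * htid|0x802.  Auto-generated ids start above 0x7FF, leaving the low
 * range to ids chosen explicitly by the user; the cap at 0xFFF keeps
 * the id within the 12-bit TC_U32_NODE field.
 */
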
static int u32_set_parms(struct Qdisc *q, unsigned long base,
			 struct tc_u_hnode *ht,
			 struct tc_u_knode *n, struct rtattr **tb,
			 struct rtattr *est)
{
	if (tb[TCA_U32_LINK-1]) {
		u32 handle = *(u32*)RTA_DATA(tb[TCA_U32_LINK-1]);
		struct tc_u_hnode *ht_down = NULL;

		if (TC_U32_KEY(handle))
			return -EINVAL;

		if (handle) {
			ht_down = u32_lookup_ht(ht->tp_c, handle);

			if (ht_down == NULL)
				return -EINVAL;
			ht_down->refcnt++;
		}

		sch_tree_lock(q);
		ht_down = xchg(&n->ht_down, ht_down);
		sch_tree_unlock(q);

		if (ht_down)
			ht_down->refcnt--;
	}
	if (tb[TCA_U32_CLASSID-1]) {
		unsigned long cl;

		n->res.classid = *(u32*)RTA_DATA(tb[TCA_U32_CLASSID-1]);
		sch_tree_lock(q);
		cl = __cls_set_class(&n->res.class, q->ops->cl_ops->bind_tcf(q, base, n->res.classid));
		sch_tree_unlock(q);
		if (cl)
			q->ops->cl_ops->unbind_tcf(q, cl);
	}
#ifdef CONFIG_NET_CLS_POLICE
	if (tb[TCA_U32_POLICE-1]) {
		struct tcf_police *police = tcf_police_locate(tb[TCA_U32_POLICE-1], est);

		sch_tree_lock(q);
		police = xchg(&n->police, police);
		sch_tree_unlock(q);

		tcf_police_release(police);
	}
#endif
	return 0;
}

static int u32_change(struct tcf_proto *tp, unsigned long base, u32 handle,
		      struct rtattr **tca,
		      unsigned long *arg)
{
	struct tc_u_common *tp_c = tp->data;
	struct tc_u_hnode *ht;
	struct tc_u_knode *n;
	struct tc_u32_sel *s;
	struct rtattr *opt = tca[TCA_OPTIONS-1];
	struct rtattr *tb[TCA_U32_MAX];
	u32 htid;
	int err;

	if (opt == NULL)
		return handle ? -EINVAL : 0;

	if (rtattr_parse(tb, TCA_U32_MAX, RTA_DATA(opt), RTA_PAYLOAD(opt)) < 0)
		return -EINVAL;

	if ((n = (struct tc_u_knode*)*arg) != NULL) {
		if (TC_U32_KEY(n->handle) == 0)
			return -EINVAL;

		return u32_set_parms(tp->q, base, n->ht_up, n, tb, tca[TCA_RATE-1]);
	}

	if (tb[TCA_U32_DIVISOR-1]) {
		unsigned divisor = *(unsigned*)RTA_DATA(tb[TCA_U32_DIVISOR-1]);

		if (--divisor > 0x100)
			return -EINVAL;
		if (TC_U32_KEY(handle))
			return -EINVAL;
		if (handle == 0) {
			handle = gen_new_htid(tp->data);
			if (handle == 0)
				return -ENOMEM;
		}
		ht = kmalloc(sizeof(*ht) + divisor*sizeof(void*), GFP_KERNEL);
		if (ht == NULL)
			return -ENOBUFS;
		memset(ht, 0, sizeof(*ht) + divisor*sizeof(void*));
		ht->tp_c = tp_c;
		ht->refcnt = 0;
		ht->divisor = divisor;
		ht->handle = handle;
		ht->next = tp_c->hlist;
		tp_c->hlist = ht;
		*arg = (unsigned long)ht;
		return 0;
	}

	if (tb[TCA_U32_HASH-1]) {
		htid = *(unsigned*)RTA_DATA(tb[TCA_U32_HASH-1]);
		if (TC_U32_HTID(htid) == TC_U32_ROOT) {
			ht = tp->root;
			htid = ht->handle;
		} else {
			ht = u32_lookup_ht(tp->data, TC_U32_HTID(htid));
			if (ht == NULL)
				return -EINVAL;
		}
	} else {
		ht = tp->root;
		htid = ht->handle;
	}

	if (ht->divisor < TC_U32_HASH(htid))
		return -EINVAL;

	if (handle) {
		if (TC_U32_HTID(handle) && TC_U32_HTID(handle^htid))
			return -EINVAL;
		handle = htid | TC_U32_NODE(handle);
	} else
		handle = gen_new_kid(ht, htid);

	if (tb[TCA_U32_SEL-1] == 0 ||
	    RTA_PAYLOAD(tb[TCA_U32_SEL-1]) < sizeof(struct tc_u32_sel))
		return -EINVAL;

	s = RTA_DATA(tb[TCA_U32_SEL-1]);
	n = kmalloc(sizeof(*n) + s->nkeys*sizeof(struct tc_u32_key), GFP_KERNEL);
	if (n == NULL)
		return -ENOBUFS;
	memset(n, 0, sizeof(*n) + s->nkeys*sizeof(struct tc_u32_key));
	memcpy(&n->sel, s, sizeof(*s) + s->nkeys*sizeof(struct tc_u32_key));
	n->ht_up = ht;
	n->handle = handle;
	err = u32_set_parms(tp->q, base, ht, n, tb, tca[TCA_RATE-1]);
	if (err == 0) {
		struct tc_u_knode **ins;
		for (ins = &ht->ht[TC_U32_HASH(handle)]; *ins; ins = &(*ins)->next)
			if (TC_U32_NODE(handle) < TC_U32_NODE((*ins)->handle))
				break;

		n->next = *ins;
		wmb();	/* make the fully initialized node visible before linking it */
		*ins = n;

		*arg = (unsigned long)n;
		return 0;
	}
	kfree(n);
	return err;
}

static void u32_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
	struct tc_u_common *tp_c = tp->data;
	struct tc_u_hnode *ht;
	struct tc_u_knode *n;
	unsigned h;

	if (arg->stop)
		return;

	for (ht = tp_c->hlist; ht; ht = ht->next) {
		if (arg->count >= arg->skip) {
			if (arg->fn(tp, (unsigned long)ht, arg) < 0) {
				arg->stop = 1;
				return;
			}
		}
		arg->count++;
		for (h = 0; h <= ht->divisor; h++) {
			for (n = ht->ht[h]; n; n = n->next) {
				if (arg->count < arg->skip) {
					arg->count++;
					continue;
				}
				if (arg->fn(tp, (unsigned long)n, arg) < 0) {
					arg->stop = 1;
					return;
				}
				arg->count++;
			}
		}
	}
}

#ifdef CONFIG_RTNETLINK
static int u32_dump(struct tcf_proto *tp, unsigned long fh,
		    struct sk_buff *skb, struct tcmsg *t)
{
	struct tc_u_knode *n = (struct tc_u_knode*)fh;
	unsigned char *b = skb->tail;
	struct rtattr *rta;

	if (n == NULL)
		return skb->len;

	t->tcm_handle = n->handle;

	rta = (struct rtattr*)b;
	RTA_PUT(skb, TCA_OPTIONS, 0, NULL);

	if (TC_U32_KEY(n->handle) == 0) {
		struct tc_u_hnode *ht = (struct tc_u_hnode*)fh;
		u32 divisor = ht->divisor+1;
		RTA_PUT(skb, TCA_U32_DIVISOR, 4, &divisor);
	} else {
		RTA_PUT(skb, TCA_U32_SEL,
			sizeof(n->sel) + n->sel.nkeys*sizeof(struct tc_u32_key),
			&n->sel);
		if (n->ht_up) {
			u32 htid = n->handle & 0xFFFFF000;
			RTA_PUT(skb, TCA_U32_HASH, 4, &htid);
		}
		if (n->res.classid)
			RTA_PUT(skb, TCA_U32_CLASSID, 4, &n->res.classid);
		if (n->ht_down)
			RTA_PUT(skb, TCA_U32_LINK, 4, &n->ht_down->handle);
#ifdef CONFIG_NET_CLS_POLICE
		if (n->police) {
			struct rtattr * p_rta = (struct rtattr*)skb->tail;

			RTA_PUT(skb, TCA_U32_POLICE, 0, NULL);

			if (tcf_police_dump(skb, n->police) < 0)
				goto rtattr_failure;

			p_rta->rta_len = skb->tail - (u8*)p_rta;
		}
#endif
	}

	rta->rta_len = skb->tail - b;
#ifdef CONFIG_NET_CLS_POLICE
	if (TC_U32_KEY(n->handle) && n->police) {
		if (qdisc_copy_stats(skb, &n->police->stats))
			goto rtattr_failure;
	}
#endif
	return skb->len;

rtattr_failure:
	skb_trim(skb, b - skb->data);
	return -1;
}
#endif

struct tcf_proto_ops cls_u32_ops = {
	NULL,
	"u32",
	u32_classify,
	u32_init,
	u32_destroy,

	u32_get,
	u32_put,
	u32_change,
	u32_delete,
	u32_walk,
#ifdef CONFIG_RTNETLINK
	u32_dump
#else
	NULL
#endif
};

#ifdef MODULE
int init_module(void)
{
	return register_tcf_proto_ops(&cls_u32_ops);
}

void cleanup_module(void)
{
	unregister_tcf_proto_ops(&cls_u32_ops);
}
#endif