/*
 * net/sched/cls_u32.c	Ugly (or Universal) 32bit key Packet Classifier.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 *	The filters are packed into hash tables of key nodes
 *	with a set of 32bit key/mask pairs at every node.
 *	Nodes reference next-level hash tables, etc.
 *
 *	This scheme is the best universal classifier I managed to
 *	invent; it is not super-fast, but it is not slow (provided you
 *	program it correctly), and general enough.  And its relative
 *	speed grows as the number of rules becomes larger.
 *
 *	It seems that it represents the best middle point between
 *	speed and manageability, both by human and by machine.
 *
 *	It is especially useful for link sharing combined with QoS;
 *	pure RSVP doesn't need such a general approach and can use
 *	much simpler (and faster) schemes, sort of cls_rsvp.c.
 *
 *	JHS: We should remove CONFIG_NET_CLS_IND from here
 *	eventually when the meta match extension is made available.
 */
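/*
 * A filter handle encodes a position in this tree (see the TC_U32_*
 * macros in <linux/pkt_cls.h>): bits 20..31 select the hash table
 * (htid), bits 12..19 the bucket inside it, and bits 0..11 the key
 * node itself.  TC_U32_ROOT (0xFFF00000) denotes the root table, and
 * TC_U32_KEY() masks the low 20 bits to tell tables from key nodes.
 */
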
#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/bitops.h>
#include <linux/config.h>
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/notifier.h>
#include <linux/rtnetlink.h>
#include <net/ip.h>
#include <net/route.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/pkt_sched.h>

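/*
 * Three structures form the classifier: tc_u_knode is one filter (a
 * selector of key/mask pairs plus its result), tc_u_hnode is a hash
 * table of key nodes, and tc_u_common ties together all hash tables
 * belonging to one qdisc.
 */
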
struct tc_u_knode
{
	struct tc_u_knode	*next;
	u32			handle;
	struct tc_u_hnode	*ht_up;
#ifdef CONFIG_NET_CLS_ACT
	struct tc_action	*action;
#else
#ifdef CONFIG_NET_CLS_POLICE
	struct tcf_police	*police;
#endif
#endif
#ifdef CONFIG_NET_CLS_IND
	char			indev[IFNAMSIZ];
#endif
	u8			fshift;
	struct tcf_result	res;
	struct tc_u_hnode	*ht_down;
#ifdef CONFIG_CLS_U32_PERF
	struct tc_u32_pcnt	*pf;
#endif
	struct tc_u32_sel	sel;
};

struct tc_u_hnode
{
	struct tc_u_hnode	*next;
	u32			handle;
	struct tc_u_common	*tp_c;
	int			refcnt;
	unsigned		divisor;
	u32			hgenerator;
	struct tc_u_knode	*ht[1];
};

struct tc_u_common
{
	struct tc_u_common	*next;
	struct tc_u_hnode	*hlist;
	struct Qdisc		*q;
	int			refcnt;
	u32			hgenerator;
};

static struct tc_u_common *u32_list;

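/*
 * Fold a 32bit key down to a bucket index: mask the interesting bits
 * (sel->hmask) and shift them to bit 0.  fshift is precomputed in
 * u32_change() as the position of the lowest set bit of hmask.
 */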
static __inline__ unsigned u32_hash_fold(u32 key, struct tc_u32_sel *sel, u8 fshift)
{
	unsigned h = (key & sel->hmask) >> fshift;

	return h;
}

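/*
 * The fast path.  For each node, every selector key must match the
 * packet; a full match either terminates with the node's result or
 * descends into ht_down, pushing the current node onto a small stack
 * so the walk can resume one level up (POP) when a lower table fails
 * to match.  TC_U32_MAXDEPTH bounds the descent so a misprogrammed
 * filter graph cannot loop forever.
 */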
static int u32_classify(struct sk_buff *skb, struct tcf_proto *tp, struct tcf_result *res)
{
	struct {
		struct tc_u_knode *knode;
		u8		  *ptr;
	} stack[TC_U32_MAXDEPTH];

	struct tc_u_hnode *ht = (struct tc_u_hnode*)tp->root;
	u8 *ptr = skb->nh.raw;
	struct tc_u_knode *n;
	int sdepth = 0;
	int off2 = 0;
	int sel = 0;
#ifdef CONFIG_CLS_U32_PERF
	int j;
#endif
	int i;

next_ht:
	n = ht->ht[sel];

next_knode:
	if (n) {
		struct tc_u32_key *key = n->sel.keys;

#ifdef CONFIG_CLS_U32_PERF
		n->pf->rcnt += 1;
		j = 0;
#endif
		for (i = n->sel.nkeys; i > 0; i--, key++) {

			if ((*(u32*)(ptr+key->off+(off2&key->offmask))^key->val)&key->mask) {
				n = n->next;
				goto next_knode;
			}
#ifdef CONFIG_CLS_U32_PERF
			n->pf->kcnts[j] += 1;
			j++;
#endif
		}
		if (n->ht_down == NULL) {
check_terminal:
			if (n->sel.flags&TC_U32_TERMINAL) {

				*res = n->res;
#ifdef CONFIG_NET_CLS_IND
				/* yes, i know it sucks but the feature is
				 * optional dammit! - JHS */
				if (0 != n->indev[0]) {
					if (NULL == skb->input_dev) {
						n = n->next;
						goto next_knode;
					} else {
						if (0 != strcmp(n->indev, skb->input_dev->name)) {
							n = n->next;
							goto next_knode;
						}
					}
				}
#endif
#ifdef CONFIG_CLS_U32_PERF
				n->pf->rhit += 1;
#endif
#ifdef CONFIG_NET_CLS_ACT
				if (n->action) {
					int pol_res = tcf_action_exec(skb, n->action, res);
					if (pol_res >= 0)
						return pol_res;
				} else
#else
#ifdef CONFIG_NET_CLS_POLICE
				if (n->police) {
					int pol_res = tcf_police(skb, n->police);
					if (pol_res >= 0)
						return pol_res;
				} else
#endif
#endif
					return 0;
			}
			n = n->next;
			goto next_knode;
		}

		/* PUSH */
		if (sdepth >= TC_U32_MAXDEPTH)
			goto deadloop;
		stack[sdepth].knode = n;
		stack[sdepth].ptr = ptr;
		sdepth++;

		ht = n->ht_down;
		sel = 0;
		if (ht->divisor)
			sel = ht->divisor&u32_hash_fold(*(u32*)(ptr+n->sel.hoff), &n->sel, n->fshift);

		if (!(n->sel.flags&(TC_U32_VAROFFSET|TC_U32_OFFSET|TC_U32_EAT)))
			goto next_ht;

		if (n->sel.flags&(TC_U32_OFFSET|TC_U32_VAROFFSET)) {
			off2 = n->sel.off + 3;
			if (n->sel.flags&TC_U32_VAROFFSET)
				off2 += ntohs(n->sel.offmask & *(u16*)(ptr+n->sel.offoff)) >> n->sel.offshift;
			off2 &= ~3;
		}
		if (n->sel.flags&TC_U32_EAT) {
			ptr += off2;
			off2 = 0;
		}

		if (ptr < skb->tail)
			goto next_ht;
	}

	/* POP */
	if (sdepth--) {
		n = stack[sdepth].knode;
		ht = n->ht_up;
		ptr = stack[sdepth].ptr;
		goto check_terminal;
	}
	return -1;

deadloop:
	if (net_ratelimit())
		printk("cls_u32: dead loop\n");
	return -1;
}

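/*
 * The helpers below run only on the configuration path (get, change,
 * delete), never per packet, so plain linear list scans suffice.
 */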
static __inline__ struct tc_u_hnode *
u32_lookup_ht(struct tc_u_common *tp_c, u32 handle)
{
	struct tc_u_hnode *ht;

	for (ht = tp_c->hlist; ht; ht = ht->next)
		if (ht->handle == handle)
			break;

	return ht;
}

static __inline__ struct tc_u_knode *
u32_lookup_key(struct tc_u_hnode *ht, u32 handle)
{
	unsigned sel;
	struct tc_u_knode *n = NULL;

	sel = TC_U32_HASH(handle);
	if (sel > ht->divisor)
		goto out;

	for (n = ht->ht[sel]; n; n = n->next)
		if (n->handle == handle)
			break;
out:
	return n;
}

static unsigned long u32_get(struct tcf_proto *tp, u32 handle)
{
	struct tc_u_hnode *ht;
	struct tc_u_common *tp_c = tp->data;

	if (TC_U32_HTID(handle) == TC_U32_ROOT)
		ht = tp->root;
	else
		ht = u32_lookup_ht(tp_c, TC_U32_HTID(handle));

	if (!ht)
		return 0;

	if (TC_U32_KEY(handle) == 0)
		return (unsigned long)ht;

	return (unsigned long)u32_lookup_key(ht, handle);
}

static void u32_put(struct tcf_proto *tp, unsigned long f)
{
}

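/*
 * Allocate an unused hash table id.  Generated ids take the form
 * (hgenerator|0x800)<<20, cycling hgenerator through 1..0x7FE and
 * giving up (returning 0) after 0x800 unsuccessful probes.
 */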
static u32 gen_new_htid(struct tc_u_common *tp_c)
{
	int i = 0x800;

	do {
		if (++tp_c->hgenerator == 0x7FF)
			tp_c->hgenerator = 1;
	} while (--i > 0 && u32_lookup_ht(tp_c, (tp_c->hgenerator|0x800)<<20));

	return i > 0 ? (tp_c->hgenerator|0x800)<<20 : 0;
}

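/*
 * One tc_u_common is shared by every u32 instance attached to the
 * same qdisc (found by scanning u32_list), apparently so filters of
 * different priorities can link into each other's hash tables.  Each
 * instance still gets its own root table with divisor 0, i.e. a
 * single bucket.
 */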
static int u32_init(struct tcf_proto *tp)
{
	struct tc_u_hnode *root_ht;
	struct tc_u_common *tp_c;

	for (tp_c = u32_list; tp_c; tp_c = tp_c->next)
		if (tp_c->q == tp->q)
			break;

	root_ht = kmalloc(sizeof(*root_ht), GFP_KERNEL);
	if (root_ht == NULL)
		return -ENOBUFS;

	memset(root_ht, 0, sizeof(*root_ht));
	root_ht->divisor = 0;
	root_ht->refcnt++;
	root_ht->handle = tp_c ? gen_new_htid(tp_c) : 0x80000000;

	if (tp_c == NULL) {
		tp_c = kmalloc(sizeof(*tp_c), GFP_KERNEL);
		if (tp_c == NULL) {
			kfree(root_ht);
			return -ENOBUFS;
		}
		memset(tp_c, 0, sizeof(*tp_c));
		tp_c->q = tp->q;
		tp_c->next = u32_list;
		u32_list = tp_c;
	}

	tp_c->refcnt++;
	root_ht->next = tp_c->hlist;
	tp_c->hlist = root_ht;
	root_ht->tp_c = tp_c;

	tp->root = root_ht;
	tp->data = tp_c;
	return 0;
}

static int u32_destroy_key(struct tcf_proto *tp, struct tc_u_knode *n)
{
	unsigned long cl;

	if ((cl = __cls_set_class(&n->res.class, 0)) != 0)
		tp->q->ops->cl_ops->unbind_tcf(tp->q, cl);
#ifdef CONFIG_NET_CLS_ACT
	if (n->action) {
		tcf_action_destroy(n->action, TCA_ACT_UNBIND);
	}
#else
#ifdef CONFIG_NET_CLS_POLICE
	tcf_police_release(n->police, TCA_ACT_UNBIND);
#endif
#endif
	if (n->ht_down)
		n->ht_down->refcnt--;
#ifdef CONFIG_CLS_U32_PERF
	if (n && (NULL != n->pf))
		kfree(n->pf);
#endif
	kfree(n);
	return 0;
}

static int u32_delete_key(struct tcf_proto *tp, struct tc_u_knode* key)
{
	struct tc_u_knode **kp;
	struct tc_u_hnode *ht = key->ht_up;

	if (ht) {
		for (kp = &ht->ht[TC_U32_HASH(key->handle)]; *kp; kp = &(*kp)->next) {
			if (*kp == key) {
				tcf_tree_lock(tp);
				*kp = key->next;
				tcf_tree_unlock(tp);

				u32_destroy_key(tp, key);
				return 0;
			}
		}
	}
	BUG_TRAP(0);
	return 0;
}

static void u32_clear_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht)
{
	struct tc_u_knode *n;
	unsigned h;

	for (h = 0; h <= ht->divisor; h++) {
		while ((n = ht->ht[h]) != NULL) {
			ht->ht[h] = n->next;

			u32_destroy_key(tp, n);
		}
	}
}

static int u32_destroy_hnode(struct tcf_proto *tp, struct tc_u_hnode *ht)
{
	struct tc_u_common *tp_c = tp->data;
	struct tc_u_hnode **hn;

	BUG_TRAP(!ht->refcnt);

	u32_clear_hnode(tp, ht);

	for (hn = &tp_c->hlist; *hn; hn = &(*hn)->next) {
		if (*hn == ht) {
			*hn = ht->next;
			kfree(ht);
			return 0;
		}
	}

	BUG_TRAP(0);
	return -ENOENT;
}

static void u32_destroy(struct tcf_proto *tp)
{
	struct tc_u_common *tp_c = tp->data;
	struct tc_u_hnode *root_ht = xchg(&tp->root, NULL);

	BUG_TRAP(root_ht != NULL);

	if (root_ht && --root_ht->refcnt == 0)
		u32_destroy_hnode(tp, root_ht);

	if (--tp_c->refcnt == 0) {
		struct tc_u_hnode *ht;
		struct tc_u_common **tp_cp;

		for (tp_cp = &u32_list; *tp_cp; tp_cp = &(*tp_cp)->next) {
			if (*tp_cp == tp_c) {
				*tp_cp = tp_c->next;
				break;
			}
		}

		for (ht = tp_c->hlist; ht; ht = ht->next)
			u32_clear_hnode(tp, ht);

		while ((ht = tp_c->hlist) != NULL) {
			tp_c->hlist = ht->next;

			BUG_TRAP(ht->refcnt == 0);

			kfree(ht);
		}

		kfree(tp_c);
	}

	tp->data = NULL;
}

static int u32_delete(struct tcf_proto *tp, unsigned long arg)
{
	struct tc_u_hnode *ht = (struct tc_u_hnode*)arg;

	if (ht == NULL)
		return 0;

	if (TC_U32_KEY(ht->handle))
		return u32_delete_key(tp, (struct tc_u_knode*)ht);

	if (tp->root == ht)
		return -EINVAL;

	if (--ht->refcnt == 0)
		u32_destroy_hnode(tp, ht);

	return 0;
}

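/*
 * Pick a node id for a new key in the bucket selected by @handle:
 * one past the largest id already present, never below 0x800 and
 * saturating at the 12-bit maximum 0xFFF.
 */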
static u32 gen_new_kid(struct tc_u_hnode *ht, u32 handle)
{
	struct tc_u_knode *n;
	unsigned i = 0x7FF;

	for (n = ht->ht[TC_U32_HASH(handle)]; n; n = n->next)
		if (i < TC_U32_NODE(n->handle))
			i = TC_U32_NODE(n->handle);
	i++;

	return handle|(i > 0xFFF ? 0xFFF : i);
}

static int u32_set_parms(struct Qdisc *q, unsigned long base,
			 struct tc_u_hnode *ht,
			 struct tc_u_knode *n, struct rtattr **tb,
			 struct rtattr *est)
{
#ifdef CONFIG_NET_CLS_ACT
	struct tc_action *act = NULL;
	int ret;
#endif
	if (tb[TCA_U32_LINK-1]) {
		u32 handle = *(u32*)RTA_DATA(tb[TCA_U32_LINK-1]);
		struct tc_u_hnode *ht_down = NULL;

		if (TC_U32_KEY(handle))
			return -EINVAL;

		if (handle) {
			ht_down = u32_lookup_ht(ht->tp_c, handle);

			if (ht_down == NULL)
				return -EINVAL;
			ht_down->refcnt++;
		}

		sch_tree_lock(q);
		ht_down = xchg(&n->ht_down, ht_down);
		sch_tree_unlock(q);

		if (ht_down)
			ht_down->refcnt--;
	}
	if (tb[TCA_U32_CLASSID-1]) {
		unsigned long cl;

		n->res.classid = *(u32*)RTA_DATA(tb[TCA_U32_CLASSID-1]);
		sch_tree_lock(q);
		cl = __cls_set_class(&n->res.class, q->ops->cl_ops->bind_tcf(q, base, n->res.classid));
		sch_tree_unlock(q);
		if (cl)
			q->ops->cl_ops->unbind_tcf(q, cl);
	}
#ifdef CONFIG_NET_CLS_ACT
	/* backward compatibility */
	if (tb[TCA_U32_POLICE-1]) {
		act = kmalloc(sizeof(*act), GFP_KERNEL);
		if (NULL == act)
			return -ENOMEM;

		memset(act, 0, sizeof(*act));
		ret = tcf_action_init_1(tb[TCA_U32_POLICE-1], est, act, "police", TCA_ACT_NOREPLACE, TCA_ACT_BIND);
		if (0 > ret) {
			tcf_action_destroy(act, TCA_ACT_UNBIND);
			return ret;
		}
		act->type = TCA_OLD_COMPAT;

		sch_tree_lock(q);
		act = xchg(&n->action, act);
		sch_tree_unlock(q);

		tcf_action_destroy(act, TCA_ACT_UNBIND);
	}

	if (tb[TCA_U32_ACT-1]) {
		act = kmalloc(sizeof(*act), GFP_KERNEL);
		if (NULL == act)
			return -ENOMEM;
		memset(act, 0, sizeof(*act));
		ret = tcf_action_init(tb[TCA_U32_ACT-1], est, act, NULL, TCA_ACT_NOREPLACE, TCA_ACT_BIND);
		if (0 > ret) {
			tcf_action_destroy(act, TCA_ACT_UNBIND);
			return ret;
		}

		sch_tree_lock(q);
		act = xchg(&n->action, act);
		sch_tree_unlock(q);

		tcf_action_destroy(act, TCA_ACT_UNBIND);
	}
#else
#ifdef CONFIG_NET_CLS_POLICE
	if (tb[TCA_U32_POLICE-1]) {
		struct tcf_police *police = tcf_police_locate(tb[TCA_U32_POLICE-1], est);
		sch_tree_lock(q);
		police = xchg(&n->police, police);
		sch_tree_unlock(q);
		tcf_police_release(police, TCA_ACT_UNBIND);
	}
#endif
#endif
#ifdef CONFIG_NET_CLS_IND
	n->indev[0] = 0;
	if (tb[TCA_U32_INDEV-1]) {
		struct rtattr *input_dev = tb[TCA_U32_INDEV-1];
		if (RTA_PAYLOAD(input_dev) >= IFNAMSIZ) {
			printk("cls_u32: bad indev name %s\n", (char*)RTA_DATA(input_dev));
			/* should we clear state first? */
			return -EINVAL;
		}
		sprintf(n->indev, "%s", (char*)RTA_DATA(input_dev));
		printk("got IND %s\n", n->indev);
	}
#endif

	return 0;
}

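/*
 * u32_change() handles three request shapes: updating an existing key
 * node (*arg non-zero), creating a new hash table (TCA_U32_DIVISOR
 * present), or creating a new key node in the table named by
 * TCA_U32_HASH (the root table by default).
 */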
static int u32_change(struct tcf_proto *tp, unsigned long base, u32 handle,
		      struct rtattr **tca,
		      unsigned long *arg)
{
	struct tc_u_common *tp_c = tp->data;
	struct tc_u_hnode *ht;
	struct tc_u_knode *n;
	struct tc_u32_sel *s;
	struct rtattr *opt = tca[TCA_OPTIONS-1];
	struct rtattr *tb[TCA_U32_MAX];
	u32 htid;
	int err;

	if (opt == NULL)
		return handle ? -EINVAL : 0;

	if (rtattr_parse(tb, TCA_U32_MAX, RTA_DATA(opt), RTA_PAYLOAD(opt)) < 0)
		return -EINVAL;

	if ((n = (struct tc_u_knode*)*arg) != NULL) {
		if (TC_U32_KEY(n->handle) == 0)
			return -EINVAL;

		return u32_set_parms(tp->q, base, n->ht_up, n, tb, tca[TCA_RATE-1]);
	}

	if (tb[TCA_U32_DIVISOR-1]) {
		unsigned divisor = *(unsigned*)RTA_DATA(tb[TCA_U32_DIVISOR-1]);

		if (--divisor > 0x100)
			return -EINVAL;
		if (TC_U32_KEY(handle))
			return -EINVAL;
		if (handle == 0) {
			handle = gen_new_htid(tp->data);
			if (handle == 0)
				return -ENOMEM;
		}
		ht = kmalloc(sizeof(*ht) + divisor*sizeof(void*), GFP_KERNEL);
		if (ht == NULL)
			return -ENOBUFS;
		memset(ht, 0, sizeof(*ht) + divisor*sizeof(void*));
		ht->tp_c = tp_c;
		ht->refcnt = 0;
		ht->divisor = divisor;
		ht->handle = handle;
		ht->next = tp_c->hlist;
		tp_c->hlist = ht;
		*arg = (unsigned long)ht;
		return 0;
	}

	if (tb[TCA_U32_HASH-1]) {
		htid = *(unsigned*)RTA_DATA(tb[TCA_U32_HASH-1]);
		if (TC_U32_HTID(htid) == TC_U32_ROOT) {
			ht = tp->root;
			htid = ht->handle;
		} else {
			ht = u32_lookup_ht(tp->data, TC_U32_HTID(htid));
			if (ht == NULL)
				return -EINVAL;
		}
	} else {
		ht = tp->root;
		htid = ht->handle;
	}

	if (ht->divisor < TC_U32_HASH(htid))
		return -EINVAL;

	if (handle) {
		if (TC_U32_HTID(handle) && TC_U32_HTID(handle^htid))
			return -EINVAL;
		handle = htid | TC_U32_NODE(handle);
	} else
		handle = gen_new_kid(ht, htid);

	if (tb[TCA_U32_SEL-1] == 0 ||
	    RTA_PAYLOAD(tb[TCA_U32_SEL-1]) < sizeof(struct tc_u32_sel))
		return -EINVAL;

	s = RTA_DATA(tb[TCA_U32_SEL-1]);

	n = kmalloc(sizeof(*n) + s->nkeys*sizeof(struct tc_u32_key), GFP_KERNEL);
	if (n == NULL)
		return -ENOBUFS;

	memset(n, 0, sizeof(*n) + s->nkeys*sizeof(struct tc_u32_key));
#ifdef CONFIG_CLS_U32_PERF
	n->pf = kmalloc(sizeof(struct tc_u32_pcnt) + s->nkeys*sizeof(__u64), GFP_KERNEL);
	if (n->pf == NULL) {
		kfree(n);
		return -ENOBUFS;
	}
	memset(n->pf, 0, sizeof(struct tc_u32_pcnt) + s->nkeys*sizeof(__u64));
#endif

	memcpy(&n->sel, s, sizeof(*s) + s->nkeys*sizeof(struct tc_u32_key));
	n->ht_up = ht;
	n->handle = handle;
	{
		u8 i = 0;
		u32 mask = s->hmask;
		if (mask) {
			while (!(mask & 1)) {
				i++;
				mask >>= 1;
			}
		}
		n->fshift = i;
	}
	err = u32_set_parms(tp->q, base, ht, n, tb, tca[TCA_RATE-1]);
	if (err == 0) {
		struct tc_u_knode **ins;
		for (ins = &ht->ht[TC_U32_HASH(handle)]; *ins; ins = &(*ins)->next)
			if (TC_U32_NODE(handle) < TC_U32_NODE((*ins)->handle))
				break;

		n->next = *ins;
		wmb();
		*ins = n;

		*arg = (unsigned long)n;
		return 0;
	}
#ifdef CONFIG_CLS_U32_PERF
	if (n && (NULL != n->pf))
		kfree(n->pf);
#endif
	kfree(n);
	return err;
}

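/*
 * Walk every hash table and key node on behalf of a dump, honouring
 * arg->skip and stopping as soon as the callback asks us to.
 */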
static void u32_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
	struct tc_u_common *tp_c = tp->data;
	struct tc_u_hnode *ht;
	struct tc_u_knode *n;
	unsigned h;

	if (arg->stop)
		return;

	for (ht = tp_c->hlist; ht; ht = ht->next) {
		if (arg->count >= arg->skip) {
			if (arg->fn(tp, (unsigned long)ht, arg) < 0) {
				arg->stop = 1;
				return;
			}
		}
		arg->count++;
		for (h = 0; h <= ht->divisor; h++) {
			for (n = ht->ht[h]; n; n = n->next) {
				if (arg->count < arg->skip) {
					arg->count++;
					continue;
				}
				if (arg->fn(tp, (unsigned long)n, arg) < 0) {
					arg->stop = 1;
					return;
				}
				arg->count++;
			}
		}
	}
}

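/*
 * Dump one handle back to user space: for a hash table only the
 * divisor, for a key node the selector, classid, downlink, actions
 * or policer, and (if configured) the per-key hit counters.
 */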
static int u32_dump(struct tcf_proto *tp, unsigned long fh,
		    struct sk_buff *skb, struct tcmsg *t)
{
	struct tc_u_knode *n = (struct tc_u_knode*)fh;
	unsigned char *b = skb->tail;
	struct rtattr *rta;

	if (n == NULL)
		return skb->len;

	t->tcm_handle = n->handle;

	rta = (struct rtattr*)b;
	RTA_PUT(skb, TCA_OPTIONS, 0, NULL);

	if (TC_U32_KEY(n->handle) == 0) {
		struct tc_u_hnode *ht = (struct tc_u_hnode*)fh;
		u32 divisor = ht->divisor+1;
		RTA_PUT(skb, TCA_U32_DIVISOR, 4, &divisor);
	} else {
		RTA_PUT(skb, TCA_U32_SEL,
			sizeof(n->sel) + n->sel.nkeys*sizeof(struct tc_u32_key),
			&n->sel);
		if (n->ht_up) {
			u32 htid = n->handle & 0xFFFFF000;
			RTA_PUT(skb, TCA_U32_HASH, 4, &htid);
		}
		if (n->res.classid)
			RTA_PUT(skb, TCA_U32_CLASSID, 4, &n->res.classid);
		if (n->ht_down)
			RTA_PUT(skb, TCA_U32_LINK, 4, &n->ht_down->handle);
#ifdef CONFIG_NET_CLS_ACT
		/* again for backward compatible mode - we want
		 * to work with both old and new modes of entering
		 * tc data even if iproute2 was newer - jhs
		 */
		if (n->action) {
			struct rtattr *p_rta = (struct rtattr*)skb->tail;

			if (n->action->type != TCA_OLD_COMPAT) {
				RTA_PUT(skb, TCA_U32_ACT, 0, NULL);
				if (tcf_action_dump(skb, n->action, 0, 0) < 0) {
					goto rtattr_failure;
				}
			} else {
				RTA_PUT(skb, TCA_U32_POLICE, 0, NULL);
				if (tcf_action_dump_old(skb, n->action, 0, 0) < 0) {
					goto rtattr_failure;
				}
			}

			p_rta->rta_len = skb->tail - (u8*)p_rta;
		}
#else
#ifdef CONFIG_NET_CLS_POLICE
		if (n->police) {
			struct rtattr *p_rta = (struct rtattr*)skb->tail;
			RTA_PUT(skb, TCA_U32_POLICE, 0, NULL);

			if (tcf_police_dump(skb, n->police) < 0)
				goto rtattr_failure;

			p_rta->rta_len = skb->tail - (u8*)p_rta;
		}
#endif
#endif

#ifdef CONFIG_NET_CLS_IND
		if (strlen(n->indev)) {
			struct rtattr *p_rta = (struct rtattr*)skb->tail;
			RTA_PUT(skb, TCA_U32_INDEV, IFNAMSIZ, n->indev);
			p_rta->rta_len = skb->tail - (u8*)p_rta;
		}
#endif
#ifdef CONFIG_CLS_U32_PERF
		RTA_PUT(skb, TCA_U32_PCNT,
			sizeof(struct tc_u32_pcnt) + n->sel.nkeys*sizeof(__u64),
			n->pf);
#endif
	}

	rta->rta_len = skb->tail - b;
#ifdef CONFIG_NET_CLS_ACT
	if (TC_U32_KEY(n->handle) != 0) {
		if (TC_U32_KEY(n->handle) && n->action && n->action->type == TCA_OLD_COMPAT) {
			if (tcf_action_copy_stats(skb, n->action))
				goto rtattr_failure;
		}
	}
#else
#ifdef CONFIG_NET_CLS_POLICE
	if (TC_U32_KEY(n->handle) && n->police) {
		if (qdisc_copy_stats(skb, &n->police->stats,
				     n->police->stats_lock))
			goto rtattr_failure;
	}
#endif
#endif
	return skb->len;

rtattr_failure:
	skb_trim(skb, b - skb->data);
	return -1;
}

static struct tcf_proto_ops cls_u32_ops = {
	.next		=	NULL,
	.kind		=	"u32",
	.classify	=	u32_classify,
	.init		=	u32_init,
	.destroy	=	u32_destroy,
	.get		=	u32_get,
	.put		=	u32_put,
	.change		=	u32_change,
	.delete		=	u32_delete,
	.walk		=	u32_walk,
	.dump		=	u32_dump,
	.owner		=	THIS_MODULE,
};

static int __init init_u32(void)
{
	printk("u32 classifier\n");
#ifdef CONFIG_CLS_U32_PERF
	printk("    Performance counters on\n");
#endif
#ifdef CONFIG_NET_CLS_POLICE
	printk("    OLD policer on\n");
#endif
#ifdef CONFIG_NET_CLS_IND
	printk("    input device check on\n");
#endif
#ifdef CONFIG_NET_CLS_ACT
	printk("    Actions configured\n");
#endif
	return register_tcf_proto_ops(&cls_u32_ops);
}

static void __exit exit_u32(void)
{
	unregister_tcf_proto_ops(&cls_u32_ops);
}

module_init(init_u32)
module_exit(exit_u32)
MODULE_LICENSE("GPL");