/*
 * net/sched/cls_route.c	ROUTE4 classifier.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 */

#include <linux/module.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/notifier.h>
#include <net/ip.h>
#include <net/route.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/act_api.h>
#include <net/pkt_cls.h>

/*
 * 1. For now we assume that route tags < 256.
 *    This allows us to use direct table lookups instead of hash tables.
 * 2. For now we assume that "from TAG" and "fromdev DEV" statements
 *    are mutually exclusive.
 * 3. "to TAG from ANY" has higher priority than "to ANY from XXX".
 */

struct route4_fastmap
{
	struct route4_filter	*filter;
	u32			id;
	int			iif;
};

struct route4_head
{
	struct route4_fastmap	fastmap[16];
	struct route4_bucket	*table[256+1];
};

struct route4_bucket
{
	/* 16 FROM buckets + 16 IIF buckets + 1 wildcard bucket */
	struct route4_filter	*ht[16+16+1];
};

struct route4_filter
{
	struct route4_filter	*next;
	u32			id;
	int			iif;

	struct tcf_result	res;
	struct tcf_exts		exts;
	u32			handle;
	struct route4_bucket	*bkt;
};

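/*
 * Sentinel cached in the fastmap when a lookup fails, so repeated
 * misses on the same (id, iif) pair skip the full table scan.
 */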
#define ROUTE4_FAILURE ((struct route4_filter*)(-1L))

static struct tcf_ext_map route_ext_map = {
	.police = TCA_ROUTE4_POLICE,
	.action = TCA_ROUTE4_ACT
};

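/*
 * The fastmap is a small direct-mapped cache of recent results,
 * indexed by the low four bits of the route tag (the iif argument
 * is accepted but not folded into the index).
 */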
static __inline__ int route4_fastmap_hash(u32 id, int iif)
{
	return id&0xF;
}

static inline
void route4_reset_fastmap(struct net_device *dev, struct route4_head *head, u32 id)
{
	spin_lock_bh(&dev->queue_lock);
	memset(head->fastmap, 0, sizeof(head->fastmap));
	spin_unlock_bh(&dev->queue_lock);
}

static void __inline__
route4_set_fastmap(struct route4_head *head, u32 id, int iif,
		   struct route4_filter *f)
{
	int h = route4_fastmap_hash(id, iif);
	head->fastmap[h].id = id;
	head->fastmap[h].iif = iif;
	head->fastmap[h].filter = f;
}

static __inline__ int route4_hash_to(u32 id)
{
	return id&0xFF;
}

static __inline__ int route4_hash_from(u32 id)
{
	return (id>>16)&0xF;
}

static __inline__ int route4_hash_iif(int iif)
{
	return 16 + ((iif>>16)&0xF);
}

static __inline__ int route4_hash_wild(void)
{
	return 32;
}

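/*
 * Apply a matched filter: if extensions (actions/policers) are
 * attached, run them; a negative verdict marks this lookup as not
 * cacheable and resumes scanning, any other verdict is returned.
 * Plain matches are cached in the fastmap before returning.
 */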
#define ROUTE4_APPLY_RESULT()					\
{								\
	*res = f->res;						\
	if (tcf_exts_is_available(&f->exts)) {			\
		int r = tcf_exts_exec(skb, &f->exts, res);	\
		if (r < 0) {					\
			dont_cache = 1;				\
			continue;				\
		}						\
		return r;					\
	} else if (!dont_cache)					\
		route4_set_fastmap(head, id, iif, f);		\
	return 0;						\
}

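/*
 * Classification order: fastmap cache first, then the bucket for
 * "to TAG" (FROM chain, IIF chain, wildcard chain), and finally a
 * second pass through the wildcard-TO bucket at index 256.
 */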
static int route4_classify(struct sk_buff *skb, struct tcf_proto *tp,
			   struct tcf_result *res)
{
	struct route4_head *head = (struct route4_head*)tp->root;
	struct dst_entry *dst;
	struct route4_bucket *b;
	struct route4_filter *f;
	u32 id, h;
	int iif, dont_cache = 0;

	if ((dst = skb->dst) == NULL)
		goto failure;

	id = dst->tclassid;
	if (head == NULL)
		goto old_method;

	iif = ((struct rtable*)dst)->fl.iif;

	h = route4_fastmap_hash(id, iif);
	if (id == head->fastmap[h].id &&
	    iif == head->fastmap[h].iif &&
	    (f = head->fastmap[h].filter) != NULL) {
		if (f == ROUTE4_FAILURE)
			goto failure;

		*res = f->res;
		return 0;
	}

	h = route4_hash_to(id);

restart:
	if ((b = head->table[h]) != NULL) {
		for (f = b->ht[route4_hash_from(id)]; f; f = f->next)
			if (f->id == id)
				ROUTE4_APPLY_RESULT();

		for (f = b->ht[route4_hash_iif(iif)]; f; f = f->next)
			if (f->iif == iif)
				ROUTE4_APPLY_RESULT();

		for (f = b->ht[route4_hash_wild()]; f; f = f->next)
			ROUTE4_APPLY_RESULT();
	}
	if (h < 256) {
		h = 256;
		id &= ~0xFFFF;
		goto restart;
	}

	if (!dont_cache)
		route4_set_fastmap(head, id, iif, ROUTE4_FAILURE);
failure:
	return -1;

old_method:
	if (id && (TC_H_MAJ(id) == 0 ||
		   !(TC_H_MAJ(id^tp->q->handle)))) {
		res->classid = id;
		res->class = 0;
		return 0;
	}
	return -1;
}

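/*
 * Filter handles pack the match keys: bits 0-15 hold the "to" tag
 * (bit 15 set means "to ANY"), bits 16-31 hold the "from" tag, or
 * the input interface when bit 31 is set, with 0xFFFF meaning
 * "from ANY".
 */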
static inline u32 to_hash(u32 id)
{
	u32 h = id&0xFF;
	if (id&0x8000)
		h += 256;
	return h;
}

static inline u32 from_hash(u32 id)
{
	id &= 0xFFFF;
	if (id == 0xFFFF)
		return 32;
	if (!(id & 0x8000)) {
		if (id > 255)
			return 256;
		return id&0xF;
	}
	return 16 + (id&0xF);
}

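/* Look up a filter by its handle for the netlink get/change paths. */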
static unsigned long route4_get(struct tcf_proto *tp, u32 handle)
{
	struct route4_head *head = (struct route4_head*)tp->root;
	struct route4_bucket *b;
	struct route4_filter *f;
	unsigned h1, h2;

	if (!head)
		return 0;

	h1 = to_hash(handle);
	if (h1 > 256)
		return 0;

	h2 = from_hash(handle>>16);
	if (h2 > 32)
		return 0;

	if ((b = head->table[h1]) != NULL) {
		for (f = b->ht[h2]; f; f = f->next)
			if (f->handle == handle)
				return (unsigned long)f;
	}
	return 0;
}

static void route4_put(struct tcf_proto *tp, unsigned long f)
{
}

static int route4_init(struct tcf_proto *tp)
{
	return 0;
}

static inline void
route4_delete_filter(struct tcf_proto *tp, struct route4_filter *f)
{
	tcf_unbind_filter(tp, &f->res);
	tcf_exts_destroy(tp, &f->exts);
	kfree(f);
}

static void route4_destroy(struct tcf_proto *tp)
{
	struct route4_head *head = xchg(&tp->root, NULL);
	int h1, h2;

	if (head == NULL)
		return;

	for (h1=0; h1<=256; h1++) {
		struct route4_bucket *b;

		if ((b = head->table[h1]) != NULL) {
			for (h2=0; h2<=32; h2++) {
				struct route4_filter *f;

				while ((f = b->ht[h2]) != NULL) {
					b->ht[h2] = f->next;
					route4_delete_filter(tp, f);
				}
			}
			kfree(b);
		}
	}
	kfree(head);
}

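/*
 * Unlink one filter and flush the fastmap so no stale pointer to it
 * survives; if its bucket is left empty, the bucket is freed too.
 */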
static int route4_delete(struct tcf_proto *tp, unsigned long arg)
{
	struct route4_head *head = (struct route4_head*)tp->root;
	struct route4_filter **fp, *f = (struct route4_filter*)arg;
	unsigned h = 0;
	struct route4_bucket *b;
	int i;

	if (!head || !f)
		return -EINVAL;

	h = f->handle;
	b = f->bkt;

	for (fp = &b->ht[from_hash(h>>16)]; *fp; fp = &(*fp)->next) {
		if (*fp == f) {
			tcf_tree_lock(tp);
			*fp = f->next;
			tcf_tree_unlock(tp);

			route4_reset_fastmap(tp->q->dev, head, f->id);
			route4_delete_filter(tp, f);

			/* Strip tree */

			for (i=0; i<=32; i++)
				if (b->ht[i])
					return 0;

			/* OK, session has no flows */

			tcf_tree_lock(tp);
			head->table[to_hash(h)] = NULL;
			tcf_tree_unlock(tp);

			kfree(b);
			return 0;
		}
	}
	return 0;
}

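/*
 * Validate the netlink attributes, derive the new handle from the
 * TO/FROM/IIF selectors, allocate the target bucket on demand, and
 * commit the new parameters under the tree lock.
 */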
static int route4_set_parms(struct tcf_proto *tp, unsigned long base,
	struct route4_filter *f, u32 handle, struct route4_head *head,
	struct rtattr **tb, struct rtattr *est, int new)
{
	int err;
	u32 id = 0, to = 0, nhandle = 0x8000;
	struct route4_filter *fp;
	unsigned int h1;
	struct route4_bucket *b;
	struct tcf_exts e;

	err = tcf_exts_validate(tp, tb, est, &e, &route_ext_map);
	if (err < 0)
		return err;

	err = -EINVAL;
	if (tb[TCA_ROUTE4_CLASSID-1])
		if (RTA_PAYLOAD(tb[TCA_ROUTE4_CLASSID-1]) < sizeof(u32))
			goto errout;

	if (tb[TCA_ROUTE4_TO-1]) {
		if (new && handle & 0x8000)
			goto errout;
		if (RTA_PAYLOAD(tb[TCA_ROUTE4_TO-1]) < sizeof(u32))
			goto errout;
		to = *(u32*)RTA_DATA(tb[TCA_ROUTE4_TO-1]);
		if (to > 0xFF)
			goto errout;
		nhandle = to;
	}

	if (tb[TCA_ROUTE4_FROM-1]) {
		if (tb[TCA_ROUTE4_IIF-1])
			goto errout;
		if (RTA_PAYLOAD(tb[TCA_ROUTE4_FROM-1]) < sizeof(u32))
			goto errout;
		id = *(u32*)RTA_DATA(tb[TCA_ROUTE4_FROM-1]);
		if (id > 0xFF)
			goto errout;
		nhandle |= id << 16;
	} else if (tb[TCA_ROUTE4_IIF-1]) {
		if (RTA_PAYLOAD(tb[TCA_ROUTE4_IIF-1]) < sizeof(u32))
			goto errout;
		id = *(u32*)RTA_DATA(tb[TCA_ROUTE4_IIF-1]);
		if (id > 0x7FFF)
			goto errout;
		nhandle |= (id | 0x8000) << 16;
	} else
		nhandle |= 0xFFFF << 16;

	if (handle && new) {
		nhandle |= handle & 0x7F00;
		if (nhandle != handle)
			goto errout;
	}

	h1 = to_hash(nhandle);
	if ((b = head->table[h1]) == NULL) {
		err = -ENOBUFS;
		b = kzalloc(sizeof(struct route4_bucket), GFP_KERNEL);
		if (b == NULL)
			goto errout;

		tcf_tree_lock(tp);
		head->table[h1] = b;
		tcf_tree_unlock(tp);
	} else {
		unsigned int h2 = from_hash(nhandle >> 16);
		err = -EEXIST;
		for (fp = b->ht[h2]; fp; fp = fp->next)
			if (fp->handle == f->handle)
				goto errout;
	}

	tcf_tree_lock(tp);
	if (tb[TCA_ROUTE4_TO-1])
		f->id = to;

	if (tb[TCA_ROUTE4_FROM-1])
		f->id = to | id<<16;
	else if (tb[TCA_ROUTE4_IIF-1])
		f->iif = id;

	f->handle = nhandle;
	f->bkt = b;
	tcf_tree_unlock(tp);

	if (tb[TCA_ROUTE4_CLASSID-1]) {
		f->res.classid = *(u32*)RTA_DATA(tb[TCA_ROUTE4_CLASSID-1]);
		tcf_bind_filter(tp, &f->res, base);
	}

	tcf_exts_change(tp, &f->exts, &e);

	return 0;
errout:
	tcf_exts_destroy(tp, &e);
	return err;
}

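/*
 * Create or update a filter, then (re)insert it into its bucket in
 * ascending handle order; after a handle change the filter is also
 * unlinked from its old chain.
 */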
static int route4_change(struct tcf_proto *tp, unsigned long base,
		       u32 handle,
		       struct rtattr **tca,
		       unsigned long *arg)
{
	struct route4_head *head = tp->root;
	struct route4_filter *f, *f1, **fp;
	struct route4_bucket *b;
	struct rtattr *opt = tca[TCA_OPTIONS-1];
	struct rtattr *tb[TCA_ROUTE4_MAX];
	unsigned int h, th;
	u32 old_handle = 0;
	int err;

	if (opt == NULL)
		return handle ? -EINVAL : 0;

	if (rtattr_parse_nested(tb, TCA_ROUTE4_MAX, opt) < 0)
		return -EINVAL;

	if ((f = (struct route4_filter*)*arg) != NULL) {
		if (f->handle != handle && handle)
			return -EINVAL;

		if (f->bkt)
			old_handle = f->handle;

		err = route4_set_parms(tp, base, f, handle, head, tb,
			tca[TCA_RATE-1], 0);
		if (err < 0)
			return err;

		goto reinsert;
	}

	err = -ENOBUFS;
	if (head == NULL) {
		head = kzalloc(sizeof(struct route4_head), GFP_KERNEL);
		if (head == NULL)
			goto errout;

		tcf_tree_lock(tp);
		tp->root = head;
		tcf_tree_unlock(tp);
	}

	f = kzalloc(sizeof(struct route4_filter), GFP_KERNEL);
	if (f == NULL)
		goto errout;

	err = route4_set_parms(tp, base, f, handle, head, tb,
		tca[TCA_RATE-1], 1);
	if (err < 0)
		goto errout;

reinsert:
	h = from_hash(f->handle >> 16);
	for (fp = &f->bkt->ht[h]; (f1=*fp) != NULL; fp = &f1->next)
		if (f->handle < f1->handle)
			break;

	f->next = f1;
	tcf_tree_lock(tp);
	*fp = f;

	if (old_handle && f->handle != old_handle) {
		th = to_hash(old_handle);
		h = from_hash(old_handle >> 16);
		if ((b = head->table[th]) != NULL) {
			for (fp = &b->ht[h]; *fp; fp = &(*fp)->next) {
				if (*fp == f) {
					*fp = f->next;
					break;
				}
			}
		}
	}
	tcf_tree_unlock(tp);

	route4_reset_fastmap(tp->q->dev, head, f->id);
	*arg = (unsigned long)f;
	return 0;

errout:
	kfree(f);
	return err;
}

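/*
 * Visit every installed filter, honouring the walker's skip/count
 * bookkeeping, for full-classifier dumps.
 */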
static void route4_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
	struct route4_head *head = tp->root;
	unsigned h, h1;

	if (head == NULL)
		arg->stop = 1;

	if (arg->stop)
		return;

	for (h = 0; h <= 256; h++) {
		struct route4_bucket *b = head->table[h];

		if (b) {
			for (h1 = 0; h1 <= 32; h1++) {
				struct route4_filter *f;

				for (f = b->ht[h1]; f; f = f->next) {
					if (arg->count < arg->skip) {
						arg->count++;
						continue;
					}
					if (arg->fn(tp, (unsigned long)f, arg) < 0) {
						arg->stop = 1;
						return;
					}
					arg->count++;
				}
			}
		}
	}
}

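/*
 * Encode one filter back into netlink attributes, undoing the
 * handle packing done in route4_set_parms().
 */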
static int route4_dump(struct tcf_proto *tp, unsigned long fh,
		       struct sk_buff *skb, struct tcmsg *t)
{
	struct route4_filter *f = (struct route4_filter*)fh;
	unsigned char *b = skb->tail;
	struct rtattr *rta;
	u32 id;

	if (f == NULL)
		return skb->len;

	t->tcm_handle = f->handle;

	rta = (struct rtattr*)b;
	RTA_PUT(skb, TCA_OPTIONS, 0, NULL);

	if (!(f->handle&0x8000)) {
		id = f->id&0xFF;
		RTA_PUT(skb, TCA_ROUTE4_TO, sizeof(id), &id);
	}
	if (f->handle&0x80000000) {
		if ((f->handle>>16) != 0xFFFF)
			RTA_PUT(skb, TCA_ROUTE4_IIF, sizeof(f->iif), &f->iif);
	} else {
		id = f->id>>16;
		RTA_PUT(skb, TCA_ROUTE4_FROM, sizeof(id), &id);
	}
	if (f->res.classid)
		RTA_PUT(skb, TCA_ROUTE4_CLASSID, 4, &f->res.classid);

	if (tcf_exts_dump(skb, &f->exts, &route_ext_map) < 0)
		goto rtattr_failure;

	rta->rta_len = skb->tail - b;

	if (tcf_exts_dump_stats(skb, &f->exts, &route_ext_map) < 0)
		goto rtattr_failure;

	return skb->len;

rtattr_failure:
	skb_trim(skb, b - skb->data);
	return -1;
}

static struct tcf_proto_ops cls_route4_ops = {
	.next		=	NULL,
	.kind		=	"route",
	.classify	=	route4_classify,
	.init		=	route4_init,
	.destroy	=	route4_destroy,
	.get		=	route4_get,
	.put		=	route4_put,
	.change		=	route4_change,
	.delete		=	route4_delete,
	.walk		=	route4_walk,
	.dump		=	route4_dump,
	.owner		=	THIS_MODULE,
};

static int __init init_route4(void)
{
	return register_tcf_proto_ops(&cls_route4_ops);
}

static void __exit exit_route4(void)
{
	unregister_tcf_proto_ops(&cls_route4_ops);
}

module_init(init_route4)
module_exit(exit_route4)
MODULE_LICENSE("GPL");