/* Source: net/sched/cls_route.c, linux-2.6.22.y
 * (captured from tree state "[NET_SCHED]: sch_htb: use hrtimer based watchdog")
 */
/*
 * net/sched/cls_route.c	ROUTE4 classifier.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 */
12 #include <linux/module.h>
13 #include <asm/uaccess.h>
14 #include <asm/system.h>
15 #include <linux/bitops.h>
16 #include <linux/types.h>
17 #include <linux/kernel.h>
18 #include <linux/string.h>
19 #include <linux/mm.h>
20 #include <linux/socket.h>
21 #include <linux/sockios.h>
22 #include <linux/in.h>
23 #include <linux/errno.h>
24 #include <linux/interrupt.h>
25 #include <linux/if_ether.h>
26 #include <linux/inet.h>
27 #include <linux/netdevice.h>
28 #include <linux/etherdevice.h>
29 #include <linux/notifier.h>
30 #include <net/ip.h>
31 #include <net/route.h>
32 #include <linux/skbuff.h>
33 #include <net/sock.h>
34 #include <net/act_api.h>
35 #include <net/pkt_cls.h>
/*
 * 1. For now we assume that route tags < 256.
 *    This allows using direct table lookups instead of hash tables.
 * 2. For now we assume that "from TAG" and "fromdev DEV" statements
 *    are mutually exclusive.
 * 3. "to TAG from ANY" has higher priority than "to ANY from XXX".
 */
45 struct route4_fastmap
47 struct route4_filter *filter;
48 u32 id;
49 int iif;
52 struct route4_head
54 struct route4_fastmap fastmap[16];
55 struct route4_bucket *table[256+1];
58 struct route4_bucket
60 /* 16 FROM buckets + 16 IIF buckets + 1 wildcard bucket */
61 struct route4_filter *ht[16+16+1];
64 struct route4_filter
66 struct route4_filter *next;
67 u32 id;
68 int iif;
70 struct tcf_result res;
71 struct tcf_exts exts;
72 u32 handle;
73 struct route4_bucket *bkt;
76 #define ROUTE4_FAILURE ((struct route4_filter*)(-1L))
78 static struct tcf_ext_map route_ext_map = {
79 .police = TCA_ROUTE4_POLICE,
80 .action = TCA_ROUTE4_ACT
83 static __inline__ int route4_fastmap_hash(u32 id, int iif)
85 return id&0xF;
88 static inline
89 void route4_reset_fastmap(struct net_device *dev, struct route4_head *head, u32 id)
91 spin_lock_bh(&dev->queue_lock);
92 memset(head->fastmap, 0, sizeof(head->fastmap));
93 spin_unlock_bh(&dev->queue_lock);
96 static inline void
97 route4_set_fastmap(struct route4_head *head, u32 id, int iif,
98 struct route4_filter *f)
100 int h = route4_fastmap_hash(id, iif);
101 head->fastmap[h].id = id;
102 head->fastmap[h].iif = iif;
103 head->fastmap[h].filter = f;
106 static __inline__ int route4_hash_to(u32 id)
108 return id&0xFF;
111 static __inline__ int route4_hash_from(u32 id)
113 return (id>>16)&0xF;
116 static __inline__ int route4_hash_iif(int iif)
118 return 16 + ((iif>>16)&0xF);
121 static __inline__ int route4_hash_wild(void)
123 return 32;
126 #define ROUTE4_APPLY_RESULT() \
128 *res = f->res; \
129 if (tcf_exts_is_available(&f->exts)) { \
130 int r = tcf_exts_exec(skb, &f->exts, res); \
131 if (r < 0) { \
132 dont_cache = 1; \
133 continue; \
135 return r; \
136 } else if (!dont_cache) \
137 route4_set_fastmap(head, id, iif, f); \
138 return 0; \
141 static int route4_classify(struct sk_buff *skb, struct tcf_proto *tp,
142 struct tcf_result *res)
144 struct route4_head *head = (struct route4_head*)tp->root;
145 struct dst_entry *dst;
146 struct route4_bucket *b;
147 struct route4_filter *f;
148 u32 id, h;
149 int iif, dont_cache = 0;
151 if ((dst = skb->dst) == NULL)
152 goto failure;
154 id = dst->tclassid;
155 if (head == NULL)
156 goto old_method;
158 iif = ((struct rtable*)dst)->fl.iif;
160 h = route4_fastmap_hash(id, iif);
161 if (id == head->fastmap[h].id &&
162 iif == head->fastmap[h].iif &&
163 (f = head->fastmap[h].filter) != NULL) {
164 if (f == ROUTE4_FAILURE)
165 goto failure;
167 *res = f->res;
168 return 0;
171 h = route4_hash_to(id);
173 restart:
174 if ((b = head->table[h]) != NULL) {
175 for (f = b->ht[route4_hash_from(id)]; f; f = f->next)
176 if (f->id == id)
177 ROUTE4_APPLY_RESULT();
179 for (f = b->ht[route4_hash_iif(iif)]; f; f = f->next)
180 if (f->iif == iif)
181 ROUTE4_APPLY_RESULT();
183 for (f = b->ht[route4_hash_wild()]; f; f = f->next)
184 ROUTE4_APPLY_RESULT();
187 if (h < 256) {
188 h = 256;
189 id &= ~0xFFFF;
190 goto restart;
193 if (!dont_cache)
194 route4_set_fastmap(head, id, iif, ROUTE4_FAILURE);
195 failure:
196 return -1;
198 old_method:
199 if (id && (TC_H_MAJ(id) == 0 ||
200 !(TC_H_MAJ(id^tp->q->handle)))) {
201 res->classid = id;
202 res->class = 0;
203 return 0;
205 return -1;
208 static inline u32 to_hash(u32 id)
210 u32 h = id&0xFF;
211 if (id&0x8000)
212 h += 256;
213 return h;
216 static inline u32 from_hash(u32 id)
218 id &= 0xFFFF;
219 if (id == 0xFFFF)
220 return 32;
221 if (!(id & 0x8000)) {
222 if (id > 255)
223 return 256;
224 return id&0xF;
226 return 16 + (id&0xF);
229 static unsigned long route4_get(struct tcf_proto *tp, u32 handle)
231 struct route4_head *head = (struct route4_head*)tp->root;
232 struct route4_bucket *b;
233 struct route4_filter *f;
234 unsigned h1, h2;
236 if (!head)
237 return 0;
239 h1 = to_hash(handle);
240 if (h1 > 256)
241 return 0;
243 h2 = from_hash(handle>>16);
244 if (h2 > 32)
245 return 0;
247 if ((b = head->table[h1]) != NULL) {
248 for (f = b->ht[h2]; f; f = f->next)
249 if (f->handle == handle)
250 return (unsigned long)f;
252 return 0;
255 static void route4_put(struct tcf_proto *tp, unsigned long f)
259 static int route4_init(struct tcf_proto *tp)
261 return 0;
264 static inline void
265 route4_delete_filter(struct tcf_proto *tp, struct route4_filter *f)
267 tcf_unbind_filter(tp, &f->res);
268 tcf_exts_destroy(tp, &f->exts);
269 kfree(f);
272 static void route4_destroy(struct tcf_proto *tp)
274 struct route4_head *head = xchg(&tp->root, NULL);
275 int h1, h2;
277 if (head == NULL)
278 return;
280 for (h1=0; h1<=256; h1++) {
281 struct route4_bucket *b;
283 if ((b = head->table[h1]) != NULL) {
284 for (h2=0; h2<=32; h2++) {
285 struct route4_filter *f;
287 while ((f = b->ht[h2]) != NULL) {
288 b->ht[h2] = f->next;
289 route4_delete_filter(tp, f);
292 kfree(b);
295 kfree(head);
298 static int route4_delete(struct tcf_proto *tp, unsigned long arg)
300 struct route4_head *head = (struct route4_head*)tp->root;
301 struct route4_filter **fp, *f = (struct route4_filter*)arg;
302 unsigned h = 0;
303 struct route4_bucket *b;
304 int i;
306 if (!head || !f)
307 return -EINVAL;
309 h = f->handle;
310 b = f->bkt;
312 for (fp = &b->ht[from_hash(h>>16)]; *fp; fp = &(*fp)->next) {
313 if (*fp == f) {
314 tcf_tree_lock(tp);
315 *fp = f->next;
316 tcf_tree_unlock(tp);
318 route4_reset_fastmap(tp->q->dev, head, f->id);
319 route4_delete_filter(tp, f);
321 /* Strip tree */
323 for (i=0; i<=32; i++)
324 if (b->ht[i])
325 return 0;
327 /* OK, session has no flows */
328 tcf_tree_lock(tp);
329 head->table[to_hash(h)] = NULL;
330 tcf_tree_unlock(tp);
332 kfree(b);
333 return 0;
336 return 0;
339 static int route4_set_parms(struct tcf_proto *tp, unsigned long base,
340 struct route4_filter *f, u32 handle, struct route4_head *head,
341 struct rtattr **tb, struct rtattr *est, int new)
343 int err;
344 u32 id = 0, to = 0, nhandle = 0x8000;
345 struct route4_filter *fp;
346 unsigned int h1;
347 struct route4_bucket *b;
348 struct tcf_exts e;
350 err = tcf_exts_validate(tp, tb, est, &e, &route_ext_map);
351 if (err < 0)
352 return err;
354 err = -EINVAL;
355 if (tb[TCA_ROUTE4_CLASSID-1])
356 if (RTA_PAYLOAD(tb[TCA_ROUTE4_CLASSID-1]) < sizeof(u32))
357 goto errout;
359 if (tb[TCA_ROUTE4_TO-1]) {
360 if (new && handle & 0x8000)
361 goto errout;
362 if (RTA_PAYLOAD(tb[TCA_ROUTE4_TO-1]) < sizeof(u32))
363 goto errout;
364 to = *(u32*)RTA_DATA(tb[TCA_ROUTE4_TO-1]);
365 if (to > 0xFF)
366 goto errout;
367 nhandle = to;
370 if (tb[TCA_ROUTE4_FROM-1]) {
371 if (tb[TCA_ROUTE4_IIF-1])
372 goto errout;
373 if (RTA_PAYLOAD(tb[TCA_ROUTE4_FROM-1]) < sizeof(u32))
374 goto errout;
375 id = *(u32*)RTA_DATA(tb[TCA_ROUTE4_FROM-1]);
376 if (id > 0xFF)
377 goto errout;
378 nhandle |= id << 16;
379 } else if (tb[TCA_ROUTE4_IIF-1]) {
380 if (RTA_PAYLOAD(tb[TCA_ROUTE4_IIF-1]) < sizeof(u32))
381 goto errout;
382 id = *(u32*)RTA_DATA(tb[TCA_ROUTE4_IIF-1]);
383 if (id > 0x7FFF)
384 goto errout;
385 nhandle |= (id | 0x8000) << 16;
386 } else
387 nhandle |= 0xFFFF << 16;
389 if (handle && new) {
390 nhandle |= handle & 0x7F00;
391 if (nhandle != handle)
392 goto errout;
395 h1 = to_hash(nhandle);
396 if ((b = head->table[h1]) == NULL) {
397 err = -ENOBUFS;
398 b = kzalloc(sizeof(struct route4_bucket), GFP_KERNEL);
399 if (b == NULL)
400 goto errout;
402 tcf_tree_lock(tp);
403 head->table[h1] = b;
404 tcf_tree_unlock(tp);
405 } else {
406 unsigned int h2 = from_hash(nhandle >> 16);
407 err = -EEXIST;
408 for (fp = b->ht[h2]; fp; fp = fp->next)
409 if (fp->handle == f->handle)
410 goto errout;
413 tcf_tree_lock(tp);
414 if (tb[TCA_ROUTE4_TO-1])
415 f->id = to;
417 if (tb[TCA_ROUTE4_FROM-1])
418 f->id = to | id<<16;
419 else if (tb[TCA_ROUTE4_IIF-1])
420 f->iif = id;
422 f->handle = nhandle;
423 f->bkt = b;
424 tcf_tree_unlock(tp);
426 if (tb[TCA_ROUTE4_CLASSID-1]) {
427 f->res.classid = *(u32*)RTA_DATA(tb[TCA_ROUTE4_CLASSID-1]);
428 tcf_bind_filter(tp, &f->res, base);
431 tcf_exts_change(tp, &f->exts, &e);
433 return 0;
434 errout:
435 tcf_exts_destroy(tp, &e);
436 return err;
439 static int route4_change(struct tcf_proto *tp, unsigned long base,
440 u32 handle,
441 struct rtattr **tca,
442 unsigned long *arg)
444 struct route4_head *head = tp->root;
445 struct route4_filter *f, *f1, **fp;
446 struct route4_bucket *b;
447 struct rtattr *opt = tca[TCA_OPTIONS-1];
448 struct rtattr *tb[TCA_ROUTE4_MAX];
449 unsigned int h, th;
450 u32 old_handle = 0;
451 int err;
453 if (opt == NULL)
454 return handle ? -EINVAL : 0;
456 if (rtattr_parse_nested(tb, TCA_ROUTE4_MAX, opt) < 0)
457 return -EINVAL;
459 if ((f = (struct route4_filter*)*arg) != NULL) {
460 if (f->handle != handle && handle)
461 return -EINVAL;
463 if (f->bkt)
464 old_handle = f->handle;
466 err = route4_set_parms(tp, base, f, handle, head, tb,
467 tca[TCA_RATE-1], 0);
468 if (err < 0)
469 return err;
471 goto reinsert;
474 err = -ENOBUFS;
475 if (head == NULL) {
476 head = kzalloc(sizeof(struct route4_head), GFP_KERNEL);
477 if (head == NULL)
478 goto errout;
480 tcf_tree_lock(tp);
481 tp->root = head;
482 tcf_tree_unlock(tp);
485 f = kzalloc(sizeof(struct route4_filter), GFP_KERNEL);
486 if (f == NULL)
487 goto errout;
489 err = route4_set_parms(tp, base, f, handle, head, tb,
490 tca[TCA_RATE-1], 1);
491 if (err < 0)
492 goto errout;
494 reinsert:
495 h = from_hash(f->handle >> 16);
496 for (fp = &f->bkt->ht[h]; (f1=*fp) != NULL; fp = &f1->next)
497 if (f->handle < f1->handle)
498 break;
500 f->next = f1;
501 tcf_tree_lock(tp);
502 *fp = f;
504 if (old_handle && f->handle != old_handle) {
505 th = to_hash(old_handle);
506 h = from_hash(old_handle >> 16);
507 if ((b = head->table[th]) != NULL) {
508 for (fp = &b->ht[h]; *fp; fp = &(*fp)->next) {
509 if (*fp == f) {
510 *fp = f->next;
511 break;
516 tcf_tree_unlock(tp);
518 route4_reset_fastmap(tp->q->dev, head, f->id);
519 *arg = (unsigned long)f;
520 return 0;
522 errout:
523 kfree(f);
524 return err;
527 static void route4_walk(struct tcf_proto *tp, struct tcf_walker *arg)
529 struct route4_head *head = tp->root;
530 unsigned h, h1;
532 if (head == NULL)
533 arg->stop = 1;
535 if (arg->stop)
536 return;
538 for (h = 0; h <= 256; h++) {
539 struct route4_bucket *b = head->table[h];
541 if (b) {
542 for (h1 = 0; h1 <= 32; h1++) {
543 struct route4_filter *f;
545 for (f = b->ht[h1]; f; f = f->next) {
546 if (arg->count < arg->skip) {
547 arg->count++;
548 continue;
550 if (arg->fn(tp, (unsigned long)f, arg) < 0) {
551 arg->stop = 1;
552 return;
554 arg->count++;
561 static int route4_dump(struct tcf_proto *tp, unsigned long fh,
562 struct sk_buff *skb, struct tcmsg *t)
564 struct route4_filter *f = (struct route4_filter*)fh;
565 unsigned char *b = skb->tail;
566 struct rtattr *rta;
567 u32 id;
569 if (f == NULL)
570 return skb->len;
572 t->tcm_handle = f->handle;
574 rta = (struct rtattr*)b;
575 RTA_PUT(skb, TCA_OPTIONS, 0, NULL);
577 if (!(f->handle&0x8000)) {
578 id = f->id&0xFF;
579 RTA_PUT(skb, TCA_ROUTE4_TO, sizeof(id), &id);
581 if (f->handle&0x80000000) {
582 if ((f->handle>>16) != 0xFFFF)
583 RTA_PUT(skb, TCA_ROUTE4_IIF, sizeof(f->iif), &f->iif);
584 } else {
585 id = f->id>>16;
586 RTA_PUT(skb, TCA_ROUTE4_FROM, sizeof(id), &id);
588 if (f->res.classid)
589 RTA_PUT(skb, TCA_ROUTE4_CLASSID, 4, &f->res.classid);
591 if (tcf_exts_dump(skb, &f->exts, &route_ext_map) < 0)
592 goto rtattr_failure;
594 rta->rta_len = skb->tail - b;
596 if (tcf_exts_dump_stats(skb, &f->exts, &route_ext_map) < 0)
597 goto rtattr_failure;
599 return skb->len;
601 rtattr_failure:
602 skb_trim(skb, b - skb->data);
603 return -1;
606 static struct tcf_proto_ops cls_route4_ops = {
607 .next = NULL,
608 .kind = "route",
609 .classify = route4_classify,
610 .init = route4_init,
611 .destroy = route4_destroy,
612 .get = route4_get,
613 .put = route4_put,
614 .change = route4_change,
615 .delete = route4_delete,
616 .walk = route4_walk,
617 .dump = route4_dump,
618 .owner = THIS_MODULE,
621 static int __init init_route4(void)
623 return register_tcf_proto_ops(&cls_route4_ops);
626 static void __exit exit_route4(void)
628 unregister_tcf_proto_ops(&cls_route4_ops);
631 module_init(init_route4)
632 module_exit(exit_route4)
633 MODULE_LICENSE("GPL");