/*
 * net/sched/cls_route.c	ROUTE4 classifier.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 */

#include <linux/module.h>
#include <linux/config.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/bitops.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/notifier.h>
#include <net/ip.h>
#include <net/route.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/pkt_sched.h>

/*
 * 1. For now we assume that route tags < 256.
 *    This allows direct table lookups instead of hash tables.
 * 2. For now we assume that "from TAG" and "fromdev DEV" statements
 *    are mutually exclusive.
 * 3. "to TAG from ANY" has higher priority than "to ANY from XXX".
 */
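
/*
 * A sketch of the key layout, as read from the code below rather than
 * from any formal spec: the classifier keys on dst->tclassid, whose low
 * 16 bits carry the destination realm ("to" tag) and whose high 16 bits
 * carry the source realm ("from" tag).  Filter handles pack the same
 * information: bits 0-7 hold the "to" tag (bit 15 set means "to ANY"),
 * and bits 16-31 hold either the "from" tag, the input interface index
 * or'ed with 0x8000, or 0xFFFF when the source is fully wild.
 */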

struct route4_fastmap
{
	struct route4_filter	*filter;
	u32			id;
	int			iif;
};

struct route4_head
{
	struct route4_fastmap	fastmap[16];
	struct route4_bucket	*table[256+1];
};

struct route4_bucket
{
	struct route4_filter	*ht[16+16+1];
};

struct route4_filter
{
	struct route4_filter	*next;
	u32			id;
	int			iif;

	struct tcf_result	res;
#ifdef CONFIG_NET_CLS_POLICE
	struct tcf_police	*police;
#endif

	u32			handle;
	struct route4_bucket	*bkt;
};

#define ROUTE4_FAILURE ((struct route4_filter*)(-1L))

static __inline__ int route4_fastmap_hash(u32 id, int iif)
{
	return id&0xF;
}

static void route4_reset_fastmap(struct net_device *dev, struct route4_head *head, u32 id)
{
	spin_lock_bh(&dev->queue_lock);
	memset(head->fastmap, 0, sizeof(head->fastmap));
	spin_unlock_bh(&dev->queue_lock);
}

static void __inline__
route4_set_fastmap(struct route4_head *head, u32 id, int iif,
		   struct route4_filter *f)
{
	int h = route4_fastmap_hash(id, iif);
	head->fastmap[h].id = id;
	head->fastmap[h].iif = iif;
	head->fastmap[h].filter = f;
}

static __inline__ int route4_hash_to(u32 id)
{
	return id&0xFF;
}

static __inline__ int route4_hash_from(u32 id)
{
	return (id>>16)&0xF;
}

static __inline__ int route4_hash_iif(int iif)
{
	return 16 + ((iif>>16)&0xF);
}

static __inline__ int route4_hash_wild(void)
{
	return 32;
}
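
/*
 * A worked example of the indexing above (illustrative only): for
 * id == 0x00050003 the "to" bucket is route4_hash_to(id) == 3 and the
 * source slot is route4_hash_from(id) == 5.  Within a bucket's ht[],
 * slots 0-15 hold "from TAG" filters, slots 16-31 hold "fromdev DEV"
 * filters, and slot 32 (route4_hash_wild) holds wildcard-source ones.
 */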

#ifdef CONFIG_NET_CLS_POLICE
#define IF_ROUTE_POLICE \
if (f->police) { \
	int pol_res = tcf_police(skb, f->police); \
	if (pol_res >= 0) return pol_res; \
	dont_cache = 1; \
	continue; \
} \
if (!dont_cache)
#else
#define IF_ROUTE_POLICE
#endif
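
/*
 * Our reading of IF_ROUTE_POLICE, not a documented contract: a policing
 * verdict >= 0 is returned to the caller immediately; on a negative
 * verdict the filter is skipped and dont_cache suppresses any fastmap
 * caching for this lookup.  Without CONFIG_NET_CLS_POLICE the macro is
 * empty, so every result is cached.
 */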

static int route4_classify(struct sk_buff *skb, struct tcf_proto *tp,
			   struct tcf_result *res)
{
	struct route4_head *head = (struct route4_head*)tp->root;
	struct dst_entry *dst;
	struct route4_bucket *b;
	struct route4_filter *f;
#ifdef CONFIG_NET_CLS_POLICE
	int dont_cache = 0;
#endif
	u32 id, h;
	int iif;

	if ((dst = skb->dst) == NULL)
		goto failure;

	id = dst->tclassid;
	if (head == NULL)
		goto old_method;

	iif = ((struct rtable*)dst)->key.iif;

	h = route4_fastmap_hash(id, iif);
	if (id == head->fastmap[h].id &&
	    iif == head->fastmap[h].iif &&
	    (f = head->fastmap[h].filter) != NULL) {
		if (f == ROUTE4_FAILURE)
			goto failure;

		*res = f->res;
		return 0;
	}

	h = route4_hash_to(id);

restart:
	if ((b = head->table[h]) != NULL) {
		f = b->ht[route4_hash_from(id)];

		for ( ; f; f = f->next) {
			if (f->id == id) {
				*res = f->res;
				IF_ROUTE_POLICE route4_set_fastmap(head, id, iif, f);
				return 0;
			}
		}

		for (f = b->ht[route4_hash_iif(iif)]; f; f = f->next) {
			if (f->iif == iif) {
				*res = f->res;
				IF_ROUTE_POLICE route4_set_fastmap(head, id, iif, f);
				return 0;
			}
		}

		for (f = b->ht[route4_hash_wild()]; f; f = f->next) {
			*res = f->res;
			IF_ROUTE_POLICE route4_set_fastmap(head, id, iif, f);
			return 0;
		}
	}
	if (h < 256) {
		h = 256;
		id &= ~0xFFFF;
		goto restart;
	}

#ifdef CONFIG_NET_CLS_POLICE
	if (!dont_cache)
#endif
		route4_set_fastmap(head, id, iif, ROUTE4_FAILURE);
failure:
	return -1;

old_method:
	if (id && (TC_H_MAJ(id) == 0 ||
		   !(TC_H_MAJ(id^tp->q->handle)))) {
		res->classid = id;
		res->class = 0;
		return 0;
	}
	return -1;
}
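
/*
 * Typical userspace driving of this classifier (hypothetical device,
 * realm and class numbers): the routing table assigns realms, and the
 * filter matches on them.
 *
 *	ip route add 10.0.0.0/8 via 192.168.1.1 realm 2
 *	tc filter add dev eth0 parent 1:0 protocol ip prio 100 \
 *		route to 2 classid 1:10
 */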

static u32 to_hash(u32 id)
{
	u32 h = id&0xFF;
	if (id&0x8000)
		h += 256;
	return h;
}

static u32 from_hash(u32 id)
{
	id &= 0xFFFF;
	if (id == 0xFFFF)
		return 32;
	if (!(id & 0x8000)) {
		if (id > 255)
			return 256;
		return id&0xF;
	}
	return 16 + (id&0xF);
}
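
/*
 * Note that to_hash()/from_hash() map a filter *handle* to bucket
 * indices, unlike the route4_hash_*() helpers above, which map a
 * packet's route id.  from_hash() returning 256 is an out-of-range
 * marker (valid slots are 0..32); callers such as route4_get() treat
 * it as "no such filter".
 */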

static unsigned long route4_get(struct tcf_proto *tp, u32 handle)
{
	struct route4_head *head = (struct route4_head*)tp->root;
	struct route4_bucket *b;
	struct route4_filter *f;
	unsigned h1, h2;

	if (!head)
		return 0;

	h1 = to_hash(handle);
	if (h1 > 256)
		return 0;

	h2 = from_hash(handle>>16);
	if (h2 > 32)
		return 0;

	if ((b = head->table[h1]) != NULL) {
		for (f = b->ht[h2]; f; f = f->next)
			if (f->handle == handle)
				return (unsigned long)f;
	}
	return 0;
}

static void route4_put(struct tcf_proto *tp, unsigned long f)
{
}

static int route4_init(struct tcf_proto *tp)
{
	MOD_INC_USE_COUNT;
	return 0;
}

static void route4_destroy(struct tcf_proto *tp)
{
	struct route4_head *head = xchg(&tp->root, NULL);
	int h1, h2;

	if (head == NULL) {
		MOD_DEC_USE_COUNT;
		return;
	}

	for (h1=0; h1<=256; h1++) {
		struct route4_bucket *b;

		if ((b = head->table[h1]) != NULL) {
			for (h2=0; h2<=32; h2++) {
				struct route4_filter *f;

				while ((f = b->ht[h2]) != NULL) {
					unsigned long cl;

					b->ht[h2] = f->next;
					if ((cl = __cls_set_class(&f->res.class, 0)) != 0)
						tp->q->ops->cl_ops->unbind_tcf(tp->q, cl);
#ifdef CONFIG_NET_CLS_POLICE
					tcf_police_release(f->police);
#endif
					kfree(f);
				}
			}
			kfree(b);
		}
	}
	kfree(head);
	MOD_DEC_USE_COUNT;
}

static int route4_delete(struct tcf_proto *tp, unsigned long arg)
{
	struct route4_head *head = (struct route4_head*)tp->root;
	struct route4_filter **fp, *f = (struct route4_filter*)arg;
	struct route4_bucket *b;
	unsigned h;
	int i;

	if (!head || !f)
		return -EINVAL;

	/* Read the handle only after the NULL check above. */
	h = f->handle;
	b = f->bkt;

	for (fp = &b->ht[from_hash(h>>16)]; *fp; fp = &(*fp)->next) {
		if (*fp == f) {
			unsigned long cl;

			tcf_tree_lock(tp);
			*fp = f->next;
			tcf_tree_unlock(tp);

			route4_reset_fastmap(tp->q->dev, head, f->id);

			if ((cl = cls_set_class(tp, &f->res.class, 0)) != 0)
				tp->q->ops->cl_ops->unbind_tcf(tp->q, cl);

#ifdef CONFIG_NET_CLS_POLICE
			tcf_police_release(f->police);
#endif
			kfree(f);

			/* Strip tree */

			for (i=0; i<=32; i++)
				if (b->ht[i])
					return 0;

			/* OK, session has no flows */

			tcf_tree_lock(tp);
			head->table[to_hash(h)] = NULL;
			tcf_tree_unlock(tp);

			kfree(b);
			return 0;
		}
	}
	return 0;
}

static int route4_change(struct tcf_proto *tp, unsigned long base,
			 u32 handle,
			 struct rtattr **tca,
			 unsigned long *arg)
{
	struct route4_head *head = tp->root;
	struct route4_filter *f, *f1, **ins_f;
	struct route4_bucket *b;
	struct rtattr *opt = tca[TCA_OPTIONS-1];
	struct rtattr *tb[TCA_ROUTE4_MAX];
	unsigned h1, h2;
	int err;

	if (opt == NULL)
		return handle ? -EINVAL : 0;

	if (rtattr_parse(tb, TCA_ROUTE4_MAX, RTA_DATA(opt), RTA_PAYLOAD(opt)) < 0)
		return -EINVAL;

	if ((f = (struct route4_filter*)*arg) != NULL) {
		/* Node exists: adjust only classid */

		if (f->handle != handle && handle)
			return -EINVAL;
		if (tb[TCA_ROUTE4_CLASSID-1]) {
			unsigned long cl;

			f->res.classid = *(u32*)RTA_DATA(tb[TCA_ROUTE4_CLASSID-1]);
			cl = cls_set_class(tp, &f->res.class, tp->q->ops->cl_ops->bind_tcf(tp->q, base, f->res.classid));
			if (cl)
				tp->q->ops->cl_ops->unbind_tcf(tp->q, cl);
		}
#ifdef CONFIG_NET_CLS_POLICE
		if (tb[TCA_ROUTE4_POLICE-1]) {
			struct tcf_police *police = tcf_police_locate(tb[TCA_ROUTE4_POLICE-1], tca[TCA_RATE-1]);

			tcf_tree_lock(tp);
			police = xchg(&f->police, police);
			tcf_tree_unlock(tp);

			tcf_police_release(police);
		}
#endif
		return 0;
	}

	/* Now more serious part... */

	if (head == NULL) {
		head = kmalloc(sizeof(struct route4_head), GFP_KERNEL);
		if (head == NULL)
			return -ENOBUFS;
		memset(head, 0, sizeof(struct route4_head));

		tcf_tree_lock(tp);
		tp->root = head;
		tcf_tree_unlock(tp);
	}

	f = kmalloc(sizeof(struct route4_filter), GFP_KERNEL);
	if (f == NULL)
		return -ENOBUFS;

	memset(f, 0, sizeof(*f));

	err = -EINVAL;
	f->handle = 0x8000;
	if (tb[TCA_ROUTE4_TO-1]) {
		if (handle&0x8000)
			goto errout;
		if (RTA_PAYLOAD(tb[TCA_ROUTE4_TO-1]) < 4)
			goto errout;
		f->id = *(u32*)RTA_DATA(tb[TCA_ROUTE4_TO-1]);
		if (f->id > 0xFF)
			goto errout;
		f->handle = f->id;
	}
	if (tb[TCA_ROUTE4_FROM-1]) {
		u32 sid;
		if (tb[TCA_ROUTE4_IIF-1])
			goto errout;
		if (RTA_PAYLOAD(tb[TCA_ROUTE4_FROM-1]) < 4)
			goto errout;
		sid = (*(u32*)RTA_DATA(tb[TCA_ROUTE4_FROM-1]));
		if (sid > 0xFF)
			goto errout;
		f->handle |= sid<<16;
		f->id |= sid<<16;
	} else if (tb[TCA_ROUTE4_IIF-1]) {
		if (RTA_PAYLOAD(tb[TCA_ROUTE4_IIF-1]) < 4)
			goto errout;
		f->iif = *(u32*)RTA_DATA(tb[TCA_ROUTE4_IIF-1]);
		if (f->iif > 0x7FFF)
			goto errout;
		f->handle |= (f->iif|0x8000)<<16;
	} else
		f->handle |= 0xFFFF<<16;

	if (handle) {
		f->handle |= handle&0x7F00;
		if (f->handle != handle)
			goto errout;
	}

	if (tb[TCA_ROUTE4_CLASSID-1]) {
		if (RTA_PAYLOAD(tb[TCA_ROUTE4_CLASSID-1]) < 4)
			goto errout;
		f->res.classid = *(u32*)RTA_DATA(tb[TCA_ROUTE4_CLASSID-1]);
	}

	h1 = to_hash(f->handle);
	if ((b = head->table[h1]) == NULL) {
		err = -ENOBUFS;
		b = kmalloc(sizeof(struct route4_bucket), GFP_KERNEL);
		if (b == NULL)
			goto errout;
		memset(b, 0, sizeof(*b));

		tcf_tree_lock(tp);
		head->table[h1] = b;
		tcf_tree_unlock(tp);
	}
	f->bkt = b;

	err = -EEXIST;
	h2 = from_hash(f->handle>>16);
	for (ins_f = &b->ht[h2]; (f1=*ins_f) != NULL; ins_f = &f1->next) {
		if (f->handle < f1->handle)
			break;
		if (f1->handle == f->handle)
			goto errout;
	}

	cls_set_class(tp, &f->res.class, tp->q->ops->cl_ops->bind_tcf(tp->q, base, f->res.classid));
#ifdef CONFIG_NET_CLS_POLICE
	if (tb[TCA_ROUTE4_POLICE-1])
		f->police = tcf_police_locate(tb[TCA_ROUTE4_POLICE-1], tca[TCA_RATE-1]);
#endif

	f->next = f1;
	tcf_tree_lock(tp);
	*ins_f = f;
	tcf_tree_unlock(tp);

	route4_reset_fastmap(tp->q->dev, head, f->id);
	*arg = (unsigned long)f;
	return 0;

errout:
	if (f)
		kfree(f);
	return err;
}
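
/*
 * A brief note on the insertion above (our summary of the code, not a
 * stated invariant): each ht[] chain is kept sorted by ascending
 * handle, so walk order is deterministic, and a duplicate handle is
 * rejected with -EEXIST before the new node is linked in.
 */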

static void route4_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
	struct route4_head *head = tp->root;
	unsigned h, h1;

	if (head == NULL)
		arg->stop = 1;

	if (arg->stop)
		return;

	for (h = 0; h <= 256; h++) {
		struct route4_bucket *b = head->table[h];

		if (b) {
			for (h1 = 0; h1 <= 32; h1++) {
				struct route4_filter *f;

				for (f = b->ht[h1]; f; f = f->next) {
					if (arg->count < arg->skip) {
						arg->count++;
						continue;
					}
					if (arg->fn(tp, (unsigned long)f, arg) < 0) {
						arg->stop = 1;
						break;
					}
					arg->count++;
				}
			}
		}
	}
}

#ifdef CONFIG_RTNETLINK
static int route4_dump(struct tcf_proto *tp, unsigned long fh,
		       struct sk_buff *skb, struct tcmsg *t)
{
	struct route4_filter *f = (struct route4_filter*)fh;
	unsigned char *b = skb->tail;
	struct rtattr *rta;
	u32 id;

	if (f == NULL)
		return skb->len;

	t->tcm_handle = f->handle;

	rta = (struct rtattr*)b;
	RTA_PUT(skb, TCA_OPTIONS, 0, NULL);

	if (!(f->handle&0x8000)) {
		id = f->id&0xFF;
		RTA_PUT(skb, TCA_ROUTE4_TO, sizeof(id), &id);
	}
	if (f->handle&0x80000000) {
		if ((f->handle>>16) != 0xFFFF)
			RTA_PUT(skb, TCA_ROUTE4_IIF, sizeof(f->iif), &f->iif);
	} else {
		id = f->id>>16;
		RTA_PUT(skb, TCA_ROUTE4_FROM, sizeof(id), &id);
	}
	if (f->res.classid)
		RTA_PUT(skb, TCA_ROUTE4_CLASSID, 4, &f->res.classid);
#ifdef CONFIG_NET_CLS_POLICE
	if (f->police) {
		struct rtattr * p_rta = (struct rtattr*)skb->tail;

		RTA_PUT(skb, TCA_ROUTE4_POLICE, 0, NULL);

		if (tcf_police_dump(skb, f->police) < 0)
			goto rtattr_failure;

		p_rta->rta_len = skb->tail - (u8*)p_rta;
	}
#endif

	rta->rta_len = skb->tail - b;
#ifdef CONFIG_NET_CLS_POLICE
	if (f->police) {
		if (qdisc_copy_stats(skb, &f->police->stats))
			goto rtattr_failure;
	}
#endif
	return skb->len;

rtattr_failure:
	skb_trim(skb, b - skb->data);
	return -1;
}
#endif

struct tcf_proto_ops cls_route4_ops = {
	NULL,
	"route",
	route4_classify,
	route4_init,
	route4_destroy,

	route4_get,
	route4_put,
	route4_change,
	route4_delete,
	route4_walk,
#ifdef CONFIG_RTNETLINK
	route4_dump
#else
	NULL
#endif
};

#ifdef MODULE
int init_module(void)
{
	return register_tcf_proto_ops(&cls_route4_ops);
}

void cleanup_module(void)
{
	unregister_tcf_proto_ops(&cls_route4_ops);
}
#endif