/*
 * net/sched/cls_route.c	ROUTE4 classifier.
 *
 *	This program is free software; you can redistribute it and/or
 *	modify it under the terms of the GNU General Public License
 *	as published by the Free Software Foundation; either version
 *	2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 */
#include <linux/module.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <linux/bitops.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/socket.h>
#include <linux/sockios.h>
#include <linux/in.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/if_ether.h>
#include <linux/inet.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/notifier.h>
#include <net/ip.h>
#include <net/route.h>
#include <linux/skbuff.h>
#include <net/sock.h>
#include <net/act_api.h>
#include <net/pkt_cls.h>
/*
 * 1. For now we assume that route tags < 256.
 *    This allows direct table lookups instead of hash tables.
 * 2. For now we assume that "from TAG" and "fromdev DEV" statements
 *    are mutually exclusive.
 * 3. "to TAG from ANY" has higher priority than "to ANY from XXX".
 */
struct route4_fastmap
{
	struct route4_filter	*filter;
	u32			id;
	int			iif;
};

struct route4_head
{
	struct route4_fastmap	fastmap[16];
	struct route4_bucket	*table[256+1];
};

struct route4_bucket
{
	/* 16 FROM buckets + 16 IIF buckets + 1 wildcard bucket */
	struct route4_filter	*ht[16+16+1];
};

struct route4_filter
{
	struct route4_filter	*next;
	u32			id;
	int			iif;

	struct tcf_result	res;
	struct tcf_exts		exts;
	u32			handle;
	struct route4_bucket	*bkt;
};
/* Sentinel cached in the fastmap to remember a failed lookup. */
#define ROUTE4_FAILURE ((struct route4_filter*)(-1L))
static struct tcf_ext_map route_ext_map = {
	.police = TCA_ROUTE4_POLICE,
	.action = TCA_ROUTE4_ACT
};
static __inline__ int route4_fastmap_hash(u32 id, int iif)
{
	return id & 0xF;
}
static inline
void route4_reset_fastmap(struct net_device *dev, struct route4_head *head, u32 id)
{
	spin_lock_bh(&dev->queue_lock);
	memset(head->fastmap, 0, sizeof(head->fastmap));
	spin_unlock_bh(&dev->queue_lock);
}
static void __inline__
route4_set_fastmap(struct route4_head *head, u32 id, int iif,
		   struct route4_filter *f)
{
	int h = route4_fastmap_hash(id, iif);
	head->fastmap[h].id = id;
	head->fastmap[h].iif = iif;
	head->fastmap[h].filter = f;
}
static __inline__ int route4_hash_to(u32 id)
{
	return id & 0xFF;
}
static __inline__ int route4_hash_from(u32 id)
{
	return (id >> 16) & 0xF;
}
static __inline__ int route4_hash_iif(int iif)
{
	return 16 + ((iif >> 16) & 0xF);
}
static __inline__ int route4_hash_wild(void)
{
	return 32;
}
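
/*
 * Worked example (illustrative, values made up): for tclassid
 * id = 0x00030007 ("from" realm 3, "to" realm 7) arriving on iif 2,
 * route4_hash_to(id) selects bucket table[7], and the chains tried
 * in priority order are ht[route4_hash_from(id)] = ht[3],
 * ht[route4_hash_iif(2)] = ht[16] and ht[route4_hash_wild()] = ht[32].
 */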
/*
 * Apply a matched filter: run its extensions/actions if any; a negative
 * action result suppresses fastmap caching and keeps scanning.
 */
#define ROUTE4_APPLY_RESULT()					\
{								\
	*res = f->res;						\
	if (tcf_exts_is_available(&f->exts)) {			\
		int r = tcf_exts_exec(skb, &f->exts, res);	\
		if (r < 0) {					\
			dont_cache = 1;				\
			continue;				\
		}						\
		return r;					\
	} else if (!dont_cache)					\
		route4_set_fastmap(head, id, iif, f);		\
	return 0;						\
}
static int route4_classify(struct sk_buff *skb, struct tcf_proto *tp,
			   struct tcf_result *res)
{
	struct route4_head *head = (struct route4_head*)tp->root;
	struct dst_entry *dst;
	struct route4_bucket *b;
	struct route4_filter *f;
	u32 id, h;
	int iif, dont_cache = 0;

	if ((dst = skb->dst) == NULL)
		goto failure;

	id = dst->tclassid;
	if (head == NULL)
		goto old_method;

	iif = ((struct rtable*)dst)->fl.iif;

	h = route4_fastmap_hash(id, iif);
	if (id == head->fastmap[h].id &&
	    iif == head->fastmap[h].iif &&
	    (f = head->fastmap[h].filter) != NULL) {
		if (f == ROUTE4_FAILURE)
			goto failure;

		*res = f->res;
		return 0;
	}

	h = route4_hash_to(id);

restart:
	if ((b = head->table[h]) != NULL) {
		for (f = b->ht[route4_hash_from(id)]; f; f = f->next)
			if (f->id == id)
				ROUTE4_APPLY_RESULT();

		for (f = b->ht[route4_hash_iif(iif)]; f; f = f->next)
			if (f->iif == iif)
				ROUTE4_APPLY_RESULT();

		for (f = b->ht[route4_hash_wild()]; f; f = f->next)
			ROUTE4_APPLY_RESULT();
	}
	if (h < 256) {
		h = 256;
		id &= ~0xFFFF;
		goto restart;
	}

	if (!dont_cache)
		route4_set_fastmap(head, id, iif, ROUTE4_FAILURE);
failure:
	return -1;

old_method:
	if (id && (TC_H_MAJ(id) == 0 ||
		   !(TC_H_MAJ(id^tp->q->handle)))) {
		res->classid = id;
		res->class = 0;
		return 0;
	}
	return -1;
}
static inline u32 to_hash(u32 id)
{
	u32 h = id & 0xFF;
	if (id & 0x8000)
		h += 256;
	return h;
}
static inline u32 from_hash(u32 id)
{
	id &= 0xFFFF;
	if (id == 0xFFFF)
		return 32;
	if (!(id & 0x8000)) {
		if (id > 255)
			return 256;
		return id & 0xF;
	}
	return 16 + (id & 0xF);
}
static unsigned long route4_get(struct tcf_proto *tp, u32 handle)
{
	struct route4_head *head = (struct route4_head*)tp->root;
	struct route4_bucket *b;
	struct route4_filter *f;
	unsigned h1, h2;

	if (!head)
		return 0;

	h1 = to_hash(handle);
	if (h1 > 256)
		return 0;

	h2 = from_hash(handle >> 16);
	if (h2 > 32)
		return 0;

	if ((b = head->table[h1]) != NULL) {
		for (f = b->ht[h2]; f; f = f->next)
			if (f->handle == handle)
				return (unsigned long)f;
	}
	return 0;
}
static void route4_put(struct tcf_proto *tp, unsigned long f)
{
}
static int route4_init(struct tcf_proto *tp)
{
	return 0;
}
static inline void
route4_delete_filter(struct tcf_proto *tp, struct route4_filter *f)
{
	tcf_unbind_filter(tp, &f->res);
	tcf_exts_destroy(tp, &f->exts);
	kfree(f);
}
static void route4_destroy(struct tcf_proto *tp)
{
	struct route4_head *head = xchg(&tp->root, NULL);
	int h1, h2;

	if (head == NULL)
		return;

	for (h1 = 0; h1 <= 256; h1++) {
		struct route4_bucket *b;

		if ((b = head->table[h1]) != NULL) {
			for (h2 = 0; h2 <= 32; h2++) {
				struct route4_filter *f;

				while ((f = b->ht[h2]) != NULL) {
					b->ht[h2] = f->next;
					route4_delete_filter(tp, f);
				}
			}
			kfree(b);
		}
	}
	kfree(head);
}
static int route4_delete(struct tcf_proto *tp, unsigned long arg)
{
	struct route4_head *head = (struct route4_head*)tp->root;
	struct route4_filter **fp, *f = (struct route4_filter*)arg;
	unsigned h = 0;
	struct route4_bucket *b;
	int i;

	if (!head || !f)
		return -EINVAL;

	h = f->handle;
	b = f->bkt;

	for (fp = &b->ht[from_hash(h>>16)]; *fp; fp = &(*fp)->next) {
		if (*fp == f) {
			tcf_tree_lock(tp);
			*fp = f->next;
			tcf_tree_unlock(tp);

			route4_reset_fastmap(tp->q->dev, head, f->id);
			route4_delete_filter(tp, f);

			/* Strip tree */
			for (i = 0; i <= 32; i++)
				if (b->ht[i])
					return 0;

			/* OK, session has no flows */
			tcf_tree_lock(tp);
			head->table[to_hash(h)] = NULL;
			tcf_tree_unlock(tp);

			kfree(b);
			return 0;
		}
	}
	return 0;
}
static int route4_set_parms(struct tcf_proto *tp, unsigned long base,
	struct route4_filter *f, u32 handle, struct route4_head *head,
	struct rtattr **tb, struct rtattr *est, int new)
{
	int err;
	u32 id = 0, to = 0, nhandle = 0x8000;
	struct route4_filter *fp;
	unsigned int h1;
	struct route4_bucket *b;
	struct tcf_exts e;

	err = tcf_exts_validate(tp, tb, est, &e, &route_ext_map);
	if (err < 0)
		return err;

	err = -EINVAL;
	if (tb[TCA_ROUTE4_CLASSID-1])
		if (RTA_PAYLOAD(tb[TCA_ROUTE4_CLASSID-1]) < sizeof(u32))
			goto errout;

	if (tb[TCA_ROUTE4_TO-1]) {
		if (new && handle & 0x8000)
			goto errout;
		if (RTA_PAYLOAD(tb[TCA_ROUTE4_TO-1]) < sizeof(u32))
			goto errout;
		to = *(u32*)RTA_DATA(tb[TCA_ROUTE4_TO-1]);
		if (to > 0xFF)
			goto errout;
		nhandle = to;
	}

	if (tb[TCA_ROUTE4_FROM-1]) {
		if (tb[TCA_ROUTE4_IIF-1])
			goto errout;
		if (RTA_PAYLOAD(tb[TCA_ROUTE4_FROM-1]) < sizeof(u32))
			goto errout;
		id = *(u32*)RTA_DATA(tb[TCA_ROUTE4_FROM-1]);
		if (id > 0xFF)
			goto errout;
		nhandle |= id << 16;
	} else if (tb[TCA_ROUTE4_IIF-1]) {
		if (RTA_PAYLOAD(tb[TCA_ROUTE4_IIF-1]) < sizeof(u32))
			goto errout;
		id = *(u32*)RTA_DATA(tb[TCA_ROUTE4_IIF-1]);
		if (id > 0x7FFF)
			goto errout;
		nhandle |= (id | 0x8000) << 16;
	} else
		nhandle |= 0xFFFF << 16;

	if (handle && new) {
		nhandle |= handle & 0x7F00;
		if (nhandle != handle)
			goto errout;
	}

	h1 = to_hash(nhandle);
	if ((b = head->table[h1]) == NULL) {
		err = -ENOBUFS;
		b = kzalloc(sizeof(struct route4_bucket), GFP_KERNEL);
		if (b == NULL)
			goto errout;

		tcf_tree_lock(tp);
		head->table[h1] = b;
		tcf_tree_unlock(tp);
	} else {
		unsigned int h2 = from_hash(nhandle >> 16);
		err = -EEXIST;
		for (fp = b->ht[h2]; fp; fp = fp->next)
			if (fp->handle == f->handle)
				goto errout;
	}

	tcf_tree_lock(tp);
	if (tb[TCA_ROUTE4_TO-1])
		f->id = to;

	if (tb[TCA_ROUTE4_FROM-1])
		f->id = to | id<<16;
	else if (tb[TCA_ROUTE4_IIF-1])
		f->iif = id;

	f->handle = nhandle;
	f->bkt = b;
	tcf_tree_unlock(tp);

	if (tb[TCA_ROUTE4_CLASSID-1]) {
		f->res.classid = *(u32*)RTA_DATA(tb[TCA_ROUTE4_CLASSID-1]);
		tcf_bind_filter(tp, &f->res, base);
	}

	tcf_exts_change(tp, &f->exts, &e);

	return 0;
errout:
	tcf_exts_destroy(tp, &e);
	return err;
}
static int route4_change(struct tcf_proto *tp, unsigned long base,
			 u32 handle, struct rtattr **tca,
			 unsigned long *arg)
{
	struct route4_head *head = tp->root;
	struct route4_filter *f, *f1, **fp;
	struct route4_bucket *b;
	struct rtattr *opt = tca[TCA_OPTIONS-1];
	struct rtattr *tb[TCA_ROUTE4_MAX];
	unsigned int h, th;
	u32 old_handle = 0;
	int err;

	if (opt == NULL)
		return handle ? -EINVAL : 0;

	if (rtattr_parse_nested(tb, TCA_ROUTE4_MAX, opt) < 0)
		return -EINVAL;

	if ((f = (struct route4_filter*)*arg) != NULL) {
		if (f->handle != handle && handle)
			return -EINVAL;

		if (f->bkt)
			old_handle = f->handle;

		err = route4_set_parms(tp, base, f, handle, head, tb,
				       tca[TCA_RATE-1], 0);
		if (err < 0)
			return err;

		goto reinsert;
	}

	err = -ENOBUFS;
	if (head == NULL) {
		head = kzalloc(sizeof(struct route4_head), GFP_KERNEL);
		if (head == NULL)
			goto errout;

		tcf_tree_lock(tp);
		tp->root = head;
		tcf_tree_unlock(tp);
	}

	f = kzalloc(sizeof(struct route4_filter), GFP_KERNEL);
	if (f == NULL)
		goto errout;

	err = route4_set_parms(tp, base, f, handle, head, tb,
			       tca[TCA_RATE-1], 1);
	if (err < 0)
		goto errout;

reinsert:
	h = from_hash(f->handle >> 16);
	for (fp = &f->bkt->ht[h]; (f1 = *fp) != NULL; fp = &f1->next)
		if (f->handle < f1->handle)
			break;

	f->next = f1;
	tcf_tree_lock(tp);
	*fp = f;

	if (old_handle && f->handle != old_handle) {
		th = to_hash(old_handle);
		h = from_hash(old_handle >> 16);
		if ((b = head->table[th]) != NULL) {
			for (fp = &b->ht[h]; *fp; fp = &(*fp)->next) {
				if (*fp == f) {
					*fp = f->next;
					break;
				}
			}
		}
	}
	tcf_tree_unlock(tp);

	route4_reset_fastmap(tp->q->dev, head, f->id);
	*arg = (unsigned long)f;
	return 0;

errout:
	kfree(f);
	return err;
}
static void route4_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
	struct route4_head *head = tp->root;
	unsigned h, h1;

	if (head == NULL)
		arg->stop = 1;

	if (arg->stop)
		return;

	for (h = 0; h <= 256; h++) {
		struct route4_bucket *b = head->table[h];

		if (b) {
			for (h1 = 0; h1 <= 32; h1++) {
				struct route4_filter *f;

				for (f = b->ht[h1]; f; f = f->next) {
					if (arg->count < arg->skip) {
						arg->count++;
						continue;
					}
					if (arg->fn(tp, (unsigned long)f, arg) < 0) {
						arg->stop = 1;
						return;
					}
					arg->count++;
				}
			}
		}
	}
}
static int route4_dump(struct tcf_proto *tp, unsigned long fh,
		       struct sk_buff *skb, struct tcmsg *t)
{
	struct route4_filter *f = (struct route4_filter*)fh;
	unsigned char *b = skb->tail;
	struct rtattr *rta;
	u32 id;

	if (f == NULL)
		return skb->len;

	t->tcm_handle = f->handle;

	rta = (struct rtattr*)b;
	RTA_PUT(skb, TCA_OPTIONS, 0, NULL);

	if (!(f->handle&0x8000)) {
		id = f->id & 0xFF;
		RTA_PUT(skb, TCA_ROUTE4_TO, sizeof(id), &id);
	}
	if (f->handle&0x80000000) {
		if ((f->handle>>16) != 0xFFFF)
			RTA_PUT(skb, TCA_ROUTE4_IIF, sizeof(f->iif), &f->iif);
	} else {
		id = f->id >> 16;
		RTA_PUT(skb, TCA_ROUTE4_FROM, sizeof(id), &id);
	}
	if (f->res.classid)
		RTA_PUT(skb, TCA_ROUTE4_CLASSID, 4, &f->res.classid);

	if (tcf_exts_dump(skb, &f->exts, &route_ext_map) < 0)
		goto rtattr_failure;

	rta->rta_len = skb->tail - b;

	if (tcf_exts_dump_stats(skb, &f->exts, &route_ext_map) < 0)
		goto rtattr_failure;

	return skb->len;

rtattr_failure:
	skb_trim(skb, b - skb->data);
	return -1;
}
static struct tcf_proto_ops cls_route4_ops = {
	.next		=	NULL,
	.kind		=	"route",
	.classify	=	route4_classify,
	.init		=	route4_init,
	.destroy	=	route4_destroy,
	.get		=	route4_get,
	.put		=	route4_put,
	.change		=	route4_change,
	.delete		=	route4_delete,
	.walk		=	route4_walk,
	.dump		=	route4_dump,
	.owner		=	THIS_MODULE,
};
static int __init init_route4(void)
{
	return register_tcf_proto_ops(&cls_route4_ops);
}

static void __exit exit_route4(void)
{
	unregister_tcf_proto_ops(&cls_route4_ops);
}
module_init(init_route4)
module_exit(exit_route4)
MODULE_LICENSE("GPL");