2 * net/sched/cls_route.c ROUTE4 classifier.
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
9 * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
12 #include <linux/module.h>
13 #include <asm/uaccess.h>
14 #include <asm/system.h>
15 #include <linux/bitops.h>
16 #include <linux/types.h>
17 #include <linux/kernel.h>
18 #include <linux/string.h>
20 #include <linux/socket.h>
21 #include <linux/sockios.h>
23 #include <linux/errno.h>
24 #include <linux/interrupt.h>
25 #include <linux/if_ether.h>
26 #include <linux/inet.h>
27 #include <linux/netdevice.h>
28 #include <linux/etherdevice.h>
29 #include <linux/notifier.h>
31 #include <net/route.h>
32 #include <linux/skbuff.h>
34 #include <net/act_api.h>
35 #include <net/pkt_cls.h>
38 1. For now we assume that route tags < 256.
39 This allows the use of direct table lookups instead of hash tables.
40 2. For now we assume that "from TAG" and "fromdev DEV" statements
41 are mutually exclusive.
42 3. "to TAG from ANY" has higher priority than "to ANY from XXX"
47 struct route4_filter
*filter
;
54 struct route4_fastmap fastmap
[16];
55 struct route4_bucket
*table
[256+1];
60 /* 16 FROM buckets + 16 IIF buckets + 1 wildcard bucket */
61 struct route4_filter
*ht
[16+16+1];
66 struct route4_filter
*next
;
70 struct tcf_result res
;
73 struct route4_bucket
*bkt
;
76 #define ROUTE4_FAILURE ((struct route4_filter*)(-1L))
78 static struct tcf_ext_map route_ext_map
= {
79 .police
= TCA_ROUTE4_POLICE
,
80 .action
= TCA_ROUTE4_ACT
83 static __inline__
int route4_fastmap_hash(u32 id
, int iif
)
89 void route4_reset_fastmap(struct net_device
*dev
, struct route4_head
*head
, u32 id
)
91 spin_lock_bh(&dev
->queue_lock
);
92 memset(head
->fastmap
, 0, sizeof(head
->fastmap
));
93 spin_unlock_bh(&dev
->queue_lock
);
96 static void __inline__
97 route4_set_fastmap(struct route4_head
*head
, u32 id
, int iif
,
98 struct route4_filter
*f
)
100 int h
= route4_fastmap_hash(id
, iif
);
101 head
->fastmap
[h
].id
= id
;
102 head
->fastmap
[h
].iif
= iif
;
103 head
->fastmap
[h
].filter
= f
;
106 static __inline__
int route4_hash_to(u32 id
)
111 static __inline__
int route4_hash_from(u32 id
)
116 static __inline__
int route4_hash_iif(int iif
)
118 return 16 + ((iif
>>16)&0xF);
121 static __inline__
int route4_hash_wild(void)
126 #define ROUTE4_APPLY_RESULT() \
129 if (tcf_exts_is_available(&f->exts)) { \
130 int r = tcf_exts_exec(skb, &f->exts, res); \
136 } else if (!dont_cache) \
137 route4_set_fastmap(head, id, iif, f); \
141 static int route4_classify(struct sk_buff
*skb
, struct tcf_proto
*tp
,
142 struct tcf_result
*res
)
144 struct route4_head
*head
= (struct route4_head
*)tp
->root
;
145 struct dst_entry
*dst
;
146 struct route4_bucket
*b
;
147 struct route4_filter
*f
;
149 int iif
, dont_cache
= 0;
151 if ((dst
= skb
->dst
) == NULL
)
158 iif
= ((struct rtable
*)dst
)->fl
.iif
;
160 h
= route4_fastmap_hash(id
, iif
);
161 if (id
== head
->fastmap
[h
].id
&&
162 iif
== head
->fastmap
[h
].iif
&&
163 (f
= head
->fastmap
[h
].filter
) != NULL
) {
164 if (f
== ROUTE4_FAILURE
)
171 h
= route4_hash_to(id
);
174 if ((b
= head
->table
[h
]) != NULL
) {
175 for (f
= b
->ht
[route4_hash_from(id
)]; f
; f
= f
->next
)
177 ROUTE4_APPLY_RESULT();
179 for (f
= b
->ht
[route4_hash_iif(iif
)]; f
; f
= f
->next
)
181 ROUTE4_APPLY_RESULT();
183 for (f
= b
->ht
[route4_hash_wild()]; f
; f
= f
->next
)
184 ROUTE4_APPLY_RESULT();
194 route4_set_fastmap(head
, id
, iif
, ROUTE4_FAILURE
);
199 if (id
&& (TC_H_MAJ(id
) == 0 ||
200 !(TC_H_MAJ(id
^tp
->q
->handle
)))) {
208 static inline u32
to_hash(u32 id
)
216 static inline u32
from_hash(u32 id
)
221 if (!(id
& 0x8000)) {
226 return 16 + (id
&0xF);
229 static unsigned long route4_get(struct tcf_proto
*tp
, u32 handle
)
231 struct route4_head
*head
= (struct route4_head
*)tp
->root
;
232 struct route4_bucket
*b
;
233 struct route4_filter
*f
;
239 h1
= to_hash(handle
);
243 h2
= from_hash(handle
>>16);
247 if ((b
= head
->table
[h1
]) != NULL
) {
248 for (f
= b
->ht
[h2
]; f
; f
= f
->next
)
249 if (f
->handle
== handle
)
250 return (unsigned long)f
;
255 static void route4_put(struct tcf_proto
*tp
, unsigned long f
)
259 static int route4_init(struct tcf_proto
*tp
)
265 route4_delete_filter(struct tcf_proto
*tp
, struct route4_filter
*f
)
267 tcf_unbind_filter(tp
, &f
->res
);
268 tcf_exts_destroy(tp
, &f
->exts
);
272 static void route4_destroy(struct tcf_proto
*tp
)
274 struct route4_head
*head
= xchg(&tp
->root
, NULL
);
280 for (h1
=0; h1
<=256; h1
++) {
281 struct route4_bucket
*b
;
283 if ((b
= head
->table
[h1
]) != NULL
) {
284 for (h2
=0; h2
<=32; h2
++) {
285 struct route4_filter
*f
;
287 while ((f
= b
->ht
[h2
]) != NULL
) {
289 route4_delete_filter(tp
, f
);
298 static int route4_delete(struct tcf_proto
*tp
, unsigned long arg
)
300 struct route4_head
*head
= (struct route4_head
*)tp
->root
;
301 struct route4_filter
**fp
, *f
= (struct route4_filter
*)arg
;
303 struct route4_bucket
*b
;
312 for (fp
= &b
->ht
[from_hash(h
>>16)]; *fp
; fp
= &(*fp
)->next
) {
318 route4_reset_fastmap(tp
->q
->dev
, head
, f
->id
);
319 route4_delete_filter(tp
, f
);
323 for (i
=0; i
<=32; i
++)
327 /* OK, session has no flows */
329 head
->table
[to_hash(h
)] = NULL
;
339 static int route4_set_parms(struct tcf_proto
*tp
, unsigned long base
,
340 struct route4_filter
*f
, u32 handle
, struct route4_head
*head
,
341 struct rtattr
**tb
, struct rtattr
*est
, int new)
344 u32 id
= 0, to
= 0, nhandle
= 0x8000;
345 struct route4_filter
*fp
;
347 struct route4_bucket
*b
;
350 err
= tcf_exts_validate(tp
, tb
, est
, &e
, &route_ext_map
);
355 if (tb
[TCA_ROUTE4_CLASSID
-1])
356 if (RTA_PAYLOAD(tb
[TCA_ROUTE4_CLASSID
-1]) < sizeof(u32
))
359 if (tb
[TCA_ROUTE4_TO
-1]) {
360 if (new && handle
& 0x8000)
362 if (RTA_PAYLOAD(tb
[TCA_ROUTE4_TO
-1]) < sizeof(u32
))
364 to
= *(u32
*)RTA_DATA(tb
[TCA_ROUTE4_TO
-1]);
370 if (tb
[TCA_ROUTE4_FROM
-1]) {
371 if (tb
[TCA_ROUTE4_IIF
-1])
373 if (RTA_PAYLOAD(tb
[TCA_ROUTE4_FROM
-1]) < sizeof(u32
))
375 id
= *(u32
*)RTA_DATA(tb
[TCA_ROUTE4_FROM
-1]);
379 } else if (tb
[TCA_ROUTE4_IIF
-1]) {
380 if (RTA_PAYLOAD(tb
[TCA_ROUTE4_IIF
-1]) < sizeof(u32
))
382 id
= *(u32
*)RTA_DATA(tb
[TCA_ROUTE4_IIF
-1]);
385 nhandle
|= (id
| 0x8000) << 16;
387 nhandle
|= 0xFFFF << 16;
390 nhandle
|= handle
& 0x7F00;
391 if (nhandle
!= handle
)
395 h1
= to_hash(nhandle
);
396 if ((b
= head
->table
[h1
]) == NULL
) {
398 b
= kzalloc(sizeof(struct route4_bucket
), GFP_KERNEL
);
406 unsigned int h2
= from_hash(nhandle
>> 16);
408 for (fp
= b
->ht
[h2
]; fp
; fp
= fp
->next
)
409 if (fp
->handle
== f
->handle
)
414 if (tb
[TCA_ROUTE4_TO
-1])
417 if (tb
[TCA_ROUTE4_FROM
-1])
419 else if (tb
[TCA_ROUTE4_IIF
-1])
426 if (tb
[TCA_ROUTE4_CLASSID
-1]) {
427 f
->res
.classid
= *(u32
*)RTA_DATA(tb
[TCA_ROUTE4_CLASSID
-1]);
428 tcf_bind_filter(tp
, &f
->res
, base
);
431 tcf_exts_change(tp
, &f
->exts
, &e
);
435 tcf_exts_destroy(tp
, &e
);
439 static int route4_change(struct tcf_proto
*tp
, unsigned long base
,
444 struct route4_head
*head
= tp
->root
;
445 struct route4_filter
*f
, *f1
, **fp
;
446 struct route4_bucket
*b
;
447 struct rtattr
*opt
= tca
[TCA_OPTIONS
-1];
448 struct rtattr
*tb
[TCA_ROUTE4_MAX
];
454 return handle
? -EINVAL
: 0;
456 if (rtattr_parse_nested(tb
, TCA_ROUTE4_MAX
, opt
) < 0)
459 if ((f
= (struct route4_filter
*)*arg
) != NULL
) {
460 if (f
->handle
!= handle
&& handle
)
464 old_handle
= f
->handle
;
466 err
= route4_set_parms(tp
, base
, f
, handle
, head
, tb
,
476 head
= kzalloc(sizeof(struct route4_head
), GFP_KERNEL
);
485 f
= kzalloc(sizeof(struct route4_filter
), GFP_KERNEL
);
489 err
= route4_set_parms(tp
, base
, f
, handle
, head
, tb
,
495 h
= from_hash(f
->handle
>> 16);
496 for (fp
= &f
->bkt
->ht
[h
]; (f1
=*fp
) != NULL
; fp
= &f1
->next
)
497 if (f
->handle
< f1
->handle
)
504 if (old_handle
&& f
->handle
!= old_handle
) {
505 th
= to_hash(old_handle
);
506 h
= from_hash(old_handle
>> 16);
507 if ((b
= head
->table
[th
]) != NULL
) {
508 for (fp
= &b
->ht
[h
]; *fp
; fp
= &(*fp
)->next
) {
518 route4_reset_fastmap(tp
->q
->dev
, head
, f
->id
);
519 *arg
= (unsigned long)f
;
527 static void route4_walk(struct tcf_proto
*tp
, struct tcf_walker
*arg
)
529 struct route4_head
*head
= tp
->root
;
538 for (h
= 0; h
<= 256; h
++) {
539 struct route4_bucket
*b
= head
->table
[h
];
542 for (h1
= 0; h1
<= 32; h1
++) {
543 struct route4_filter
*f
;
545 for (f
= b
->ht
[h1
]; f
; f
= f
->next
) {
546 if (arg
->count
< arg
->skip
) {
550 if (arg
->fn(tp
, (unsigned long)f
, arg
) < 0) {
561 static int route4_dump(struct tcf_proto
*tp
, unsigned long fh
,
562 struct sk_buff
*skb
, struct tcmsg
*t
)
564 struct route4_filter
*f
= (struct route4_filter
*)fh
;
565 unsigned char *b
= skb
->tail
;
572 t
->tcm_handle
= f
->handle
;
574 rta
= (struct rtattr
*)b
;
575 RTA_PUT(skb
, TCA_OPTIONS
, 0, NULL
);
577 if (!(f
->handle
&0x8000)) {
579 RTA_PUT(skb
, TCA_ROUTE4_TO
, sizeof(id
), &id
);
581 if (f
->handle
&0x80000000) {
582 if ((f
->handle
>>16) != 0xFFFF)
583 RTA_PUT(skb
, TCA_ROUTE4_IIF
, sizeof(f
->iif
), &f
->iif
);
586 RTA_PUT(skb
, TCA_ROUTE4_FROM
, sizeof(id
), &id
);
589 RTA_PUT(skb
, TCA_ROUTE4_CLASSID
, 4, &f
->res
.classid
);
591 if (tcf_exts_dump(skb
, &f
->exts
, &route_ext_map
) < 0)
594 rta
->rta_len
= skb
->tail
- b
;
596 if (tcf_exts_dump_stats(skb
, &f
->exts
, &route_ext_map
) < 0)
602 skb_trim(skb
, b
- skb
->data
);
606 static struct tcf_proto_ops cls_route4_ops
= {
609 .classify
= route4_classify
,
611 .destroy
= route4_destroy
,
614 .change
= route4_change
,
615 .delete = route4_delete
,
618 .owner
= THIS_MODULE
,
621 static int __init
init_route4(void)
623 return register_tcf_proto_ops(&cls_route4_ops
);
626 static void __exit
exit_route4(void)
628 unregister_tcf_proto_ops(&cls_route4_ops
);
631 module_init(init_route4
)
632 module_exit(exit_route4
)
633 MODULE_LICENSE("GPL");