/*
 * net/sched/cls_route.c	ROUTE4 classifier.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/route.h>
#include <net/netlink.h>
#include <net/act_api.h>
#include <net/pkt_cls.h>
/*
 * 1. For now we assume that route tags < 256.
 *    This allows direct table lookups instead of hash tables.
 * 2. For now we assume that "from TAG" and "fromdev DEV" statements
 *    are mutually exclusive.
 * 3. "to TAG from ANY" has higher priority than "to ANY from XXX".
 */
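/*
 * Reader's note (not part of the original comments): the 32-bit filter
 * handle layout, as reconstructed from the hash helpers and
 * route4_set_parms() below:
 *
 *   bits  0..7   "to" realm (route tag), valid when bit 15 is clear
 *   bits  8..14  optional user-supplied handle bits (handle & 0x7F00)
 *   bit   15     set when no "to" realm was given (wildcard "to")
 *   bits 16..31  "from" realm, or (iif | 0x8000) for "fromdev",
 *                or 0xFFFF when neither was given
 *
 * Bit 31 therefore distinguishes "fromdev DEV" (set) from "from TAG"
 * (clear), which is what route4_dump() tests with handle & 0x80000000.
 */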
struct route4_fastmap
{
	struct route4_filter	*filter;
	u32			id;
	int			iif;
};

struct route4_head
{
	struct route4_fastmap	fastmap[16];
	struct route4_bucket	*table[256+1];
};

struct route4_bucket
{
	/* 16 FROM buckets + 16 IIF buckets + 1 wildcard bucket */
	struct route4_filter	*ht[16+16+1];
};

struct route4_filter
{
	struct route4_filter	*next;
	u32			id;
	int			iif;

	struct tcf_result	res;
	struct tcf_exts		exts;
	u32			handle;
	struct route4_bucket	*bkt;
};
#define ROUTE4_FAILURE ((struct route4_filter *)(-1L))
static const struct tcf_ext_map route_ext_map = {
	.police = TCA_ROUTE4_POLICE,
	.action = TCA_ROUTE4_ACT
};
static __inline__ int route4_fastmap_hash(u32 id, int iif)
{
	return id & 0xF;
}
static inline
void route4_reset_fastmap(struct Qdisc *q, struct route4_head *head, u32 id)
{
	spinlock_t *root_lock = qdisc_root_sleeping_lock(q);

	spin_lock_bh(root_lock);
	memset(head->fastmap, 0, sizeof(head->fastmap));
	spin_unlock_bh(root_lock);
}
static inline void
route4_set_fastmap(struct route4_head *head, u32 id, int iif,
		   struct route4_filter *f)
{
	int h = route4_fastmap_hash(id, iif);

	head->fastmap[h].id = id;
	head->fastmap[h].iif = iif;
	head->fastmap[h].filter = f;
}
static __inline__ int route4_hash_to(u32 id)
{
	return id & 0xFF;
}
static __inline__ int route4_hash_from(u32 id)
{
	return (id >> 16) & 0xF;
}
static __inline__ int route4_hash_iif(int iif)
{
	return 16 + ((iif >> 16) & 0xF);
}
static __inline__ int route4_hash_wild(void)
{
	return 32;
}
#define ROUTE4_APPLY_RESULT()					\
{								\
	*res = f->res;						\
	if (tcf_exts_is_available(&f->exts)) {			\
		int r = tcf_exts_exec(skb, &f->exts, res);	\
		if (r < 0) {					\
			dont_cache = 1;				\
			continue;				\
		}						\
		return r;					\
	} else if (!dont_cache)					\
		route4_set_fastmap(head, id, iif, f);		\
	return 0;						\
}
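/*
 * Reader's note: ROUTE4_APPLY_RESULT() is expanded inside the three
 * lookup loops of route4_classify() below. The loops run in priority
 * order: "from realm" matches first, then "fromdev" (iif) matches,
 * then the wildcard bucket, which implements design note 3 above. A
 * failed extension (r < 0) sets dont_cache so a partial walk is never
 * cached in the fastmap.
 */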
static int route4_classify(struct sk_buff *skb, struct tcf_proto *tp,
			   struct tcf_result *res)
{
	struct route4_head *head = (struct route4_head *)tp->root;
	struct dst_entry *dst;
	struct route4_bucket *b;
	struct route4_filter *f;
	u32 id, h;
	int iif, dont_cache = 0;

	if ((dst = skb_dst(skb)) == NULL)
		goto failure;

	id = dst->tclassid;
	if (head == NULL)
		goto old_method;

	iif = ((struct rtable *)dst)->fl.iif;

	h = route4_fastmap_hash(id, iif);
	if (id == head->fastmap[h].id &&
	    iif == head->fastmap[h].iif &&
	    (f = head->fastmap[h].filter) != NULL) {
		if (f == ROUTE4_FAILURE)
			goto failure;

		*res = f->res;
		return 0;
	}

	h = route4_hash_to(id);

restart:
	if ((b = head->table[h]) != NULL) {
		for (f = b->ht[route4_hash_from(id)]; f; f = f->next)
			if (f->id == id)
				ROUTE4_APPLY_RESULT();

		for (f = b->ht[route4_hash_iif(iif)]; f; f = f->next)
			if (f->iif == iif)
				ROUTE4_APPLY_RESULT();

		for (f = b->ht[route4_hash_wild()]; f; f = f->next)
			ROUTE4_APPLY_RESULT();
	}
	if (h < 256) {
		h = 256;
		id &= ~0xFFFF;
		goto restart;
	}

	if (!dont_cache)
		route4_set_fastmap(head, id, iif, ROUTE4_FAILURE);
failure:
	return -1;

old_method:
	if (id && (TC_H_MAJ(id) == 0 ||
		   !(TC_H_MAJ(id ^ tp->q->handle)))) {
		res->classid = id;
		res->class = 0;
		return 0;
	}
	return -1;
}
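/*
 * Illustration only (not from the original source): with routing
 * realms configured, filters of this classifier are typically set up
 * from userspace along these lines. Addresses, device and class names
 * here are made up, and exact tc syntax may differ between iproute2
 * versions:
 *
 *   ip route add 10.0.0.0/8 via 192.168.0.1 realm 10
 *   tc filter add dev eth0 parent 1:0 protocol ip prio 100 \
 *           route from 10 classid 1:10
 *   tc filter add dev eth0 parent 1:0 protocol ip prio 100 \
 *           route to 2 classid 1:2
 */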
static inline u32 to_hash(u32 id)
{
	u32 h = id & 0xFF;

	if (id & 0x8000)
		h += 256;
	return h;
}
static inline u32 from_hash(u32 id)
{
	id &= 0xFFFF;
	if (id == 0xFFFF)
		return 32;

	if (!(id & 0x8000)) {
		if (id > 255)
			return 256;
		return id & 0xF;
	}
	return 16 + (id & 0xF);
}
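/*
 * Reader's note: to_hash() picks one of the 257 buckets in
 * route4_head.table (0..255 for explicit "to" realms, 256 for the
 * wildcard "to"), while from_hash() picks one of the 33 chains in
 * route4_bucket.ht (0..15 for "from" realms, 16..31 for interfaces,
 * 32 for the wildcard). The out-of-range values they return for
 * malformed handles are rejected by the h1 > 256 / h2 > 32 checks in
 * route4_get() below.
 */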
static unsigned long route4_get(struct tcf_proto *tp, u32 handle)
{
	struct route4_head *head = (struct route4_head *)tp->root;
	struct route4_bucket *b;
	struct route4_filter *f;
	unsigned int h1, h2;

	if (!head)
		return 0;

	h1 = to_hash(handle);
	if (h1 > 256)
		return 0;

	h2 = from_hash(handle >> 16);
	if (h2 > 32)
		return 0;

	if ((b = head->table[h1]) != NULL) {
		for (f = b->ht[h2]; f; f = f->next)
			if (f->handle == handle)
				return (unsigned long)f;
	}
	return 0;
}
static void route4_put(struct tcf_proto *tp, unsigned long f)
{
}

static int route4_init(struct tcf_proto *tp)
{
	return 0;
}
static inline void
route4_delete_filter(struct tcf_proto *tp, struct route4_filter *f)
{
	tcf_unbind_filter(tp, &f->res);
	tcf_exts_destroy(tp, &f->exts);
	kfree(f);
}
static void route4_destroy(struct tcf_proto *tp)
{
	struct route4_head *head = tp->root;
	int h1, h2;

	if (head == NULL)
		return;

	for (h1 = 0; h1 <= 256; h1++) {
		struct route4_bucket *b;

		if ((b = head->table[h1]) != NULL) {
			for (h2 = 0; h2 <= 32; h2++) {
				struct route4_filter *f;

				while ((f = b->ht[h2]) != NULL) {
					b->ht[h2] = f->next;
					route4_delete_filter(tp, f);
				}
			}
			kfree(b);
		}
	}
	kfree(head);
}
static int route4_delete(struct tcf_proto *tp, unsigned long arg)
{
	struct route4_head *head = (struct route4_head *)tp->root;
	struct route4_filter **fp, *f = (struct route4_filter *)arg;
	unsigned int h = 0;
	struct route4_bucket *b;
	int i;

	if (!head || !f)
		return -EINVAL;

	h = f->handle;
	b = f->bkt;

	for (fp = &b->ht[from_hash(h >> 16)]; *fp; fp = &(*fp)->next) {
		if (*fp == f) {
			tcf_tree_lock(tp);
			*fp = f->next;
			tcf_tree_unlock(tp);

			route4_reset_fastmap(tp->q, head, f->id);
			route4_delete_filter(tp, f);

			/* Strip tree */
			for (i = 0; i <= 32; i++)
				if (b->ht[i])
					return 0;

			/* OK, session has no flows */
			tcf_tree_lock(tp);
			head->table[to_hash(h)] = NULL;
			tcf_tree_unlock(tp);

			kfree(b);
			return 0;
		}
	}
	return 0;
}
static const struct nla_policy route4_policy[TCA_ROUTE4_MAX + 1] = {
	[TCA_ROUTE4_CLASSID]	= { .type = NLA_U32 },
	[TCA_ROUTE4_TO]		= { .type = NLA_U32 },
	[TCA_ROUTE4_FROM]	= { .type = NLA_U32 },
	[TCA_ROUTE4_IIF]	= { .type = NLA_U32 },
};
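/*
 * Reader's note: the netlink policy above only enforces that each
 * attribute is a u32; the value ranges (realm <= 0xFF, iif <= 0x7FFF)
 * are checked in route4_set_parms() below.
 */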
static int route4_set_parms(struct tcf_proto *tp, unsigned long base,
	struct route4_filter *f, u32 handle, struct route4_head *head,
	struct nlattr **tb, struct nlattr *est, int new)
{
	int err;
	u32 id = 0, to = 0, nhandle = 0x8000;
	struct route4_filter *fp;
	unsigned int h1;
	struct route4_bucket *b;
	struct tcf_exts e;

	err = tcf_exts_validate(tp, tb, est, &e, &route_ext_map);
	if (err < 0)
		return err;

	err = -EINVAL;
	if (tb[TCA_ROUTE4_TO]) {
		if (new && handle & 0x8000)
			goto errout;
		to = nla_get_u32(tb[TCA_ROUTE4_TO]);
		if (to > 0xFF)
			goto errout;
		nhandle = to;
	}

	if (tb[TCA_ROUTE4_FROM]) {
		if (tb[TCA_ROUTE4_IIF])
			goto errout;
		id = nla_get_u32(tb[TCA_ROUTE4_FROM]);
		if (id > 0xFF)
			goto errout;
		nhandle |= id << 16;
	} else if (tb[TCA_ROUTE4_IIF]) {
		id = nla_get_u32(tb[TCA_ROUTE4_IIF]);
		if (id > 0x7FFF)
			goto errout;
		nhandle |= (id | 0x8000) << 16;
	} else
		nhandle |= 0xFFFF << 16;

	if (handle && new) {
		nhandle |= handle & 0x7F00;
		if (nhandle != handle)
			goto errout;
	}

	h1 = to_hash(nhandle);
	if ((b = head->table[h1]) == NULL) {
		err = -ENOBUFS;
		b = kzalloc(sizeof(struct route4_bucket), GFP_KERNEL);
		if (b == NULL)
			goto errout;

		tcf_tree_lock(tp);
		head->table[h1] = b;
		tcf_tree_unlock(tp);
	} else {
		unsigned int h2 = from_hash(nhandle >> 16);

		err = -EEXIST;
		for (fp = b->ht[h2]; fp; fp = fp->next)
			if (fp->handle == f->handle)
				goto errout;
	}

	tcf_tree_lock(tp);
	if (tb[TCA_ROUTE4_TO])
		f->id = to;

	if (tb[TCA_ROUTE4_FROM])
		f->id = to | id << 16;
	else if (tb[TCA_ROUTE4_IIF])
		f->iif = id;

	f->handle = nhandle;
	f->bkt = b;
	tcf_tree_unlock(tp);

	if (tb[TCA_ROUTE4_CLASSID]) {
		f->res.classid = nla_get_u32(tb[TCA_ROUTE4_CLASSID]);
		tcf_bind_filter(tp, &f->res, base);
	}

	tcf_exts_change(tp, &f->exts, &e);

	return 0;
errout:
	tcf_exts_destroy(tp, &e);
	return err;
}
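/*
 * Worked example (reader's note): "route to 2 from 10" yields
 * nhandle = 2 | (10 << 16) = 0x000a0002, while a "fromdev" filter on
 * iif 3 with no "to" yields nhandle = 0x8000 | ((3 | 0x8000) << 16)
 * = 0x80038000. Both land in head->table[to_hash(nhandle)] and the
 * bucket chain ht[from_hash(nhandle >> 16)].
 */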
static int route4_change(struct tcf_proto *tp, unsigned long base,
		       u32 handle,
		       struct nlattr **tca,
		       unsigned long *arg)
{
	struct route4_head *head = tp->root;
	struct route4_filter *f, *f1, **fp;
	struct route4_bucket *b;
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_ROUTE4_MAX + 1];
	unsigned int h, th;
	u32 old_handle = 0;
	int err;

	if (opt == NULL)
		return handle ? -EINVAL : 0;

	err = nla_parse_nested(tb, TCA_ROUTE4_MAX, opt, route4_policy);
	if (err < 0)
		return err;

	if ((f = (struct route4_filter *)*arg) != NULL) {
		if (f->handle != handle && handle)
			return -EINVAL;

		if (f->bkt)
			old_handle = f->handle;

		err = route4_set_parms(tp, base, f, handle, head, tb,
			tca[TCA_RATE], 0);
		if (err < 0)
			return err;

		goto reinsert;
	}

	err = -ENOBUFS;
	if (head == NULL) {
		head = kzalloc(sizeof(struct route4_head), GFP_KERNEL);
		if (head == NULL)
			goto errout;

		tcf_tree_lock(tp);
		tp->root = head;
		tcf_tree_unlock(tp);
	}

	f = kzalloc(sizeof(struct route4_filter), GFP_KERNEL);
	if (f == NULL)
		goto errout;

	err = route4_set_parms(tp, base, f, handle, head, tb,
		tca[TCA_RATE], 1);
	if (err < 0)
		goto errout;

reinsert:
	h = from_hash(f->handle >> 16);
	for (fp = &f->bkt->ht[h]; (f1 = *fp) != NULL; fp = &f1->next)
		if (f->handle < f1->handle)
			break;

	f->next = f1;
	tcf_tree_lock(tp);
	*fp = f;

	if (old_handle && f->handle != old_handle) {
		th = to_hash(old_handle);
		h = from_hash(old_handle >> 16);
		if ((b = head->table[th]) != NULL) {
			for (fp = &b->ht[h]; *fp; fp = &(*fp)->next) {
				if (*fp == f) {
					*fp = f->next;
					break;
				}
			}
		}
	}
	tcf_tree_unlock(tp);

	route4_reset_fastmap(tp->q, head, f->id);
	*arg = (unsigned long)f;
	return 0;

errout:
	kfree(f);
	return err;
}
static void route4_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
	struct route4_head *head = tp->root;
	unsigned int h, h1;

	if (head == NULL)
		arg->stop = 1;

	if (arg->stop)
		return;

	for (h = 0; h <= 256; h++) {
		struct route4_bucket *b = head->table[h];

		if (b) {
			for (h1 = 0; h1 <= 32; h1++) {
				struct route4_filter *f;

				for (f = b->ht[h1]; f; f = f->next) {
					if (arg->count < arg->skip) {
						arg->count++;
						continue;
					}
					if (arg->fn(tp, (unsigned long)f, arg) < 0) {
						arg->stop = 1;
						return;
					}
					arg->count++;
				}
			}
		}
	}
}
static int route4_dump(struct tcf_proto *tp, unsigned long fh,
		       struct sk_buff *skb, struct tcmsg *t)
{
	struct route4_filter *f = (struct route4_filter *)fh;
	unsigned char *b = skb_tail_pointer(skb);
	struct nlattr *nest;
	u32 id;

	if (f == NULL)
		return skb->len;

	t->tcm_handle = f->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (!(f->handle & 0x8000)) {
		id = f->id & 0xFF;
		NLA_PUT_U32(skb, TCA_ROUTE4_TO, id);
	}
	if (f->handle & 0x80000000) {
		if ((f->handle >> 16) != 0xFFFF)
			NLA_PUT_U32(skb, TCA_ROUTE4_IIF, f->iif);
	} else {
		id = f->id >> 16;
		NLA_PUT_U32(skb, TCA_ROUTE4_FROM, id);
	}
	if (f->res.classid)
		NLA_PUT_U32(skb, TCA_ROUTE4_CLASSID, f->res.classid);

	if (tcf_exts_dump(skb, &f->exts, &route_ext_map) < 0)
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &f->exts, &route_ext_map) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}
static struct tcf_proto_ops cls_route4_ops __read_mostly = {
	.kind		=	"route",
	.classify	=	route4_classify,
	.init		=	route4_init,
	.destroy	=	route4_destroy,
	.get		=	route4_get,
	.put		=	route4_put,
	.change		=	route4_change,
	.delete		=	route4_delete,
	.walk		=	route4_walk,
	.dump		=	route4_dump,
	.owner		=	THIS_MODULE,
};
static int __init init_route4(void)
{
	return register_tcf_proto_ops(&cls_route4_ops);
}

static void __exit exit_route4(void)
{
	unregister_tcf_proto_ops(&cls_route4_ops);
}
module_init(init_route4)
module_exit(exit_route4)
MODULE_LICENSE("GPL");