/*
 * net/sched/cls_route.c	ROUTE4 classifier.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <net/dst.h>
#include <net/route.h>
#include <net/netlink.h>
#include <net/act_api.h>
#include <net/pkt_cls.h>
/*
 * 1. For now we assume that route tags < 256.
 *    It allows to use direct table lookups, instead of hash tables.
 * 2. For now we assume that "from TAG" and "fromdev DEV" statements
 *    are mutually exclusive.
 * 3. "to TAG from ANY" has higher priority, than "to ANY from XXX"
 */
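/*
 * For orientation, a typical (illustrative) configuration pairs routing
 * realms with this classifier; device, realm, and class numbers below
 * are examples, not values this file depends on:
 *
 *	ip route add 10.0.0.0/24 dev eth1 realm 2
 *	tc filter add dev eth0 parent 1:0 protocol ip prio 100 \
 *		route to 2 classid 1:2
 *
 * The "to"/"from" realms and "fromif" device tag map onto the
 * TCA_ROUTE4_* netlink attributes handled below.
 */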
struct route4_fastmap {
	struct route4_filter	*filter;
	u32			id;
	int			iif;
};
struct route4_head {
	struct route4_fastmap	fastmap[16];
	struct route4_bucket	*table[256 + 1];
};
struct route4_bucket {
	/* 16 FROM buckets + 16 IIF buckets + 1 wildcard bucket */
	struct route4_filter	*ht[16 + 16 + 1];
};
struct route4_filter {
	struct route4_filter	*next;
	u32			id;
	int			iif;

	struct tcf_result	res;
	struct tcf_exts		exts;
	u32			handle;
	struct route4_bucket	*bkt;
};
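/*
 * Every filter's 32-bit handle encodes its placement: the low 16 bits
 * select the TO bucket (to_hash() below), the high 16 bits select the
 * FROM/IIF chain inside it (from_hash()), so get/change/delete never
 * need to scan the whole table.
 */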
#define ROUTE4_FAILURE ((struct route4_filter *)(-1L))
static const struct tcf_ext_map route_ext_map = {
	.police = TCA_ROUTE4_POLICE,
	.action = TCA_ROUTE4_ACT
};
static inline int route4_fastmap_hash(u32 id, int iif)
{
	return id & 0xF;
}
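/*
 * The fastmap is a 16-slot, direct-mapped cache of the most recent
 * classification result per slot, including negative results via
 * ROUTE4_FAILURE. Any filter update must flush it, which
 * route4_reset_fastmap() does under the qdisc root lock.
 */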
static void
route4_reset_fastmap(struct Qdisc *q, struct route4_head *head, u32 id)
{
	spinlock_t *root_lock = qdisc_root_sleeping_lock(q);

	spin_lock_bh(root_lock);
	memset(head->fastmap, 0, sizeof(head->fastmap));
	spin_unlock_bh(root_lock);
}
static void
route4_set_fastmap(struct route4_head *head, u32 id, int iif,
		   struct route4_filter *f)
{
	int h = route4_fastmap_hash(id, iif);

	head->fastmap[h].id = id;
	head->fastmap[h].iif = iif;
	head->fastmap[h].filter = f;
}
static inline int route4_hash_to(u32 id)
{
	return id & 0xFF;
}

static inline int route4_hash_from(u32 id)
{
	return (id >> 16) & 0xF;
}

static inline int route4_hash_iif(int iif)
{
	return 16 + ((iif >> 16) & 0xF);
}

static inline int route4_hash_wild(void)
{
	return 32;
}
#define ROUTE4_APPLY_RESULT()					\
{								\
	*res = f->res;						\
	if (tcf_exts_is_available(&f->exts)) {			\
		int r = tcf_exts_exec(skb, &f->exts, res);	\
		if (r < 0) {					\
			dont_cache = 1;				\
			continue;				\
		}						\
		return r;					\
	} else if (!dont_cache)					\
		route4_set_fastmap(head, id, iif, f);		\
	return 0;						\
}
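/*
 * route4_classify() tries, in order: the fastmap, then the bucket for
 * the packet's TO realm (exact FROM match, then IIF match, then
 * wildcard), and finally bucket 256 for filters with no TO realm, which
 * implements note 3 above. A negative action verdict sets dont_cache so
 * the miss is not cached in the fastmap.
 */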
static int route4_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			   struct tcf_result *res)
{
	struct route4_head *head = (struct route4_head *)tp->root;
	struct dst_entry *dst;
	struct route4_bucket *b;
	struct route4_filter *f;
	u32 id, h;
	int iif, dont_cache = 0;

	dst = skb_dst(skb);
	if (!dst)
		goto failure;

	id = dst->tclassid;
	if (head == NULL)
		goto old_method;

	iif = inet_iif(skb);

	h = route4_fastmap_hash(id, iif);
	if (id == head->fastmap[h].id &&
	    iif == head->fastmap[h].iif &&
	    (f = head->fastmap[h].filter) != NULL) {
		if (f == ROUTE4_FAILURE)
			goto failure;

		*res = f->res;
		return 0;
	}

	h = route4_hash_to(id);

restart:
	b = head->table[h];
	if (b) {
		for (f = b->ht[route4_hash_from(id)]; f; f = f->next)
			if (f->id == id)
				ROUTE4_APPLY_RESULT();

		for (f = b->ht[route4_hash_iif(iif)]; f; f = f->next)
			if (f->iif == iif)
				ROUTE4_APPLY_RESULT();

		for (f = b->ht[route4_hash_wild()]; f; f = f->next)
			ROUTE4_APPLY_RESULT();
	}
	if (h < 256) {
		h = 256;
		id &= ~0xFFFF;
		goto restart;
	}

	if (!dont_cache)
		route4_set_fastmap(head, id, iif, ROUTE4_FAILURE);
failure:
	return -1;

old_method:
	if (id && (TC_H_MAJ(id) == 0 ||
		   !(TC_H_MAJ(id ^ tp->q->handle)))) {
		res->classid = id;
		res->class = 0;
		return 0;
	}
	return -1;
}
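/*
 * Handle layout, worked example: "to 2 from 3" yields handle 0x00030002
 * (to_hash() -> bucket 2, from_hash(3) -> chain 3); "to 2" alone yields
 * 0xFFFF0002, and from_hash(0xFFFF) -> wildcard chain 32; a filter with
 * no TO realm keeps the 0x8000 flag, so to_hash() places it in
 * bucket 256.
 */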
static inline u32 to_hash(u32 id)
{
	u32 h = id & 0xFF;

	if (id & 0x8000)
		h += 256;
	return h;
}

static inline u32 from_hash(u32 id)
{
	id &= 0xFFFF;
	if (id == 0xFFFF)
		return 32;

	if (!(id & 0x8000)) {
		if (id > 255)
			return 256;
		return id & 0xF;
	}
	return 16 + (id & 0xF);
}
static unsigned long route4_get(struct tcf_proto *tp, u32 handle)
{
	struct route4_head *head = (struct route4_head *)tp->root;
	struct route4_bucket *b;
	struct route4_filter *f;
	unsigned int h1, h2;

	if (!head)
		return 0;

	h1 = to_hash(handle);
	if (h1 > 256)
		return 0;

	h2 = from_hash(handle >> 16);
	if (h2 > 32)
		return 0;

	b = head->table[h1];
	if (b) {
		for (f = b->ht[h2]; f; f = f->next)
			if (f->handle == handle)
				return (unsigned long)f;
	}
	return 0;
}
static void route4_put(struct tcf_proto *tp, unsigned long f)
{
}

static int route4_init(struct tcf_proto *tp)
{
	return 0;
}
static void
route4_delete_filter(struct tcf_proto *tp, struct route4_filter *f)
{
	tcf_unbind_filter(tp, &f->res);
	tcf_exts_destroy(tp, &f->exts);
	kfree(f);
}
static void route4_destroy(struct tcf_proto *tp)
{
	struct route4_head *head = tp->root;
	int h1, h2;

	if (head == NULL)
		return;

	for (h1 = 0; h1 <= 256; h1++) {
		struct route4_bucket *b;

		b = head->table[h1];
		if (b) {
			for (h2 = 0; h2 <= 32; h2++) {
				struct route4_filter *f;

				while ((f = b->ht[h2]) != NULL) {
					b->ht[h2] = f->next;
					route4_delete_filter(tp, f);
				}
			}
			kfree(b);
		}
	}
	kfree(head);
}
static int route4_delete(struct tcf_proto *tp, unsigned long arg)
{
	struct route4_head *head = (struct route4_head *)tp->root;
	struct route4_filter **fp, *f = (struct route4_filter *)arg;
	unsigned int h = 0;
	struct route4_bucket *b;
	int i;

	if (!head || !f)
		return -EINVAL;

	h = f->handle;
	b = f->bkt;

	for (fp = &b->ht[from_hash(h >> 16)]; *fp; fp = &(*fp)->next) {
		if (*fp == f) {
			tcf_tree_lock(tp);
			*fp = f->next;
			tcf_tree_unlock(tp);

			route4_reset_fastmap(tp->q, head, f->id);
			route4_delete_filter(tp, f);

			/* Strip tree */
			for (i = 0; i <= 32; i++)
				if (b->ht[i])
					return 0;

			/* OK, session has no flows */
			tcf_tree_lock(tp);
			head->table[to_hash(h)] = NULL;
			tcf_tree_unlock(tp);

			kfree(b);
			return 0;
		}
	}
	return 0;
}
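/*
 * Note the two-stage teardown above: the filter is unlinked and freed
 * first, then the bucket itself is reclaimed only once all 33 of its
 * chains are empty.
 */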
static const struct nla_policy route4_policy[TCA_ROUTE4_MAX + 1] = {
	[TCA_ROUTE4_CLASSID]	= { .type = NLA_U32 },
	[TCA_ROUTE4_TO]		= { .type = NLA_U32 },
	[TCA_ROUTE4_FROM]	= { .type = NLA_U32 },
	[TCA_ROUTE4_IIF]	= { .type = NLA_U32 },
};
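/*
 * The policy only pins each attribute to a u32; the range checks
 * (realms <= 0xFF, iif <= 0x7FFF) happen in route4_set_parms() below.
 */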
static int route4_set_parms(struct tcf_proto *tp, unsigned long base,
			    struct route4_filter *f, u32 handle,
			    struct route4_head *head, struct nlattr **tb,
			    struct nlattr *est, int new)
{
	int err;
	u32 id = 0, to = 0, nhandle = 0x8000;
	struct route4_filter *fp;
	unsigned int h1;
	struct route4_bucket *b;
	struct tcf_exts e;

	err = tcf_exts_validate(tp, tb, est, &e, &route_ext_map);
	if (err < 0)
		return err;

	err = -EINVAL;
	if (tb[TCA_ROUTE4_TO]) {
		if (new && handle & 0x8000)
			goto errout;
		to = nla_get_u32(tb[TCA_ROUTE4_TO]);
		if (to > 0xFF)
			goto errout;
		nhandle = to;
	}

	if (tb[TCA_ROUTE4_FROM]) {
		if (tb[TCA_ROUTE4_IIF])
			goto errout;
		id = nla_get_u32(tb[TCA_ROUTE4_FROM]);
		if (id > 0xFF)
			goto errout;
		nhandle |= id << 16;
	} else if (tb[TCA_ROUTE4_IIF]) {
		id = nla_get_u32(tb[TCA_ROUTE4_IIF]);
		if (id > 0x7FFF)
			goto errout;
		nhandle |= (id | 0x8000) << 16;
	} else
		nhandle |= 0xFFFF << 16;

	if (handle && new) {
		nhandle |= handle & 0x7F00;
		if (nhandle != handle)
			goto errout;
	}

	h1 = to_hash(nhandle);
	b = head->table[h1];
	if (!b) {
		err = -ENOBUFS;
		b = kzalloc(sizeof(struct route4_bucket), GFP_KERNEL);
		if (b == NULL)
			goto errout;

		tcf_tree_lock(tp);
		head->table[h1] = b;
		tcf_tree_unlock(tp);
	} else {
		unsigned int h2 = from_hash(nhandle >> 16);

		err = -EEXIST;
		for (fp = b->ht[h2]; fp; fp = fp->next)
			if (fp->handle == f->handle)
				goto errout;
	}

	tcf_tree_lock(tp);
	if (tb[TCA_ROUTE4_TO])
		f->id = to;

	if (tb[TCA_ROUTE4_FROM])
		f->id = to | id << 16;
	else if (tb[TCA_ROUTE4_IIF])
		f->iif = id;

	f->handle = nhandle;
	f->bkt = b;
	tcf_tree_unlock(tp);

	if (tb[TCA_ROUTE4_CLASSID]) {
		f->res.classid = nla_get_u32(tb[TCA_ROUTE4_CLASSID]);
		tcf_bind_filter(tp, &f->res, base);
	}

	tcf_exts_change(tp, &f->exts, &e);

	return 0;
errout:
	tcf_exts_destroy(tp, &e);
	return err;
}
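/*
 * All writes to live filter fields above happen between tcf_tree_lock()
 * and tcf_tree_unlock(), since route4_classify() may be walking the
 * same chains concurrently.
 */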
static int route4_change(struct sk_buff *in_skb,
		       struct tcf_proto *tp, unsigned long base,
		       u32 handle,
		       struct nlattr **tca,
		       unsigned long *arg)
{
	struct route4_head *head = tp->root;
	struct route4_filter *f, *f1, **fp;
	struct route4_bucket *b;
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_ROUTE4_MAX + 1];
	unsigned int h, th;
	u32 old_handle = 0;
	int err;

	if (opt == NULL)
		return handle ? -EINVAL : 0;

	err = nla_parse_nested(tb, TCA_ROUTE4_MAX, opt, route4_policy);
	if (err < 0)
		return err;

	f = (struct route4_filter *)*arg;
	if (f) {
		if (f->handle != handle && handle)
			return -EINVAL;

		if (f->bkt)
			old_handle = f->handle;

		err = route4_set_parms(tp, base, f, handle, head, tb,
			tca[TCA_RATE], 0);
		if (err < 0)
			return err;

		goto reinsert;
	}

	err = -ENOBUFS;
	if (head == NULL) {
		head = kzalloc(sizeof(struct route4_head), GFP_KERNEL);
		if (head == NULL)
			goto errout;

		tcf_tree_lock(tp);
		tp->root = head;
		tcf_tree_unlock(tp);
	}

	f = kzalloc(sizeof(struct route4_filter), GFP_KERNEL);
	if (f == NULL)
		goto errout;

	err = route4_set_parms(tp, base, f, handle, head, tb,
		tca[TCA_RATE], 1);
	if (err < 0)
		goto errout;

reinsert:
	h = from_hash(f->handle >> 16);
	for (fp = &f->bkt->ht[h]; (f1 = *fp) != NULL; fp = &f1->next)
		if (f->handle < f1->handle)
			break;

	f->next = f1;
	tcf_tree_lock(tp);
	*fp = f;

	if (old_handle && f->handle != old_handle) {
		th = to_hash(old_handle);
		h = from_hash(old_handle >> 16);
		b = head->table[th];
		if (b) {
			for (fp = &b->ht[h]; *fp; fp = &(*fp)->next) {
				if (*fp == f) {
					*fp = f->next;
					break;
				}
			}
		}
	}
	tcf_tree_unlock(tp);

	route4_reset_fastmap(tp->q, head, f->id);
	*arg = (unsigned long)f;
	return 0;

errout:
	kfree(f);
	return err;
}
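/*
 * The reinsert path above keeps each chain sorted by ascending handle
 * and, when a change moved the filter to a new handle, unlinks it from
 * its old chain before dropping the tree lock.
 */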
static void route4_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
	struct route4_head *head = tp->root;
	unsigned int h, h1;

	if (head == NULL)
		arg->stop = 1;

	if (arg->stop)
		return;

	for (h = 0; h <= 256; h++) {
		struct route4_bucket *b = head->table[h];

		if (b) {
			for (h1 = 0; h1 <= 32; h1++) {
				struct route4_filter *f;

				for (f = b->ht[h1]; f; f = f->next) {
					if (arg->count < arg->skip) {
						arg->count++;
						continue;
					}
					if (arg->fn(tp, (unsigned long)f, arg) < 0) {
						arg->stop = 1;
						return;
					}
					arg->count++;
				}
			}
		}
	}
}
static int route4_dump(struct tcf_proto *tp, unsigned long fh,
		       struct sk_buff *skb, struct tcmsg *t)
{
	struct route4_filter *f = (struct route4_filter *)fh;
	unsigned char *b = skb_tail_pointer(skb);
	struct nlattr *nest;
	u32 id;

	if (f == NULL)
		return skb->len;

	t->tcm_handle = f->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (!(f->handle & 0x8000)) {
		id = f->id & 0xFF;
		if (nla_put_u32(skb, TCA_ROUTE4_TO, id))
			goto nla_put_failure;
	}
	if (f->handle & 0x80000000) {
		if ((f->handle >> 16) != 0xFFFF &&
		    nla_put_u32(skb, TCA_ROUTE4_IIF, f->iif))
			goto nla_put_failure;
	} else {
		id = f->id >> 16;
		if (nla_put_u32(skb, TCA_ROUTE4_FROM, id))
			goto nla_put_failure;
	}
	if (f->res.classid &&
	    nla_put_u32(skb, TCA_ROUTE4_CLASSID, f->res.classid))
		goto nla_put_failure;

	if (tcf_exts_dump(skb, &f->exts, &route_ext_map) < 0)
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &f->exts, &route_ext_map) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}
static struct tcf_proto_ops cls_route4_ops __read_mostly = {
	.kind		=	"route",
	.classify	=	route4_classify,
	.init		=	route4_init,
	.destroy	=	route4_destroy,
	.get		=	route4_get,
	.put		=	route4_put,
	.change		=	route4_change,
	.delete		=	route4_delete,
	.walk		=	route4_walk,
	.dump		=	route4_dump,
	.owner		=	THIS_MODULE,
};
static int __init init_route4(void)
{
	return register_tcf_proto_ops(&cls_route4_ops);
}

static void __exit exit_route4(void)
{
	unregister_tcf_proto_ops(&cls_route4_ops);
}

module_init(init_route4)
module_exit(exit_route4)
MODULE_LICENSE("GPL");