/*
 * net/sched/cls_flow.c		Generic flow classifier
 *
 * Copyright (c) 2007, 2008 Patrick McHardy <kaber@trash.net>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 */

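/*
 * Illustrative use from userspace (iproute2 syntax; assumes a classful
 * or hashing qdisc at 1:, e.g. SFQ or HTB) -- distribute flows over
 * 1024 classes by hashing source and destination address:
 *
 *	tc filter add dev eth0 parent 1: protocol ip \
 *		flow hash keys src,dst divisor 1024
 */
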
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <linux/pkt_cls.h>
#include <linux/skbuff.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>

#include <net/pkt_cls.h>
#include <net/ip.h>
#include <net/route.h>
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
#include <net/netfilter/nf_conntrack.h>
#endif

struct flow_head {
	struct list_head	filters;
};

struct flow_filter {
	struct list_head	list;
	struct tcf_exts		exts;
	struct tcf_ematch_tree	ematches;
	u32			handle;

	u32			nkeys;
	u32			keymask;
	u32			mode;
	u32			mask;
	u32			xor;
	u32			rshift;
	u32			addend;
	u32			divisor;
	u32			baseclass;
};

static u32 flow_hashrnd __read_mostly;
static int flow_hashrnd_initted __read_mostly;

static const struct tcf_ext_map flow_ext_map = {
	.action	= TCA_FLOW_ACT,
	.police	= TCA_FLOW_POLICE,
};

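/*
 * Fold a pointer-sized value (socket, dst entry, conntrack entry) into
 * 32 bits so it can be used as a flow key on 64-bit architectures too.
 */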
static inline u32 addr_fold(void *addr)
{
	unsigned long a = (unsigned long)addr;

	return (a & 0xFFFFFFFF) ^ (BITS_PER_LONG > 32 ? a >> 32 : 0);
}

static u32 flow_get_src(const struct sk_buff *skb)
{
	switch (skb->protocol) {
	case __constant_htons(ETH_P_IP):
		return ntohl(ip_hdr(skb)->saddr);
	case __constant_htons(ETH_P_IPV6):
		return ntohl(ipv6_hdr(skb)->saddr.s6_addr32[3]);
	default:
		return addr_fold(skb->sk);
	}
}

static u32 flow_get_dst(const struct sk_buff *skb)
{
	switch (skb->protocol) {
	case __constant_htons(ETH_P_IP):
		return ntohl(ip_hdr(skb)->daddr);
	case __constant_htons(ETH_P_IPV6):
		return ntohl(ipv6_hdr(skb)->daddr.s6_addr32[3]);
	default:
		return addr_fold(skb->dst) ^ (__force u16)skb->protocol;
	}
}

static u32 flow_get_proto(const struct sk_buff *skb)
{
	switch (skb->protocol) {
	case __constant_htons(ETH_P_IP):
		return ip_hdr(skb)->protocol;
	case __constant_htons(ETH_P_IPV6):
		return ipv6_hdr(skb)->nexthdr;
	default:
		return 0;
	}
}

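/*
 * Transport protocols whose header begins with 16-bit source and
 * destination identifiers (ports; for ESP the two halves of the SPI),
 * so the first four bytes past the network header are usable as keys.
 */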
static int has_ports(u8 protocol)
{
	switch (protocol) {
	case IPPROTO_TCP:
	case IPPROTO_UDP:
	case IPPROTO_UDPLITE:
	case IPPROTO_SCTP:
	case IPPROTO_DCCP:
	case IPPROTO_ESP:
		return 1;
	default:
		return 0;
	}
}

static u32 flow_get_proto_src(const struct sk_buff *skb)
{
	u32 res = 0;

	switch (skb->protocol) {
	case __constant_htons(ETH_P_IP): {
		struct iphdr *iph = ip_hdr(skb);

		if (!(iph->frag_off & htons(IP_MF | IP_OFFSET)) &&
		    has_ports(iph->protocol))
			res = ntohs(*(__be16 *)((void *)iph + iph->ihl * 4));
		break;
	}
	case __constant_htons(ETH_P_IPV6): {
		struct ipv6hdr *iph = ipv6_hdr(skb);

		if (has_ports(iph->nexthdr))
			res = ntohs(*(__be16 *)&iph[1]);
		break;
	}
	default:
		res = addr_fold(skb->sk);
	}

	return res;
}

static u32 flow_get_proto_dst(const struct sk_buff *skb)
{
	u32 res = 0;

	switch (skb->protocol) {
	case __constant_htons(ETH_P_IP): {
		struct iphdr *iph = ip_hdr(skb);

		if (!(iph->frag_off & htons(IP_MF | IP_OFFSET)) &&
		    has_ports(iph->protocol))
			res = ntohs(*(__be16 *)((void *)iph + iph->ihl * 4 + 2));
		break;
	}
	case __constant_htons(ETH_P_IPV6): {
		struct ipv6hdr *iph = ipv6_hdr(skb);

		if (has_ports(iph->nexthdr))
			res = ntohs(*(__be16 *)((void *)&iph[1] + 2));
		break;
	}
	default:
		res = addr_fold(skb->dst) ^ (__force u16)skb->protocol;
	}

	return res;
}

static u32 flow_get_iif(const struct sk_buff *skb)
{
	return skb->iif;
}

static u32 flow_get_priority(const struct sk_buff *skb)
{
	return skb->priority;
}

static u32 flow_get_mark(const struct sk_buff *skb)
{
	return skb->mark;
}

static u32 flow_get_nfct(const struct sk_buff *skb)
{
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
	return addr_fold(skb->nfct);
#else
	return 0;
#endif
}

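/*
 * CTTUPLE() evaluates to the given member of the skb's conntrack tuple.
 * If there is no conntrack entry (or conntrack is compiled out), it
 * jumps to a "fallback" label that each caller below provides in order
 * to fall back to the corresponding plain packet key.
 */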
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
#define CTTUPLE(skb, member)						\
({									\
	enum ip_conntrack_info ctinfo;					\
	struct nf_conn *ct = nf_ct_get(skb, &ctinfo);			\
	if (ct == NULL)							\
		goto fallback;						\
	ct->tuplehash[CTINFO2DIR(ctinfo)].tuple.member;			\
})
#else
#define CTTUPLE(skb, member)						\
({									\
	goto fallback;							\
	0;								\
})
#endif

static u32 flow_get_nfct_src(const struct sk_buff *skb)
{
	switch (skb->protocol) {
	case __constant_htons(ETH_P_IP):
		return ntohl(CTTUPLE(skb, src.u3.ip));
	case __constant_htons(ETH_P_IPV6):
		return ntohl(CTTUPLE(skb, src.u3.ip6[3]));
	}
fallback:
	return flow_get_src(skb);
}

static u32 flow_get_nfct_dst(const struct sk_buff *skb)
{
	switch (skb->protocol) {
	case __constant_htons(ETH_P_IP):
		return ntohl(CTTUPLE(skb, dst.u3.ip));
	case __constant_htons(ETH_P_IPV6):
		return ntohl(CTTUPLE(skb, dst.u3.ip6[3]));
	}
fallback:
	return flow_get_dst(skb);
}

static u32 flow_get_nfct_proto_src(const struct sk_buff *skb)
{
	return ntohs(CTTUPLE(skb, src.u.all));
fallback:
	return flow_get_proto_src(skb);
}

static u32 flow_get_nfct_proto_dst(const struct sk_buff *skb)
{
	return ntohs(CTTUPLE(skb, dst.u.all));
fallback:
	return flow_get_proto_dst(skb);
}

static u32 flow_get_rtclassid(const struct sk_buff *skb)
{
#ifdef CONFIG_NET_CLS_ROUTE
	if (skb->dst)
		return skb->dst->tclassid;
#endif
	return 0;
}

static u32 flow_get_skuid(const struct sk_buff *skb)
{
	if (skb->sk && skb->sk->sk_socket && skb->sk->sk_socket->file)
		return skb->sk->sk_socket->file->f_uid;
	return 0;
}

static u32 flow_get_skgid(const struct sk_buff *skb)
{
	if (skb->sk && skb->sk->sk_socket && skb->sk->sk_socket->file)
		return skb->sk->sk_socket->file->f_gid;
	return 0;
}

static u32 flow_key_get(const struct sk_buff *skb, int key)
{
	switch (key) {
	case FLOW_KEY_SRC:
		return flow_get_src(skb);
	case FLOW_KEY_DST:
		return flow_get_dst(skb);
	case FLOW_KEY_PROTO:
		return flow_get_proto(skb);
	case FLOW_KEY_PROTO_SRC:
		return flow_get_proto_src(skb);
	case FLOW_KEY_PROTO_DST:
		return flow_get_proto_dst(skb);
	case FLOW_KEY_IIF:
		return flow_get_iif(skb);
	case FLOW_KEY_PRIORITY:
		return flow_get_priority(skb);
	case FLOW_KEY_MARK:
		return flow_get_mark(skb);
	case FLOW_KEY_NFCT:
		return flow_get_nfct(skb);
	case FLOW_KEY_NFCT_SRC:
		return flow_get_nfct_src(skb);
	case FLOW_KEY_NFCT_DST:
		return flow_get_nfct_dst(skb);
	case FLOW_KEY_NFCT_PROTO_SRC:
		return flow_get_nfct_proto_src(skb);
	case FLOW_KEY_NFCT_PROTO_DST:
		return flow_get_nfct_proto_dst(skb);
	case FLOW_KEY_RTCLASSID:
		return flow_get_rtclassid(skb);
	case FLOW_KEY_SKUID:
		return flow_get_skuid(skb);
	case FLOW_KEY_SKGID:
		return flow_get_skgid(skb);
	default:
		WARN_ON(1);
		return 0;
	}
}

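/*
 * Classification proper: gather the configured keys from the packet,
 * then either jhash them (FLOW_MODE_HASH) or transform the single key
 * (FLOW_MODE_MAP) through mask/xor/rshift/addend, optionally reduce
 * modulo the divisor, and map the result onto baseclass + classid.
 */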
static int flow_classify(struct sk_buff *skb, struct tcf_proto *tp,
			 struct tcf_result *res)
{
	struct flow_head *head = tp->root;
	struct flow_filter *f;
	u32 keymask;
	u32 classid;
	unsigned int n, key;
	int r;

	list_for_each_entry(f, &head->filters, list) {
		u32 keys[f->nkeys];

		if (!tcf_em_tree_match(skb, &f->ematches, NULL))
			continue;

		keymask = f->keymask;

		for (n = 0; n < f->nkeys; n++) {
			key = ffs(keymask) - 1;
			keymask &= ~(1 << key);
			keys[n] = flow_key_get(skb, key);
		}

		if (f->mode == FLOW_MODE_HASH)
			classid = jhash2(keys, f->nkeys, flow_hashrnd);
		else {
			classid = keys[0];
			classid = (classid & f->mask) ^ f->xor;
			classid = (classid >> f->rshift) + f->addend;
		}

		if (f->divisor)
			classid %= f->divisor;

		res->class   = 0;
		res->classid = TC_H_MAKE(f->baseclass, f->baseclass + classid);

		r = tcf_exts_exec(skb, &f->exts, res);
		if (r < 0)
			continue;
		return r;
	}
	return -1;
}

static const struct nla_policy flow_policy[TCA_FLOW_MAX + 1] = {
	[TCA_FLOW_KEYS]		= { .type = NLA_U32 },
	[TCA_FLOW_MODE]		= { .type = NLA_U32 },
	[TCA_FLOW_BASECLASS]	= { .type = NLA_U32 },
	[TCA_FLOW_RSHIFT]	= { .type = NLA_U32 },
	[TCA_FLOW_ADDEND]	= { .type = NLA_U32 },
	[TCA_FLOW_MASK]		= { .type = NLA_U32 },
	[TCA_FLOW_XOR]		= { .type = NLA_U32 },
	[TCA_FLOW_DIVISOR]	= { .type = NLA_U32 },
	[TCA_FLOW_ACT]		= { .type = NLA_NESTED },
	[TCA_FLOW_POLICE]	= { .type = NLA_NESTED },
	[TCA_FLOW_EMATCHES]	= { .type = NLA_NESTED },
};

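/*
 * Create a new filter or reconfigure an existing one (*arg != 0).
 * Only FLOW_MODE_HASH may combine more than one key.
 */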
static int flow_change(struct tcf_proto *tp, unsigned long base,
		       u32 handle, struct nlattr **tca,
		       unsigned long *arg)
{
	struct flow_head *head = tp->root;
	struct flow_filter *f;
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_FLOW_MAX + 1];
	struct tcf_exts e;
	struct tcf_ematch_tree t;
	unsigned int nkeys = 0;
	u32 baseclass = 0;
	u32 keymask = 0;
	u32 mode;
	int err;

	if (opt == NULL)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_FLOW_MAX, opt, flow_policy);
	if (err < 0)
		return err;

	if (tb[TCA_FLOW_BASECLASS]) {
		baseclass = nla_get_u32(tb[TCA_FLOW_BASECLASS]);
		if (TC_H_MIN(baseclass) == 0)
			return -EINVAL;
	}

	if (tb[TCA_FLOW_KEYS]) {
		keymask = nla_get_u32(tb[TCA_FLOW_KEYS]);
		if (fls(keymask) - 1 > FLOW_KEY_MAX)
			return -EOPNOTSUPP;

		nkeys = hweight32(keymask);
		if (nkeys == 0)
			return -EINVAL;
	}

	err = tcf_exts_validate(tp, tb, tca[TCA_RATE], &e, &flow_ext_map);
	if (err < 0)
		return err;

	err = tcf_em_tree_validate(tp, tb[TCA_FLOW_EMATCHES], &t);
	if (err < 0)
		goto err1;

	f = (struct flow_filter *)*arg;
	if (f != NULL) {
		err = -EINVAL;
		if (f->handle != handle && handle)
			goto err2;

		mode = f->mode;
		if (tb[TCA_FLOW_MODE])
			mode = nla_get_u32(tb[TCA_FLOW_MODE]);
		if (mode != FLOW_MODE_HASH && nkeys > 1)
			goto err2;
	} else {
		err = -EINVAL;
		if (!handle)
			goto err2;
		if (!tb[TCA_FLOW_KEYS])
			goto err2;

		mode = FLOW_MODE_MAP;
		if (tb[TCA_FLOW_MODE])
			mode = nla_get_u32(tb[TCA_FLOW_MODE]);
		if (mode != FLOW_MODE_HASH && nkeys > 1)
			goto err2;

		if (TC_H_MAJ(baseclass) == 0)
			baseclass = TC_H_MAKE(tp->q->handle, baseclass);
		if (TC_H_MIN(baseclass) == 0)
			baseclass = TC_H_MAKE(baseclass, 1);

		err = -ENOBUFS;
		f = kzalloc(sizeof(*f), GFP_KERNEL);
		if (f == NULL)
			goto err2;

		f->handle = handle;
		f->mask	  = ~0U;
	}

	tcf_exts_change(tp, &f->exts, &e);
	tcf_em_tree_change(tp, &f->ematches, &t);

	tcf_tree_lock(tp);

	if (tb[TCA_FLOW_KEYS]) {
		f->keymask = keymask;
		f->nkeys   = nkeys;
	}

	f->mode = mode;

	if (tb[TCA_FLOW_MASK])
		f->mask = nla_get_u32(tb[TCA_FLOW_MASK]);
	if (tb[TCA_FLOW_XOR])
		f->xor = nla_get_u32(tb[TCA_FLOW_XOR]);
	if (tb[TCA_FLOW_RSHIFT])
		f->rshift = nla_get_u32(tb[TCA_FLOW_RSHIFT]);
	if (tb[TCA_FLOW_ADDEND])
		f->addend = nla_get_u32(tb[TCA_FLOW_ADDEND]);

	if (tb[TCA_FLOW_DIVISOR])
		f->divisor = nla_get_u32(tb[TCA_FLOW_DIVISOR]);
	if (baseclass)
		f->baseclass = baseclass;

	if (*arg == 0)
		list_add_tail(&f->list, &head->filters);

	tcf_tree_unlock(tp);

	*arg = (unsigned long)f;
	return 0;

err2:
	tcf_em_tree_destroy(tp, &t);
err1:
	tcf_exts_destroy(tp, &e);
	return err;
}

static void flow_destroy_filter(struct tcf_proto *tp, struct flow_filter *f)
{
	tcf_exts_destroy(tp, &f->exts);
	tcf_em_tree_destroy(tp, &f->ematches);
	kfree(f);
}

static int flow_delete(struct tcf_proto *tp, unsigned long arg)
{
	struct flow_filter *f = (struct flow_filter *)arg;

	tcf_tree_lock(tp);
	list_del(&f->list);
	tcf_tree_unlock(tp);
	flow_destroy_filter(tp, f);
	return 0;
}

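/*
 * The jhash seed is drawn once, when the first flow classifier
 * instance is created.
 */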
static int flow_init(struct tcf_proto *tp)
{
	struct flow_head *head;

	if (!flow_hashrnd_initted) {
		get_random_bytes(&flow_hashrnd, 4);
		flow_hashrnd_initted = 1;
	}

	head = kzalloc(sizeof(*head), GFP_KERNEL);
	if (head == NULL)
		return -ENOBUFS;
	INIT_LIST_HEAD(&head->filters);
	tp->root = head;
	return 0;
}

static void flow_destroy(struct tcf_proto *tp)
{
	struct flow_head *head = tp->root;
	struct flow_filter *f, *next;

	list_for_each_entry_safe(f, next, &head->filters, list) {
		list_del(&f->list);
		flow_destroy_filter(tp, f);
	}
	kfree(head);
}

static unsigned long flow_get(struct tcf_proto *tp, u32 handle)
{
	struct flow_head *head = tp->root;
	struct flow_filter *f;

	list_for_each_entry(f, &head->filters, list)
		if (f->handle == handle)
			return (unsigned long)f;
	return 0;
}

static void flow_put(struct tcf_proto *tp, unsigned long f)
{
	return;
}

static int flow_dump(struct tcf_proto *tp, unsigned long fh,
		     struct sk_buff *skb, struct tcmsg *t)
{
	struct flow_filter *f = (struct flow_filter *)fh;
	struct nlattr *nest;

	if (f == NULL)
		return skb->len;

	t->tcm_handle = f->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	NLA_PUT_U32(skb, TCA_FLOW_KEYS, f->keymask);
	NLA_PUT_U32(skb, TCA_FLOW_MODE, f->mode);

	if (f->mask != ~0 || f->xor != 0) {
		NLA_PUT_U32(skb, TCA_FLOW_MASK, f->mask);
		NLA_PUT_U32(skb, TCA_FLOW_XOR, f->xor);
	}
	if (f->rshift)
		NLA_PUT_U32(skb, TCA_FLOW_RSHIFT, f->rshift);
	if (f->addend)
		NLA_PUT_U32(skb, TCA_FLOW_ADDEND, f->addend);

	if (f->divisor)
		NLA_PUT_U32(skb, TCA_FLOW_DIVISOR, f->divisor);
	if (f->baseclass)
		NLA_PUT_U32(skb, TCA_FLOW_BASECLASS, f->baseclass);

	if (tcf_exts_dump(skb, &f->exts, &flow_ext_map) < 0)
		goto nla_put_failure;

	if (f->ematches.hdr.nmatches &&
	    tcf_em_tree_dump(skb, &f->ematches, TCA_FLOW_EMATCHES) < 0)
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &f->exts, &flow_ext_map) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, nest);
	return -1;
}

static void flow_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
	struct flow_head *head = tp->root;
	struct flow_filter *f;

	list_for_each_entry(f, &head->filters, list) {
		if (arg->count < arg->skip)
			goto skip;
		if (arg->fn(tp, (unsigned long)f, arg) < 0) {
			arg->stop = 1;
			break;
		}
skip:
		arg->count++;
	}
}

static struct tcf_proto_ops cls_flow_ops __read_mostly = {
	.kind		= "flow",
	.classify	= flow_classify,
	.init		= flow_init,
	.destroy	= flow_destroy,
	.change		= flow_change,
	.delete		= flow_delete,
	.get		= flow_get,
	.put		= flow_put,
	.dump		= flow_dump,
	.walk		= flow_walk,
	.owner		= THIS_MODULE,
};

static int __init cls_flow_init(void)
{
	return register_tcf_proto_ops(&cls_flow_ops);
}

static void __exit cls_flow_exit(void)
{
	unregister_tcf_proto_ops(&cls_flow_ops);
}

module_init(cls_flow_init);
module_exit(cls_flow_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
MODULE_DESCRIPTION("TC flow classifier");