/*
 * net/sched/cls_flow.c        Generic flow classifier
 *
 * Copyright (c) 2007, 2008 Patrick McHardy <kaber@trash.net>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 */

#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/jhash.h>
#include <linux/random.h>
#include <linux/pkt_cls.h>
#include <linux/skbuff.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/if_vlan.h>
#include <linux/slab.h>

#include <net/pkt_cls.h>
#include <net/ip.h>
#include <net/route.h>
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
#include <net/netfilter/nf_conntrack.h>
#endif

struct flow_head {
        struct list_head        filters;
};

struct flow_filter {
        struct list_head        list;
        struct tcf_exts         exts;
        struct tcf_ematch_tree  ematches;
        struct timer_list       perturb_timer;
        u32                     perturb_period;
        u32                     handle;

        u32                     nkeys;
        u32                     keymask;
        u32                     mode;
        u32                     mask;
        u32                     xor;
        u32                     rshift;
        u32                     addend;
        u32                     divisor;
        u32                     baseclass;
        u32                     hashrnd;
};

static const struct tcf_ext_map flow_ext_map = {
        .action = TCA_FLOW_ACT,
        .police = TCA_FLOW_POLICE,
};

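/*
 * Fold a kernel pointer into a u32 key: on 64-bit the upper half is
 * XORed into the lower half so the result fits the u32 flow keys.
 */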
static inline u32 addr_fold(void *addr)
{
        unsigned long a = (unsigned long)addr;

        return (a & 0xFFFFFFFF) ^ (BITS_PER_LONG > 32 ? a >> 32 : 0);
}

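/*
 * Key extractors. Each flow_get_*() maps one FLOW_KEY_* attribute of
 * the packet to a u32. For IPv4/IPv6 the header is verified with
 * pskb_network_may_pull() first; when no usable header is present the
 * folded socket or dst pointer serves as a fallback key.
 */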
static u32 flow_get_src(struct sk_buff *skb)
{
        switch (skb->protocol) {
        case htons(ETH_P_IP):
                if (pskb_network_may_pull(skb, sizeof(struct iphdr)))
                        return ntohl(ip_hdr(skb)->saddr);
                break;
        case htons(ETH_P_IPV6):
                if (pskb_network_may_pull(skb, sizeof(struct ipv6hdr)))
                        return ntohl(ipv6_hdr(skb)->saddr.s6_addr32[3]);
                break;
        }

        return addr_fold(skb->sk);
}

static u32 flow_get_dst(struct sk_buff *skb)
{
        switch (skb->protocol) {
        case htons(ETH_P_IP):
                if (pskb_network_may_pull(skb, sizeof(struct iphdr)))
                        return ntohl(ip_hdr(skb)->daddr);
                break;
        case htons(ETH_P_IPV6):
                if (pskb_network_may_pull(skb, sizeof(struct ipv6hdr)))
                        return ntohl(ipv6_hdr(skb)->daddr.s6_addr32[3]);
                break;
        }

        return addr_fold(skb_dst(skb)) ^ (__force u16)skb->protocol;
}

static u32 flow_get_proto(struct sk_buff *skb)
{
        switch (skb->protocol) {
        case htons(ETH_P_IP):
                return pskb_network_may_pull(skb, sizeof(struct iphdr)) ?
                       ip_hdr(skb)->protocol : 0;
        case htons(ETH_P_IPV6):
                return pskb_network_may_pull(skb, sizeof(struct ipv6hdr)) ?
                       ipv6_hdr(skb)->nexthdr : 0;
        default:
                return 0;
        }
}

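/*
 * Protocols whose first four header bytes identify the flow: port
 * numbers for TCP/UDP/UDPLITE/SCTP/DCCP, the SPI for ESP.
 */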
static int has_ports(u8 protocol)
{
        switch (protocol) {
        case IPPROTO_TCP:
        case IPPROTO_UDP:
        case IPPROTO_UDPLITE:
        case IPPROTO_SCTP:
        case IPPROTO_DCCP:
        case IPPROTO_ESP:
                return 1;
        default:
                return 0;
        }
}

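/*
 * Port extraction only makes sense on unfragmented IPv4 packets
 * (IP_MF and the fragment offset both clear); anything else falls
 * back to the folded socket/dst pointer.
 */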
static u32 flow_get_proto_src(struct sk_buff *skb)
{
        switch (skb->protocol) {
        case htons(ETH_P_IP): {
                struct iphdr *iph;

                if (!pskb_network_may_pull(skb, sizeof(*iph)))
                        break;
                iph = ip_hdr(skb);
                if (!(iph->frag_off & htons(IP_MF|IP_OFFSET)) &&
                    has_ports(iph->protocol) &&
                    pskb_network_may_pull(skb, iph->ihl * 4 + 2))
                        return ntohs(*(__be16 *)((void *)iph + iph->ihl * 4));
                break;
        }
        case htons(ETH_P_IPV6): {
                struct ipv6hdr *iph;

                if (!pskb_network_may_pull(skb, sizeof(*iph) + 2))
                        break;
                iph = ipv6_hdr(skb);
                if (has_ports(iph->nexthdr))
                        return ntohs(*(__be16 *)&iph[1]);
                break;
        }
        }

        return addr_fold(skb->sk);
}

static u32 flow_get_proto_dst(struct sk_buff *skb)
{
        switch (skb->protocol) {
        case htons(ETH_P_IP): {
                struct iphdr *iph;

                if (!pskb_network_may_pull(skb, sizeof(*iph)))
                        break;
                iph = ip_hdr(skb);
                if (!(iph->frag_off & htons(IP_MF|IP_OFFSET)) &&
                    has_ports(iph->protocol) &&
                    pskb_network_may_pull(skb, iph->ihl * 4 + 4))
                        return ntohs(*(__be16 *)((void *)iph + iph->ihl * 4 + 2));
                break;
        }
        case htons(ETH_P_IPV6): {
                struct ipv6hdr *iph;

                if (!pskb_network_may_pull(skb, sizeof(*iph) + 4))
                        break;
                iph = ipv6_hdr(skb);
                if (has_ports(iph->nexthdr))
                        return ntohs(*(__be16 *)((void *)&iph[1] + 2));
                break;
        }
        }

        return addr_fold(skb_dst(skb)) ^ (__force u16)skb->protocol;
}

static u32 flow_get_iif(const struct sk_buff *skb)
{
        return skb->skb_iif;
}

static u32 flow_get_priority(const struct sk_buff *skb)
{
        return skb->priority;
}

static u32 flow_get_mark(const struct sk_buff *skb)
{
        return skb->mark;
}

static u32 flow_get_nfct(const struct sk_buff *skb)
{
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
        return addr_fold(skb->nfct);
#else
        return 0;
#endif
}

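/*
 * CTTUPLE() fetches one member of the conntrack tuple for the skb's
 * conntrack direction; if no conntrack entry is attached (or conntrack
 * is compiled out) it jumps to the caller's fallback label.
 */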
#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
#define CTTUPLE(skb, member)                                            \
({                                                                      \
        enum ip_conntrack_info ctinfo;                                  \
        struct nf_conn *ct = nf_ct_get(skb, &ctinfo);                   \
        if (ct == NULL)                                                 \
                goto fallback;                                          \
        ct->tuplehash[CTINFO2DIR(ctinfo)].tuple.member;                 \
})
#else
#define CTTUPLE(skb, member)                                            \
({                                                                      \
        goto fallback;                                                  \
        0;                                                              \
})
#endif

static u32 flow_get_nfct_src(struct sk_buff *skb)
{
        switch (skb->protocol) {
        case htons(ETH_P_IP):
                return ntohl(CTTUPLE(skb, src.u3.ip));
        case htons(ETH_P_IPV6):
                return ntohl(CTTUPLE(skb, src.u3.ip6[3]));
        }
fallback:
        return flow_get_src(skb);
}

static u32 flow_get_nfct_dst(struct sk_buff *skb)
{
        switch (skb->protocol) {
        case htons(ETH_P_IP):
                return ntohl(CTTUPLE(skb, dst.u3.ip));
        case htons(ETH_P_IPV6):
                return ntohl(CTTUPLE(skb, dst.u3.ip6[3]));
        }
fallback:
        return flow_get_dst(skb);
}

static u32 flow_get_nfct_proto_src(struct sk_buff *skb)
{
        return ntohs(CTTUPLE(skb, src.u.all));
fallback:
        return flow_get_proto_src(skb);
}

static u32 flow_get_nfct_proto_dst(struct sk_buff *skb)
{
        return ntohs(CTTUPLE(skb, dst.u.all));
fallback:
        return flow_get_proto_dst(skb);
}

static u32 flow_get_rtclassid(const struct sk_buff *skb)
{
#ifdef CONFIG_NET_CLS_ROUTE
        if (skb_dst(skb))
                return skb_dst(skb)->tclassid;
#endif
        return 0;
}

static u32 flow_get_skuid(const struct sk_buff *skb)
{
        if (skb->sk && skb->sk->sk_socket && skb->sk->sk_socket->file)
                return skb->sk->sk_socket->file->f_cred->fsuid;
        return 0;
}

static u32 flow_get_skgid(const struct sk_buff *skb)
{
        if (skb->sk && skb->sk->sk_socket && skb->sk->sk_socket->file)
                return skb->sk->sk_socket->file->f_cred->fsgid;
        return 0;
}

static u32 flow_get_vlan_tag(const struct sk_buff *skb)
{
        u16 uninitialized_var(tag);

        if (vlan_get_tag(skb, &tag) < 0)
                return 0;
        return tag & VLAN_VID_MASK;
}

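/* Dispatch a FLOW_KEY_* identifier to the matching extractor. */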
static u32 flow_key_get(struct sk_buff *skb, int key)
{
        switch (key) {
        case FLOW_KEY_SRC:
                return flow_get_src(skb);
        case FLOW_KEY_DST:
                return flow_get_dst(skb);
        case FLOW_KEY_PROTO:
                return flow_get_proto(skb);
        case FLOW_KEY_PROTO_SRC:
                return flow_get_proto_src(skb);
        case FLOW_KEY_PROTO_DST:
                return flow_get_proto_dst(skb);
        case FLOW_KEY_IIF:
                return flow_get_iif(skb);
        case FLOW_KEY_PRIORITY:
                return flow_get_priority(skb);
        case FLOW_KEY_MARK:
                return flow_get_mark(skb);
        case FLOW_KEY_NFCT:
                return flow_get_nfct(skb);
        case FLOW_KEY_NFCT_SRC:
                return flow_get_nfct_src(skb);
        case FLOW_KEY_NFCT_DST:
                return flow_get_nfct_dst(skb);
        case FLOW_KEY_NFCT_PROTO_SRC:
                return flow_get_nfct_proto_src(skb);
        case FLOW_KEY_NFCT_PROTO_DST:
                return flow_get_nfct_proto_dst(skb);
        case FLOW_KEY_RTCLASSID:
                return flow_get_rtclassid(skb);
        case FLOW_KEY_SKUID:
                return flow_get_skuid(skb);
        case FLOW_KEY_SKGID:
                return flow_get_skgid(skb);
        case FLOW_KEY_VLAN_TAG:
                return flow_get_vlan_tag(skb);
        default:
                WARN_ON(1);
                return 0;
        }
}

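/*
 * Classification: collect the configured keys in ascending bit order,
 * then either jhash them (FLOW_MODE_HASH) or transform the single key
 * (FLOW_MODE_MAP) via mask/xor/rshift/addend. The result is reduced
 * modulo the divisor and offset from baseclass to form the classid.
 */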
static int flow_classify(struct sk_buff *skb, struct tcf_proto *tp,
                         struct tcf_result *res)
{
        struct flow_head *head = tp->root;
        struct flow_filter *f;
        u32 keymask;
        u32 classid;
        unsigned int n, key;
        int r;

        list_for_each_entry(f, &head->filters, list) {
                u32 keys[f->nkeys];

                if (!tcf_em_tree_match(skb, &f->ematches, NULL))
                        continue;

                keymask = f->keymask;

                for (n = 0; n < f->nkeys; n++) {
                        key = ffs(keymask) - 1;
                        keymask &= ~(1 << key);
                        keys[n] = flow_key_get(skb, key);
                }

                if (f->mode == FLOW_MODE_HASH)
                        classid = jhash2(keys, f->nkeys, f->hashrnd);
                else {
                        classid = keys[0];
                        classid = (classid & f->mask) ^ f->xor;
                        classid = (classid >> f->rshift) + f->addend;
                }

                if (f->divisor)
                        classid %= f->divisor;

                res->class   = 0;
                res->classid = TC_H_MAKE(f->baseclass, f->baseclass + classid);

                r = tcf_exts_exec(skb, &f->exts, res);
                if (r < 0)
                        continue;
                return r;
        }
        return -1;
}

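/*
 * Timer callback: re-seed the jhash salt so hash-based flow-to-class
 * mappings are reshuffled once per perturbation period.
 */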
static void flow_perturbation(unsigned long arg)
{
        struct flow_filter *f = (struct flow_filter *)arg;

        get_random_bytes(&f->hashrnd, 4);
        if (f->perturb_period)
                mod_timer(&f->perturb_timer, jiffies + f->perturb_period);
}

static const struct nla_policy flow_policy[TCA_FLOW_MAX + 1] = {
        [TCA_FLOW_KEYS]         = { .type = NLA_U32 },
        [TCA_FLOW_MODE]         = { .type = NLA_U32 },
        [TCA_FLOW_BASECLASS]    = { .type = NLA_U32 },
        [TCA_FLOW_RSHIFT]       = { .type = NLA_U32 },
        [TCA_FLOW_ADDEND]       = { .type = NLA_U32 },
        [TCA_FLOW_MASK]         = { .type = NLA_U32 },
        [TCA_FLOW_XOR]          = { .type = NLA_U32 },
        [TCA_FLOW_DIVISOR]      = { .type = NLA_U32 },
        [TCA_FLOW_ACT]          = { .type = NLA_NESTED },
        [TCA_FLOW_POLICE]       = { .type = NLA_NESTED },
        [TCA_FLOW_EMATCHES]     = { .type = NLA_NESTED },
        [TCA_FLOW_PERTURB]      = { .type = NLA_U32 },
};

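/*
 * Create or update a filter. A new filter needs an explicit handle and
 * a key list; FLOW_MODE_MAP is only accepted with a single key, and
 * TCA_FLOW_PERTURB is only valid in FLOW_MODE_HASH.
 */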
static int flow_change(struct tcf_proto *tp, unsigned long base,
                       u32 handle, struct nlattr **tca,
                       unsigned long *arg)
{
        struct flow_head *head = tp->root;
        struct flow_filter *f;
        struct nlattr *opt = tca[TCA_OPTIONS];
        struct nlattr *tb[TCA_FLOW_MAX + 1];
        struct tcf_exts e;
        struct tcf_ematch_tree t;
        unsigned int nkeys = 0;
        unsigned int perturb_period = 0;
        u32 baseclass = 0;
        u32 keymask = 0;
        u32 mode;
        int err;

        if (opt == NULL)
                return -EINVAL;

        err = nla_parse_nested(tb, TCA_FLOW_MAX, opt, flow_policy);
        if (err < 0)
                return err;

        if (tb[TCA_FLOW_BASECLASS]) {
                baseclass = nla_get_u32(tb[TCA_FLOW_BASECLASS]);
                if (TC_H_MIN(baseclass) == 0)
                        return -EINVAL;
        }

        if (tb[TCA_FLOW_KEYS]) {
                keymask = nla_get_u32(tb[TCA_FLOW_KEYS]);

                nkeys = hweight32(keymask);
                if (nkeys == 0)
                        return -EINVAL;

                if (fls(keymask) - 1 > FLOW_KEY_MAX)
                        return -EOPNOTSUPP;
        }

        err = tcf_exts_validate(tp, tb, tca[TCA_RATE], &e, &flow_ext_map);
        if (err < 0)
                return err;

        err = tcf_em_tree_validate(tp, tb[TCA_FLOW_EMATCHES], &t);
        if (err < 0)
                goto err1;

        f = (struct flow_filter *)*arg;
        if (f != NULL) {
                err = -EINVAL;
                if (f->handle != handle && handle)
                        goto err2;

                mode = f->mode;
                if (tb[TCA_FLOW_MODE])
                        mode = nla_get_u32(tb[TCA_FLOW_MODE]);
                if (mode != FLOW_MODE_HASH && nkeys > 1)
                        goto err2;

                if (mode == FLOW_MODE_HASH)
                        perturb_period = f->perturb_period;
                if (tb[TCA_FLOW_PERTURB]) {
                        if (mode != FLOW_MODE_HASH)
                                goto err2;
                        perturb_period = nla_get_u32(tb[TCA_FLOW_PERTURB]) * HZ;
                }
        } else {
                err = -EINVAL;
                if (!handle)
                        goto err2;
                if (!tb[TCA_FLOW_KEYS])
                        goto err2;

                mode = FLOW_MODE_MAP;
                if (tb[TCA_FLOW_MODE])
                        mode = nla_get_u32(tb[TCA_FLOW_MODE]);
                if (mode != FLOW_MODE_HASH && nkeys > 1)
                        goto err2;

                if (tb[TCA_FLOW_PERTURB]) {
                        if (mode != FLOW_MODE_HASH)
                                goto err2;
                        perturb_period = nla_get_u32(tb[TCA_FLOW_PERTURB]) * HZ;
                }

                if (TC_H_MAJ(baseclass) == 0)
                        baseclass = TC_H_MAKE(tp->q->handle, baseclass);
                if (TC_H_MIN(baseclass) == 0)
                        baseclass = TC_H_MAKE(baseclass, 1);

                err = -ENOBUFS;
                f = kzalloc(sizeof(*f), GFP_KERNEL);
                if (f == NULL)
                        goto err2;

                f->handle = handle;
                f->mask   = ~0U;

                get_random_bytes(&f->hashrnd, 4);
                f->perturb_timer.function = flow_perturbation;
                f->perturb_timer.data = (unsigned long)f;
                init_timer_deferrable(&f->perturb_timer);
        }

        tcf_exts_change(tp, &f->exts, &e);
        tcf_em_tree_change(tp, &f->ematches, &t);

        tcf_tree_lock(tp);

        if (tb[TCA_FLOW_KEYS]) {
                f->keymask = keymask;
                f->nkeys   = nkeys;
        }

        f->mode = mode;

        if (tb[TCA_FLOW_MASK])
                f->mask = nla_get_u32(tb[TCA_FLOW_MASK]);
        if (tb[TCA_FLOW_XOR])
                f->xor = nla_get_u32(tb[TCA_FLOW_XOR]);
        if (tb[TCA_FLOW_RSHIFT])
                f->rshift = nla_get_u32(tb[TCA_FLOW_RSHIFT]);
        if (tb[TCA_FLOW_ADDEND])
                f->addend = nla_get_u32(tb[TCA_FLOW_ADDEND]);

        if (tb[TCA_FLOW_DIVISOR])
                f->divisor = nla_get_u32(tb[TCA_FLOW_DIVISOR]);
        if (baseclass)
                f->baseclass = baseclass;

        f->perturb_period = perturb_period;
        del_timer(&f->perturb_timer);
        if (perturb_period)
                mod_timer(&f->perturb_timer, jiffies + perturb_period);

        if (*arg == 0)
                list_add_tail(&f->list, &head->filters);

        tcf_tree_unlock(tp);

        *arg = (unsigned long)f;
        return 0;

err2:
        tcf_em_tree_destroy(tp, &t);
err1:
        tcf_exts_destroy(tp, &e);
        return err;
}

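/*
 * Release one filter: stop its perturb timer, then free its extensions,
 * ematch tree and the filter itself.
 */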
static void flow_destroy_filter(struct tcf_proto *tp, struct flow_filter *f)
{
        del_timer_sync(&f->perturb_timer);
        tcf_exts_destroy(tp, &f->exts);
        tcf_em_tree_destroy(tp, &f->ematches);
        kfree(f);
}

static int flow_delete(struct tcf_proto *tp, unsigned long arg)
{
        struct flow_filter *f = (struct flow_filter *)arg;

        tcf_tree_lock(tp);
        list_del(&f->list);
        tcf_tree_unlock(tp);

        flow_destroy_filter(tp, f);
        return 0;
}

static int flow_init(struct tcf_proto *tp)
{
        struct flow_head *head;

        head = kzalloc(sizeof(*head), GFP_KERNEL);
        if (head == NULL)
                return -ENOBUFS;
        INIT_LIST_HEAD(&head->filters);
        tp->root = head;
        return 0;
}

static void flow_destroy(struct tcf_proto *tp)
{
        struct flow_head *head = tp->root;
        struct flow_filter *f, *next;

        list_for_each_entry_safe(f, next, &head->filters, list) {
                list_del(&f->list);
                flow_destroy_filter(tp, f);
        }
        kfree(head);
}

static unsigned long flow_get(struct tcf_proto *tp, u32 handle)
{
        struct flow_head *head = tp->root;
        struct flow_filter *f;

        list_for_each_entry(f, &head->filters, list)
                if (f->handle == handle)
                        return (unsigned long)f;
        return 0;
}

static void flow_put(struct tcf_proto *tp, unsigned long f)
{
}

static int flow_dump(struct tcf_proto *tp, unsigned long fh,
                     struct sk_buff *skb, struct tcmsg *t)
{
        struct flow_filter *f = (struct flow_filter *)fh;
        struct nlattr *nest;

        if (f == NULL)
                return skb->len;

        t->tcm_handle = f->handle;

        nest = nla_nest_start(skb, TCA_OPTIONS);
        if (nest == NULL)
                goto nla_put_failure;

        NLA_PUT_U32(skb, TCA_FLOW_KEYS, f->keymask);
        NLA_PUT_U32(skb, TCA_FLOW_MODE, f->mode);

        if (f->mask != ~0 || f->xor != 0) {
                NLA_PUT_U32(skb, TCA_FLOW_MASK, f->mask);
                NLA_PUT_U32(skb, TCA_FLOW_XOR, f->xor);
        }
        if (f->rshift)
                NLA_PUT_U32(skb, TCA_FLOW_RSHIFT, f->rshift);
        if (f->addend)
                NLA_PUT_U32(skb, TCA_FLOW_ADDEND, f->addend);

        if (f->divisor)
                NLA_PUT_U32(skb, TCA_FLOW_DIVISOR, f->divisor);
        if (f->baseclass)
                NLA_PUT_U32(skb, TCA_FLOW_BASECLASS, f->baseclass);

        if (f->perturb_period)
                NLA_PUT_U32(skb, TCA_FLOW_PERTURB, f->perturb_period / HZ);

        if (tcf_exts_dump(skb, &f->exts, &flow_ext_map) < 0)
                goto nla_put_failure;
#ifdef CONFIG_NET_EMATCH
        if (f->ematches.hdr.nmatches &&
            tcf_em_tree_dump(skb, &f->ematches, TCA_FLOW_EMATCHES) < 0)
                goto nla_put_failure;
#endif
        nla_nest_end(skb, nest);

        if (tcf_exts_dump_stats(skb, &f->exts, &flow_ext_map) < 0)
                goto nla_put_failure;

        return skb->len;

nla_put_failure:
        nlmsg_trim(skb, nest);
        return -1;
}

static void flow_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
        struct flow_head *head = tp->root;
        struct flow_filter *f;

        list_for_each_entry(f, &head->filters, list) {
                if (arg->count < arg->skip)
                        goto skip;
                if (arg->fn(tp, (unsigned long)f, arg) < 0) {
                        arg->stop = 1;
                        break;
                }
skip:
                arg->count++;
        }
}

static struct tcf_proto_ops cls_flow_ops __read_mostly = {
        .kind           = "flow",
        .classify       = flow_classify,
        .init           = flow_init,
        .destroy        = flow_destroy,
        .change         = flow_change,
        .delete         = flow_delete,
        .get            = flow_get,
        .put            = flow_put,
        .dump           = flow_dump,
        .walk           = flow_walk,
        .owner          = THIS_MODULE,
};

static int __init cls_flow_init(void)
{
        return register_tcf_proto_ops(&cls_flow_ops);
}

static void __exit cls_flow_exit(void)
{
        unregister_tcf_proto_ops(&cls_flow_ops);
}

module_init(cls_flow_init);
module_exit(cls_flow_exit);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
MODULE_DESCRIPTION("TC flow classifier");

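/*
 * Illustrative userspace usage (a sketch assuming an iproute2 build with
 * flow classifier support; device, handles and key names are examples
 * only and may differ between iproute2 versions):
 *
 *   tc filter add dev eth0 parent 1: protocol ip prio 1 handle 1 \
 *           flow hash keys src,dst,proto,proto-src,proto-dst \
 *           divisor 1024 perturb 10
 *
 * This hashes each packet's 5-tuple into 1024 classes under qdisc 1:
 * and re-seeds the hash every 10 seconds via TCA_FLOW_PERTURB.
 */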