/*
 * Berkeley Packet Filter based traffic classifier
 *
 * Might be used to classify traffic through flexible, user-defined and
 * possibly JIT-ed BPF filters for traffic control as an alternative to
 * ematches.
 *
 * (C) 2013 Daniel Borkmann <dborkman@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

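/* Example usage from userspace (illustrative only; exact syntax depends on
 * the iproute2 version):
 *
 *   tc qdisc add dev eth0 clsact
 *   tc filter add dev eth0 ingress bpf da obj prog.o sec classifier
 *
 * "da" (direct-action) corresponds to the TCA_BPF_FLAG_ACT_DIRECT /
 * exts_integrated mode handled below, where the BPF return code is used
 * directly as the TC action verdict.
 */
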
#include <linux/module.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/filter.h>
#include <linux/bpf.h>
#include <linux/idr.h>

#include <net/rtnetlink.h>
#include <net/pkt_cls.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Daniel Borkmann <dborkman@redhat.com>");
MODULE_DESCRIPTION("TC BPF based classifier");

#define CLS_BPF_NAME_LEN	256
#define CLS_BPF_SUPPORTED_GEN_FLAGS		\
	(TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW)

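/* Per-tcf_proto state: the list of attached programs and the IDR used to
 * allocate and track filter handles.
 */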
struct cls_bpf_head {
	struct list_head plist;
	struct idr handle_idr;
	struct rcu_head rcu;
};

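/* One instance per attached filter. For classic BPF, bpf_ops holds the
 * original sock_filter array; for eBPF loaded by fd, bpf_ops stays NULL and
 * bpf_name carries the optional program name (cls_bpf_is_ebpf() keys off
 * bpf_ops). In both cases filter points at the resulting bpf_prog.
 */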
struct cls_bpf_prog {
	struct bpf_prog *filter;
	struct list_head link;
	struct tcf_result res;
	bool exts_integrated;
	u32 gen_flags;
	struct tcf_exts exts;
	u32 handle;
	u16 bpf_num_ops;
	struct sock_filter *bpf_ops;
	const char *bpf_name;
	struct tcf_proto *tp;
	union {
		struct work_struct work;
		struct rcu_head rcu;
	};
};

static const struct nla_policy bpf_policy[TCA_BPF_MAX + 1] = {
	[TCA_BPF_CLASSID]	= { .type = NLA_U32 },
	[TCA_BPF_FLAGS]		= { .type = NLA_U32 },
	[TCA_BPF_FLAGS_GEN]	= { .type = NLA_U32 },
	[TCA_BPF_FD]		= { .type = NLA_U32 },
	[TCA_BPF_NAME]		= { .type = NLA_NUL_STRING,
				    .len = CLS_BPF_NAME_LEN },
	[TCA_BPF_OPS_LEN]	= { .type = NLA_U16 },
	[TCA_BPF_OPS]		= { .type = NLA_BINARY,
				    .len = sizeof(struct sock_filter) * BPF_MAXINSNS },
};

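/* Map the return code of a direct-action program to a TC verdict; anything
 * that is not a known TC_ACT_* opcode falls back to TC_ACT_UNSPEC.
 */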
static int cls_bpf_exec_opcode(int code)
{
	switch (code) {
	case TC_ACT_OK:
	case TC_ACT_SHOT:
	case TC_ACT_STOLEN:
	case TC_ACT_TRAP:
	case TC_ACT_REDIRECT:
	case TC_ACT_UNSPEC:
		return code;
	default:
		return TC_ACT_UNSPEC;
	}
}

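/* Main classification path, run with bottom halves disabled (hence
 * rcu_dereference_bh()). Programs are walked in list order; at ingress the
 * MAC header is pushed back first so the program sees the same packet
 * layout it would see at egress.
 */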
static int cls_bpf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			    struct tcf_result *res)
{
	struct cls_bpf_head *head = rcu_dereference_bh(tp->root);
	bool at_ingress = skb_at_tc_ingress(skb);
	struct cls_bpf_prog *prog;
	int ret = -1;

	/* Needed here for accessing maps. */
	rcu_read_lock();
	list_for_each_entry_rcu(prog, &head->plist, link) {
		int filter_res;

		qdisc_skb_cb(skb)->tc_classid = prog->res.classid;

		if (tc_skip_sw(prog->gen_flags)) {
			filter_res = prog->exts_integrated ? TC_ACT_UNSPEC : 0;
		} else if (at_ingress) {
			/* It is safe to push/pull even if skb_shared() */
			__skb_push(skb, skb->mac_len);
			bpf_compute_data_pointers(skb);
			filter_res = BPF_PROG_RUN(prog->filter, skb);
			__skb_pull(skb, skb->mac_len);
		} else {
			bpf_compute_data_pointers(skb);
			filter_res = BPF_PROG_RUN(prog->filter, skb);
		}

		if (prog->exts_integrated) {
			res->class   = 0;
			res->classid = TC_H_MAJ(prog->res.classid) |
				       qdisc_skb_cb(skb)->tc_classid;

			ret = cls_bpf_exec_opcode(filter_res);
			if (ret == TC_ACT_UNSPEC)
				continue;
			break;
		}

		if (filter_res == 0)
			continue;
		if (filter_res != -1) {
			res->class   = 0;
			res->classid = filter_res;
		} else {
			*res = prog->res;
		}

		ret = tcf_exts_exec(skb, &prog->exts, res);
		if (ret < 0)
			continue;

		break;
	}
	rcu_read_unlock();

	return ret;
}

static bool cls_bpf_is_ebpf(const struct cls_bpf_prog *prog)
{
	return !prog->bpf_ops;
}

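/* Issue a TC_SETUP_CLSBPF callback to the block's offload drivers: prog
 * adds or replaces a program in hardware, oldprog (with prog == NULL)
 * removes one. On error the previous state is restored; with skip_sw the
 * program must end up in hardware or the command fails.
 */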
static int cls_bpf_offload_cmd(struct tcf_proto *tp, struct cls_bpf_prog *prog,
			       struct cls_bpf_prog *oldprog,
			       struct netlink_ext_ack *extack)
{
	struct tcf_block *block = tp->chain->block;
	struct tc_cls_bpf_offload cls_bpf = {};
	struct cls_bpf_prog *obj;
	bool skip_sw;
	int err;

	skip_sw = prog && tc_skip_sw(prog->gen_flags);
	obj = prog ?: oldprog;

	tc_cls_common_offload_init(&cls_bpf.common, tp, obj->gen_flags,
				   extack);
	cls_bpf.command = TC_CLSBPF_OFFLOAD;
	cls_bpf.exts = &obj->exts;
	cls_bpf.prog = prog ? prog->filter : NULL;
	cls_bpf.oldprog = oldprog ? oldprog->filter : NULL;
	cls_bpf.name = obj->bpf_name;
	cls_bpf.exts_integrated = obj->exts_integrated;

	if (oldprog)
		tcf_block_offload_dec(block, &oldprog->gen_flags);

	err = tc_setup_cb_call(block, NULL, TC_SETUP_CLSBPF, &cls_bpf, skip_sw);
	if (prog) {
		if (err < 0) {
			cls_bpf_offload_cmd(tp, oldprog, prog, extack);
			return err;
		} else if (err > 0) {
			tcf_block_offload_inc(block, &prog->gen_flags);
		}
	}

	if (prog && skip_sw && !(prog->gen_flags & TCA_CLS_FLAGS_IN_HW))
		return -EINVAL;

	return 0;
}

static u32 cls_bpf_flags(u32 flags)
{
	return flags & CLS_BPF_SUPPORTED_GEN_FLAGS;
}

static int cls_bpf_offload(struct tcf_proto *tp, struct cls_bpf_prog *prog,
			   struct cls_bpf_prog *oldprog,
			   struct netlink_ext_ack *extack)
{
	if (prog && oldprog &&
	    cls_bpf_flags(prog->gen_flags) !=
	    cls_bpf_flags(oldprog->gen_flags))
		return -EINVAL;

	if (prog && tc_skip_hw(prog->gen_flags))
		prog = NULL;
	if (oldprog && tc_skip_hw(oldprog->gen_flags))
		oldprog = NULL;
	if (!prog && !oldprog)
		return 0;

	return cls_bpf_offload_cmd(tp, prog, oldprog, extack);
}

static void cls_bpf_stop_offload(struct tcf_proto *tp,
				 struct cls_bpf_prog *prog,
				 struct netlink_ext_ack *extack)
{
	int err;

	err = cls_bpf_offload_cmd(tp, NULL, prog, extack);
	if (err)
		pr_err("Stopping hardware offload failed: %d\n", err);
}

static void cls_bpf_offload_update_stats(struct tcf_proto *tp,
					 struct cls_bpf_prog *prog)
{
	struct tcf_block *block = tp->chain->block;
	struct tc_cls_bpf_offload cls_bpf = {};

	tc_cls_common_offload_init(&cls_bpf.common, tp, prog->gen_flags, NULL);
	cls_bpf.command = TC_CLSBPF_STATS;
	cls_bpf.exts = &prog->exts;
	cls_bpf.prog = prog->filter;
	cls_bpf.name = prog->bpf_name;
	cls_bpf.exts_integrated = prog->exts_integrated;

	tc_setup_cb_call(block, NULL, TC_SETUP_CLSBPF, &cls_bpf, false);
}

static int cls_bpf_init(struct tcf_proto *tp)
{
	struct cls_bpf_head *head;

	head = kzalloc(sizeof(*head), GFP_KERNEL);
	if (head == NULL)
		return -ENOBUFS;

	INIT_LIST_HEAD_RCU(&head->plist);
	idr_init(&head->handle_idr);
	rcu_assign_pointer(tp->root, head);

	return 0;
}

static void cls_bpf_free_parms(struct cls_bpf_prog *prog)
{
	if (cls_bpf_is_ebpf(prog))
		bpf_prog_put(prog->filter);
	else
		bpf_prog_destroy(prog->filter);

	kfree(prog->bpf_name);
	kfree(prog->bpf_ops);
}

static void __cls_bpf_delete_prog(struct cls_bpf_prog *prog)
{
	tcf_exts_destroy(&prog->exts);
	tcf_exts_put_net(&prog->exts);

	cls_bpf_free_parms(prog);
	kfree(prog);
}

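/* Filter teardown is deferred twice: call_rcu() waits out readers on the
 * classification path, then the RCU callback queues a work item so that
 * the final __cls_bpf_delete_prog() (which may sleep) runs in process
 * context holding RTNL.
 */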
static void cls_bpf_delete_prog_work(struct work_struct *work)
{
	struct cls_bpf_prog *prog = container_of(work, struct cls_bpf_prog, work);

	rtnl_lock();
	__cls_bpf_delete_prog(prog);
	rtnl_unlock();
}

static void cls_bpf_delete_prog_rcu(struct rcu_head *rcu)
{
	struct cls_bpf_prog *prog = container_of(rcu, struct cls_bpf_prog, rcu);

	INIT_WORK(&prog->work, cls_bpf_delete_prog_work);
	tcf_queue_work(&prog->work);
}

static void __cls_bpf_delete(struct tcf_proto *tp, struct cls_bpf_prog *prog,
			     struct netlink_ext_ack *extack)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);

	idr_remove(&head->handle_idr, prog->handle);
	cls_bpf_stop_offload(tp, prog, extack);
	list_del_rcu(&prog->link);
	tcf_unbind_filter(tp, &prog->res);
	if (tcf_exts_get_net(&prog->exts))
		call_rcu(&prog->rcu, cls_bpf_delete_prog_rcu);
	else
		__cls_bpf_delete_prog(prog);
}

static int cls_bpf_delete(struct tcf_proto *tp, void *arg, bool *last,
			  struct netlink_ext_ack *extack)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);

	__cls_bpf_delete(tp, arg, extack);
	*last = list_empty(&head->plist);
	return 0;
}

static void cls_bpf_destroy(struct tcf_proto *tp,
			    struct netlink_ext_ack *extack)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *prog, *tmp;

	list_for_each_entry_safe(prog, tmp, &head->plist, link)
		__cls_bpf_delete(tp, prog, extack);

	idr_destroy(&head->handle_idr);
	kfree_rcu(head, rcu);
}

static void *cls_bpf_get(struct tcf_proto *tp, u32 handle)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *prog;

	list_for_each_entry(prog, &head->plist, link) {
		if (prog->handle == handle)
			return prog;
	}

	return NULL;
}

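/* Build prog->filter from classic BPF instructions passed inline via
 * TCA_BPF_OPS/TCA_BPF_OPS_LEN; the opcode array is duplicated and kept
 * around for dumping the filter back to userspace.
 */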
static int cls_bpf_prog_from_ops(struct nlattr **tb, struct cls_bpf_prog *prog)
{
	struct sock_filter *bpf_ops;
	struct sock_fprog_kern fprog_tmp;
	struct bpf_prog *fp;
	u16 bpf_size, bpf_num_ops;
	int ret;

	bpf_num_ops = nla_get_u16(tb[TCA_BPF_OPS_LEN]);
	if (bpf_num_ops > BPF_MAXINSNS || bpf_num_ops == 0)
		return -EINVAL;

	bpf_size = bpf_num_ops * sizeof(*bpf_ops);
	if (bpf_size != nla_len(tb[TCA_BPF_OPS]))
		return -EINVAL;

	bpf_ops = kzalloc(bpf_size, GFP_KERNEL);
	if (bpf_ops == NULL)
		return -ENOMEM;

	memcpy(bpf_ops, nla_data(tb[TCA_BPF_OPS]), bpf_size);

	fprog_tmp.len = bpf_num_ops;
	fprog_tmp.filter = bpf_ops;

	ret = bpf_prog_create(&fp, &fprog_tmp);
	if (ret < 0) {
		kfree(bpf_ops);
		return ret;
	}

	prog->bpf_ops = bpf_ops;
	prog->bpf_num_ops = bpf_num_ops;
	prog->bpf_name = NULL;
	prog->filter = fp;

	return 0;
}

static int cls_bpf_prog_from_efd(struct nlattr **tb, struct cls_bpf_prog *prog,
				 u32 gen_flags, const struct tcf_proto *tp)
{
	struct bpf_prog *fp;
	char *name = NULL;
	bool skip_sw;
	u32 bpf_fd;

	bpf_fd = nla_get_u32(tb[TCA_BPF_FD]);
	skip_sw = gen_flags & TCA_CLS_FLAGS_SKIP_SW;

	fp = bpf_prog_get_type_dev(bpf_fd, BPF_PROG_TYPE_SCHED_CLS, skip_sw);
	if (IS_ERR(fp))
		return PTR_ERR(fp);

	if (tb[TCA_BPF_NAME]) {
		name = nla_memdup(tb[TCA_BPF_NAME], GFP_KERNEL);
		if (!name) {
			bpf_prog_put(fp);
			return -ENOMEM;
		}
	}

	prog->bpf_ops = NULL;
	prog->bpf_name = name;
	prog->filter = fp;

	if (fp->dst_needed)
		tcf_block_netif_keep_dst(tp->chain->block);

	return 0;
}

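/* Common parameter parsing for add/replace: exactly one of the classic
 * (TCA_BPF_OPS*) or extended (TCA_BPF_FD) configurations must be present,
 * and the generic offload flags are validated against
 * CLS_BPF_SUPPORTED_GEN_FLAGS before the program is instantiated.
 */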
static int cls_bpf_set_parms(struct net *net, struct tcf_proto *tp,
			     struct cls_bpf_prog *prog, unsigned long base,
			     struct nlattr **tb, struct nlattr *est, bool ovr,
			     struct netlink_ext_ack *extack)
{
	bool is_bpf, is_ebpf, have_exts = false;
	u32 gen_flags = 0;
	int ret;

	is_bpf = tb[TCA_BPF_OPS_LEN] && tb[TCA_BPF_OPS];
	is_ebpf = tb[TCA_BPF_FD];
	if ((!is_bpf && !is_ebpf) || (is_bpf && is_ebpf))
		return -EINVAL;

	ret = tcf_exts_validate(net, tp, tb, est, &prog->exts, ovr, extack);
	if (ret < 0)
		return ret;

	if (tb[TCA_BPF_FLAGS]) {
		u32 bpf_flags = nla_get_u32(tb[TCA_BPF_FLAGS]);

		if (bpf_flags & ~TCA_BPF_FLAG_ACT_DIRECT)
			return -EINVAL;

		have_exts = bpf_flags & TCA_BPF_FLAG_ACT_DIRECT;
	}
	if (tb[TCA_BPF_FLAGS_GEN]) {
		gen_flags = nla_get_u32(tb[TCA_BPF_FLAGS_GEN]);
		if (gen_flags & ~CLS_BPF_SUPPORTED_GEN_FLAGS ||
		    !tc_flags_valid(gen_flags))
			return -EINVAL;
	}

	prog->exts_integrated = have_exts;
	prog->gen_flags = gen_flags;

	ret = is_bpf ? cls_bpf_prog_from_ops(tb, prog) :
		       cls_bpf_prog_from_efd(tb, prog, gen_flags, tp);
	if (ret < 0)
		return ret;

	if (tb[TCA_BPF_CLASSID]) {
		prog->res.classid = nla_get_u32(tb[TCA_BPF_CLASSID]);
		tcf_bind_filter(tp, &prog->res, base);
	}

	return 0;
}

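/* Netlink entry point for adding or replacing a filter: allocate the new
 * cls_bpf_prog, reserve (or take over) a handle in the IDR, parse the
 * parameters, offer the program to hardware and finally publish it on the
 * RCU-protected list, retiring the old program via call_rcu() on replace.
 */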
static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
			  struct tcf_proto *tp, unsigned long base,
			  u32 handle, struct nlattr **tca,
			  void **arg, bool ovr, struct netlink_ext_ack *extack)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *oldprog = *arg;
	struct nlattr *tb[TCA_BPF_MAX + 1];
	struct cls_bpf_prog *prog;
	int ret;

	if (tca[TCA_OPTIONS] == NULL)
		return -EINVAL;

	ret = nla_parse_nested(tb, TCA_BPF_MAX, tca[TCA_OPTIONS], bpf_policy,
			       NULL);
	if (ret < 0)
		return ret;

	prog = kzalloc(sizeof(*prog), GFP_KERNEL);
	if (!prog)
		return -ENOBUFS;

	ret = tcf_exts_init(&prog->exts, TCA_BPF_ACT, TCA_BPF_POLICE);
	if (ret < 0)
		goto errout;

	if (oldprog) {
		if (handle && oldprog->handle != handle) {
			ret = -EINVAL;
			goto errout;
		}
	}

	if (handle == 0) {
		handle = 1;
		ret = idr_alloc_u32(&head->handle_idr, prog, &handle,
				    INT_MAX, GFP_KERNEL);
	} else if (!oldprog) {
		ret = idr_alloc_u32(&head->handle_idr, prog, &handle,
				    handle, GFP_KERNEL);
	}

	if (ret)
		goto errout;
	prog->handle = handle;

	ret = cls_bpf_set_parms(net, tp, prog, base, tb, tca[TCA_RATE], ovr,
				extack);
	if (ret < 0)
		goto errout_idr;

	ret = cls_bpf_offload(tp, prog, oldprog, extack);
	if (ret)
		goto errout_parms;

	if (!tc_in_hw(prog->gen_flags))
		prog->gen_flags |= TCA_CLS_FLAGS_NOT_IN_HW;

	if (oldprog) {
		idr_replace(&head->handle_idr, prog, handle);
		list_replace_rcu(&oldprog->link, &prog->link);
		tcf_unbind_filter(tp, &oldprog->res);
		tcf_exts_get_net(&oldprog->exts);
		call_rcu(&oldprog->rcu, cls_bpf_delete_prog_rcu);
	} else {
		list_add_rcu(&prog->link, &head->plist);
	}

	*arg = prog;
	return 0;

errout_parms:
	cls_bpf_free_parms(prog);
errout_idr:
	if (!oldprog)
		idr_remove(&head->handle_idr, prog->handle);
errout:
	tcf_exts_destroy(&prog->exts);
	kfree(prog);
	return ret;
}

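/* Dump helpers: classic BPF filters are dumped back as raw opcodes, eBPF
 * filters as their optional name plus program id and tag so userspace can
 * correlate them with the loaded bpf(2) program.
 */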
static int cls_bpf_dump_bpf_info(const struct cls_bpf_prog *prog,
				 struct sk_buff *skb)
{
	struct nlattr *nla;

	if (nla_put_u16(skb, TCA_BPF_OPS_LEN, prog->bpf_num_ops))
		return -EMSGSIZE;

	nla = nla_reserve(skb, TCA_BPF_OPS, prog->bpf_num_ops *
			  sizeof(struct sock_filter));
	if (nla == NULL)
		return -EMSGSIZE;

	memcpy(nla_data(nla), prog->bpf_ops, nla_len(nla));

	return 0;
}

static int cls_bpf_dump_ebpf_info(const struct cls_bpf_prog *prog,
				  struct sk_buff *skb)
{
	struct nlattr *nla;

	if (prog->bpf_name &&
	    nla_put_string(skb, TCA_BPF_NAME, prog->bpf_name))
		return -EMSGSIZE;

	if (nla_put_u32(skb, TCA_BPF_ID, prog->filter->aux->id))
		return -EMSGSIZE;

	nla = nla_reserve(skb, TCA_BPF_TAG, sizeof(prog->filter->tag));
	if (nla == NULL)
		return -EMSGSIZE;

	memcpy(nla_data(nla), prog->filter->tag, nla_len(nla));

	return 0;
}

static int cls_bpf_dump(struct net *net, struct tcf_proto *tp, void *fh,
			struct sk_buff *skb, struct tcmsg *tm)
{
	struct cls_bpf_prog *prog = fh;
	struct nlattr *nest;
	u32 bpf_flags = 0;
	int ret;

	if (prog == NULL)
		return skb->len;

	tm->tcm_handle = prog->handle;

	cls_bpf_offload_update_stats(tp, prog);

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (prog->res.classid &&
	    nla_put_u32(skb, TCA_BPF_CLASSID, prog->res.classid))
		goto nla_put_failure;

	if (cls_bpf_is_ebpf(prog))
		ret = cls_bpf_dump_ebpf_info(prog, skb);
	else
		ret = cls_bpf_dump_bpf_info(prog, skb);
	if (ret)
		goto nla_put_failure;

	if (tcf_exts_dump(skb, &prog->exts) < 0)
		goto nla_put_failure;

	if (prog->exts_integrated)
		bpf_flags |= TCA_BPF_FLAG_ACT_DIRECT;
	if (bpf_flags && nla_put_u32(skb, TCA_BPF_FLAGS, bpf_flags))
		goto nla_put_failure;
	if (prog->gen_flags &&
	    nla_put_u32(skb, TCA_BPF_FLAGS_GEN, prog->gen_flags))
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &prog->exts) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nla_nest_cancel(skb, nest);
	return -1;
}

static void cls_bpf_bind_class(void *fh, u32 classid, unsigned long cl)
{
	struct cls_bpf_prog *prog = fh;

	if (prog && prog->res.classid == classid)
		prog->res.class = cl;
}

static void cls_bpf_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
	struct cls_bpf_head *head = rtnl_dereference(tp->root);
	struct cls_bpf_prog *prog;

	list_for_each_entry(prog, &head->plist, link) {
		if (arg->count < arg->skip)
			goto skip;
		if (arg->fn(tp, prog, arg) < 0) {
			arg->stop = 1;
			break;
		}
skip:
		arg->count++;
	}
}

static struct tcf_proto_ops cls_bpf_ops __read_mostly = {
	.kind		=	"bpf",
	.owner		=	THIS_MODULE,
	.classify	=	cls_bpf_classify,
	.init		=	cls_bpf_init,
	.destroy	=	cls_bpf_destroy,
	.get		=	cls_bpf_get,
	.change		=	cls_bpf_change,
	.delete		=	cls_bpf_delete,
	.walk		=	cls_bpf_walk,
	.dump		=	cls_bpf_dump,
	.bind_class	=	cls_bpf_bind_class,
};

static int __init cls_bpf_init_mod(void)
{
	return register_tcf_proto_ops(&cls_bpf_ops);
}

static void __exit cls_bpf_exit_mod(void)
{
	unregister_tcf_proto_ops(&cls_bpf_ops);
}

module_init(cls_bpf_init_mod);
module_exit(cls_bpf_exit_mod);