net/sched/cls_bpf.c

/*
 * Berkeley Packet Filter based traffic classifier
 *
 * Might be used to classify traffic through flexible, user-defined and
 * possibly JIT-ed BPF filters for traffic control as an alternative to
 * ematches.
 *
 * (C) 2013 Daniel Borkmann <dborkman@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/skbuff.h>
#include <linux/filter.h>
#include <linux/bpf.h>

#include <net/rtnetlink.h>
#include <net/pkt_cls.h>
#include <net/sock.h>

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Daniel Borkmann <dborkman@redhat.com>");
MODULE_DESCRIPTION("TC BPF based classifier");

#define CLS_BPF_NAME_LEN        256
#define CLS_BPF_SUPPORTED_GEN_FLAGS \
        (TCA_CLS_FLAGS_SKIP_HW | TCA_CLS_FLAGS_SKIP_SW)

struct cls_bpf_head {
        struct list_head plist;
        u32 hgen;
        struct rcu_head rcu;
};

struct cls_bpf_prog {
        struct bpf_prog *filter;
        struct list_head link;
        struct tcf_result res;
        bool exts_integrated;
        bool offloaded;
        u32 gen_flags;
        struct tcf_exts exts;
        u32 handle;
        u16 bpf_num_ops;
        struct sock_filter *bpf_ops;
        const char *bpf_name;
        struct tcf_proto *tp;
        struct rcu_head rcu;
};

static const struct nla_policy bpf_policy[TCA_BPF_MAX + 1] = {
        [TCA_BPF_CLASSID]       = { .type = NLA_U32 },
        [TCA_BPF_FLAGS]         = { .type = NLA_U32 },
        [TCA_BPF_FLAGS_GEN]     = { .type = NLA_U32 },
        [TCA_BPF_FD]            = { .type = NLA_U32 },
        [TCA_BPF_NAME]          = { .type = NLA_NUL_STRING,
                                    .len = CLS_BPF_NAME_LEN },
        [TCA_BPF_OPS_LEN]       = { .type = NLA_U16 },
        [TCA_BPF_OPS]           = { .type = NLA_BINARY,
                                    .len = sizeof(struct sock_filter) * BPF_MAXINSNS },
};

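/* Sanitize the verdict of a direct-action program: opcodes the
 * classifier understands are passed through unchanged, anything else
 * is mapped to TC_ACT_UNSPEC so that cls_bpf_classify() moves on to
 * the next program on the list.
 */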
static int cls_bpf_exec_opcode(int code)
{
        switch (code) {
        case TC_ACT_OK:
        case TC_ACT_SHOT:
        case TC_ACT_STOLEN:
        case TC_ACT_TRAP:
        case TC_ACT_REDIRECT:
        case TC_ACT_UNSPEC:
                return code;
        default:
                return TC_ACT_UNSPEC;
        }
}

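/* Classify a packet by running each attached program in list order
 * under RCU. At ingress the MAC header is pushed back on so programs
 * see the full frame; skip-software programs are not executed at all.
 * The first program returning a verdict other than "continue"
 * terminates the walk.
 */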
static int cls_bpf_classify(struct sk_buff *skb, const struct tcf_proto *tp,
                            struct tcf_result *res)
{
        struct cls_bpf_head *head = rcu_dereference_bh(tp->root);
        bool at_ingress = skb_at_tc_ingress(skb);
        struct cls_bpf_prog *prog;
        int ret = -1;

        /* Needed here for accessing maps. */
        rcu_read_lock();
        list_for_each_entry_rcu(prog, &head->plist, link) {
                int filter_res;

                qdisc_skb_cb(skb)->tc_classid = prog->res.classid;

                if (tc_skip_sw(prog->gen_flags)) {
                        filter_res = prog->exts_integrated ? TC_ACT_UNSPEC : 0;
                } else if (at_ingress) {
                        /* It is safe to push/pull even if skb_shared() */
                        __skb_push(skb, skb->mac_len);
                        bpf_compute_data_end(skb);
                        filter_res = BPF_PROG_RUN(prog->filter, skb);
                        __skb_pull(skb, skb->mac_len);
                } else {
                        bpf_compute_data_end(skb);
                        filter_res = BPF_PROG_RUN(prog->filter, skb);
                }

                if (prog->exts_integrated) {
                        res->class   = 0;
                        res->classid = TC_H_MAJ(prog->res.classid) |
                                       qdisc_skb_cb(skb)->tc_classid;

                        ret = cls_bpf_exec_opcode(filter_res);
                        if (ret == TC_ACT_UNSPEC)
                                continue;
                        break;
                }

                if (filter_res == 0)
                        continue;
                if (filter_res != -1) {
                        res->class   = 0;
                        res->classid = filter_res;
                } else {
                        *res = prog->res;
                }

                ret = tcf_exts_exec(skb, &prog->exts, res);
                if (ret < 0)
                        continue;

                break;
        }
        rcu_read_unlock();

        return ret;
}

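/* Classic BPF programs keep their original sock_filter ops around for
 * dumping; eBPF programs do not, which is what tells the two apart.
 */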
static bool cls_bpf_is_ebpf(const struct cls_bpf_prog *prog)
{
        return !prog->bpf_ops;
}

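/* Issue a single classifier-offload command (add/replace/destroy/stats)
 * to the device via ndo_setup_tc(). On a successful add or replace,
 * the program is marked as resident in hardware.
 */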
static int cls_bpf_offload_cmd(struct tcf_proto *tp, struct cls_bpf_prog *prog,
                               enum tc_clsbpf_command cmd)
{
        struct net_device *dev = tp->q->dev_queue->dev;
        struct tc_cls_bpf_offload bpf_offload = {};
        struct tc_to_netdev offload;
        int err;

        offload.type = TC_SETUP_CLSBPF;
        offload.cls_bpf = &bpf_offload;

        bpf_offload.command = cmd;
        bpf_offload.exts = &prog->exts;
        bpf_offload.prog = prog->filter;
        bpf_offload.name = prog->bpf_name;
        bpf_offload.exts_integrated = prog->exts_integrated;
        bpf_offload.gen_flags = prog->gen_flags;

        err = dev->netdev_ops->ndo_setup_tc(dev, tp->q->handle,
                                            tp->chain->index,
                                            tp->protocol, &offload);

        if (!err && (cmd == TC_CLSBPF_ADD || cmd == TC_CLSBPF_REPLACE))
                prog->gen_flags |= TCA_CLS_FLAGS_IN_HW;

        return err;
}

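/* Reconcile hardware state when a program is added or replaced:
 * replace in hardware while the device can still offload, fall back
 * to destroying the old hardware entry when only software remains
 * viable, and treat offload failures as fatal only when software
 * execution was explicitly skipped.
 */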
static int cls_bpf_offload(struct tcf_proto *tp, struct cls_bpf_prog *prog,
                           struct cls_bpf_prog *oldprog)
{
        struct net_device *dev = tp->q->dev_queue->dev;
        struct cls_bpf_prog *obj = prog;
        enum tc_clsbpf_command cmd;
        bool skip_sw;
        int ret;

        skip_sw = tc_skip_sw(prog->gen_flags) ||
                (oldprog && tc_skip_sw(oldprog->gen_flags));

        if (oldprog && oldprog->offloaded) {
                if (tc_should_offload(dev, tp, prog->gen_flags)) {
                        cmd = TC_CLSBPF_REPLACE;
                } else if (!tc_skip_sw(prog->gen_flags)) {
                        obj = oldprog;
                        cmd = TC_CLSBPF_DESTROY;
                } else {
                        return -EINVAL;
                }
        } else {
                if (!tc_should_offload(dev, tp, prog->gen_flags))
                        return skip_sw ? -EINVAL : 0;
                cmd = TC_CLSBPF_ADD;
        }

        ret = cls_bpf_offload_cmd(tp, obj, cmd);
        if (ret)
                return skip_sw ? ret : 0;

        obj->offloaded = true;
        if (oldprog)
                oldprog->offloaded = false;

        return 0;
}

static void cls_bpf_stop_offload(struct tcf_proto *tp,
                                 struct cls_bpf_prog *prog)
{
        int err;

        if (!prog->offloaded)
                return;

        err = cls_bpf_offload_cmd(tp, prog, TC_CLSBPF_DESTROY);
        if (err) {
                pr_err("Stopping hardware offload failed: %d\n", err);
                return;
        }

        prog->offloaded = false;
}

static void cls_bpf_offload_update_stats(struct tcf_proto *tp,
                                         struct cls_bpf_prog *prog)
{
        if (!prog->offloaded)
                return;

        cls_bpf_offload_cmd(tp, prog, TC_CLSBPF_STATS);
}

static int cls_bpf_init(struct tcf_proto *tp)
{
        struct cls_bpf_head *head;

        head = kzalloc(sizeof(*head), GFP_KERNEL);
        if (head == NULL)
                return -ENOBUFS;

        INIT_LIST_HEAD_RCU(&head->plist);
        rcu_assign_pointer(tp->root, head);

        return 0;
}

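/* Release everything a program owns: its actions, the classic or
 * extended BPF program itself, and the auxiliary name/ops buffers.
 * Called directly on setup errors, or via RCU once readers are gone.
 */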
static void __cls_bpf_delete_prog(struct cls_bpf_prog *prog)
{
        tcf_exts_destroy(&prog->exts);

        if (cls_bpf_is_ebpf(prog))
                bpf_prog_put(prog->filter);
        else
                bpf_prog_destroy(prog->filter);

        kfree(prog->bpf_name);
        kfree(prog->bpf_ops);
        kfree(prog);
}

static void cls_bpf_delete_prog_rcu(struct rcu_head *rcu)
{
        __cls_bpf_delete_prog(container_of(rcu, struct cls_bpf_prog, rcu));
}

static void __cls_bpf_delete(struct tcf_proto *tp, struct cls_bpf_prog *prog)
{
        cls_bpf_stop_offload(tp, prog);
        list_del_rcu(&prog->link);
        tcf_unbind_filter(tp, &prog->res);
        call_rcu(&prog->rcu, cls_bpf_delete_prog_rcu);
}

static int cls_bpf_delete(struct tcf_proto *tp, unsigned long arg, bool *last)
{
        struct cls_bpf_head *head = rtnl_dereference(tp->root);

        __cls_bpf_delete(tp, (struct cls_bpf_prog *) arg);
        *last = list_empty(&head->plist);
        return 0;
}

static void cls_bpf_destroy(struct tcf_proto *tp)
{
        struct cls_bpf_head *head = rtnl_dereference(tp->root);
        struct cls_bpf_prog *prog, *tmp;

        list_for_each_entry_safe(prog, tmp, &head->plist, link)
                __cls_bpf_delete(tp, prog);

        kfree_rcu(head, rcu);
}

static unsigned long cls_bpf_get(struct tcf_proto *tp, u32 handle)
{
        struct cls_bpf_head *head = rtnl_dereference(tp->root);
        struct cls_bpf_prog *prog;
        unsigned long ret = 0UL;

        list_for_each_entry(prog, &head->plist, link) {
                if (prog->handle == handle) {
                        ret = (unsigned long) prog;
                        break;
                }
        }

        return ret;
}

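/* Build a classic BPF program from TCA_BPF_OPS/TCA_BPF_OPS_LEN:
 * validate the instruction count against BPF_MAXINSNS and the
 * attribute length, copy the opcodes out of the netlink attribute,
 * and hand them to bpf_prog_create() to be checked and set up for
 * execution.
 */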
static int cls_bpf_prog_from_ops(struct nlattr **tb, struct cls_bpf_prog *prog)
{
        struct sock_filter *bpf_ops;
        struct sock_fprog_kern fprog_tmp;
        struct bpf_prog *fp;
        u16 bpf_size, bpf_num_ops;
        int ret;

        bpf_num_ops = nla_get_u16(tb[TCA_BPF_OPS_LEN]);
        if (bpf_num_ops > BPF_MAXINSNS || bpf_num_ops == 0)
                return -EINVAL;

        bpf_size = bpf_num_ops * sizeof(*bpf_ops);
        if (bpf_size != nla_len(tb[TCA_BPF_OPS]))
                return -EINVAL;

        bpf_ops = kzalloc(bpf_size, GFP_KERNEL);
        if (bpf_ops == NULL)
                return -ENOMEM;

        memcpy(bpf_ops, nla_data(tb[TCA_BPF_OPS]), bpf_size);

        fprog_tmp.len = bpf_num_ops;
        fprog_tmp.filter = bpf_ops;

        ret = bpf_prog_create(&fp, &fprog_tmp);
        if (ret < 0) {
                kfree(bpf_ops);
                return ret;
        }

        prog->bpf_ops = bpf_ops;
        prog->bpf_num_ops = bpf_num_ops;
        prog->bpf_name = NULL;
        prog->filter = fp;

        return 0;
}

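/* Attach an already-loaded eBPF program by taking a reference on the
 * file descriptor from TCA_BPF_FD, insisting on type
 * BPF_PROG_TYPE_SCHED_CLS. The optional TCA_BPF_NAME is kept for
 * dumps; when a program that needs dst entries is attached at egress,
 * netif_keep_dst() makes the device retain them.
 */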
static int cls_bpf_prog_from_efd(struct nlattr **tb, struct cls_bpf_prog *prog,
                                 const struct tcf_proto *tp)
{
        struct bpf_prog *fp;
        char *name = NULL;
        u32 bpf_fd;

        bpf_fd = nla_get_u32(tb[TCA_BPF_FD]);

        fp = bpf_prog_get_type(bpf_fd, BPF_PROG_TYPE_SCHED_CLS);
        if (IS_ERR(fp))
                return PTR_ERR(fp);

        if (tb[TCA_BPF_NAME]) {
                name = nla_memdup(tb[TCA_BPF_NAME], GFP_KERNEL);
                if (!name) {
                        bpf_prog_put(fp);
                        return -ENOMEM;
                }
        }

        prog->bpf_ops = NULL;
        prog->bpf_name = name;
        prog->filter = fp;

        if (fp->dst_needed && !(tp->q->flags & TCQ_F_INGRESS))
                netif_keep_dst(qdisc_dev(tp->q));

        return 0;
}

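/* Parse and apply the netlink configuration for a new or replacement
 * program: exactly one of classic opcodes (TCA_BPF_OPS) or an eBPF fd
 * (TCA_BPF_FD) must be given, flags are checked against the supported
 * sets, and the classid binding and actions are installed last.
 */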
static int cls_bpf_modify_existing(struct net *net, struct tcf_proto *tp,
                                   struct cls_bpf_prog *prog,
                                   unsigned long base, struct nlattr **tb,
                                   struct nlattr *est, bool ovr)
{
        bool is_bpf, is_ebpf, have_exts = false;
        struct tcf_exts exts;
        u32 gen_flags = 0;
        int ret;

        is_bpf = tb[TCA_BPF_OPS_LEN] && tb[TCA_BPF_OPS];
        is_ebpf = tb[TCA_BPF_FD];
        if ((!is_bpf && !is_ebpf) || (is_bpf && is_ebpf))
                return -EINVAL;

        ret = tcf_exts_init(&exts, TCA_BPF_ACT, TCA_BPF_POLICE);
        if (ret < 0)
                return ret;
        ret = tcf_exts_validate(net, tp, tb, est, &exts, ovr);
        if (ret < 0)
                goto errout;

        if (tb[TCA_BPF_FLAGS]) {
                u32 bpf_flags = nla_get_u32(tb[TCA_BPF_FLAGS]);

                if (bpf_flags & ~TCA_BPF_FLAG_ACT_DIRECT) {
                        ret = -EINVAL;
                        goto errout;
                }

                have_exts = bpf_flags & TCA_BPF_FLAG_ACT_DIRECT;
        }
        if (tb[TCA_BPF_FLAGS_GEN]) {
                gen_flags = nla_get_u32(tb[TCA_BPF_FLAGS_GEN]);
                if (gen_flags & ~CLS_BPF_SUPPORTED_GEN_FLAGS ||
                    !tc_flags_valid(gen_flags)) {
                        ret = -EINVAL;
                        goto errout;
                }
        }

        prog->exts_integrated = have_exts;
        prog->gen_flags = gen_flags;

        ret = is_bpf ? cls_bpf_prog_from_ops(tb, prog) :
                       cls_bpf_prog_from_efd(tb, prog, tp);
        if (ret < 0)
                goto errout;

        if (tb[TCA_BPF_CLASSID]) {
                prog->res.classid = nla_get_u32(tb[TCA_BPF_CLASSID]);
                tcf_bind_filter(tp, &prog->res, base);
        }

        tcf_exts_change(tp, &prog->exts, &exts);
        return 0;

errout:
        tcf_exts_destroy(&exts);
        return ret;
}

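/* Pick the next free handle by probing upwards from the last one
 * handed out, skipping 0x7FFFFFFF and wrapping back to 1; gives up
 * and returns 0 if no free handle turns up.
 */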
static u32 cls_bpf_grab_new_handle(struct tcf_proto *tp,
                                   struct cls_bpf_head *head)
{
        unsigned int i = 0x80000000;
        u32 handle;

        do {
                if (++head->hgen == 0x7FFFFFFF)
                        head->hgen = 1;
        } while (--i > 0 && cls_bpf_get(tp, head->hgen));

        if (unlikely(i == 0)) {
                pr_err("Insufficient number of handles\n");
                handle = 0;
        } else {
                handle = head->hgen;
        }

        return handle;
}

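/* Create or replace a filter. A replacement program is fully built
 * and offloaded before being spliced into the list in place of the
 * old one, so concurrent readers see either the complete old or the
 * complete new program; the old one is freed after an RCU grace
 * period.
 */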
static int cls_bpf_change(struct net *net, struct sk_buff *in_skb,
                          struct tcf_proto *tp, unsigned long base,
                          u32 handle, struct nlattr **tca,
                          unsigned long *arg, bool ovr)
{
        struct cls_bpf_head *head = rtnl_dereference(tp->root);
        struct cls_bpf_prog *oldprog = (struct cls_bpf_prog *) *arg;
        struct nlattr *tb[TCA_BPF_MAX + 1];
        struct cls_bpf_prog *prog;
        int ret;

        if (tca[TCA_OPTIONS] == NULL)
                return -EINVAL;

        ret = nla_parse_nested(tb, TCA_BPF_MAX, tca[TCA_OPTIONS], bpf_policy,
                               NULL);
        if (ret < 0)
                return ret;

        prog = kzalloc(sizeof(*prog), GFP_KERNEL);
        if (!prog)
                return -ENOBUFS;

        ret = tcf_exts_init(&prog->exts, TCA_BPF_ACT, TCA_BPF_POLICE);
        if (ret < 0)
                goto errout;

        if (oldprog) {
                if (handle && oldprog->handle != handle) {
                        ret = -EINVAL;
                        goto errout;
                }
        }

        if (handle == 0)
                prog->handle = cls_bpf_grab_new_handle(tp, head);
        else
                prog->handle = handle;
        if (prog->handle == 0) {
                ret = -EINVAL;
                goto errout;
        }

        ret = cls_bpf_modify_existing(net, tp, prog, base, tb, tca[TCA_RATE],
                                      ovr);
        if (ret < 0)
                goto errout;

        ret = cls_bpf_offload(tp, prog, oldprog);
        if (ret) {
                __cls_bpf_delete_prog(prog);
                return ret;
        }

        if (!tc_in_hw(prog->gen_flags))
                prog->gen_flags |= TCA_CLS_FLAGS_NOT_IN_HW;

        if (oldprog) {
                list_replace_rcu(&oldprog->link, &prog->link);
                tcf_unbind_filter(tp, &oldprog->res);
                call_rcu(&oldprog->rcu, cls_bpf_delete_prog_rcu);
        } else {
                list_add_rcu(&prog->link, &head->plist);
        }

        *arg = (unsigned long) prog;
        return 0;

errout:
        tcf_exts_destroy(&prog->exts);
        kfree(prog);
        return ret;
}

static int cls_bpf_dump_bpf_info(const struct cls_bpf_prog *prog,
                                 struct sk_buff *skb)
{
        struct nlattr *nla;

        if (nla_put_u16(skb, TCA_BPF_OPS_LEN, prog->bpf_num_ops))
                return -EMSGSIZE;

        nla = nla_reserve(skb, TCA_BPF_OPS, prog->bpf_num_ops *
                          sizeof(struct sock_filter));
        if (nla == NULL)
                return -EMSGSIZE;

        memcpy(nla_data(nla), prog->bpf_ops, nla_len(nla));

        return 0;
}

static int cls_bpf_dump_ebpf_info(const struct cls_bpf_prog *prog,
                                  struct sk_buff *skb)
{
        struct nlattr *nla;

        if (prog->bpf_name &&
            nla_put_string(skb, TCA_BPF_NAME, prog->bpf_name))
                return -EMSGSIZE;

        if (nla_put_u32(skb, TCA_BPF_ID, prog->filter->aux->id))
                return -EMSGSIZE;

        nla = nla_reserve(skb, TCA_BPF_TAG, sizeof(prog->filter->tag));
        if (nla == NULL)
                return -EMSGSIZE;

        memcpy(nla_data(nla), prog->filter->tag, nla_len(nla));

        return 0;
}

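/* Dump one filter back to user space: refresh hardware stats first,
 * then emit the classid, the classic-or-extended program description,
 * actions and flags inside a nested TCA_OPTIONS attribute.
 */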
static int cls_bpf_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
                        struct sk_buff *skb, struct tcmsg *tm)
{
        struct cls_bpf_prog *prog = (struct cls_bpf_prog *) fh;
        struct nlattr *nest;
        u32 bpf_flags = 0;
        int ret;

        if (prog == NULL)
                return skb->len;

        tm->tcm_handle = prog->handle;

        cls_bpf_offload_update_stats(tp, prog);

        nest = nla_nest_start(skb, TCA_OPTIONS);
        if (nest == NULL)
                goto nla_put_failure;

        if (prog->res.classid &&
            nla_put_u32(skb, TCA_BPF_CLASSID, prog->res.classid))
                goto nla_put_failure;

        if (cls_bpf_is_ebpf(prog))
                ret = cls_bpf_dump_ebpf_info(prog, skb);
        else
                ret = cls_bpf_dump_bpf_info(prog, skb);
        if (ret)
                goto nla_put_failure;

        if (tcf_exts_dump(skb, &prog->exts) < 0)
                goto nla_put_failure;

        if (prog->exts_integrated)
                bpf_flags |= TCA_BPF_FLAG_ACT_DIRECT;
        if (bpf_flags && nla_put_u32(skb, TCA_BPF_FLAGS, bpf_flags))
                goto nla_put_failure;
        if (prog->gen_flags &&
            nla_put_u32(skb, TCA_BPF_FLAGS_GEN, prog->gen_flags))
                goto nla_put_failure;

        nla_nest_end(skb, nest);

        if (tcf_exts_dump_stats(skb, &prog->exts) < 0)
                goto nla_put_failure;

        return skb->len;

nla_put_failure:
        nla_nest_cancel(skb, nest);
        return -1;
}

static void cls_bpf_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
        struct cls_bpf_head *head = rtnl_dereference(tp->root);
        struct cls_bpf_prog *prog;

        list_for_each_entry(prog, &head->plist, link) {
                if (arg->count < arg->skip)
                        goto skip;
                if (arg->fn(tp, (unsigned long) prog, arg) < 0) {
                        arg->stop = 1;
                        break;
                }
skip:
                arg->count++;
        }
}

static struct tcf_proto_ops cls_bpf_ops __read_mostly = {
        .kind           = "bpf",
        .owner          = THIS_MODULE,
        .classify       = cls_bpf_classify,
        .init           = cls_bpf_init,
        .destroy        = cls_bpf_destroy,
        .get            = cls_bpf_get,
        .change         = cls_bpf_change,
        .delete         = cls_bpf_delete,
        .walk           = cls_bpf_walk,
        .dump           = cls_bpf_dump,
};

static int __init cls_bpf_init_mod(void)
{
        return register_tcf_proto_ops(&cls_bpf_ops);
}

static void __exit cls_bpf_exit_mod(void)
{
        unregister_tcf_proto_ops(&cls_bpf_ops);
}

module_init(cls_bpf_init_mod);
module_exit(cls_bpf_exit_mod);