/*
 * net/sched/cls_cgroup.c	Control Group Classifier
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * Authors:	Thomas Graf <tgraf@suug.ch>
 */
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/rcupdate.h>
#include <net/rtnetlink.h>
#include <net/pkt_cls.h>
#include <net/sock.h>
#include <net/cls_cgroup.h>
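
/*
 * Per-tcf_proto state: a single filter instance holding the configured
 * extended matches and actions.  Replaced copies are freed via call_rcu()
 * so that readers in cls_cgroup_classify() can keep using the old head
 * until a grace period has elapsed.
 */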
struct cls_cgroup_head {
	u32			handle;
	struct tcf_exts		exts;
	struct tcf_ematch_tree	ematches;
	struct tcf_proto	*tp;
	struct rcu_head		rcu;
};
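
/*
 * Classify a packet by the net_cls cgroup classid of the task that
 * generated it.  tp->root is read with rcu_dereference_bh() because the
 * classify path runs with bottom halves disabled (see the softirq comment
 * below).
 */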
static int cls_cgroup_classify(struct sk_buff *skb, const struct tcf_proto *tp,
			       struct tcf_result *res)
{
	struct cls_cgroup_head *head = rcu_dereference_bh(tp->root);
	u32 classid;

	classid = task_cls_state(current)->classid;

	/*
	 * Due to the nature of the classifier it is required to ignore all
	 * packets originating from softirq context as accessing `current'
	 * would lead to false results.
	 *
	 * This test assumes that all callers of dev_queue_xmit() explicitly
	 * disable bh. Knowing this, it is possible to detect softirq based
	 * calls by looking at the number of nested bh disable calls because
	 * softirqs always disable bh.
	 */
	if (in_serving_softirq()) {
		/* If there is an sk_classid we'll use that. */
		if (!skb->sk)
			return -1;
		classid = skb->sk->sk_classid;
	}

	if (!classid)
		return -1;

	if (!tcf_em_tree_match(skb, &head->ematches, NULL))
		return -1;

	res->classid = classid;
	res->class = 0;
	return tcf_exts_exec(skb, &head->exts, res);
}
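
/*
 * The cgroup classifier keeps exactly one filter per tcf_proto, so the
 * per-element get/put handles are unused and init has nothing to set up.
 */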
static unsigned long cls_cgroup_get(struct tcf_proto *tp, u32 handle)
{
	return 0UL;
}

static void cls_cgroup_put(struct tcf_proto *tp, unsigned long f)
{
}

static int cls_cgroup_init(struct tcf_proto *tp)
{
	return 0;
}
static const struct nla_policy cgroup_policy[TCA_CGROUP_MAX + 1] = {
	[TCA_CGROUP_EMATCHES]	= { .type = NLA_NESTED },
};
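
/*
 * RCU callback: tear down a replaced or deleted head once no classify
 * call can still be looking at it.
 */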
static void cls_cgroup_destroy_rcu(struct rcu_head *root)
{
	struct cls_cgroup_head *head = container_of(root,
						    struct cls_cgroup_head,
						    rcu);

	tcf_exts_destroy(&head->exts);
	tcf_em_tree_destroy(&head->ematches);
	kfree(head);
}
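
/*
 * Create or update the filter.  A new head is built and validated off to
 * the side, published with rcu_assign_pointer(), and the old head (if any)
 * is freed after a grace period via cls_cgroup_destroy_rcu().
 */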
static int cls_cgroup_change(struct net *net, struct sk_buff *in_skb,
			     struct tcf_proto *tp, unsigned long base,
			     u32 handle, struct nlattr **tca,
			     unsigned long *arg, bool ovr)
{
	struct nlattr *tb[TCA_CGROUP_MAX + 1];
	struct cls_cgroup_head *head = rtnl_dereference(tp->root);
	struct cls_cgroup_head *new;
	struct tcf_ematch_tree t;
	struct tcf_exts e;
	int err;

	if (!tca[TCA_OPTIONS])
		return -EINVAL;

	if (!head && !handle)
		return -EINVAL;

	if (head && handle != head->handle)
		return -ENOENT;

	new = kzalloc(sizeof(*head), GFP_KERNEL);
	if (!new)
		return -ENOBUFS;

	tcf_exts_init(&new->exts, TCA_CGROUP_ACT, TCA_CGROUP_POLICE);
	if (head)
		new->handle = head->handle;
	else
		new->handle = handle;

	new->tp = tp;
	err = nla_parse_nested(tb, TCA_CGROUP_MAX, tca[TCA_OPTIONS],
			       cgroup_policy);
	if (err < 0)
		goto errout;

	tcf_exts_init(&e, TCA_CGROUP_ACT, TCA_CGROUP_POLICE);
	err = tcf_exts_validate(net, tp, tb, tca[TCA_RATE], &e, ovr);
	if (err < 0)
		goto errout;

	err = tcf_em_tree_validate(tp, tb[TCA_CGROUP_EMATCHES], &t);
	if (err < 0) {
		tcf_exts_destroy(&e);
		goto errout;
	}

	tcf_exts_change(tp, &new->exts, &e);
	tcf_em_tree_change(tp, &new->ematches, &t);

	rcu_assign_pointer(tp->root, new);
	if (head)
		call_rcu(&head->rcu, cls_cgroup_destroy_rcu);
	return 0;
errout:
	kfree(new);
	return err;
}
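
/* Unlink the head from the tcf_proto and free it after a grace period. */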
static void cls_cgroup_destroy(struct tcf_proto *tp)
{
	struct cls_cgroup_head *head = rtnl_dereference(tp->root);

	if (head) {
		RCU_INIT_POINTER(tp->root, NULL);
		call_rcu(&head->rcu, cls_cgroup_destroy_rcu);
	}
}
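
/*
 * Individual elements cannot be deleted; the whole filter is removed via
 * cls_cgroup_destroy() instead.
 */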
static int cls_cgroup_delete(struct tcf_proto *tp, unsigned long arg)
{
	return -EOPNOTSUPP;
}
static void cls_cgroup_walk(struct tcf_proto *tp, struct tcf_walker *arg)
{
	struct cls_cgroup_head *head = rtnl_dereference(tp->root);

	if (arg->count < arg->skip)
		goto skip;

	if (arg->fn(tp, (unsigned long) head, arg) < 0) {
		arg->stop = 1;
		return;
	}
skip:
	arg->count++;
}
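
/* Dump the filter configuration (handle, ematches, actions) to netlink. */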
static int cls_cgroup_dump(struct net *net, struct tcf_proto *tp, unsigned long fh,
			   struct sk_buff *skb, struct tcmsg *t)
{
	struct cls_cgroup_head *head = rtnl_dereference(tp->root);
	unsigned char *b = skb_tail_pointer(skb);
	struct nlattr *nest;

	t->tcm_handle = head->handle;

	nest = nla_nest_start(skb, TCA_OPTIONS);
	if (nest == NULL)
		goto nla_put_failure;

	if (tcf_exts_dump(skb, &head->exts) < 0 ||
	    tcf_em_tree_dump(skb, &head->ematches, TCA_CGROUP_EMATCHES) < 0)
		goto nla_put_failure;

	nla_nest_end(skb, nest);

	if (tcf_exts_dump_stats(skb, &head->exts) < 0)
		goto nla_put_failure;

	return skb->len;

nla_put_failure:
	nlmsg_trim(skb, b);
	return -1;
}
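
/* Classifier registration: ties the "cgroup" kind to the handlers above. */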
static struct tcf_proto_ops cls_cgroup_ops __read_mostly = {
	.kind		=	"cgroup",
	.init		=	cls_cgroup_init,
	.change		=	cls_cgroup_change,
	.classify	=	cls_cgroup_classify,
	.destroy	=	cls_cgroup_destroy,
	.get		=	cls_cgroup_get,
	.put		=	cls_cgroup_put,
	.delete		=	cls_cgroup_delete,
	.walk		=	cls_cgroup_walk,
	.dump		=	cls_cgroup_dump,
	.owner		=	THIS_MODULE,
};
static int __init init_cgroup_cls(void)
{
	return register_tcf_proto_ops(&cls_cgroup_ops);
}

static void __exit exit_cgroup_cls(void)
{
	unregister_tcf_proto_ops(&cls_cgroup_ops);
}
module_init(init_cgroup_cls);
module_exit(exit_cgroup_cls);
MODULE_LICENSE("GPL");
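
/*
 * Example usage sketch (device name, cgroup name, and classid values are
 * illustrative, and the cgroup v1 mount point may differ on your system):
 *
 *   # tag tasks in a net_cls cgroup with classid 1:10 (0x major:minor)
 *   mkdir /sys/fs/cgroup/net_cls/mygroup
 *   echo 0x00010010 > /sys/fs/cgroup/net_cls/mygroup/net_cls.classid
 *
 *   # attach the cgroup classifier so tagged traffic is steered by classid
 *   tc filter add dev eth0 parent 1: protocol ip prio 10 handle 1: cgroup
 */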