Import 2.3.10pre3
[davej-history.git] / net / sched / cls_api.c
blob9d2a95ea6316a20f6ec2d206fb073ee1d7ed9d48
/*
 * net/sched/cls_api.c	Packet classifier API.
 *
 *		This program is free software; you can redistribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
 *
 * Changes:
 *
 * Eduardo J. Blanco <ejbs@netlabs.com.uy> :990222: kmod support
 */
16 #include <asm/uaccess.h>
17 #include <asm/system.h>
18 #include <asm/bitops.h>
19 #include <linux/config.h>
20 #include <linux/types.h>
21 #include <linux/kernel.h>
22 #include <linux/sched.h>
23 #include <linux/string.h>
24 #include <linux/mm.h>
25 #include <linux/socket.h>
26 #include <linux/sockios.h>
27 #include <linux/in.h>
28 #include <linux/errno.h>
29 #include <linux/interrupt.h>
30 #include <linux/netdevice.h>
31 #include <linux/skbuff.h>
32 #include <linux/rtnetlink.h>
33 #include <linux/init.h>
34 #include <linux/kmod.h>
35 #include <net/sock.h>
36 #include <net/pkt_sched.h>
38 /* The list of all installed classifier types */
40 static struct tcf_proto_ops *tcf_proto_base;
42 /* Protects list of registered TC modules. It is pure SMP lock. */
43 static rwlock_t cls_mod_lock = RW_LOCK_UNLOCKED;
45 /* Find classifier type by string name */
47 struct tcf_proto_ops * tcf_proto_lookup_ops(struct rtattr *kind)
49 struct tcf_proto_ops *t = NULL;
51 if (kind) {
52 read_lock(&cls_mod_lock);
53 for (t = tcf_proto_base; t; t = t->next) {
54 if (rtattr_strcmp(kind, t->kind) == 0)
55 break;
57 read_unlock(&cls_mod_lock);
59 return t;
62 /* Register(unregister) new classifier type */
64 int register_tcf_proto_ops(struct tcf_proto_ops *ops)
66 struct tcf_proto_ops *t, **tp;
68 write_lock(&cls_mod_lock);
69 for (tp = &tcf_proto_base; (t=*tp) != NULL; tp = &t->next) {
70 if (strcmp(ops->kind, t->kind) == 0) {
71 write_unlock(&cls_mod_lock);
72 return -EEXIST;
76 ops->next = NULL;
77 *tp = ops;
78 write_unlock(&cls_mod_lock);
79 return 0;
82 int unregister_tcf_proto_ops(struct tcf_proto_ops *ops)
84 struct tcf_proto_ops *t, **tp;
86 write_lock(&cls_mod_lock);
87 for (tp = &tcf_proto_base; (t=*tp) != NULL; tp = &t->next)
88 if (t == ops)
89 break;
91 if (!t) {
92 write_unlock(&cls_mod_lock);
93 return -ENOENT;
95 *tp = t->next;
96 write_unlock(&cls_mod_lock);
97 return 0;
100 #ifdef CONFIG_RTNETLINK
102 static int tfilter_notify(struct sk_buff *oskb, struct nlmsghdr *n,
103 struct tcf_proto *tp, unsigned long fh, int event);
106 /* Select new prio value from the range, managed by kernel. */
108 static __inline__ u32 tcf_auto_prio(struct tcf_proto *tp)
110 u32 first = TC_H_MAKE(0xC0000000U,0U);
112 if (tp)
113 first = tp->prio-1;
115 return first;
118 /* Add/change/delete/get a filter node */
120 static int tc_ctl_tfilter(struct sk_buff *skb, struct nlmsghdr *n, void *arg)
122 struct rtattr **tca = arg;
123 struct tcmsg *t = NLMSG_DATA(n);
124 u32 protocol = TC_H_MIN(t->tcm_info);
125 u32 prio = TC_H_MAJ(t->tcm_info);
126 u32 nprio = prio;
127 u32 parent = t->tcm_parent;
128 struct device *dev;
129 struct Qdisc *q;
130 struct tcf_proto **back, **chain;
131 struct tcf_proto *tp = NULL;
132 struct tcf_proto_ops *tp_ops;
133 struct Qdisc_class_ops *cops;
134 unsigned long cl = 0;
135 unsigned long fh;
136 int err;
138 if (prio == 0) {
139 /* If no priority is given, user wants we allocated it. */
140 if (n->nlmsg_type != RTM_NEWTFILTER || !(n->nlmsg_flags&NLM_F_CREATE))
141 return -ENOENT;
142 prio = TC_H_MAKE(0x80000000U,0U);
145 /* Find head of filter chain. */
147 /* Find link */
148 if ((dev = dev_get_by_index(t->tcm_ifindex)) == NULL)
149 return -ENODEV;
151 /* Find qdisc */
152 if (!parent) {
153 q = dev->qdisc_sleeping;
154 parent = q->handle;
155 } else if ((q = qdisc_lookup(dev, TC_H_MAJ(t->tcm_parent))) == NULL)
156 return -EINVAL;
158 /* Is it classful? */
159 if ((cops = q->ops->cl_ops) == NULL)
160 return -EINVAL;
162 /* Do we search for filter, attached to class? */
163 if (TC_H_MIN(parent)) {
164 cl = cops->get(q, parent);
165 if (cl == 0)
166 return -ENOENT;
169 /* And the last stroke */
170 chain = cops->tcf_chain(q, cl);
171 err = -EINVAL;
172 if (chain == NULL)
173 goto errout;
175 /* Check the chain for existence of proto-tcf with this priority */
176 for (back = chain; (tp=*back) != NULL; back = &tp->next) {
177 if (tp->prio >= prio) {
178 if (tp->prio == prio) {
179 if (!nprio || (tp->protocol != protocol && protocol))
180 goto errout;
181 } else
182 tp = NULL;
183 break;
187 if (tp == NULL) {
188 /* Proto-tcf does not exist, create new one */
190 if (tca[TCA_KIND-1] == NULL || !protocol)
191 goto errout;
193 err = -ENOENT;
194 if (n->nlmsg_type != RTM_NEWTFILTER || !(n->nlmsg_flags&NLM_F_CREATE))
195 goto errout;
198 /* Create new proto tcf */
200 err = -ENOBUFS;
201 if ((tp = kmalloc(sizeof(*tp), GFP_KERNEL)) == NULL)
202 goto errout;
203 tp_ops = tcf_proto_lookup_ops(tca[TCA_KIND-1]);
204 #ifdef CONFIG_KMOD
205 if (tp_ops==NULL && tca[TCA_KIND-1] != NULL) {
206 struct rtattr *kind = tca[TCA_KIND-1];
207 char module_name[4 + IFNAMSIZ + 1];
209 if (RTA_PAYLOAD(kind) <= IFNAMSIZ) {
210 sprintf(module_name, "cls_%s", (char*)RTA_DATA(kind));
211 request_module (module_name);
212 tp_ops = tcf_proto_lookup_ops(kind);
215 #endif
216 if (tp_ops == NULL) {
217 err = -EINVAL;
218 kfree(tp);
219 goto errout;
221 memset(tp, 0, sizeof(*tp));
222 tp->ops = tp_ops;
223 tp->protocol = protocol;
224 tp->prio = nprio ? : tcf_auto_prio(*back);
225 tp->q = q;
226 tp->classify = tp_ops->classify;
227 tp->classid = parent;
228 err = tp_ops->init(tp);
229 if (err) {
230 kfree(tp);
231 goto errout;
233 write_lock(&qdisc_tree_lock);
234 spin_lock_bh(&dev->queue_lock);
235 tp->next = *back;
236 *back = tp;
237 spin_unlock_bh(&dev->queue_lock);
238 write_unlock(&qdisc_tree_lock);
239 } else if (tca[TCA_KIND-1] && rtattr_strcmp(tca[TCA_KIND-1], tp->ops->kind))
240 goto errout;
242 fh = tp->ops->get(tp, t->tcm_handle);
244 if (fh == 0) {
245 if (n->nlmsg_type == RTM_DELTFILTER && t->tcm_handle == 0) {
246 write_lock(&qdisc_tree_lock);
247 spin_lock_bh(&dev->queue_lock);
248 *back = tp->next;
249 spin_unlock_bh(&dev->queue_lock);
250 write_unlock(&qdisc_tree_lock);
252 tp->ops->destroy(tp);
253 kfree(tp);
254 err = 0;
255 goto errout;
258 err = -ENOENT;
259 if (n->nlmsg_type != RTM_NEWTFILTER || !(n->nlmsg_flags&NLM_F_CREATE))
260 goto errout;
261 } else {
262 switch (n->nlmsg_type) {
263 case RTM_NEWTFILTER:
264 err = -EEXIST;
265 if (n->nlmsg_flags&NLM_F_EXCL)
266 goto errout;
267 break;
268 case RTM_DELTFILTER:
269 err = tp->ops->delete(tp, fh);
270 goto errout;
271 case RTM_GETTFILTER:
272 err = tfilter_notify(skb, n, tp, fh, RTM_NEWTFILTER);
273 goto errout;
274 default:
275 err = -EINVAL;
276 goto errout;
280 err = tp->ops->change(tp, cl, t->tcm_handle, tca, &fh);
281 if (err == 0)
282 tfilter_notify(skb, n, tp, fh, RTM_NEWTFILTER);
284 errout:
285 if (cl)
286 cops->put(q, cl);
287 return err;
290 static int
291 tcf_fill_node(struct sk_buff *skb, struct tcf_proto *tp, unsigned long fh,
292 u32 pid, u32 seq, unsigned flags, int event)
294 struct tcmsg *tcm;
295 struct nlmsghdr *nlh;
296 unsigned char *b = skb->tail;
298 nlh = NLMSG_PUT(skb, pid, seq, event, sizeof(*tcm));
299 nlh->nlmsg_flags = flags;
300 tcm = NLMSG_DATA(nlh);
301 tcm->tcm_family = AF_UNSPEC;
302 tcm->tcm_ifindex = tp->q->dev->ifindex;
303 tcm->tcm_parent = tp->classid;
304 tcm->tcm_handle = 0;
305 tcm->tcm_info = TC_H_MAKE(tp->prio, tp->protocol);
306 RTA_PUT(skb, TCA_KIND, IFNAMSIZ, tp->ops->kind);
307 if (tp->ops->dump && tp->ops->dump(tp, fh, skb, tcm) < 0)
308 goto rtattr_failure;
309 nlh->nlmsg_len = skb->tail - b;
310 return skb->len;
312 nlmsg_failure:
313 rtattr_failure:
314 skb_trim(skb, b - skb->data);
315 return -1;
318 static int tfilter_notify(struct sk_buff *oskb, struct nlmsghdr *n,
319 struct tcf_proto *tp, unsigned long fh, int event)
321 struct sk_buff *skb;
322 u32 pid = oskb ? NETLINK_CB(oskb).pid : 0;
324 skb = alloc_skb(NLMSG_GOODSIZE, GFP_KERNEL);
325 if (!skb)
326 return -ENOBUFS;
328 if (tcf_fill_node(skb, tp, fh, pid, n->nlmsg_seq, 0, event) <= 0) {
329 kfree_skb(skb);
330 return -EINVAL;
333 return rtnetlink_send(skb, pid, RTMGRP_TC, n->nlmsg_flags&NLM_F_ECHO);
336 struct tcf_dump_args
338 struct tcf_walker w;
339 struct sk_buff *skb;
340 struct netlink_callback *cb;
343 static int tcf_node_dump(struct tcf_proto *tp, unsigned long n, struct tcf_walker *arg)
345 struct tcf_dump_args *a = (void*)arg;
347 return tcf_fill_node(a->skb, tp, n, NETLINK_CB(a->cb->skb).pid,
348 a->cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWTFILTER);
351 static int tc_dump_tfilter(struct sk_buff *skb, struct netlink_callback *cb)
353 int t;
354 int s_t;
355 struct device *dev;
356 struct Qdisc *q;
357 struct tcf_proto *tp, **chain;
358 struct tcmsg *tcm = (struct tcmsg*)NLMSG_DATA(cb->nlh);
359 unsigned long cl = 0;
360 struct Qdisc_class_ops *cops;
361 struct tcf_dump_args arg;
363 if (cb->nlh->nlmsg_len < NLMSG_LENGTH(sizeof(*tcm)))
364 return skb->len;
365 if ((dev = dev_get_by_index(tcm->tcm_ifindex)) == NULL)
366 return skb->len;
368 read_lock(&qdisc_tree_lock);
369 if (!tcm->tcm_parent)
370 q = dev->qdisc_sleeping;
371 else
372 q = qdisc_lookup(dev, TC_H_MAJ(tcm->tcm_parent));
373 if (q == NULL) {
374 read_unlock(&qdisc_tree_lock);
375 return skb->len;
377 if ((cops = q->ops->cl_ops) == NULL)
378 goto errout;
379 if (TC_H_MIN(tcm->tcm_parent)) {
380 cl = cops->get(q, tcm->tcm_parent);
381 if (cl == 0)
382 goto errout;
384 chain = cops->tcf_chain(q, cl);
385 if (chain == NULL)
386 goto errout;
388 s_t = cb->args[0];
390 for (tp=*chain, t=0; tp; tp = tp->next, t++) {
391 if (t < s_t) continue;
392 if (TC_H_MAJ(tcm->tcm_info) &&
393 TC_H_MAJ(tcm->tcm_info) != tp->prio)
394 continue;
395 if (TC_H_MIN(tcm->tcm_info) &&
396 TC_H_MIN(tcm->tcm_info) != tp->protocol)
397 continue;
398 if (t > s_t)
399 memset(&cb->args[1], 0, sizeof(cb->args)-sizeof(cb->args[0]));
400 if (cb->args[1] == 0) {
401 if (tcf_fill_node(skb, tp, 0, NETLINK_CB(cb->skb).pid,
402 cb->nlh->nlmsg_seq, NLM_F_MULTI, RTM_NEWTFILTER) <= 0) {
403 break;
405 cb->args[1] = 1;
407 if (tp->ops->walk == NULL)
408 continue;
409 arg.w.fn = tcf_node_dump;
410 arg.skb = skb;
411 arg.cb = cb;
412 arg.w.stop = 0;
413 arg.w.skip = cb->args[1]-1;
414 arg.w.count = 0;
415 tp->ops->walk(tp, &arg.w);
416 cb->args[1] = arg.w.count+1;
417 if (arg.w.stop)
418 break;
421 cb->args[0] = t;
423 errout:
424 if (cl)
425 cops->put(q, cl);
427 read_unlock(&qdisc_tree_lock);
428 return skb->len;
431 #endif
/* Boot-time initialization: hook the tfilter rtnetlink handlers and
 * register every statically configured classifier. */
__initfunc(int tc_filter_init(void))
{
#ifdef CONFIG_RTNETLINK
	struct rtnetlink_link *link_p = rtnetlink_links[PF_UNSPEC];

	/* Setup rtnetlink links. It is made here to avoid
	   exporting large number of public symbols.
	 */

	if (link_p) {
		link_p[RTM_NEWTFILTER-RTM_BASE].doit = tc_ctl_tfilter;
		link_p[RTM_DELTFILTER-RTM_BASE].doit = tc_ctl_tfilter;
		link_p[RTM_GETTFILTER-RTM_BASE].doit = tc_ctl_tfilter;
		link_p[RTM_GETTFILTER-RTM_BASE].dumpit = tc_dump_tfilter;
	}
#endif
/* Register one built-in classifier whose ops symbol is cls_<name>_ops. */
#define INIT_TC_FILTER(name) { \
	extern struct tcf_proto_ops cls_##name##_ops; \
	register_tcf_proto_ops(&cls_##name##_ops); \
}

#ifdef CONFIG_NET_CLS_U32
	INIT_TC_FILTER(u32);
#endif
#ifdef CONFIG_NET_CLS_ROUTE4
	INIT_TC_FILTER(route4);
#endif
#ifdef CONFIG_NET_CLS_FW
	INIT_TC_FILTER(fw);
#endif
#ifdef CONFIG_NET_CLS_RSVP
	INIT_TC_FILTER(rsvp);
#endif
#ifdef CONFIG_NET_CLS_RSVP6
	INIT_TC_FILTER(rsvp6);
#endif
	return 0;
}