[PKT_SCHED]: Disable dsmark debugging messages by default
[linux-2.6/mini2440.git] / net / sched / sch_dsmark.c
blobd8bd2a569c7ca6310f42af3c92510e75eca387cd
1 /* net/sched/sch_dsmark.c - Differentiated Services field marker */
3 /* Written 1998-2000 by Werner Almesberger, EPFL ICA */
6 #include <linux/config.h>
7 #include <linux/module.h>
8 #include <linux/init.h>
9 #include <linux/types.h>
10 #include <linux/string.h>
11 #include <linux/errno.h>
12 #include <linux/skbuff.h>
13 #include <linux/netdevice.h> /* for pkt_sched */
14 #include <linux/rtnetlink.h>
15 #include <net/pkt_sched.h>
16 #include <net/dsfield.h>
17 #include <net/inet_ecn.h>
18 #include <asm/byteorder.h>
/*
 * Compile-time debug switches: DPRINTK traces the control path
 * (configuration), D2PRINTK the data path (per packet).  Both default
 * to off; flip "#if 0" to "#if 1" to enable.  The disabled variants
 * expand to a no-op statement (do { } while (0)) so they stay safe in
 * unbraced if/else bodies instead of expanding to nothing.
 */
#if 0 /* control */
#define DPRINTK(format,args...) printk(KERN_DEBUG format,##args)
#else
#define DPRINTK(format,args...) do { } while (0)
#endif

#if 0 /* data */
#define D2PRINTK(format,args...) printk(KERN_DEBUG format,##args)
#else
#define D2PRINTK(format,args...) do { } while (0)
#endif

/* Shorthand: a qdisc's private dsmark state. */
#define PRIV(sch) qdisc_priv(sch)
/*
 * classid	class		marking
 * -------	-----		-------
 *   n/a	  0		n/a
 *   x:0	  1		use entry [0]
 *   ...	 ...		...
 *   x:y y>0	 y+1		use entry [y]
 *   ...	 ...		...
 * x:indices-1	indices		use entry [indices-1]
 *   ...	 ...		...
 *   x:y	 y+1		use entry [y & (indices-1)]
 *   ...	 ...		...
 *   0xffff	0x10000		use entry [indices-1]
 */
53 #define NO_DEFAULT_INDEX (1 << 16)
55 struct dsmark_qdisc_data {
56 struct Qdisc *q;
57 struct tcf_proto *filter_list;
58 __u8 *mask; /* "owns" the array */
59 __u8 *value;
60 __u16 indices;
61 __u32 default_index; /* index range is 0...0xffff */
62 int set_tc_index;
66 /* ------------------------- Class/flow operations ------------------------- */
69 static int dsmark_graft(struct Qdisc *sch,unsigned long arg,
70 struct Qdisc *new,struct Qdisc **old)
72 struct dsmark_qdisc_data *p = PRIV(sch);
74 DPRINTK("dsmark_graft(sch %p,[qdisc %p],new %p,old %p)\n",sch,p,new,
75 old);
77 if (new == NULL) {
78 new = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops);
79 if (new == NULL)
80 new = &noop_qdisc;
83 sch_tree_lock(sch);
84 *old = xchg(&p->q,new);
85 if (*old)
86 qdisc_reset(*old);
87 sch->q.qlen = 0;
88 sch_tree_unlock(sch); /* @@@ move up ? */
89 return 0;
93 static struct Qdisc *dsmark_leaf(struct Qdisc *sch, unsigned long arg)
95 struct dsmark_qdisc_data *p = PRIV(sch);
97 return p->q;
101 static unsigned long dsmark_get(struct Qdisc *sch,u32 classid)
103 struct dsmark_qdisc_data *p __attribute__((unused)) = PRIV(sch);
105 DPRINTK("dsmark_get(sch %p,[qdisc %p],classid %x)\n",sch,p,classid);
106 return TC_H_MIN(classid)+1;
110 static unsigned long dsmark_bind_filter(struct Qdisc *sch,
111 unsigned long parent, u32 classid)
113 return dsmark_get(sch,classid);
117 static void dsmark_put(struct Qdisc *sch, unsigned long cl)
122 static int dsmark_change(struct Qdisc *sch, u32 classid, u32 parent,
123 struct rtattr **tca, unsigned long *arg)
125 struct dsmark_qdisc_data *p = PRIV(sch);
126 struct rtattr *opt = tca[TCA_OPTIONS-1];
127 struct rtattr *tb[TCA_DSMARK_MAX];
129 DPRINTK("dsmark_change(sch %p,[qdisc %p],classid %x,parent %x),"
130 "arg 0x%lx\n",sch,p,classid,parent,*arg);
131 if (*arg > p->indices)
132 return -ENOENT;
133 if (!opt || rtattr_parse_nested(tb, TCA_DSMARK_MAX, opt))
134 return -EINVAL;
135 if (tb[TCA_DSMARK_MASK-1]) {
136 if (!RTA_PAYLOAD(tb[TCA_DSMARK_MASK-1]))
137 return -EINVAL;
138 p->mask[*arg-1] = *(__u8 *) RTA_DATA(tb[TCA_DSMARK_MASK-1]);
140 if (tb[TCA_DSMARK_VALUE-1]) {
141 if (!RTA_PAYLOAD(tb[TCA_DSMARK_VALUE-1]))
142 return -EINVAL;
143 p->value[*arg-1] = *(__u8 *) RTA_DATA(tb[TCA_DSMARK_VALUE-1]);
145 return 0;
149 static int dsmark_delete(struct Qdisc *sch,unsigned long arg)
151 struct dsmark_qdisc_data *p = PRIV(sch);
153 if (!arg || arg > p->indices)
154 return -EINVAL;
155 p->mask[arg-1] = 0xff;
156 p->value[arg-1] = 0;
157 return 0;
161 static void dsmark_walk(struct Qdisc *sch,struct qdisc_walker *walker)
163 struct dsmark_qdisc_data *p = PRIV(sch);
164 int i;
166 DPRINTK("dsmark_walk(sch %p,[qdisc %p],walker %p)\n",sch,p,walker);
167 if (walker->stop)
168 return;
169 for (i = 0; i < p->indices; i++) {
170 if (p->mask[i] == 0xff && !p->value[i])
171 goto ignore;
172 if (walker->count >= walker->skip) {
173 if (walker->fn(sch, i+1, walker) < 0) {
174 walker->stop = 1;
175 break;
178 ignore:
179 walker->count++;
184 static struct tcf_proto **dsmark_find_tcf(struct Qdisc *sch,unsigned long cl)
186 struct dsmark_qdisc_data *p = PRIV(sch);
188 return &p->filter_list;
192 /* --------------------------- Qdisc operations ---------------------------- */
195 static int dsmark_enqueue(struct sk_buff *skb,struct Qdisc *sch)
197 struct dsmark_qdisc_data *p = PRIV(sch);
198 struct tcf_result res;
199 int result;
200 int ret = NET_XMIT_POLICED;
202 D2PRINTK("dsmark_enqueue(skb %p,sch %p,[qdisc %p])\n",skb,sch,p);
203 if (p->set_tc_index) {
204 /* FIXME: Safe with non-linear skbs? --RR */
205 switch (skb->protocol) {
206 case __constant_htons(ETH_P_IP):
207 skb->tc_index = ipv4_get_dsfield(skb->nh.iph)
208 & ~INET_ECN_MASK;
209 break;
210 case __constant_htons(ETH_P_IPV6):
211 skb->tc_index = ipv6_get_dsfield(skb->nh.ipv6h)
212 & ~INET_ECN_MASK;
213 break;
214 default:
215 skb->tc_index = 0;
216 break;
219 result = TC_POLICE_OK; /* be nice to gcc */
220 if (TC_H_MAJ(skb->priority) == sch->handle) {
221 skb->tc_index = TC_H_MIN(skb->priority);
222 } else {
223 result = tc_classify(skb,p->filter_list,&res);
224 D2PRINTK("result %d class 0x%04x\n",result,res.classid);
225 switch (result) {
226 #ifdef CONFIG_NET_CLS_POLICE
227 case TC_POLICE_SHOT:
228 kfree_skb(skb);
229 break;
230 #if 0
231 case TC_POLICE_RECLASSIFY:
232 /* FIXME: what to do here ??? */
233 #endif
234 #endif
235 case TC_POLICE_OK:
236 skb->tc_index = TC_H_MIN(res.classid);
237 break;
238 case TC_POLICE_UNSPEC:
239 /* fall through */
240 default:
241 if (p->default_index != NO_DEFAULT_INDEX)
242 skb->tc_index = p->default_index;
243 break;
246 if (
247 #ifdef CONFIG_NET_CLS_POLICE
248 result == TC_POLICE_SHOT ||
249 #endif
251 ((ret = p->q->enqueue(skb,p->q)) != 0)) {
252 sch->qstats.drops++;
253 return ret;
255 sch->bstats.bytes += skb->len;
256 sch->bstats.packets++;
257 sch->q.qlen++;
258 return ret;
262 static struct sk_buff *dsmark_dequeue(struct Qdisc *sch)
264 struct dsmark_qdisc_data *p = PRIV(sch);
265 struct sk_buff *skb;
266 int index;
268 D2PRINTK("dsmark_dequeue(sch %p,[qdisc %p])\n",sch,p);
269 skb = p->q->ops->dequeue(p->q);
270 if (!skb)
271 return NULL;
272 sch->q.qlen--;
273 index = skb->tc_index & (p->indices-1);
274 D2PRINTK("index %d->%d\n",skb->tc_index,index);
275 switch (skb->protocol) {
276 case __constant_htons(ETH_P_IP):
277 ipv4_change_dsfield(skb->nh.iph,
278 p->mask[index],p->value[index]);
279 break;
280 case __constant_htons(ETH_P_IPV6):
281 ipv6_change_dsfield(skb->nh.ipv6h,
282 p->mask[index],p->value[index]);
283 break;
284 default:
286 * Only complain if a change was actually attempted.
287 * This way, we can send non-IP traffic through dsmark
288 * and don't need yet another qdisc as a bypass.
290 if (p->mask[index] != 0xff || p->value[index])
291 printk(KERN_WARNING "dsmark_dequeue: "
292 "unsupported protocol %d\n",
293 htons(skb->protocol));
294 break;
296 return skb;
300 static int dsmark_requeue(struct sk_buff *skb,struct Qdisc *sch)
302 int ret;
303 struct dsmark_qdisc_data *p = PRIV(sch);
305 D2PRINTK("dsmark_requeue(skb %p,sch %p,[qdisc %p])\n",skb,sch,p);
306 if ((ret = p->q->ops->requeue(skb, p->q)) == 0) {
307 sch->q.qlen++;
308 sch->qstats.requeues++;
309 return 0;
311 sch->qstats.drops++;
312 return ret;
316 static unsigned int dsmark_drop(struct Qdisc *sch)
318 struct dsmark_qdisc_data *p = PRIV(sch);
319 unsigned int len;
321 DPRINTK("dsmark_reset(sch %p,[qdisc %p])\n",sch,p);
322 if (!p->q->ops->drop)
323 return 0;
324 if (!(len = p->q->ops->drop(p->q)))
325 return 0;
326 sch->q.qlen--;
327 return len;
331 static int dsmark_init(struct Qdisc *sch,struct rtattr *opt)
333 struct dsmark_qdisc_data *p = PRIV(sch);
334 struct rtattr *tb[TCA_DSMARK_MAX];
335 __u16 tmp;
337 DPRINTK("dsmark_init(sch %p,[qdisc %p],opt %p)\n",sch,p,opt);
338 if (!opt ||
339 rtattr_parse(tb,TCA_DSMARK_MAX,RTA_DATA(opt),RTA_PAYLOAD(opt)) < 0 ||
340 !tb[TCA_DSMARK_INDICES-1] ||
341 RTA_PAYLOAD(tb[TCA_DSMARK_INDICES-1]) < sizeof(__u16))
342 return -EINVAL;
343 p->indices = *(__u16 *) RTA_DATA(tb[TCA_DSMARK_INDICES-1]);
344 if (!p->indices)
345 return -EINVAL;
346 for (tmp = p->indices; tmp != 1; tmp >>= 1) {
347 if (tmp & 1)
348 return -EINVAL;
350 p->default_index = NO_DEFAULT_INDEX;
351 if (tb[TCA_DSMARK_DEFAULT_INDEX-1]) {
352 if (RTA_PAYLOAD(tb[TCA_DSMARK_DEFAULT_INDEX-1]) < sizeof(__u16))
353 return -EINVAL;
354 p->default_index =
355 *(__u16 *) RTA_DATA(tb[TCA_DSMARK_DEFAULT_INDEX-1]);
357 p->set_tc_index = !!tb[TCA_DSMARK_SET_TC_INDEX-1];
358 p->mask = kmalloc(p->indices*2,GFP_KERNEL);
359 if (!p->mask)
360 return -ENOMEM;
361 p->value = p->mask+p->indices;
362 memset(p->mask,0xff,p->indices);
363 memset(p->value,0,p->indices);
364 if (!(p->q = qdisc_create_dflt(sch->dev, &pfifo_qdisc_ops)))
365 p->q = &noop_qdisc;
366 DPRINTK("dsmark_init: qdisc %p\n",&p->q);
367 return 0;
371 static void dsmark_reset(struct Qdisc *sch)
373 struct dsmark_qdisc_data *p = PRIV(sch);
375 DPRINTK("dsmark_reset(sch %p,[qdisc %p])\n",sch,p);
376 qdisc_reset(p->q);
377 sch->q.qlen = 0;
381 static void dsmark_destroy(struct Qdisc *sch)
383 struct dsmark_qdisc_data *p = PRIV(sch);
384 struct tcf_proto *tp;
386 DPRINTK("dsmark_destroy(sch %p,[qdisc %p])\n",sch,p);
387 while (p->filter_list) {
388 tp = p->filter_list;
389 p->filter_list = tp->next;
390 tcf_destroy(tp);
392 qdisc_destroy(p->q);
393 kfree(p->mask);
397 static int dsmark_dump_class(struct Qdisc *sch, unsigned long cl,
398 struct sk_buff *skb, struct tcmsg *tcm)
400 struct dsmark_qdisc_data *p = PRIV(sch);
401 unsigned char *b = skb->tail;
402 struct rtattr *rta;
404 DPRINTK("dsmark_dump_class(sch %p,[qdisc %p],class %ld\n",sch,p,cl);
405 if (!cl || cl > p->indices)
406 return -EINVAL;
407 tcm->tcm_handle = TC_H_MAKE(TC_H_MAJ(sch->handle),cl-1);
408 rta = (struct rtattr *) b;
409 RTA_PUT(skb,TCA_OPTIONS,0,NULL);
410 RTA_PUT(skb,TCA_DSMARK_MASK,1,&p->mask[cl-1]);
411 RTA_PUT(skb,TCA_DSMARK_VALUE,1,&p->value[cl-1]);
412 rta->rta_len = skb->tail-b;
413 return skb->len;
415 rtattr_failure:
416 skb_trim(skb,b-skb->data);
417 return -1;
420 static int dsmark_dump(struct Qdisc *sch, struct sk_buff *skb)
422 struct dsmark_qdisc_data *p = PRIV(sch);
423 unsigned char *b = skb->tail;
424 struct rtattr *rta;
426 rta = (struct rtattr *) b;
427 RTA_PUT(skb,TCA_OPTIONS,0,NULL);
428 RTA_PUT(skb,TCA_DSMARK_INDICES,sizeof(__u16),&p->indices);
429 if (p->default_index != NO_DEFAULT_INDEX) {
430 __u16 tmp = p->default_index;
432 RTA_PUT(skb,TCA_DSMARK_DEFAULT_INDEX, sizeof(__u16), &tmp);
434 if (p->set_tc_index)
435 RTA_PUT(skb, TCA_DSMARK_SET_TC_INDEX, 0, NULL);
436 rta->rta_len = skb->tail-b;
437 return skb->len;
439 rtattr_failure:
440 skb_trim(skb,b-skb->data);
441 return -1;
444 static struct Qdisc_class_ops dsmark_class_ops = {
445 .graft = dsmark_graft,
446 .leaf = dsmark_leaf,
447 .get = dsmark_get,
448 .put = dsmark_put,
449 .change = dsmark_change,
450 .delete = dsmark_delete,
451 .walk = dsmark_walk,
452 .tcf_chain = dsmark_find_tcf,
453 .bind_tcf = dsmark_bind_filter,
454 .unbind_tcf = dsmark_put,
455 .dump = dsmark_dump_class,
458 static struct Qdisc_ops dsmark_qdisc_ops = {
459 .next = NULL,
460 .cl_ops = &dsmark_class_ops,
461 .id = "dsmark",
462 .priv_size = sizeof(struct dsmark_qdisc_data),
463 .enqueue = dsmark_enqueue,
464 .dequeue = dsmark_dequeue,
465 .requeue = dsmark_requeue,
466 .drop = dsmark_drop,
467 .init = dsmark_init,
468 .reset = dsmark_reset,
469 .destroy = dsmark_destroy,
470 .change = NULL,
471 .dump = dsmark_dump,
472 .owner = THIS_MODULE,
475 static int __init dsmark_module_init(void)
477 return register_qdisc(&dsmark_qdisc_ops);
479 static void __exit dsmark_module_exit(void)
481 unregister_qdisc(&dsmark_qdisc_ops);
483 module_init(dsmark_module_init)
484 module_exit(dsmark_module_exit)
485 MODULE_LICENSE("GPL");