/* net/sched/sch_dsmark.c - Differentiated Services field marker */

/* Written 1998-2000 by Werner Almesberger, EPFL ICA */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/bitops.h>
#include <net/pkt_sched.h>
#include <net/dsfield.h>
#include <net/inet_ecn.h>
#include <asm/byteorder.h>
/*
 * classid	class		marking
 * -------	-----		-------
 *   n/a	  0		n/a
 *   x:0	  1		use entry [0]
 *   ...	 ...		...
 * x:y y>0	 y+1		use entry [y]
 *   ...	 ...		...
 * x:indices-1	indices		use entry [indices-1]
 *   ...	 ...		...
 *   x:y	 y+1		use entry [y & (indices-1)]
 *   ...	 ...		...
 * 0xffff	0x10000		use entry [indices-1]
 */
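
/*
 * Worked example (illustrative only): with indices = 8, a packet whose
 * tc_index is 5 uses entry [5], while tc_index 13 wraps to entry
 * [13 & 7] = [5] as well; see the "& (p->indices - 1)" masking in
 * dsmark_dequeue() below.
 */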
#define NO_DEFAULT_INDEX	(1 << 16)
struct dsmark_qdisc_data {
	struct Qdisc		*q;
	struct tcf_proto	*filter_list;
	u8			*mask;	/* "owns" the array */
	u8			*value;
	u16			indices;
	u32			default_index;	/* index range is 0...0xffff */
	int			set_tc_index;
};
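
/*
 * How mask/value mark a packet (a sketch, matching the semantics of
 * ipv4_change_dsfield()/ipv6_change_dsfield()): the outgoing DS field
 * becomes
 *
 *	new_ds = (old_ds & mask[index]) | value[index];
 *
 * e.g. mask 0x03, value 0xb8 keeps only the ECN bits and ORs in the EF
 * codepoint. The defaults mask = 0xff, value = 0 leave packets untouched.
 */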
static inline int dsmark_valid_index(struct dsmark_qdisc_data *p, u16 index)
{
	return (index <= p->indices && index > 0);
}
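
/*
 * Note the off-by-one convention: class handles handed to userspace are
 * table entry + 1 (dsmark_get() returns TC_H_MIN(classid) + 1), so the
 * valid range here is 1..indices and entry [n] is addressed internally
 * as class n + 1.
 */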
/* ------------------------- Class/flow operations ------------------------- */
static int dsmark_graft(struct Qdisc *sch, unsigned long arg,
			struct Qdisc *new, struct Qdisc **old)
{
	struct dsmark_qdisc_data *p = qdisc_priv(sch);

	pr_debug("dsmark_graft(sch %p,[qdisc %p],new %p,old %p)\n",
		 sch, p, new, old);

	if (new == NULL) {
		new = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops,
					sch->handle);
		if (new == NULL)
			new = &noop_qdisc;
	}

	sch_tree_lock(sch);
	*old = p->q;
	p->q = new;
	qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
	qdisc_reset(*old);
	sch_tree_unlock(sch);

	return 0;
}
static struct Qdisc *dsmark_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct dsmark_qdisc_data *p = qdisc_priv(sch);
	return p->q;
}
static unsigned long dsmark_get(struct Qdisc *sch, u32 classid)
{
	pr_debug("dsmark_get(sch %p,[qdisc %p],classid %x)\n",
		 sch, qdisc_priv(sch), classid);

	return TC_H_MIN(classid) + 1;
}
static unsigned long dsmark_bind_filter(struct Qdisc *sch,
					unsigned long parent, u32 classid)
{
	return dsmark_get(sch, classid);
}
static void dsmark_put(struct Qdisc *sch, unsigned long cl)
{
}
static const struct nla_policy dsmark_policy[TCA_DSMARK_MAX + 1] = {
	[TCA_DSMARK_INDICES]		= { .type = NLA_U16 },
	[TCA_DSMARK_DEFAULT_INDEX]	= { .type = NLA_U16 },
	[TCA_DSMARK_SET_TC_INDEX]	= { .type = NLA_FLAG },
	[TCA_DSMARK_MASK]		= { .type = NLA_U8 },
	[TCA_DSMARK_VALUE]		= { .type = NLA_U8 },
};
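
/*
 * Typical userspace usage (a sketch; the exact option spelling depends
 * on the iproute2 version):
 *
 *	tc qdisc add dev eth0 root handle 1:0 dsmark indices 64 \
 *		default_index 0 set_tc_index
 *	tc class change dev eth0 classid 1:1 dsmark mask 0x3 value 0xb8
 *
 * The indices/default_index/set_tc_index options correspond to
 * TCA_DSMARK_INDICES, TCA_DSMARK_DEFAULT_INDEX and
 * TCA_DSMARK_SET_TC_INDEX above; per-class mask/value arrive as
 * TCA_DSMARK_MASK and TCA_DSMARK_VALUE via dsmark_change().
 */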
static int dsmark_change(struct Qdisc *sch, u32 classid, u32 parent,
			 struct nlattr **tca, unsigned long *arg)
{
	struct dsmark_qdisc_data *p = qdisc_priv(sch);
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_DSMARK_MAX + 1];
	int err = -EINVAL;
	u8 mask = 0;

	pr_debug("dsmark_change(sch %p,[qdisc %p],classid %x,parent %x),"
		 "arg 0x%lx\n", sch, p, classid, parent, *arg);

	if (!dsmark_valid_index(p, *arg)) {
		err = -ENOENT;
		goto errout;
	}

	if (!opt)
		goto errout;

	err = nla_parse_nested(tb, TCA_DSMARK_MAX, opt, dsmark_policy);
	if (err < 0)
		goto errout;

	if (tb[TCA_DSMARK_MASK])
		mask = nla_get_u8(tb[TCA_DSMARK_MASK]);

	if (tb[TCA_DSMARK_VALUE])
		p->value[*arg - 1] = nla_get_u8(tb[TCA_DSMARK_VALUE]);

	if (tb[TCA_DSMARK_MASK])
		p->mask[*arg - 1] = mask;

	err = 0;

errout:
	return err;
}
static int dsmark_delete(struct Qdisc *sch, unsigned long arg)
{
	struct dsmark_qdisc_data *p = qdisc_priv(sch);

	if (!dsmark_valid_index(p, arg))
		return -EINVAL;

	/* "Deleting" a class just restores the pass-through defaults. */
	p->mask[arg - 1] = 0xff;
	p->value[arg - 1] = 0;

	return 0;
}
static void dsmark_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	struct dsmark_qdisc_data *p = qdisc_priv(sch);
	int i;

	pr_debug("dsmark_walk(sch %p,[qdisc %p],walker %p)\n",
		 sch, p, walker);

	if (walker->stop)
		return;

	for (i = 0; i < p->indices; i++) {
		/* Skip entries still at their pass-through defaults. */
		if (p->mask[i] == 0xff && !p->value[i])
			goto ignore;
		if (walker->count >= walker->skip) {
			if (walker->fn(sch, i + 1, walker) < 0) {
				walker->stop = 1;
				break;
			}
		}
ignore:
		walker->count++;
	}
}
static inline struct tcf_proto **dsmark_find_tcf(struct Qdisc *sch,
						 unsigned long cl)
{
	struct dsmark_qdisc_data *p = qdisc_priv(sch);
	return &p->filter_list;
}
/* --------------------------- Qdisc operations ---------------------------- */
static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct dsmark_qdisc_data *p = qdisc_priv(sch);
	int err;

	pr_debug("dsmark_enqueue(skb %p,sch %p,[qdisc %p])\n", skb, sch, p);

	if (p->set_tc_index) {
		switch (skb->protocol) {
		case htons(ETH_P_IP):
			if (skb_cow_head(skb, sizeof(struct iphdr)))
				goto drop;

			skb->tc_index = ipv4_get_dsfield(ip_hdr(skb))
				& ~INET_ECN_MASK;
			break;

		case htons(ETH_P_IPV6):
			if (skb_cow_head(skb, sizeof(struct ipv6hdr)))
				goto drop;

			skb->tc_index = ipv6_get_dsfield(ipv6_hdr(skb))
				& ~INET_ECN_MASK;
			break;
		default:
			skb->tc_index = 0;
			break;
		}
	}

	if (TC_H_MAJ(skb->priority) == sch->handle)
		skb->tc_index = TC_H_MIN(skb->priority);
	else {
		struct tcf_result res;
		int result = tc_classify(skb, p->filter_list, &res);

		pr_debug("result %d class 0x%04x\n", result, res.classid);

		switch (result) {
#ifdef CONFIG_NET_CLS_ACT
		case TC_ACT_QUEUED:
		case TC_ACT_STOLEN:
			kfree_skb(skb);
			return NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;

		case TC_ACT_SHOT:
			goto drop;
#endif
		case TC_ACT_OK:
			skb->tc_index = TC_H_MIN(res.classid);
			break;

		default:
			if (p->default_index != NO_DEFAULT_INDEX)
				skb->tc_index = p->default_index;
			break;
		}
	}

	err = qdisc_enqueue(skb, p->q);
	if (err != NET_XMIT_SUCCESS) {
		if (net_xmit_drop_count(err))
			sch->qstats.drops++;
		return err;
	}

	sch->q.qlen++;

	return NET_XMIT_SUCCESS;

drop:
	qdisc_drop(skb, sch);
	return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
}
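
/*
 * Reading aid for the tc_index selection above: set_tc_index seeds it
 * from the packet's DS field, an skb->priority addressed at this qdisc
 * overrides that, an attached filter overrides both, and default_index
 * (if configured) is the fallback when classification fails.
 */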
static struct sk_buff *dsmark_dequeue(struct Qdisc *sch)
{
	struct dsmark_qdisc_data *p = qdisc_priv(sch);
	struct sk_buff *skb;
	u32 index;

	pr_debug("dsmark_dequeue(sch %p,[qdisc %p])\n", sch, p);

	skb = p->q->ops->dequeue(p->q);
	if (skb == NULL)
		return NULL;

	qdisc_bstats_update(sch, skb);

	sch->q.qlen--;

	index = skb->tc_index & (p->indices - 1);
	pr_debug("index %d->%d\n", skb->tc_index, index);

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		ipv4_change_dsfield(ip_hdr(skb), p->mask[index],
				    p->value[index]);
		break;
	case htons(ETH_P_IPV6):
		ipv6_change_dsfield(ipv6_hdr(skb), p->mask[index],
				    p->value[index]);
		break;
	default:
		/*
		 * Only complain if a change was actually attempted.
		 * This way, we can send non-IP traffic through dsmark
		 * and don't need yet another qdisc as a bypass.
		 */
		if (p->mask[index] != 0xff || p->value[index])
			pr_warning("dsmark_dequeue: unsupported protocol %d\n",
				   ntohs(skb->protocol));
		break;
	}

	return skb;
}
static struct sk_buff *dsmark_peek(struct Qdisc *sch)
{
	struct dsmark_qdisc_data *p = qdisc_priv(sch);

	pr_debug("dsmark_peek(sch %p,[qdisc %p])\n", sch, p);

	return p->q->ops->peek(p->q);
}
static unsigned int dsmark_drop(struct Qdisc *sch)
{
	struct dsmark_qdisc_data *p = qdisc_priv(sch);
	unsigned int len;

	pr_debug("dsmark_drop(sch %p,[qdisc %p])\n", sch, p);

	if (p->q->ops->drop == NULL)
		return 0;

	len = p->q->ops->drop(p->q);
	if (len)
		sch->q.qlen--;

	return len;
}
static int dsmark_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct dsmark_qdisc_data *p = qdisc_priv(sch);
	struct nlattr *tb[TCA_DSMARK_MAX + 1];
	int err = -EINVAL;
	u32 default_index = NO_DEFAULT_INDEX;
	u16 indices;
	u8 *mask;

	pr_debug("dsmark_init(sch %p,[qdisc %p],opt %p)\n", sch, p, opt);

	if (!opt)
		goto errout;

	err = nla_parse_nested(tb, TCA_DSMARK_MAX, opt, dsmark_policy);
	if (err < 0)
		goto errout;

	err = -EINVAL;
	if (tb[TCA_DSMARK_INDICES] == NULL)	/* required attribute */
		goto errout;
	indices = nla_get_u16(tb[TCA_DSMARK_INDICES]);

	if (hweight32(indices) != 1)	/* must be a power of two */
		goto errout;

	if (tb[TCA_DSMARK_DEFAULT_INDEX])
		default_index = nla_get_u16(tb[TCA_DSMARK_DEFAULT_INDEX]);

	mask = kmalloc(indices * 2, GFP_KERNEL);
	if (mask == NULL) {
		err = -ENOMEM;
		goto errout;
	}

	p->mask = mask;
	memset(p->mask, 0xff, indices);

	p->value = p->mask + indices;
	memset(p->value, 0, indices);

	p->indices = indices;
	p->default_index = default_index;
	p->set_tc_index = nla_get_flag(tb[TCA_DSMARK_SET_TC_INDEX]);

	p->q = qdisc_create_dflt(sch->dev_queue, &pfifo_qdisc_ops, sch->handle);
	if (p->q == NULL)
		p->q = &noop_qdisc;

	pr_debug("dsmark_init: qdisc %p\n", p->q);

	err = 0;
errout:
	return err;
}
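
/*
 * Layout note: mask and value share a single kmalloc() of indices * 2
 * bytes; p->mask points at the first half and p->value at the second,
 * which is why dsmark_destroy() frees only p->mask (the struct comment
 * says it "owns" the array).
 */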
static void dsmark_reset(struct Qdisc *sch)
{
	struct dsmark_qdisc_data *p = qdisc_priv(sch);

	pr_debug("dsmark_reset(sch %p,[qdisc %p])\n", sch, p);
	qdisc_reset(p->q);
	sch->q.qlen = 0;
}
static void dsmark_destroy(struct Qdisc *sch)
{
	struct dsmark_qdisc_data *p = qdisc_priv(sch);

	pr_debug("dsmark_destroy(sch %p,[qdisc %p])\n", sch, p);

	tcf_destroy_chain(&p->filter_list);
	qdisc_destroy(p->q);
	kfree(p->mask);
}
static int dsmark_dump_class(struct Qdisc *sch, unsigned long cl,
			     struct sk_buff *skb, struct tcmsg *tcm)
{
	struct dsmark_qdisc_data *p = qdisc_priv(sch);
	struct nlattr *opts = NULL;

	pr_debug("dsmark_dump_class(sch %p,[qdisc %p],class %ld)\n", sch, p, cl);

	if (!dsmark_valid_index(p, cl))
		return -EINVAL;

	tcm->tcm_handle = TC_H_MAKE(TC_H_MAJ(sch->handle), cl - 1);
	tcm->tcm_info = p->q->handle;

	opts = nla_nest_start(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;
	if (nla_put_u8(skb, TCA_DSMARK_MASK, p->mask[cl - 1]) ||
	    nla_put_u8(skb, TCA_DSMARK_VALUE, p->value[cl - 1]))
		goto nla_put_failure;

	return nla_nest_end(skb, opts);

nla_put_failure:
	nla_nest_cancel(skb, opts);
	return -EMSGSIZE;
}
static int dsmark_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct dsmark_qdisc_data *p = qdisc_priv(sch);
	struct nlattr *opts = NULL;

	opts = nla_nest_start(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;
	if (nla_put_u16(skb, TCA_DSMARK_INDICES, p->indices))
		goto nla_put_failure;

	if (p->default_index != NO_DEFAULT_INDEX &&
	    nla_put_u16(skb, TCA_DSMARK_DEFAULT_INDEX, p->default_index))
		goto nla_put_failure;

	if (p->set_tc_index &&
	    nla_put_flag(skb, TCA_DSMARK_SET_TC_INDEX))
		goto nla_put_failure;

	return nla_nest_end(skb, opts);

nla_put_failure:
	nla_nest_cancel(skb, opts);
	return -EMSGSIZE;
}
static const struct Qdisc_class_ops dsmark_class_ops = {
	.graft		=	dsmark_graft,
	.leaf		=	dsmark_leaf,
	.get		=	dsmark_get,
	.put		=	dsmark_put,
	.change		=	dsmark_change,
	.delete		=	dsmark_delete,
	.walk		=	dsmark_walk,
	.tcf_chain	=	dsmark_find_tcf,
	.bind_tcf	=	dsmark_bind_filter,
	.unbind_tcf	=	dsmark_put,
	.dump		=	dsmark_dump_class,
};
static struct Qdisc_ops dsmark_qdisc_ops __read_mostly = {
	.next		=	NULL,
	.cl_ops		=	&dsmark_class_ops,
	.id		=	"dsmark",
	.priv_size	=	sizeof(struct dsmark_qdisc_data),
	.enqueue	=	dsmark_enqueue,
	.dequeue	=	dsmark_dequeue,
	.peek		=	dsmark_peek,
	.drop		=	dsmark_drop,
	.init		=	dsmark_init,
	.reset		=	dsmark_reset,
	.destroy	=	dsmark_destroy,
	.change		=	NULL,
	.dump		=	dsmark_dump,
	.owner		=	THIS_MODULE,
};
static int __init dsmark_module_init(void)
{
	return register_qdisc(&dsmark_qdisc_ops);
}
static void __exit dsmark_module_exit(void)
{
	unregister_qdisc(&dsmark_qdisc_ops);
}
module_init(dsmark_module_init)
module_exit(dsmark_module_exit)

MODULE_LICENSE("GPL");