/* net/sched/sch_dsmark.c - Differentiated Services field marker */

/* Written 1998-2000 by Werner Almesberger, EPFL ICA */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/skbuff.h>
#include <linux/rtnetlink.h>
#include <linux/bitops.h>
#include <net/pkt_sched.h>
#include <net/dsfield.h>
#include <net/inet_ecn.h>
#include <asm/byteorder.h>
/*
 * classid      class           marking
 * -------      -----           -------
 *   n/a          0             n/a
 *   x:0          1             use entry [0]
 *   ...         ...            ...
 *   x:y y>0     y+1            use entry [y]
 *   ...         ...            ...
 * x:indices-1  indices         use entry [indices-1]
 *   ...         ...            ...
 *   x:y         y+1            use entry [y & (indices-1)]
 *   ...         ...            ...
 * 0xffff       0x10000         use entry [indices-1]
 */

#define NO_DEFAULT_INDEX	(1 << 16)
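/*
 * Illustrative user-space setup (an assumed example, not taken from this
 * file; device name and numbers are made up), using the tc(8) dsmark
 * parameters that correspond to the attributes parsed below:
 *
 *   tc qdisc add dev eth0 handle 1:0 root dsmark indices 64 set_tc_index
 *   tc class change dev eth0 classid 1:1 dsmark mask 0x3 value 0xb8
 *
 * This creates 64 (mask, value) entries and, per the table above, class
 * 1:1 uses entry [1]: packets in it get their DS field rewritten to DSCP
 * EF on dequeue while the ECN bits are preserved.
 */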
struct dsmark_qdisc_data {
	struct Qdisc		*q;
	struct tcf_proto	*filter_list;
	u8			*mask;	/* "owns" the array */
	u8			*value;
	u16			indices;
	u32			default_index;	/* index range is 0...0xffff */
	int			set_tc_index;
};
static inline int dsmark_valid_index(struct dsmark_qdisc_data *p, u16 index)
{
	return (index <= p->indices && index > 0);
}
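/*
 * Reading aid (assumption spelled out from the table above): class handles
 * seen by the kernel are the user-visible minor ID plus one, so that 0 can
 * keep meaning "no class". For example, classid x:0 becomes internal class
 * 1 and uses entry [0]; dsmark_get() below performs this +1 translation.
 */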
/* ------------------------- Class/flow operations ------------------------- */

static int dsmark_graft(struct Qdisc *sch, unsigned long arg,
			struct Qdisc *new, struct Qdisc **old)
{
	struct dsmark_qdisc_data *p = qdisc_priv(sch);

	pr_debug("dsmark_graft(sch %p,[qdisc %p],new %p,old %p)\n",
		 sch, p, new, old);

	if (new == NULL) {
		new = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
					&pfifo_qdisc_ops, sch->handle);
		if (new == NULL)
			new = &noop_qdisc;
	}

	sch_tree_lock(sch);
	*old = xchg(&p->q, new);
	qdisc_tree_decrease_qlen(*old, (*old)->q.qlen);
	qdisc_reset(*old);
	sch_tree_unlock(sch);

	return 0;
}
static struct Qdisc *dsmark_leaf(struct Qdisc *sch, unsigned long arg)
{
	struct dsmark_qdisc_data *p = qdisc_priv(sch);

	return p->q;
}
static unsigned long dsmark_get(struct Qdisc *sch, u32 classid)
{
	pr_debug("dsmark_get(sch %p,[qdisc %p],classid %x)\n",
		 sch, qdisc_priv(sch), classid);

	return TC_H_MIN(classid) + 1;
}
static unsigned long dsmark_bind_filter(struct Qdisc *sch,
					unsigned long parent, u32 classid)
{
	return dsmark_get(sch, classid);
}
static void dsmark_put(struct Qdisc *sch, unsigned long cl)
{
}
static const struct nla_policy dsmark_policy[TCA_DSMARK_MAX + 1] = {
	[TCA_DSMARK_INDICES]		= { .type = NLA_U16 },
	[TCA_DSMARK_DEFAULT_INDEX]	= { .type = NLA_U16 },
	[TCA_DSMARK_SET_TC_INDEX]	= { .type = NLA_FLAG },
	[TCA_DSMARK_MASK]		= { .type = NLA_U8 },
	[TCA_DSMARK_VALUE]		= { .type = NLA_U8 },
};
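/*
 * With this policy, nla_parse_nested() rejects attributes whose payload
 * does not match the declared type before any value is read; e.g. a
 * TCA_DSMARK_INDICES attribute carrying fewer than two bytes fails
 * validation instead of being interpreted as a u16.
 */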
static int dsmark_change(struct Qdisc *sch, u32 classid, u32 parent,
			 struct nlattr **tca, unsigned long *arg)
{
	struct dsmark_qdisc_data *p = qdisc_priv(sch);
	struct nlattr *opt = tca[TCA_OPTIONS];
	struct nlattr *tb[TCA_DSMARK_MAX + 1];
	int err = -EINVAL;
	u8 mask = 0;

	pr_debug("dsmark_change(sch %p,[qdisc %p],classid %x,parent %x),"
		 "arg 0x%lx\n", sch, p, classid, parent, *arg);

	if (!dsmark_valid_index(p, *arg)) {
		err = -ENOENT;
		goto errout;
	}

	if (!opt)
		goto errout;

	err = nla_parse_nested(tb, TCA_DSMARK_MAX, opt, dsmark_policy);
	if (err < 0)
		goto errout;

	if (tb[TCA_DSMARK_MASK])
		mask = nla_get_u8(tb[TCA_DSMARK_MASK]);

	if (tb[TCA_DSMARK_VALUE])
		p->value[*arg - 1] = nla_get_u8(tb[TCA_DSMARK_VALUE]);

	if (tb[TCA_DSMARK_MASK])
		p->mask[*arg - 1] = mask;

	err = 0;

errout:
	return err;
}
static int dsmark_delete(struct Qdisc *sch, unsigned long arg)
{
	struct dsmark_qdisc_data *p = qdisc_priv(sch);

	if (!dsmark_valid_index(p, arg))
		return -EINVAL;

	/* Deleting a class just restores the no-op marking. */
	p->mask[arg - 1] = 0xff;
	p->value[arg - 1] = 0;

	return 0;
}
static void dsmark_walk(struct Qdisc *sch, struct qdisc_walker *walker)
{
	struct dsmark_qdisc_data *p = qdisc_priv(sch);
	int i;

	pr_debug("dsmark_walk(sch %p,[qdisc %p],walker %p)\n", sch, p, walker);

	if (walker->stop)
		return;

	for (i = 0; i < p->indices; i++) {
		/* Skip entries still at their default (no-op) marking. */
		if (p->mask[i] == 0xff && !p->value[i])
			goto ignore;
		if (walker->count >= walker->skip) {
			if (walker->fn(sch, i + 1, walker) < 0) {
				walker->stop = 1;
				break;
			}
		}
ignore:
		walker->count++;
	}
}
static inline struct tcf_proto **dsmark_find_tcf(struct Qdisc *sch,
						 unsigned long cl)
{
	struct dsmark_qdisc_data *p = qdisc_priv(sch);

	return &p->filter_list;
}
/* --------------------------- Qdisc operations ---------------------------- */

static int dsmark_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct dsmark_qdisc_data *p = qdisc_priv(sch);
	int err;

	pr_debug("dsmark_enqueue(skb %p,sch %p,[qdisc %p])\n", skb, sch, p);

	if (p->set_tc_index) {
		switch (skb->protocol) {
		case htons(ETH_P_IP):
			if (skb_cow_head(skb, sizeof(struct iphdr)))
				goto drop;

			skb->tc_index = ipv4_get_dsfield(ip_hdr(skb))
				& ~INET_ECN_MASK;
			break;

		case htons(ETH_P_IPV6):
			if (skb_cow_head(skb, sizeof(struct ipv6hdr)))
				goto drop;

			skb->tc_index = ipv6_get_dsfield(ipv6_hdr(skb))
				& ~INET_ECN_MASK;
			break;
		default:
			skb->tc_index = 0;
			break;
		}
	}

	if (TC_H_MAJ(skb->priority) == sch->handle)
		skb->tc_index = TC_H_MIN(skb->priority);
	else {
		struct tcf_result res;
		int result = tc_classify(skb, p->filter_list, &res);

		pr_debug("result %d class 0x%04x\n", result, res.classid);

		switch (result) {
#ifdef CONFIG_NET_CLS_ACT
		case TC_ACT_QUEUED:
		case TC_ACT_STOLEN:
			kfree_skb(skb);
			return NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;

		case TC_ACT_SHOT:
			goto drop;
#endif
		case TC_ACT_OK:
			skb->tc_index = TC_H_MIN(res.classid);
			break;

		default:
			if (p->default_index != NO_DEFAULT_INDEX)
				skb->tc_index = p->default_index;
			break;
		}
	}

	err = qdisc_enqueue(skb, p->q);
	if (err != NET_XMIT_SUCCESS) {
		if (net_xmit_drop_count(err))
			sch->qstats.drops++;
		return err;
	}

	sch->bstats.bytes += qdisc_pkt_len(skb);
	sch->bstats.packets++;
	sch->q.qlen++;

	return NET_XMIT_SUCCESS;

drop:
	kfree_skb(skb);
	sch->qstats.drops++;
	return NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;
}
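/*
 * Summary of how skb->tc_index is chosen above (a reading aid, not
 * normative), in decreasing priority:
 *
 *   1. skb->priority, if its major number equals this qdisc's handle;
 *   2. the classid returned by an attached tc filter (TC_ACT_OK);
 *   3. p->default_index, if one was configured;
 *   4. otherwise tc_index is left alone (or as copied from the packet's
 *      DS field when set_tc_index is enabled).
 */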
static struct sk_buff *dsmark_dequeue(struct Qdisc *sch)
{
	struct dsmark_qdisc_data *p = qdisc_priv(sch);
	struct sk_buff *skb;
	u32 index;

	pr_debug("dsmark_dequeue(sch %p,[qdisc %p])\n", sch, p);

	skb = p->q->ops->dequeue(p->q);
	if (skb == NULL)
		return NULL;

	sch->q.qlen--;

	index = skb->tc_index & (p->indices - 1);
	pr_debug("index %d->%d\n", skb->tc_index, index);

	switch (skb->protocol) {
	case htons(ETH_P_IP):
		ipv4_change_dsfield(ip_hdr(skb), p->mask[index],
				    p->value[index]);
		break;
	case htons(ETH_P_IPV6):
		ipv6_change_dsfield(ipv6_hdr(skb), p->mask[index],
				    p->value[index]);
		break;
	default:
		/*
		 * Only complain if a change was actually attempted.
		 * This way, we can send non-IP traffic through dsmark
		 * and don't need yet another qdisc as a bypass.
		 */
		if (p->mask[index] != 0xff || p->value[index])
			printk(KERN_WARNING
			       "dsmark_dequeue: unsupported protocol %d\n",
			       ntohs(skb->protocol));
		break;
	}

	return skb;
}
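/*
 * The rewrite done by ipv{4,6}_change_dsfield() above amounts to
 *
 *   new_ds = (old_ds & mask[index]) | value[index];
 *
 * Worked example (assumed values): mask 0x03 with value 0xb8 keeps the
 * two ECN bits and sets the DSCP to EF (46 << 2 == 0xb8).
 */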
static int dsmark_requeue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct dsmark_qdisc_data *p = qdisc_priv(sch);
	int err;

	pr_debug("dsmark_requeue(skb %p,sch %p,[qdisc %p])\n", skb, sch, p);

	err = p->q->ops->requeue(skb, p->q);
	if (err != NET_XMIT_SUCCESS) {
		if (net_xmit_drop_count(err))
			sch->qstats.drops++;
		return err;
	}

	sch->q.qlen++;
	sch->qstats.requeues++;

	return NET_XMIT_SUCCESS;
}
static unsigned int dsmark_drop(struct Qdisc *sch)
{
	struct dsmark_qdisc_data *p = qdisc_priv(sch);
	unsigned int len;

	pr_debug("dsmark_drop(sch %p,[qdisc %p])\n", sch, p);

	if (p->q->ops->drop == NULL)
		return 0;

	len = p->q->ops->drop(p->q);
	if (len)
		sch->q.qlen--;

	return len;
}
static int dsmark_init(struct Qdisc *sch, struct nlattr *opt)
{
	struct dsmark_qdisc_data *p = qdisc_priv(sch);
	struct nlattr *tb[TCA_DSMARK_MAX + 1];
	int err = -EINVAL;
	u32 default_index = NO_DEFAULT_INDEX;
	u16 indices;
	u8 *mask;

	pr_debug("dsmark_init(sch %p,[qdisc %p],opt %p)\n", sch, p, opt);

	if (!opt)
		goto errout;

	err = nla_parse_nested(tb, TCA_DSMARK_MAX, opt, dsmark_policy);
	if (err < 0)
		goto errout;

	err = -EINVAL;
	if (tb[TCA_DSMARK_INDICES] == NULL)
		goto errout;
	indices = nla_get_u16(tb[TCA_DSMARK_INDICES]);

	/* indices must be a power of two: tc_index is reduced with
	 * "& (indices - 1)" on the dequeue path. */
	if (hweight32(indices) != 1)
		goto errout;

	if (tb[TCA_DSMARK_DEFAULT_INDEX])
		default_index = nla_get_u16(tb[TCA_DSMARK_DEFAULT_INDEX]);

	/* One allocation backs both arrays: mask[0..indices-1] is
	 * followed by value[0..indices-1]. */
	mask = kmalloc(indices * 2, GFP_KERNEL);
	if (mask == NULL) {
		err = -ENOMEM;
		goto errout;
	}

	p->mask = mask;
	memset(p->mask, 0xff, indices);

	p->value = p->mask + indices;
	memset(p->value, 0, indices);

	p->indices = indices;
	p->default_index = default_index;
	p->set_tc_index = nla_get_flag(tb[TCA_DSMARK_SET_TC_INDEX]);

	p->q = qdisc_create_dflt(qdisc_dev(sch), sch->dev_queue,
				 &pfifo_qdisc_ops, sch->handle);
	if (p->q == NULL)
		p->q = &noop_qdisc;

	pr_debug("dsmark_init: qdisc %p\n", p->q);

	err = 0;
errout:
	return err;
}
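/*
 * Example of the index reduction configured above (illustrative numbers):
 * with indices == 64, a packet whose tc_index is 0x2a is re-marked from
 * entry [0x2a & 63] == entry [42] on dequeue.
 */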
static void dsmark_reset(struct Qdisc *sch)
{
	struct dsmark_qdisc_data *p = qdisc_priv(sch);

	pr_debug("dsmark_reset(sch %p,[qdisc %p])\n", sch, p);
	qdisc_reset(p->q);
	sch->q.qlen = 0;
}
static void dsmark_destroy(struct Qdisc *sch)
{
	struct dsmark_qdisc_data *p = qdisc_priv(sch);

	pr_debug("dsmark_destroy(sch %p,[qdisc %p])\n", sch, p);

	tcf_destroy_chain(&p->filter_list);
	qdisc_destroy(p->q);
	kfree(p->mask);
}
static int dsmark_dump_class(struct Qdisc *sch, unsigned long cl,
			     struct sk_buff *skb, struct tcmsg *tcm)
{
	struct dsmark_qdisc_data *p = qdisc_priv(sch);
	struct nlattr *opts = NULL;

	pr_debug("dsmark_dump_class(sch %p,[qdisc %p],class %ld)\n", sch, p, cl);

	if (!dsmark_valid_index(p, cl))
		return -EINVAL;

	tcm->tcm_handle = TC_H_MAKE(TC_H_MAJ(sch->handle), cl - 1);
	tcm->tcm_info = p->q->handle;

	opts = nla_nest_start(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;
	NLA_PUT_U8(skb, TCA_DSMARK_MASK, p->mask[cl - 1]);
	NLA_PUT_U8(skb, TCA_DSMARK_VALUE, p->value[cl - 1]);

	return nla_nest_end(skb, opts);

nla_put_failure:
	nla_nest_cancel(skb, opts);
	return -EMSGSIZE;
}
static int dsmark_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct dsmark_qdisc_data *p = qdisc_priv(sch);
	struct nlattr *opts = NULL;

	opts = nla_nest_start(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;
	NLA_PUT_U16(skb, TCA_DSMARK_INDICES, p->indices);

	if (p->default_index != NO_DEFAULT_INDEX)
		NLA_PUT_U16(skb, TCA_DSMARK_DEFAULT_INDEX, p->default_index);

	if (p->set_tc_index)
		NLA_PUT_FLAG(skb, TCA_DSMARK_SET_TC_INDEX);

	return nla_nest_end(skb, opts);

nla_put_failure:
	nla_nest_cancel(skb, opts);
	return -EMSGSIZE;
}
static const struct Qdisc_class_ops dsmark_class_ops = {
	.graft		= dsmark_graft,
	.leaf		= dsmark_leaf,
	.get		= dsmark_get,
	.put		= dsmark_put,
	.change		= dsmark_change,
	.delete		= dsmark_delete,
	.walk		= dsmark_walk,
	.tcf_chain	= dsmark_find_tcf,
	.bind_tcf	= dsmark_bind_filter,
	.unbind_tcf	= dsmark_put,
	.dump		= dsmark_dump_class,
};
static struct Qdisc_ops dsmark_qdisc_ops __read_mostly = {
	.cl_ops		= &dsmark_class_ops,
	.id		= "dsmark",
	.priv_size	= sizeof(struct dsmark_qdisc_data),
	.enqueue	= dsmark_enqueue,
	.dequeue	= dsmark_dequeue,
	.requeue	= dsmark_requeue,
	.drop		= dsmark_drop,
	.init		= dsmark_init,
	.reset		= dsmark_reset,
	.destroy	= dsmark_destroy,
	.dump		= dsmark_dump,
	.owner		= THIS_MODULE,
};
static int __init dsmark_module_init(void)
{
	return register_qdisc(&dsmark_qdisc_ops);
}

static void __exit dsmark_module_exit(void)
{
	unregister_qdisc(&dsmark_qdisc_ops);
}

module_init(dsmark_module_init)
module_exit(dsmark_module_exit)

MODULE_LICENSE("GPL");