/*
 * net/sched/sch_choke.c	CHOKE scheduler
 *
 * Copyright (c) 2011 Stephen Hemminger <shemminger@vyatta.com>
 * Copyright (c) 2011 Eric Dumazet <eric.dumazet@gmail.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/reciprocal_div.h>
#include <linux/vmalloc.h>
#include <net/pkt_sched.h>
#include <net/inet_ecn.h>
#include <net/red.h>
#include <linux/ip.h>
#include <net/ip.h>
#include <linux/ipv6.h>
#include <net/ipv6.h>
/*
   CHOKe stateless AQM for fair bandwidth allocation
   =================================================

   CHOKe (CHOose and Keep for responsive flows, CHOose and Kill for
   unresponsive flows) is a variant of RED that penalizes misbehaving flows but
   maintains no flow state. The difference from RED is an additional step
   during the enqueuing process. If the average queue size is over the
   low threshold (qmin), a packet is chosen at random from the queue.
   If both the new and chosen packet are from the same flow, both
   are dropped. Unlike RED, CHOKe is not really a "classful" qdisc because it
   needs to access packets in queue randomly. It has a minimal class
   interface to allow overriding the builtin flow classifier with
   filters.

   Source:
   R. Pan, B. Prabhakar, and K. Psounis, "CHOKe, A Stateless
   Active Queue Management Scheme for Approximating Fair Bandwidth Allocation",
   Proceedings of IEEE INFOCOM, 2000

   A. Tang, J. Wang, S. Low, "Understanding CHOKe: Throughput and Spatial
   Characteristics", IEEE/ACM Transactions on Networking, 2004
 */
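/*
 * A rough pseudocode summary of the enqueue decision implemented in
 * choke_enqueue() below (names follow struct red_parms; this sketch
 * omits the RED qcount/qR bookkeeping):
 *
 *	qavg = EWMA of queue length (red_calc_qavg)
 *	if (qavg <= qth_min)
 *		admit the packet
 *	else if (new packet matches a randomly drawn queued packet)
 *		drop both (the CHOKe step)
 *	else if (qavg > qth_max)
 *		mark or drop (RED forced action)
 *	else
 *		mark or drop with RED probability, otherwise admit
 */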
/* Upper bound on size of sk_buff table (packets) */
#define CHOKE_MAX_QUEUE	(128*1024 - 1)
struct choke_sched_data {
/* Parameters */
	u32		 limit;
	unsigned char	 flags;

	struct red_parms parms;

/* Variables */
	struct tcf_proto *filter_list;
	struct {
		u32	prob_drop;	/* Early probability drops */
		u32	prob_mark;	/* Early probability marks */
		u32	forced_drop;	/* Forced drops, qavg > max_thresh */
		u32	forced_mark;	/* Forced marks, qavg > max_thresh */
		u32	pdrop;		/* Drops due to queue limits */
		u32	other;		/* Drops due to drop() calls */
		u32	matched;	/* Drops due to flow match */
	} stats;

	unsigned int	 head;
	unsigned int	 tail;

	unsigned int	 tab_mask; /* size - 1 */

	struct sk_buff **tab;
};
/* deliver a random number between 0 and N - 1 */
static u32 random_N(unsigned int N)
{
	return reciprocal_divide(random32(), N);
}
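/*
 * A worked example (illustrative values only): reciprocal_divide(A, R)
 * computes ((u64)A * R) >> 32, so a uniform random32() value A scales
 * into [0, N) without a division, e.g. A = 0x80000000 with N = 8
 * yields (2^31 * 8) >> 32 = 4.
 */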
/* number of elements in queue including holes */
static unsigned int choke_len(const struct choke_sched_data *q)
{
	return (q->tail - q->head) & q->tab_mask;
}
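/*
 * Example (illustrative values): with tab_mask == 7 (an 8-slot table),
 * head == 6 and tail == 2, (2 - 6) & 7 == 4 slots are counted; the
 * unsigned wrap-around is only correct because the table size is a
 * power of two.
 */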
/* Is ECN parameter configured */
static int use_ecn(const struct choke_sched_data *q)
{
	return q->flags & TC_RED_ECN;
}
/* Should packets over max just be dropped (versus marked) */
static int use_harddrop(const struct choke_sched_data *q)
{
	return q->flags & TC_RED_HARDDROP;
}
/* Move head pointer forward to skip over holes */
static void choke_zap_head_holes(struct choke_sched_data *q)
{
	do {
		q->head = (q->head + 1) & q->tab_mask;
		if (q->head == q->tail)
			break;
	} while (q->tab[q->head] == NULL);
}
/* Move tail pointer backwards to reuse holes */
static void choke_zap_tail_holes(struct choke_sched_data *q)
{
	do {
		q->tail = (q->tail - 1) & q->tab_mask;
		if (q->head == q->tail)
			break;
	} while (q->tab[q->tail] == NULL);
}
/* Drop packet from queue array by creating a "hole" */
static void choke_drop_by_idx(struct Qdisc *sch, unsigned int idx)
{
	struct choke_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb = q->tab[idx];

	q->tab[idx] = NULL;

	if (idx == q->head)
		choke_zap_head_holes(q);
	if (idx == q->tail)
		choke_zap_tail_holes(q);

	sch->qstats.backlog -= qdisc_pkt_len(skb);
	qdisc_drop(skb, sch);
	qdisc_tree_decrease_qlen(sch, 1);
	--sch->q.qlen;
}
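/*
 * Example: dropping the packet at q->head leaves tab[head] == NULL,
 * and choke_zap_head_holes() advances head past the run of NULLs;
 * holes left in the middle of the table are skipped the same way once
 * head reaches them at dequeue time.
 */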
/*
 * Compare flow of two packets
 * Returns true only if source and destination address and port match;
 * false for special cases.
 */
static bool choke_match_flow(struct sk_buff *skb1,
			     struct sk_buff *skb2)
{
	int off1, off2, poff;
	const u32 *ports1, *ports2;
	u32 _ports1, _ports2;
	u8 ip_proto;
	u32 hash1;

	if (skb1->protocol != skb2->protocol)
		return false;

	/* Use rxhash value as quick check */
	hash1 = skb_get_rxhash(skb1);
	if (!hash1 || hash1 != skb_get_rxhash(skb2))
		return false;

	/* Probably match, but be sure to avoid hash collisions */
	off1 = skb_network_offset(skb1);
	off2 = skb_network_offset(skb2);

	switch (skb1->protocol) {
	case __constant_htons(ETH_P_IP): {
		const struct iphdr *ip1, *ip2;
		struct iphdr _ip1, _ip2;

		ip1 = skb_header_pointer(skb1, off1, sizeof(_ip1), &_ip1);
		ip2 = skb_header_pointer(skb2, off2, sizeof(_ip2), &_ip2);
		if (!ip1 || !ip2)
			return false;
		ip_proto = ip1->protocol;
		if (ip_proto != ip2->protocol ||
		    ip1->saddr != ip2->saddr || ip1->daddr != ip2->daddr)
			return false;

		if (ip_is_fragment(ip1) | ip_is_fragment(ip2))
			ip_proto = 0;
		off1 += ip1->ihl * 4;
		off2 += ip2->ihl * 4;
		break;
	}

	case __constant_htons(ETH_P_IPV6): {
		const struct ipv6hdr *ip1, *ip2;
		struct ipv6hdr _ip1, _ip2;

		ip1 = skb_header_pointer(skb1, off1, sizeof(_ip1), &_ip1);
		ip2 = skb_header_pointer(skb2, off2, sizeof(_ip2), &_ip2);
		if (!ip1 || !ip2)
			return false;

		ip_proto = ip1->nexthdr;
		if (ip_proto != ip2->nexthdr ||
		    ipv6_addr_cmp(&ip1->saddr, &ip2->saddr) ||
		    ipv6_addr_cmp(&ip1->daddr, &ip2->daddr))
			return false;
		off1 += 40;
		off2 += 40;
		break;
	}

	default: /* Maybe compare MAC header here? */
		return false;
	}

	poff = proto_ports_offset(ip_proto);
	if (poff < 0)
		return true;

	off1 += poff;
	off2 += poff;

	ports1 = skb_header_pointer(skb1, off1, sizeof(_ports1), &_ports1);
	ports2 = skb_header_pointer(skb2, off2, sizeof(_ports2), &_ports2);
	if (!ports1 || !ports2)
		return false;

	return *ports1 == *ports2;
}
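/*
 * Summing up: two packets count as the same flow only when protocol,
 * source/destination addresses and the 32-bit source/destination port
 * pair all agree; for fragments ip_proto is forced to 0, so
 * proto_ports_offset() returns < 0 and the ports are not compared.
 */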
struct choke_skb_cb {
	u16 classid;
};

static inline struct choke_skb_cb *choke_skb_cb(const struct sk_buff *skb)
{
	BUILD_BUG_ON(sizeof(skb->cb) <
		sizeof(struct qdisc_skb_cb) + sizeof(struct choke_skb_cb));
	return (struct choke_skb_cb *)qdisc_skb_cb(skb)->data;
}
static inline void choke_set_classid(struct sk_buff *skb, u16 classid)
{
	choke_skb_cb(skb)->classid = classid;
}
static u16 choke_get_classid(const struct sk_buff *skb)
{
	return choke_skb_cb(skb)->classid;
}
/*
 * Classify flow using either:
 *  1. pre-existing classification result in skb
 *  2. fast internal classification
 *  3. use TC filter based classification
 */
static bool choke_classify(struct sk_buff *skb,
			   struct Qdisc *sch, int *qerr)
{
	struct choke_sched_data *q = qdisc_priv(sch);
	struct tcf_result res;
	int result;

	result = tc_classify(skb, q->filter_list, &res);
	if (result >= 0) {
#ifdef CONFIG_NET_CLS_ACT
		switch (result) {
		case TC_ACT_STOLEN:
		case TC_ACT_QUEUED:
			*qerr = NET_XMIT_SUCCESS | __NET_XMIT_STOLEN;
		case TC_ACT_SHOT:
			return false;
		}
#endif
		choke_set_classid(skb, TC_H_MIN(res.classid));
		return true;
	}

	return false;
}
/*
 * Select a packet at random from queue
 * HACK: since queue can have holes from previous deletion; retry several
 *	 times to find a random skb but then just give up and return the head
 * Will return NULL if queue is empty (q->head == q->tail)
 */
static struct sk_buff *choke_peek_random(const struct choke_sched_data *q,
					 unsigned int *pidx)
{
	struct sk_buff *skb;
	int retrys = 3;

	do {
		*pidx = (q->head + random_N(choke_len(q))) & q->tab_mask;
		skb = q->tab[*pidx];
		if (skb)
			return skb;
	} while (--retrys > 0);

	return q->tab[*pidx = q->head];
}
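/*
 * Note: when the retries are exhausted the head slot is returned, so on
 * a table with many holes the "random" pick is biased toward the oldest
 * packet; that is the trade-off the HACK comment above accepts.
 */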
/*
 * Compare new packet with random packet in queue
 * returns true if matched and sets *pidx
 */
static bool choke_match_random(const struct choke_sched_data *q,
			       struct sk_buff *nskb,
			       unsigned int *pidx)
{
	struct sk_buff *oskb;

	if (q->head == q->tail)
		return false;

	oskb = choke_peek_random(q, pidx);
	if (q->filter_list)
		return choke_get_classid(nskb) == choke_get_classid(oskb);

	return choke_match_flow(oskb, nskb);
}
static int choke_enqueue(struct sk_buff *skb, struct Qdisc *sch)
{
	struct choke_sched_data *q = qdisc_priv(sch);
	struct red_parms *p = &q->parms;
	int ret = NET_XMIT_SUCCESS | __NET_XMIT_BYPASS;

	if (q->filter_list) {
		/* If using external classifiers, get result and record it. */
		if (!choke_classify(skb, sch, &ret))
			goto other_drop;	/* Packet was eaten by filter */
	}

	/* Compute average queue usage (see RED) */
	p->qavg = red_calc_qavg(p, sch->q.qlen);
	if (red_is_idling(p))
		red_end_of_idle_period(p);

	/* Is queue small? */
	if (p->qavg <= p->qth_min)
		p->qcount = -1;
	else {
		unsigned int idx;

		/* Draw a packet at random from queue and compare flow */
		if (choke_match_random(q, skb, &idx)) {
			q->stats.matched++;
			choke_drop_by_idx(sch, idx);
			goto congestion_drop;
		}

		/* Queue is large, always mark/drop */
		if (p->qavg > p->qth_max) {
			p->qcount = -1;

			sch->qstats.overlimits++;
			if (use_harddrop(q) || !use_ecn(q) ||
			    !INET_ECN_set_ce(skb)) {
				q->stats.forced_drop++;
				goto congestion_drop;
			}

			q->stats.forced_mark++;
		} else if (++p->qcount) {
			if (red_mark_probability(p, p->qavg)) {
				p->qcount = 0;
				p->qR = red_random(p);

				sch->qstats.overlimits++;
				if (!use_ecn(q) || !INET_ECN_set_ce(skb)) {
					q->stats.prob_drop++;
					goto congestion_drop;
				}

				q->stats.prob_mark++;
			}
		} else
			p->qR = red_random(p);
	}

	/* Admit new packet */
	if (sch->q.qlen < q->limit) {
		q->tab[q->tail] = skb;
		q->tail = (q->tail + 1) & q->tab_mask;
		++sch->q.qlen;
		sch->qstats.backlog += qdisc_pkt_len(skb);
		return NET_XMIT_SUCCESS;
	}

	q->stats.pdrop++;
	sch->qstats.drops++;
	kfree_skb(skb);
	return NET_XMIT_DROP;

congestion_drop:
	qdisc_drop(skb, sch);
	return NET_XMIT_CN;

other_drop:
	if (ret & __NET_XMIT_BYPASS)
		sch->qstats.drops++;
	kfree_skb(skb);
	return ret;
}
static struct sk_buff *choke_dequeue(struct Qdisc *sch)
{
	struct choke_sched_data *q = qdisc_priv(sch);
	struct sk_buff *skb;

	if (q->head == q->tail) {
		if (!red_is_idling(&q->parms))
			red_start_of_idle_period(&q->parms);
		return NULL;
	}

	skb = q->tab[q->head];
	q->tab[q->head] = NULL;
	choke_zap_head_holes(q);
	--sch->q.qlen;
	sch->qstats.backlog -= qdisc_pkt_len(skb);
	qdisc_bstats_update(sch, skb);

	return skb;
}
static unsigned int choke_drop(struct Qdisc *sch)
{
	struct choke_sched_data *q = qdisc_priv(sch);
	unsigned int len;

	len = qdisc_queue_drop(sch);
	if (len > 0)
		q->stats.other++;
	else {
		if (!red_is_idling(&q->parms))
			red_start_of_idle_period(&q->parms);
	}

	return len;
}
static void choke_reset(struct Qdisc *sch)
{
	struct choke_sched_data *q = qdisc_priv(sch);

	red_restart(&q->parms);
}
static const struct nla_policy choke_policy[TCA_CHOKE_MAX + 1] = {
	[TCA_CHOKE_PARMS]	= { .len = sizeof(struct tc_red_qopt) },
	[TCA_CHOKE_STAB]	= { .len = RED_STAB_SIZE },
};
static void choke_free(void *addr)
{
	if (addr) {
		if (is_vmalloc_addr(addr))
			vfree(addr);
		else
			kfree(addr);
	}
}
static int choke_change(struct Qdisc *sch, struct nlattr *opt)
{
	struct choke_sched_data *q = qdisc_priv(sch);
	struct nlattr *tb[TCA_CHOKE_MAX + 1];
	const struct tc_red_qopt *ctl;
	int err;
	struct sk_buff **old = NULL;
	unsigned int mask;

	if (opt == NULL)
		return -EINVAL;

	err = nla_parse_nested(tb, TCA_CHOKE_MAX, opt, choke_policy);
	if (err < 0)
		return err;

	if (tb[TCA_CHOKE_PARMS] == NULL ||
	    tb[TCA_CHOKE_STAB] == NULL)
		return -EINVAL;

	ctl = nla_data(tb[TCA_CHOKE_PARMS]);

	if (ctl->limit > CHOKE_MAX_QUEUE)
		return -EINVAL;

	mask = roundup_pow_of_two(ctl->limit + 1) - 1;
	if (mask != q->tab_mask) {
		struct sk_buff **ntab;

		ntab = kcalloc(mask + 1, sizeof(struct sk_buff *), GFP_KERNEL);
		if (!ntab)
			ntab = vzalloc((mask + 1) * sizeof(struct sk_buff *));
		if (!ntab)
			return -ENOMEM;

		sch_tree_lock(sch);
		old = q->tab;
		if (old) {
			unsigned int oqlen = sch->q.qlen, tail = 0;

			while (q->head != q->tail) {
				struct sk_buff *skb = q->tab[q->head];

				q->head = (q->head + 1) & q->tab_mask;
				if (!skb)
					continue;
				if (tail < mask) {
					ntab[tail++] = skb;
					continue;
				}
				sch->qstats.backlog -= qdisc_pkt_len(skb);
				--sch->q.qlen;
				qdisc_drop(skb, sch);
			}
			qdisc_tree_decrease_qlen(sch, oqlen - sch->q.qlen);
			q->head = 0;
			q->tail = tail;
		}

		q->tab_mask = mask;
		q->tab = ntab;
	} else
		sch_tree_lock(sch);

	q->flags = ctl->flags;
	q->limit = ctl->limit;

	red_set_parms(&q->parms, ctl->qth_min, ctl->qth_max, ctl->Wlog,
		      ctl->Plog, ctl->Scell_log,
		      nla_data(tb[TCA_CHOKE_STAB]));

	if (q->head == q->tail)
		red_end_of_idle_period(&q->parms);

	sch_tree_unlock(sch);
	choke_free(old);
	return 0;
}
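/*
 * Example configuration from userspace (a sketch; option names as in
 * tc-choke(8), device and values are hypothetical):
 *
 *	tc qdisc add dev eth0 root choke limit 1000 bandwidth 10mbit \
 *		min 100 max 300 avpkt 1000 burst 150 ecn
 */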
static int choke_init(struct Qdisc *sch, struct nlattr *opt)
{
	return choke_change(sch, opt);
}
static int choke_dump(struct Qdisc *sch, struct sk_buff *skb)
{
	struct choke_sched_data *q = qdisc_priv(sch);
	struct nlattr *opts = NULL;
	struct tc_red_qopt opt = {
		.limit		= q->limit,
		.flags		= q->flags,
		.qth_min	= q->parms.qth_min >> q->parms.Wlog,
		.qth_max	= q->parms.qth_max >> q->parms.Wlog,
		.Wlog		= q->parms.Wlog,
		.Plog		= q->parms.Plog,
		.Scell_log	= q->parms.Scell_log,
	};

	opts = nla_nest_start(skb, TCA_OPTIONS);
	if (opts == NULL)
		goto nla_put_failure;

	NLA_PUT(skb, TCA_CHOKE_PARMS, sizeof(opt), &opt);
	return nla_nest_end(skb, opts);

nla_put_failure:
	nla_nest_cancel(skb, opts);
	return -EMSGSIZE;
}
static int choke_dump_stats(struct Qdisc *sch, struct gnet_dump *d)
{
	struct choke_sched_data *q = qdisc_priv(sch);
	struct tc_choke_xstats st = {
		.early	= q->stats.prob_drop + q->stats.forced_drop,
		.marked	= q->stats.prob_mark + q->stats.forced_mark,
		.pdrop	= q->stats.pdrop,
		.other	= q->stats.other,
		.matched = q->stats.matched,
	};

	return gnet_stats_copy_app(d, &st, sizeof(st));
}
static void choke_destroy(struct Qdisc *sch)
{
	struct choke_sched_data *q = qdisc_priv(sch);

	tcf_destroy_chain(&q->filter_list);
	choke_free(q->tab);
}
static struct Qdisc *choke_leaf(struct Qdisc *sch, unsigned long arg)
{
	return NULL;
}

static unsigned long choke_get(struct Qdisc *sch, u32 classid)
{
	return 0;
}

static void choke_put(struct Qdisc *q, unsigned long cl)
{
}

static unsigned long choke_bind(struct Qdisc *sch, unsigned long parent,
				u32 classid)
{
	return 0;
}
static struct tcf_proto **choke_find_tcf(struct Qdisc *sch, unsigned long cl)
{
	struct choke_sched_data *q = qdisc_priv(sch);

	if (cl)
		return NULL;
	return &q->filter_list;
}
static int choke_dump_class(struct Qdisc *sch, unsigned long cl,
			    struct sk_buff *skb, struct tcmsg *tcm)
{
	tcm->tcm_handle |= TC_H_MIN(cl);
	return 0;
}
static void choke_walk(struct Qdisc *sch, struct qdisc_walker *arg)
{
	if (!arg->stop) {
		if (arg->fn(sch, 1, arg) < 0) {
			arg->stop = 1;
			return;
		}
		arg->count++;
	}
}
static const struct Qdisc_class_ops choke_class_ops = {
	.leaf		=	choke_leaf,
	.get		=	choke_get,
	.put		=	choke_put,
	.tcf_chain	=	choke_find_tcf,
	.bind_tcf	=	choke_bind,
	.unbind_tcf	=	choke_put,
	.dump		=	choke_dump_class,
	.walk		=	choke_walk,
};
static struct sk_buff *choke_peek_head(struct Qdisc *sch)
{
	struct choke_sched_data *q = qdisc_priv(sch);

	return (q->head != q->tail) ? q->tab[q->head] : NULL;
}
static struct Qdisc_ops choke_qdisc_ops __read_mostly = {
	.id		=	"choke",
	.priv_size	=	sizeof(struct choke_sched_data),
	.cl_ops		=	&choke_class_ops,
	.enqueue	=	choke_enqueue,
	.dequeue	=	choke_dequeue,
	.peek		=	choke_peek_head,
	.drop		=	choke_drop,
	.init		=	choke_init,
	.destroy	=	choke_destroy,
	.reset		=	choke_reset,
	.change		=	choke_change,
	.dump		=	choke_dump,
	.dump_stats	=	choke_dump_stats,
	.owner		=	THIS_MODULE,
};
static int __init choke_module_init(void)
{
	return register_qdisc(&choke_qdisc_ops);
}

static void __exit choke_module_exit(void)
{
	unregister_qdisc(&choke_qdisc_ops);
}

module_init(choke_module_init)
module_exit(choke_module_exit)

MODULE_LICENSE("GPL");