#ifndef __NET_SCHED_CODEL_H
#define __NET_SCHED_CODEL_H

/*
 * Codel - The Controlled-Delay Active Queue Management algorithm
 *
 *  Copyright (C) 2011-2012 Kathleen Nichols <nichols@pollere.com>
 *  Copyright (C) 2011-2012 Van Jacobson <van@pollere.net>
 *  Copyright (C) 2012 Michael D. Taht <dave.taht@bufferbloat.net>
 *  Copyright (C) 2012 Eric Dumazet <edumazet@google.com>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions, and the following disclaimer,
 *    without modification.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. The names of the authors may not be used to endorse or promote products
 *    derived from this software without specific prior written permission.
 *
 * Alternatively, provided that this notice is retained in full, this
 * software may be distributed under the terms of the GNU General
 * Public License ("GPL") version 2, in which case the provisions of the
 * GPL apply INSTEAD OF those given above.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
 * DAMAGE.
 */

#include <linux/types.h>
#include <linux/ktime.h>
#include <linux/skbuff.h>
#include <net/pkt_sched.h>
#include <net/inet_ecn.h>
#include <linux/reciprocal_div.h>
/* Controlling Queue Delay (CoDel) algorithm
 * =========================================
 * Source : Kathleen Nichols and Van Jacobson
 * http://queue.acm.org/detail.cfm?id=2209336
 *
 * Implemented on linux by Dave Taht and Eric Dumazet
 */
/* CoDel uses a 1024 nsec clock, encoded in u32
 * This gives a range of 2199 seconds, because of signed compares
 */
typedef u32 codel_time_t;
typedef s32 codel_tdiff_t;
#define CODEL_SHIFT 10
#define MS2TIME(a) ((a * NSEC_PER_MSEC) >> CODEL_SHIFT)

static inline codel_time_t codel_get_time(void)
{
	u64 ns = ktime_to_ns(ktime_get());

	return ns >> CODEL_SHIFT;
}
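
/* Worked example of the time representation (editorial illustration): one
 * codel_time_t unit is 2^CODEL_SHIFT = 1024 ns, so a u32 compared with
 * signed arithmetic spans roughly 2^31 * 1024 ns ~= 2199 seconds, as noted
 * above. MS2TIME(5) = (5 * NSEC_PER_MSEC) >> 10 = 4882 units ~= 4.999 ms.
 */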
/* Dealing with timer wrapping, according to RFC 1982, as described in wikipedia:
 *  https://en.wikipedia.org/wiki/Serial_number_arithmetic#General_Solution
 * codel_time_after(a,b) returns true if the time a is after time b.
 */
#define codel_time_after(a, b)						\
	(typecheck(codel_time_t, a) &&					\
	 typecheck(codel_time_t, b) &&					\
	 ((s32)((a) - (b)) > 0))
#define codel_time_before(a, b)		codel_time_after(b, a)

#define codel_time_after_eq(a, b)					\
	(typecheck(codel_time_t, a) &&					\
	 typecheck(codel_time_t, b) &&					\
	 ((s32)((a) - (b)) >= 0))
#define codel_time_before_eq(a, b)	codel_time_after_eq(b, a)
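
/* Example of the wrap-around behaviour these macros give (editorial
 * illustration): with codel_time_t a = 5 and b = 0xfffffffb (five units
 * before the u32 wrap), (s32)(a - b) == 10 > 0, so codel_time_after(a, b)
 * is true even though a < b as plain unsigned values.
 */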
/* Qdiscs using codel plugin must use codel_skb_cb in their own cb[] */
struct codel_skb_cb {
	codel_time_t enqueue_time;
};

static struct codel_skb_cb *get_codel_cb(const struct sk_buff *skb)
{
	qdisc_cb_private_validate(skb, sizeof(struct codel_skb_cb));
	return (struct codel_skb_cb *)qdisc_skb_cb(skb)->data;
}

static codel_time_t codel_get_enqueue_time(const struct sk_buff *skb)
{
	return get_codel_cb(skb)->enqueue_time;
}

static void codel_set_enqueue_time(struct sk_buff *skb)
{
	get_codel_cb(skb)->enqueue_time = codel_get_time();
}
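
/* Illustrative sketch (not part of this header) of how a qdisc's enqueue
 * path is expected to stamp packets; the function name below is hypothetical
 * and the shape is loosely modeled on sch_codel.c:
 *
 *	static int my_codel_enqueue(struct sk_buff *skb, struct Qdisc *sch)
 *	{
 *		if (likely(qdisc_qlen(sch) < sch->limit)) {
 *			codel_set_enqueue_time(skb);
 *			return qdisc_enqueue_tail(skb, sch);
 *		}
 *		return qdisc_drop(skb, sch);
 *	}
 */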
static inline u32 codel_time_to_us(codel_time_t val)
{
	u64 valns = ((u64)val << CODEL_SHIFT);

	do_div(valns, NSEC_PER_USEC);
	return (u32)valns;
}
/**
 * struct codel_params - contains codel parameters
 * @target:	target queue size (in time units)
 * @interval:	width of moving time window
 * @ecn:	is Explicit Congestion Notification enabled
 */
struct codel_params {
	codel_time_t	target;
	codel_time_t	interval;
	bool		ecn;
};
/**
 * struct codel_vars - contains codel variables
 * @count:		how many drops we've done since the last time we
 *			entered dropping state
 * @lastcount:		count at entry to dropping state
 * @dropping:		set to true if in dropping state
 * @rec_inv_sqrt:	reciprocal value of sqrt(count) >> 1
 * @first_above_time:	when we went (or will go) continuously above target
 *			for interval
 * @drop_next:		time to drop next packet, or when we dropped last
 * @ldelay:		sojourn time of last dequeued packet
 */
struct codel_vars {
	u32		count;
	u32		lastcount;
	bool		dropping;
	u16		rec_inv_sqrt;
	codel_time_t	first_above_time;
	codel_time_t	drop_next;
	codel_time_t	ldelay;
};
#define REC_INV_SQRT_BITS (8 * sizeof(u16)) /* or sizeof_in_bits(rec_inv_sqrt) */
/* needed shift to get a Q0.32 number from rec_inv_sqrt */
#define REC_INV_SQRT_SHIFT (32 - REC_INV_SQRT_BITS)
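
/* Worked example of the fixed-point layout (editorial illustration):
 * rec_inv_sqrt keeps only the top REC_INV_SQRT_BITS (16) bits of a Q0.32
 * value, so 1/sqrt(1) = 1.0 is stored as ~0U >> REC_INV_SQRT_SHIFT = 0xffff
 * (~0.99998), and shifting it left by REC_INV_SQRT_SHIFT rebuilds the Q0.32
 * value 0xffff0000 used in the arithmetic below.
 */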
/**
 * struct codel_stats - contains codel shared variables and stats
 * @maxpacket:	largest packet we've seen so far
 * @drop_count:	temp count of dropped packets in dequeue()
 * @ecn_mark:	number of packets we ECN marked instead of dropping
 */
struct codel_stats {
	u32		maxpacket;
	u32		drop_count;
	u32		ecn_mark;
};
static void codel_params_init(struct codel_params *params)
{
	params->interval = MS2TIME(100);
	params->target = MS2TIME(5);
	params->ecn = false;
}

static void codel_vars_init(struct codel_vars *vars)
{
	memset(vars, 0, sizeof(*vars));
}

static void codel_stats_init(struct codel_stats *stats)
{
	stats->maxpacket = 256;
}
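
/* Illustrative sketch (not part of this header): a qdisc embedding the codel
 * plugin would typically run all three initializers from its ->init()
 * callback; "q" below stands for the qdisc's private data and is assumed:
 *
 *	codel_params_init(&q->params);
 *	codel_vars_init(&q->vars);
 *	codel_stats_init(&q->stats);
 */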
/*
 * http://en.wikipedia.org/wiki/Methods_of_computing_square_roots#Iterative_methods_for_reciprocal_square_roots
 * new_invsqrt = (invsqrt / 2) * (3 - count * invsqrt^2)
 *
 * Here, invsqrt is a fixed point number (< 1.0), 32bit mantissa, aka Q0.32
 */
static void codel_Newton_step(struct codel_vars *vars)
{
	u32 invsqrt = ((u32)vars->rec_inv_sqrt) << REC_INV_SQRT_SHIFT;
	u32 invsqrt2 = ((u64)invsqrt * invsqrt) >> 32;
	u64 val = (3LL << 32) - ((u64)vars->count * invsqrt2);

	val >>= 2; /* avoid overflow in following multiply */
	val = (val * invsqrt) >> (32 - 2 + 1);

	vars->rec_inv_sqrt = val >> REC_INV_SQRT_SHIFT;
}
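
/* Worked example of the iteration above (values computed by hand, for
 * illustration only): with count = 2 and invsqrt starting at 1.0, successive
 * steps give roughly 0.5, 0.625, 0.693, 0.707, ... approaching
 * 1/sqrt(2) ~= 0.7071, so interval/sqrt(count) can later be computed without
 * either a sqrt() or a divide.
 */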
/*
 * CoDel control_law is t + interval/sqrt(count)
 * We maintain in rec_inv_sqrt the reciprocal value of sqrt(count) to avoid
 * both sqrt() and divide operation.
 */
static codel_time_t codel_control_law(codel_time_t t,
				      codel_time_t interval,
				      u32 rec_inv_sqrt)
{
	return t + reciprocal_divide(interval, rec_inv_sqrt << REC_INV_SQRT_SHIFT);
}
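
/* Example of the control law in action (editorial illustration):
 * reciprocal_divide(A, R) computes ((u64)A * R) >> 32, so with rec_inv_sqrt
 * holding 1/sqrt(count) in Q0.32 the result is t + interval/sqrt(count).
 * For count = 4 (rec_inv_sqrt ~= 0.5) and a 100 ms interval, the next drop
 * is scheduled about 50 ms after t.
 */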
static bool codel_should_drop(const struct sk_buff *skb,
			      struct Qdisc *sch,
			      struct codel_vars *vars,
			      struct codel_params *params,
			      struct codel_stats *stats,
			      codel_time_t now)
{
	bool ok_to_drop;

	if (!skb) {
		vars->first_above_time = 0;
		return false;
	}

	vars->ldelay = now - codel_get_enqueue_time(skb);
	sch->qstats.backlog -= qdisc_pkt_len(skb);

	if (unlikely(qdisc_pkt_len(skb) > stats->maxpacket))
		stats->maxpacket = qdisc_pkt_len(skb);

	if (codel_time_before(vars->ldelay, params->target) ||
	    sch->qstats.backlog <= stats->maxpacket) {
		/* went below - stay below for at least interval */
		vars->first_above_time = 0;
		return false;
	}
	ok_to_drop = false;
	if (vars->first_above_time == 0) {
		/* just went above from below. If we stay above
		 * for at least interval we'll say it's ok to drop
		 */
		vars->first_above_time = now + params->interval;
	} else if (codel_time_after(now, vars->first_above_time)) {
		ok_to_drop = true;
	}
	return ok_to_drop;
}
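
/* Descriptive note on codel_should_drop(): it returns true only once the
 * sojourn time has stayed above target for at least a full interval, and it
 * never asks for a drop while the backlog is no larger than one max-size
 * packet, so a link that holds barely an MTU's worth of bytes is not starved.
 */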
typedef struct sk_buff * (*codel_skb_dequeue_t)(struct codel_vars *vars,
						struct Qdisc *sch);
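
/* A codel_skb_dequeue_t callback just pulls the next packet from the qdisc's
 * own queue; a minimal sketch (illustrative, not from this header), similar
 * to what sch_codel.c supplies:
 *
 *	static struct sk_buff *dequeue(struct codel_vars *vars,
 *				       struct Qdisc *sch)
 *	{
 *		return __skb_dequeue(&sch->q);
 *	}
 */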
static struct sk_buff *codel_dequeue(struct Qdisc *sch,
				     struct codel_params *params,
				     struct codel_vars *vars,
				     struct codel_stats *stats,
				     codel_skb_dequeue_t dequeue_func)
{
	struct sk_buff *skb = dequeue_func(vars, sch);
	codel_time_t now;
	bool drop;

	if (!skb) {
		vars->dropping = false;
		return skb;
	}
	now = codel_get_time();
	drop = codel_should_drop(skb, sch, vars, params, stats, now);
	if (vars->dropping) {
		if (!drop) {
			/* sojourn time below target - leave dropping state */
			vars->dropping = false;
		} else if (codel_time_after_eq(now, vars->drop_next)) {
			/* It's time for the next drop. Drop the current
			 * packet and dequeue the next. The dequeue might
			 * take us out of dropping state.
			 * If not, schedule the next drop.
			 * A large backlog might result in drop rates so high
			 * that the next drop should happen now,
			 * hence the while loop.
			 */
			while (vars->dropping &&
			       codel_time_after_eq(now, vars->drop_next)) {
				vars->count++; /* don't care about possible wrap
						* since there is no more divide
						*/
				codel_Newton_step(vars);
				if (params->ecn && INET_ECN_set_ce(skb)) {
					stats->ecn_mark++;
					vars->drop_next =
						codel_control_law(vars->drop_next,
								  params->interval,
								  vars->rec_inv_sqrt);
					goto end;
				}
				qdisc_drop(skb, sch);
				stats->drop_count++;
				skb = dequeue_func(vars, sch);
				if (!codel_should_drop(skb, sch,
						       vars, params, stats, now)) {
					/* leave dropping state */
					vars->dropping = false;
				} else {
					/* and schedule the next drop */
					vars->drop_next =
						codel_control_law(vars->drop_next,
								  params->interval,
								  vars->rec_inv_sqrt);
				}
			}
		}
	} else if (drop) {
		u32 delta;

		if (params->ecn && INET_ECN_set_ce(skb)) {
			stats->ecn_mark++;
		} else {
			qdisc_drop(skb, sch);
			stats->drop_count++;

			skb = dequeue_func(vars, sch);
			drop = codel_should_drop(skb, sch, vars, params,
						 stats, now);
		}
		vars->dropping = true;
		/* if min went above target close to when we last went below it
		 * assume that the drop rate that controlled the queue on the
		 * last cycle is a good starting point to control it now.
		 */
		delta = vars->count - vars->lastcount;
		if (delta > 1 &&
		    codel_time_before(now - vars->drop_next,
				      16 * params->interval)) {
			vars->count = delta;
			/* we don't care if the rec_inv_sqrt approximation
			 * is not very precise:
			 * Next Newton steps will correct it quadratically.
			 */
			codel_Newton_step(vars);
		} else {
			vars->count = 1;
			vars->rec_inv_sqrt = ~0U >> REC_INV_SQRT_SHIFT;
		}
		vars->lastcount = vars->count;
		vars->drop_next = codel_control_law(now, params->interval,
						    vars->rec_inv_sqrt);
	}
end:
	return skb;
}
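
/* Illustrative sketch (not part of this header) of a qdisc ->dequeue()
 * wiring codel_dequeue() up, loosely modeled on sch_codel.c of the same era;
 * the private structure name "my_codel_data" is assumed:
 *
 *	static struct sk_buff *my_codel_qdisc_dequeue(struct Qdisc *sch)
 *	{
 *		struct my_codel_data *q = qdisc_priv(sch);
 *		struct sk_buff *skb;
 *
 *		skb = codel_dequeue(sch, &q->params, &q->vars, &q->stats,
 *				    dequeue);
 *		if (q->stats.drop_count && sch->q.qlen) {
 *			qdisc_tree_decrease_qlen(sch, q->stats.drop_count);
 *			q->stats.drop_count = 0;
 *		}
 *		if (skb)
 *			qdisc_bstats_update(sch, skb);
 *		return skb;
 *	}
 */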
#endif