2 * net/sched/gen_estimator.c Simple rate estimator.
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
9 * Authors: Alexey Kuznetsov, <kuznet@ms2.inr.ac.ru>
12 * Jamal Hadi Salim - moved it to net/core and reshuffled
13 * names to make it usable in general net subsystem.
16 #include <asm/uaccess.h>
17 #include <asm/system.h>
18 #include <linux/bitops.h>
19 #include <linux/module.h>
20 #include <linux/types.h>
21 #include <linux/kernel.h>
22 #include <linux/jiffies.h>
23 #include <linux/string.h>
25 #include <linux/socket.h>
26 #include <linux/sockios.h>
28 #include <linux/errno.h>
29 #include <linux/interrupt.h>
30 #include <linux/netdevice.h>
31 #include <linux/skbuff.h>
32 #include <linux/rtnetlink.h>
33 #include <linux/init.h>
34 #include <linux/rbtree.h>
36 #include <net/gen_stats.h>
39 This code is NOT intended to be used for statistics collection,
40 its purpose is to provide a base for statistical multiplexing
41 for controlled load service.
42 If you need only statistics, run a user level daemon which
43 periodically reads byte counters.
45 Unfortunately, rate estimation is not a very easy task.
46 F.e. I did not find a simple way to estimate the current peak rate
47 and even failed to formulate the problem 8)8)
49 So I preferred not to build an estimator into the scheduler,
50 but run this task separately.
51 Ideally, it should be kernel thread(s), but for now it runs
52 from timers, which puts apparent top bounds on the number of rated
53 flows, has minimal overhead on small, but is enough
54 to handle controlled load service, sets of aggregates.
56 We measure rate over A=(1<<interval) seconds and evaluate EWMA:
58 avrate = avrate*(1-W) + rate*W
60 where W is chosen as negative power of 2: W = 2^(-ewma_log)
62 * The resulting time constant is:
 *
 *	T = A/(-ln(1-W))
69 * The stored value for avbps is scaled by 2^5, so that maximal
70 rate is ~1Gbit, avpps is scaled by 2^10.
72 * Minimal interval is HZ/4=250msec (it is the greatest common divisor
73 for HZ=100 and HZ=1024 8)), maximal interval
74 is (HZ*2^EST_MAX_INTERVAL)/4 = 8sec. Shorter intervals
75 are too expensive, longer ones can be implemented
76 at user level painlessly.
79 #define EST_MAX_INTERVAL 5
83 struct list_head list
;
84 struct gnet_stats_basic
*bstats
;
85 struct gnet_stats_rate_est
*rate_est
;
86 spinlock_t
*stats_lock
;
92 struct rcu_head e_rcu
;
96 struct gen_estimator_head
98 struct timer_list timer
;
99 struct list_head list
;
102 static struct gen_estimator_head elist
[EST_MAX_INTERVAL
+1];
104 /* Protects against NULL dereference */
105 static DEFINE_RWLOCK(est_lock
);
107 /* Protects against soft lockup during large deletion */
108 static struct rb_root est_root
= RB_ROOT
;
110 static void est_timer(unsigned long arg
)
113 struct gen_estimator
*e
;
116 list_for_each_entry_rcu(e
, &elist
[idx
].list
, list
) {
121 spin_lock(e
->stats_lock
);
122 read_lock(&est_lock
);
123 if (e
->bstats
== NULL
)
126 nbytes
= e
->bstats
->bytes
;
127 npackets
= e
->bstats
->packets
;
128 rate
= (nbytes
- e
->last_bytes
)<<(7 - idx
);
129 e
->last_bytes
= nbytes
;
130 e
->avbps
+= ((long)rate
- (long)e
->avbps
) >> e
->ewma_log
;
131 e
->rate_est
->bps
= (e
->avbps
+0xF)>>5;
133 rate
= (npackets
- e
->last_packets
)<<(12 - idx
);
134 e
->last_packets
= npackets
;
135 e
->avpps
+= ((long)rate
- (long)e
->avpps
) >> e
->ewma_log
;
136 e
->rate_est
->pps
= (e
->avpps
+0x1FF)>>10;
138 read_unlock(&est_lock
);
139 spin_unlock(e
->stats_lock
);
142 if (!list_empty(&elist
[idx
].list
))
143 mod_timer(&elist
[idx
].timer
, jiffies
+ ((HZ
/4) << idx
));
147 static void gen_add_node(struct gen_estimator
*est
)
149 struct rb_node
**p
= &est_root
.rb_node
, *parent
= NULL
;
152 struct gen_estimator
*e
;
155 e
= rb_entry(parent
, struct gen_estimator
, node
);
157 if (est
->bstats
> e
->bstats
)
158 p
= &parent
->rb_right
;
160 p
= &parent
->rb_left
;
162 rb_link_node(&est
->node
, parent
, p
);
163 rb_insert_color(&est
->node
, &est_root
);
166 static struct gen_estimator
*gen_find_node(struct gnet_stats_basic
*bstats
,
167 struct gnet_stats_rate_est
*rate_est
)
169 struct rb_node
*p
= est_root
.rb_node
;
172 struct gen_estimator
*e
;
174 e
= rb_entry(p
, struct gen_estimator
, node
);
176 if (bstats
> e
->bstats
)
178 else if (bstats
< e
->bstats
|| rate_est
!= e
->rate_est
)
187 * gen_new_estimator - create a new rate estimator
188 * @bstats: basic statistics
189 * @rate_est: rate estimator statistics
190 * @stats_lock: statistics lock
191 * @opt: rate estimator configuration TLV
193 * Creates a new rate estimator with &bstats as source and &rate_est
194 * as destination. A new timer with the interval specified in the
195 * configuration TLV is created. Upon each interval, the latest statistics
196 * will be read from &bstats and the estimated rate will be stored in
197 * &rate_est with the statistics lock grabed during this period.
199 * Returns 0 on success or a negative error code.
201 * NOTE: Called under rtnl_mutex
203 int gen_new_estimator(struct gnet_stats_basic
*bstats
,
204 struct gnet_stats_rate_est
*rate_est
,
205 spinlock_t
*stats_lock
,
208 struct gen_estimator
*est
;
209 struct gnet_estimator
*parm
= nla_data(opt
);
212 if (nla_len(opt
) < sizeof(*parm
))
215 if (parm
->interval
< -2 || parm
->interval
> 3)
218 est
= kzalloc(sizeof(*est
), GFP_KERNEL
);
222 idx
= parm
->interval
+ 2;
223 est
->bstats
= bstats
;
224 est
->rate_est
= rate_est
;
225 est
->stats_lock
= stats_lock
;
226 est
->ewma_log
= parm
->ewma_log
;
227 est
->last_bytes
= bstats
->bytes
;
228 est
->avbps
= rate_est
->bps
<<5;
229 est
->last_packets
= bstats
->packets
;
230 est
->avpps
= rate_est
->pps
<<10;
232 if (!elist
[idx
].timer
.function
) {
233 INIT_LIST_HEAD(&elist
[idx
].list
);
234 setup_timer(&elist
[idx
].timer
, est_timer
, idx
);
237 if (list_empty(&elist
[idx
].list
))
238 mod_timer(&elist
[idx
].timer
, jiffies
+ ((HZ
/4) << idx
));
240 list_add_rcu(&est
->list
, &elist
[idx
].list
);
246 static void __gen_kill_estimator(struct rcu_head
*head
)
248 struct gen_estimator
*e
= container_of(head
,
249 struct gen_estimator
, e_rcu
);
254 * gen_kill_estimator - remove a rate estimator
255 * @bstats: basic statistics
256 * @rate_est: rate estimator statistics
258 * Removes the rate estimator specified by &bstats and &rate_est.
260 * NOTE: Called under rtnl_mutex
262 void gen_kill_estimator(struct gnet_stats_basic
*bstats
,
263 struct gnet_stats_rate_est
*rate_est
)
265 struct gen_estimator
*e
;
267 while ((e
= gen_find_node(bstats
, rate_est
))) {
268 rb_erase(&e
->node
, &est_root
);
270 write_lock_bh(&est_lock
);
272 write_unlock_bh(&est_lock
);
274 list_del_rcu(&e
->list
);
275 call_rcu(&e
->e_rcu
, __gen_kill_estimator
);
280 * gen_replace_estimator - replace rate estimator configuration
281 * @bstats: basic statistics
282 * @rate_est: rate estimator statistics
283 * @stats_lock: statistics lock
284 * @opt: rate estimator configuration TLV
286 * Replaces the configuration of a rate estimator by calling
287 * gen_kill_estimator() and gen_new_estimator().
289 * Returns 0 on success or a negative error code.
291 int gen_replace_estimator(struct gnet_stats_basic
*bstats
,
292 struct gnet_stats_rate_est
*rate_est
,
293 spinlock_t
*stats_lock
, struct nlattr
*opt
)
295 gen_kill_estimator(bstats
, rate_est
);
296 return gen_new_estimator(bstats
, rate_est
, stats_lock
, opt
);
EXPORT_SYMBOL(gen_kill_estimator);
EXPORT_SYMBOL(gen_new_estimator);
EXPORT_SYMBOL(gen_replace_estimator);