// SPDX-License-Identifier: GPL-2.0
/*
 *  Floating proportions with flexible aging period
 *
 *  Copyright (C) 2011, SUSE, Jan Kara <jack@suse.cz>
 *
 * The goal of this code is: Given different types of event, measure the
 * proportion of each type of event over time. The proportions are measured
 * with an exponentially decaying history to give smooth transitions. The
 * formula expressing the proportion of events of type 'j' is:
 *
 *   p_{j} = (\Sum_{i>=0} x_{i,j}/2^{i+1})/(\Sum_{i>=0} x_i/2^{i+1})
 *
 * where x_{i,j} is j's number of events in the i-th last time period and x_i
 * is the total number of events in the i-th last time period.
 *
 * Note that the p_{j}'s are normalised, i.e.
 *
 *   \Sum_{j} p_{j} = 1.
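 *
 * As a quick illustration (numbers chosen arbitrarily, all periods before the
 * last two assumed empty): with x_{0,j} = 3, x_{1,j} = 1, x_0 = 4 and
 * x_1 = 4 we get
 *
 *   p_{j} = (3/2 + 1/4) / (4/2 + 4/4) = 1.75 / 3 ~= 0.58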
 *
 * This formula can be computed straightforwardly by maintaining the
 * denominator (let's call it 'd') and, for each event type, its numerator
 * (let's call it 'n_j'). When an event of type 'j' happens, we simply need
 * to do:
 *   n_j++; d++;
 *
 * When a new period is declared, we could do:
 *   d /= 2
 *   for each j
 *     n_j /= 2
 *
 * To avoid iterating over all event types, we instead shift the numerator of
 * event j lazily, when someone asks for the proportion of event j or when
 * event j occurs. This can be implemented trivially by remembering the last
 * period in which something happened with the proportion of type j: shifting
 * n_j right by the number of periods missed since then is equivalent to
 * halving it at each of the intervening period transitions.
 */
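
/*
 * A minimal usage sketch (illustration only, not taken from a real caller;
 * error handling and the choice of when to declare new periods are up to
 * the user):
 *
 *	unsigned long num, den;
 *	struct fprop_global fg;
 *	struct fprop_local_percpu fl;
 *
 *	fprop_global_init(&fg, GFP_KERNEL);
 *	fprop_local_init_percpu(&fl, GFP_KERNEL);
 *
 *	__fprop_add_percpu(&fg, &fl, 1);	// one event of type 'fl'
 *	fprop_new_period(&fg, 1);		// declare a period boundary
 *	fprop_fraction_percpu(&fg, &fl, &num, &den);
 *						// proportion of 'fl' is num/den
 *
 *	fprop_local_destroy_percpu(&fl);
 *	fprop_global_destroy(&fg);
 */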
#include <linux/flex_proportions.h>
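
/*
 * Initialise the global proportion state. Returns 0 on success or the error
 * from the underlying percpu counter allocation.
 */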
int fprop_global_init(struct fprop_global *p, gfp_t gfp)
{
        int err;

        p->period = 0;
        /* Use 1 to avoid dealing with periods with 0 events... */
        err = percpu_counter_init(&p->events, 1, gfp);
        if (err)
                return err;
        seqcount_init(&p->sequence);
        return 0;
}
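
/* Free the resources used by the global proportion state */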
void fprop_global_destroy(struct fprop_global *p)
{
        percpu_counter_destroy(&p->events);
}

/*
 * Declare @periods new periods. It is up to the caller to make sure period
 * transitions cannot happen in parallel.
 *
 * The function returns true if the proportions are still defined and false
 * if aging zeroed out all events. This can be used to detect whether declaring
 * further periods has any effect.
 */
bool fprop_new_period(struct fprop_global *p, int periods)
{
        s64 events = percpu_counter_sum(&p->events);

        /*
         * Don't do anything if there are no events.
         */
        if (events <= 1)
                return false;
        preempt_disable_nested();
        write_seqcount_begin(&p->sequence);
        /*
         * Compute the number of events to subtract so that the counter ends
         * up scaled down by 2^periods. For 64 or more periods everything ages
         * away (and the shift would be out of range), so remove all events.
         */
        if (periods < 64)
                events -= events >> periods;
        /* Use addition to avoid losing events happening between sum and set */
        percpu_counter_add(&p->events, -events);
        p->period += periods;
        write_seqcount_end(&p->sequence);
        preempt_enable_nested();

        return true;
}

/*
 * ---- PERCPU ----
 */
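/*
 * Batch size for the local percpu counters. It grows logarithmically with the
 * number of possible CPUs so that updates of the shared count stay rare on
 * large machines; an approximate (unsummed) read of such a counter can then
 * be off by up to nr_cpu_ids * PROP_BATCH.
 */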
#define PROP_BATCH (8*(1+ilog2(nr_cpu_ids)))
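
/* Initialise the local (per event type) proportion counter */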
int fprop_local_init_percpu(struct fprop_local_percpu *pl, gfp_t gfp)
{
        int err;

        err = percpu_counter_init(&pl->events, 0, gfp);
        if (err)
                return err;
        pl->period = 0;
        raw_spin_lock_init(&pl->lock);
        return 0;
}
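
/* Free the resources used by a local proportion counter */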
void fprop_local_destroy_percpu(struct fprop_local_percpu *pl)
{
        percpu_counter_destroy(&pl->events);
}
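
/*
 * Catch a local counter up with the global period: the aging missed since the
 * counter was last touched is applied by shifting its value right by the
 * number of missed periods, or by zeroing it if that many halvings would
 * leave nothing anyway.
 */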
static void fprop_reflect_period_percpu(struct fprop_global *p,
                                        struct fprop_local_percpu *pl)
{
        unsigned int period = p->period;
        unsigned long flags;

        /* Fast path - period didn't change */
        if (pl->period == period)
                return;
        raw_spin_lock_irqsave(&pl->lock, flags);
        /* Someone updated pl->period while we were spinning? */
        if (pl->period >= period) {
                raw_spin_unlock_irqrestore(&pl->lock, flags);
                return;
        }
        /* Aging zeroed our fraction? */
        if (period - pl->period < BITS_PER_LONG) {
                s64 val = percpu_counter_read(&pl->events);

                /* An approximate read is not accurate enough once the value is small */
                if (val < (nr_cpu_ids * PROP_BATCH))
                        val = percpu_counter_sum(&pl->events);

                percpu_counter_add_batch(&pl->events,
                        -val + (val >> (period - pl->period)), PROP_BATCH);
        } else {
                percpu_counter_set(&pl->events, 0);
        }
        pl->period = period;
        raw_spin_unlock_irqrestore(&pl->lock, flags);
}

/* nr events of type pl happened */
void __fprop_add_percpu(struct fprop_global *p, struct fprop_local_percpu *pl,
                long nr)
{
        fprop_reflect_period_percpu(p, pl);
        percpu_counter_add_batch(&pl->events, nr, PROP_BATCH);
        percpu_counter_add(&p->events, nr);
}
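
/*
 * Return the current proportion of events of type @pl as the fraction
 * *numerator / *denominator. The result is clamped so that the fraction is
 * at most 1 and the denominator is never zero.
 */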
void fprop_fraction_percpu(struct fprop_global *p,
                           struct fprop_local_percpu *pl,
                           unsigned long *numerator, unsigned long *denominator)
{
        unsigned int seq;
        s64 num, den;

        do {
                seq = read_seqcount_begin(&p->sequence);
                fprop_reflect_period_percpu(p, pl);
                num = percpu_counter_read_positive(&pl->events);
                den = percpu_counter_read_positive(&p->events);
        } while (read_seqcount_retry(&p->sequence, seq));

        /*
         * Make fraction <= 1 and denominator > 0 even in presence of percpu
         * counter errors
         */
        if (den <= num) {
                if (num)
                        den = num;
                else
                        den = 1;
        }
        *denominator = den;
        *numerator = num;
}

/*
 * Like __fprop_add_percpu() except that the event is counted only if the given
 * type has a fraction smaller than @max_frac/FPROP_FRAC_BASE.
 */
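/*
 * A sketch of the arithmetic behind the check below: after adding nr events,
 * the fraction does not exceed max_frac/FPROP_FRAC_BASE iff
 *
 *   (num + nr) * FPROP_FRAC_BASE <= (den + nr) * max_frac
 *
 * which rearranges to
 *
 *   nr * (FPROP_FRAC_BASE - max_frac) <= den * max_frac - num * FPROP_FRAC_BASE
 *
 * The right-hand side is the headroom 'tmp' computed below, and capping nr at
 * ceil(tmp / (FPROP_FRAC_BASE - max_frac)) adds just enough to saturate.
 */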
void __fprop_add_percpu_max(struct fprop_global *p,
                struct fprop_local_percpu *pl, int max_frac, long nr)
{
        if (unlikely(max_frac < FPROP_FRAC_BASE)) {
                unsigned long numerator, denominator;
                s64 tmp;

                fprop_fraction_percpu(p, pl, &numerator, &denominator);
                /* Adding 'nr' to fraction exceeds max_frac/FPROP_FRAC_BASE? */
                tmp = (u64)denominator * max_frac -
                        ((u64)numerator << FPROP_FRAC_SHIFT);
                if (tmp < 0) {
                        /* Maximum fraction already exceeded? */
                        return;
                } else if (tmp < nr * (FPROP_FRAC_BASE - max_frac)) {
                        /* Add just enough for the fraction to saturate */
                        nr = div_u64(tmp + FPROP_FRAC_BASE - max_frac - 1,
                                        FPROP_FRAC_BASE - max_frac);
                }
        }

        __fprop_add_percpu(p, pl, nr);
}