/*
 * Floating proportions
 *
 * Copyright (C) 2007 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *
 * This file contains the public data structure and API definitions.
 */

#ifndef _LINUX_PROPORTIONS_H
#define _LINUX_PROPORTIONS_H

#include <linux/percpu_counter.h>
#include <linux/spinlock.h>
#include <linux/mutex.h>
#include <linux/gfp.h>
struct prop_global {
	/*
	 * The period over which we differentiate
	 *
	 *   period = 2^shift
	 */
	int shift;
	/*
	 * The total event counter aka 'time'.
	 *
	 * Treated as an unsigned long; the lower 'shift - 1' bits are the
	 * counter bits, the remaining upper bits the period counter.
	 */
	struct percpu_counter events;
};
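/*
 * Illustrative reading (an interpretation of the comments above, not part
 * of the original header): with shift = 10 the low 9 ('shift - 1') bits of
 * 'events' count events within the current period, while the remaining
 * upper bits act as a period sequence number.  Local counters that lag
 * behind can then be decayed once per missed period, which is what makes
 * the proportions "float".
 */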
/*
 * global proportion descriptor
 *
 * this is needed to consistently flip prop_global structures.
 */
struct prop_descriptor {
	int index;		/* index of the current prop_global in pg[] */
	struct prop_global pg[2];
	struct mutex mutex;	/* serialize the prop_global switch */
};

int prop_descriptor_init(struct prop_descriptor *pd, int shift, gfp_t gfp);
void prop_change_shift(struct prop_descriptor *pd, int new_shift);
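/*
 * Usage sketch (illustrative only, not part of the original header; the
 * descriptor name and shift value are made up for the example):
 *
 *	static struct prop_descriptor completions;
 *
 *	int err = prop_descriptor_init(&completions, 10, GFP_KERNEL);
 *	if (err)
 *		return err;
 *
 * This gives a differentiation period of 2^10 events; the period can be
 * grown or shrunk at runtime with prop_change_shift().
 */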
/*
 * ----- PERCPU ------
 */

struct prop_local_percpu {
	/*
	 * the local events counter
	 */
	struct percpu_counter events;

	/*
	 * snapshot of the last seen global state
	 */
	int shift;
	unsigned long period;
	raw_spinlock_t lock;	/* protect the snapshot state */
};

int prop_local_init_percpu(struct prop_local_percpu *pl, gfp_t gfp);
void prop_local_destroy_percpu(struct prop_local_percpu *pl);
void __prop_inc_percpu(struct prop_descriptor *pd, struct prop_local_percpu *pl);
void prop_fraction_percpu(struct prop_descriptor *pd, struct prop_local_percpu *pl,
		long *numerator, long *denominator);

static inline
void prop_inc_percpu(struct prop_descriptor *pd, struct prop_local_percpu *pl)
{
	unsigned long flags;

	local_irq_save(flags);
	__prop_inc_percpu(pd, pl);
	local_irq_restore(flags);
}
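/*
 * Usage sketch (illustrative only, not part of the original header); 'pd'
 * is assumed to be an already-initialized prop_descriptor:
 *
 *	struct prop_local_percpu local;
 *	long num, den;
 *
 *	prop_local_init_percpu(&local, GFP_KERNEL);
 *
 *	prop_inc_percpu(&pd, &local);		// on each local event
 *
 *	prop_fraction_percpu(&pd, &local, &num, &den);
 *	// num/den approximates this participant's share of recent events
 *
 *	prop_local_destroy_percpu(&local);
 */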
/*
 * Limit the time part in order to ensure there are some bits left for the
 * cycle counter and fraction multiply.
 */
#if BITS_PER_LONG == 32
#define PROP_MAX_SHIFT (3*BITS_PER_LONG/4)
#else
#define PROP_MAX_SHIFT (BITS_PER_LONG/2)
#endif

#define PROP_FRAC_SHIFT		(BITS_PER_LONG - PROP_MAX_SHIFT - 1)
#define PROP_FRAC_BASE		(1UL << PROP_FRAC_SHIFT)

void __prop_inc_percpu_max(struct prop_descriptor *pd,
			   struct prop_local_percpu *pl, long frac);
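/*
 * Worked example for the definitions above: on a 64-bit machine
 * PROP_MAX_SHIFT is 64/2 = 32, so PROP_FRAC_SHIFT is 64 - 32 - 1 = 31 and
 * PROP_FRAC_BASE is 1UL << 31.  On 32-bit, PROP_MAX_SHIFT is 3*32/4 = 24,
 * giving PROP_FRAC_SHIFT = 7 and PROP_FRAC_BASE = 128.  The 'frac'
 * argument of __prop_inc_percpu_max() is expressed in units of
 * PROP_FRAC_BASE, e.g. frac = PROP_FRAC_BASE / 2 to cap a participant at
 * roughly half the total (an illustrative reading of the interface, not a
 * guarantee made by this header).
 */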
/*
 * ----- SINGLE ------
 */

struct prop_local_single {
	/*
	 * the local events counter
	 */
	unsigned long events;

	/*
	 * snapshot of the last seen global state
	 * and a lock protecting this state
	 */
	unsigned long period;
	int shift;
	raw_spinlock_t lock;	/* protect the snapshot state */
};

#define INIT_PROP_LOCAL_SINGLE(name)	\
{	.lock = __RAW_SPIN_LOCK_UNLOCKED(name.lock),	\
}

int prop_local_init_single(struct prop_local_single *pl);
void prop_local_destroy_single(struct prop_local_single *pl);
void __prop_inc_single(struct prop_descriptor *pd, struct prop_local_single *pl);
void prop_fraction_single(struct prop_descriptor *pd, struct prop_local_single *pl,
		long *numerator, long *denominator);

static inline
void prop_inc_single(struct prop_descriptor *pd, struct prop_local_single *pl)
{
	unsigned long flags;

	local_irq_save(flags);
	__prop_inc_single(pd, pl);
	local_irq_restore(flags);
}
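/*
 * Usage sketch (illustrative only, not part of the original header): the
 * single variant mirrors the percpu API with a plain counter, again
 * assuming an initialized prop_descriptor 'pd':
 *
 *	static struct prop_local_single local = INIT_PROP_LOCAL_SINGLE(local);
 *	long num, den;
 *
 *	prop_inc_single(&pd, &local);		// on each local event
 *	prop_fraction_single(&pd, &local, &num, &den);
 */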
#endif /* _LINUX_PROPORTIONS_H */