librpc: Shorten dcerpc_binding_handle_call a bit
[Samba/gebeck_regimport.git] / lib / ccan / tally / tally.c
blob774373ca0f88e0b4a48bed5029107cf28bded0d1
1 /* Licensed under LGPLv3+ - see LICENSE file for details */
2 #include <ccan/tally/tally.h>
3 #include <ccan/build_assert/build_assert.h>
4 #include <ccan/likely/likely.h>
5 #include <stdint.h>
6 #include <limits.h>
7 #include <string.h>
8 #include <stdio.h>
9 #include <assert.h>
10 #include <stdlib.h>
12 #define SIZET_BITS (sizeof(size_t)*CHAR_BIT)
14 /* We use power of 2 steps. I tried being tricky, but it got buggy. */
struct tally {
	/* Smallest and largest values seen so far.  tally_new() initializes
	 * max to the most negative ssize_t and min to the most positive,
	 * so min > max means "no samples yet" (see renormalize()). */
	ssize_t min, max;
	/* Running sum of all samples, kept as a double-word value:
	 * total[0] is the low word, total[1] the high word/carry
	 * (maintained by tally_add()). */
	size_t total[2];
	/* This allows limited frequency analysis. */
	unsigned buckets, step_bits;
	/* Histogram counts; declared [1] but allocated [buckets] long
	 * by tally_new() (pre-C99 flexible-array idiom). */
	size_t counts[1 /* Actually: [buckets] */ ];
};
23 struct tally *tally_new(unsigned buckets)
25 struct tally *tally;
27 /* There is always 1 bucket. */
28 if (buckets == 0) {
29 buckets = 1;
32 /* Overly cautious check for overflow. */
33 if (sizeof(*tally) * buckets / sizeof(*tally) != buckets) {
34 return NULL;
37 tally = (struct tally *)malloc(
38 sizeof(*tally) + sizeof(tally->counts[0])*(buckets-1));
39 if (tally == NULL) {
40 return NULL;
43 tally->max = ((size_t)1 << (SIZET_BITS - 1));
44 tally->min = ~tally->max;
45 tally->total[0] = tally->total[1] = 0;
46 tally->buckets = buckets;
47 tally->step_bits = 0;
48 memset(tally->counts, 0, sizeof(tally->counts[0])*buckets);
49 return tally;
52 static unsigned bucket_of(ssize_t min, unsigned step_bits, ssize_t val)
54 /* Don't over-shift. */
55 if (step_bits == SIZET_BITS) {
56 return 0;
58 assert(step_bits < SIZET_BITS);
59 return (size_t)(val - min) >> step_bits;
62 /* Return the min value in bucket b. */
63 static ssize_t bucket_min(ssize_t min, unsigned step_bits, unsigned b)
65 /* Don't over-shift. */
66 if (step_bits == SIZET_BITS) {
67 return min;
69 assert(step_bits < SIZET_BITS);
70 return min + ((ssize_t)b << step_bits);
/* Does shifting by this many bits truncate the number?
 *
 * Fix: shifting a size_t by >= its width is undefined behavior in C
 * (C11 6.5.7p3), and renormalize()'s loop can drive step_bits all the
 * way up to the word width (e.g. with a single bucket), so guard that
 * case explicitly: any nonzero value is certainly truncated. */
static bool shift_overflows(size_t num, unsigned bits)
{
	if (bits == 0) {
		return false;
	}
	if (bits >= sizeof(size_t) * CHAR_BIT) {
		return num != 0;
	}
	/* Compare a shift-then-unshift against shifting one bit less:
	 * they differ exactly when a set bit fell off the top. */
	return ((num << bits) >> 1) != (num << (bits - 1));
}
/* When min or max change, we may need to shuffle the frequency counts. */
static void renormalize(struct tally *tally,
			ssize_t new_min, ssize_t new_max)
{
	size_t range, spill;
	unsigned int i, old_min;

	/* Uninitialized?  Don't do anything... (min > max is the "no
	 * samples yet" sentinel set up by tally_new()). */
	if (tally->max < tally->min) {
		goto update;
	}

	/* If we don't have sufficient range, increase step bits until
	 * buckets cover entire range of ssize_t anyway. */
	range = (new_max - new_min) + 1;
	while (!shift_overflows(tally->buckets, tally->step_bits)
	       && range > ((size_t)tally->buckets << tally->step_bits)) {
		/* Collapse down: merge bucket pairs (2k,2k+1) -> k,
		 * doubling each bucket's width. */
		for (i = 1; i < tally->buckets; i++) {
			tally->counts[i/2] += tally->counts[i];
			tally->counts[i] = 0;
		}
		tally->step_bits++;
	}

	/* Now if minimum has dropped, move buckets up.  old_min is how
	 * many whole (new-width) buckets the old min sits above new_min. */
	old_min = bucket_of(new_min, tally->step_bits, tally->min);
	memmove(tally->counts + old_min,
		tally->counts,
		sizeof(tally->counts[0]) * (tally->buckets - old_min));
	memset(tally->counts, 0, sizeof(tally->counts[0]) * old_min);

	/* If we moved boundaries, adjust buckets to that ratio: spill is
	 * the sub-bucket remainder of the shift; a proportional share of
	 * each bucket's count is pushed into the next bucket.
	 * NOTE(review): this is an approximation — counts are smeared,
	 * not exactly redistributed. */
	spill = (tally->min - new_min) % (1 << tally->step_bits);
	for (i = 0; i < tally->buckets-1; i++) {
		size_t adjust = (tally->counts[i] >> tally->step_bits) * spill;
		tally->counts[i] -= adjust;
		tally->counts[i+1] += adjust;
	}

update:
	tally->min = new_min;
	tally->max = new_max;
}
128 void tally_add(struct tally *tally, ssize_t val)
130 ssize_t new_min = tally->min, new_max = tally->max;
131 bool need_renormalize = false;
133 if (val < tally->min) {
134 new_min = val;
135 need_renormalize = true;
137 if (val > tally->max) {
138 new_max = val;
139 need_renormalize = true;
141 if (need_renormalize) {
142 renormalize(tally, new_min, new_max);
145 /* 128-bit arithmetic! If we didn't want exact mean, we could just
146 * pull it out of counts. */
147 if (val > 0 && tally->total[0] + val < tally->total[0]) {
148 tally->total[1]++;
149 } else if (val < 0 && tally->total[0] + val > tally->total[0]) {
150 tally->total[1]--;
152 tally->total[0] += val;
153 tally->counts[bucket_of(tally->min, tally->step_bits, val)]++;
156 size_t tally_num(const struct tally *tally)
158 size_t i, num = 0;
159 for (i = 0; i < tally->buckets; i++) {
160 num += tally->counts[i];
162 return num;
165 ssize_t tally_min(const struct tally *tally)
167 return tally->min;
170 ssize_t tally_max(const struct tally *tally)
172 return tally->max;
/* FIXME: Own ccan module please! */
/* Find-last-set: index (1..64) of the highest set bit, 0 for val == 0. */
static unsigned fls64(uint64_t val)
{
#if HAVE_BUILTIN_CLZL
	if (val <= ULONG_MAX) {
		/* This is significantly faster! */
		return val ? sizeof(long) * CHAR_BIT - __builtin_clzl(val) : 0;
	} else {
#endif
		unsigned pos = 0;

		if (!val) {
			return 0;
		}
		/* Binary search for the top bit: whenever the upper half
		 * is occupied, note its offset and shift it down into view. */
		if (val & 0xffffffff00000000ull) {
			pos += 32;
			val >>= 32;
		}
		if (val & 0xffff0000ull) {
			pos += 16;
			val >>= 16;
		}
		if (val & 0xff00ull) {
			pos += 8;
			val >>= 8;
		}
		if (val & 0xf0ull) {
			pos += 4;
			val >>= 4;
		}
		if (val & 0xcull) {
			pos += 2;
			val >>= 2;
		}
		if (val & 0x2ull) {
			pos += 1;
			val >>= 1;
		}
		return pos + 1;
#if HAVE_BUILTIN_CLZL
	}
#endif
}
/* This is stolen straight from Hacker's Delight. */
/* Unsigned 128/64 division: divide the double-word dividend u1:u0 by v,
 * returning the 64-bit quotient (all-ones on overflow, i.e. u1 >= v).
 * Classic schoolbook long division in base 2^32.
 * NOTE(review): when v has its top bit set, s == 0 and "u0 >> (64 - s)"
 * shifts by 64 — masked out by (-s >> 63) but technically UB; kept
 * verbatim from the original. */
static uint64_t divlu64(uint64_t u1, uint64_t u0, uint64_t v)
{
	const uint64_t b = 4294967296ULL; /* Number base (32 bits). */
	uint32_t un[4],		  /* Dividend and divisor */
		vn[2];		  /* normalized and broken */
				  /* up into halfwords. */
	uint32_t q[2];		  /* Quotient as halfwords. */
	uint64_t un1, un0,	  /* Dividend and divisor */
		vn0;		  /* as fullwords. */
	uint64_t qhat;		  /* Estimated quotient digit. */
	uint64_t rhat;		  /* A remainder. */
	uint64_t p;		  /* Product of two digits. */
	int64_t s, i, j, t, k;

	if (u1 >= v) {		  /* If overflow, return the largest */
		return (uint64_t)-1; /* possible quotient. */
	}

	s = 64 - fls64(v);	  /* 0 <= s <= 63. */
	vn0 = v << s;		  /* Normalize divisor. */
	vn[1] = vn0 >> 32;	  /* Break divisor up into */
	vn[0] = vn0 & 0xFFFFFFFF; /* two 32-bit halves. */

	// Shift dividend left.
	un1 = ((u1 << s) | (u0 >> (64 - s))) & (-s >> 63);
	un0 = u0 << s;
	un[3] = un1 >> 32;	  /* Break dividend up into */
	un[2] = un1;		  /* four 32-bit halfwords */
	un[1] = un0 >> 32;	  /* Note: storing into */
	un[0] = un0;		  /* halfwords truncates. */

	/* Produce the two quotient halfwords, most significant first. */
	for (j = 1; j >= 0; j--) {
		/* Compute estimate qhat of q[j]. */
		qhat = (un[j+2]*b + un[j+1])/vn[1];
		rhat = (un[j+2]*b + un[j+1]) - qhat*vn[1];
	again:
		/* Refine: qhat may be at most 2 too large. */
		if (qhat >= b || qhat*vn[0] > b*rhat + un[j]) {
			qhat = qhat - 1;
			rhat = rhat + vn[1];
			if (rhat < b) {
				goto again;
			}
		}

		/* Multiply and subtract. */
		k = 0;
		for (i = 0; i < 2; i++) {
			p = qhat*vn[i];
			t = un[i+j] - k - (p & 0xFFFFFFFF);
			un[i+j] = t;
			k = (p >> 32) - (t >> 32);
		}
		t = un[j+2] - k;
		un[j+2] = t;

		q[j] = qhat;	  /* Store quotient digit. */
		if (t < 0) {	  /* If we subtracted too */
			q[j] = q[j] - 1; /* much, add back. */
			k = 0;
			for (i = 0; i < 2; i++) {
				t = un[i+j] + vn[i] + k;
				un[i+j] = t;
				k = t >> 32;
			}
			un[j+2] = un[j+2] + k;
		}
	} /* End j. */

	return q[1]*b + q[0];
}
/* Signed 128/64 division (also from Hacker's Delight): divide the
 * double-word dividend u1:u0 by v via the unsigned divlu64(), fixing
 * the sign afterwards.  On overflow, returns the most negative int64. */
static int64_t divls64(int64_t u1, uint64_t u0, int64_t v)
{
	int64_t q, uneg, vneg, diff, borrow;

	uneg = u1 >> 63;	  /* -1 if u < 0. */
	if (uneg) {		  /* Compute the absolute */
		u0 = -u0;	  /* value of the dividend u. */
		borrow = (u0 != 0);
		u1 = -u1 - borrow;
	}

	vneg = v >> 63;		  /* -1 if v < 0. */
	v = (v ^ vneg) - vneg;	  /* Absolute value of v. */

	/* Magnitude check mirrors divlu64()'s overflow condition. */
	if ((uint64_t)u1 >= (uint64_t)v) {
		goto overflow;
	}

	q = divlu64(u1, u0, v);

	diff = uneg ^ vneg;	  /* Negate q if signs of */
	q = (q ^ diff) - diff;	  /* u and v differed. */

	/* Sign of q disagrees with expected sign => magnitude overflowed. */
	if ((diff ^ q) < 0 && q != 0) {	/* If overflow, return the
					   largest */
	overflow:		  /* possible neg. quotient. */
		q = 0x8000000000000000ULL;
	}
	return q;
}
322 ssize_t tally_mean(const struct tally *tally)
324 size_t count = tally_num(tally);
325 if (!count) {
326 return 0;
329 if (sizeof(tally->total[0]) == sizeof(uint32_t)) {
330 /* Use standard 64-bit arithmetic. */
331 int64_t total = tally->total[0]
332 | (((uint64_t)tally->total[1]) << 32);
333 return total / count;
335 return divls64(tally->total[1], tally->total[0], count);
/* Running total of all samples.
 *
 * If @overflow is non-NULL, it receives the high word of the
 * double-word total and the (unclamped) low word is returned.
 * If @overflow is NULL, the result is clamped: values that cannot be
 * represented in one ssize_t return the minimum (underflow) or the
 * maximum (overflow) ssize_t instead. */
ssize_t tally_total(const struct tally *tally, ssize_t *overflow)
{
	if (overflow) {
		*overflow = tally->total[1];
		return tally->total[0];
	}

	/* If result is negative, make sure we can represent it. */
	if (tally->total[1] & ((size_t)1 << (SIZET_BITS-1))) {
		/* Must have only underflowed once, and must be able to
		 * represent result at ssize_t. */
		if ((~tally->total[1])+1 != 0
		    || (ssize_t)tally->total[0] >= 0) {
			/* Underflow, return minimum. */
			return (ssize_t)((size_t)1 << (SIZET_BITS - 1));
		}
	} else {
		/* Result is positive, must not have overflowed, and must be
		 * able to represent as ssize_t. */
		if (tally->total[1] || (ssize_t)tally->total[0] < 0) {
			/* Overflow.  Return maximum. */
			return (ssize_t)~((size_t)1 << (SIZET_BITS - 1));
		}
	}
	return tally->total[0];
}
365 static ssize_t bucket_range(const struct tally *tally, unsigned b, size_t *err)
367 ssize_t min, max;
369 min = bucket_min(tally->min, tally->step_bits, b);
370 if (b == tally->buckets - 1) {
371 max = tally->max;
372 } else {
373 max = bucket_min(tally->min, tally->step_bits, b+1) - 1;
376 /* FIXME: Think harder about cumulative error; is this enough?. */
377 *err = (max - min + 1) / 2;
378 /* Avoid overflow. */
379 return min + (max - min) / 2;
382 ssize_t tally_approx_median(const struct tally *tally, size_t *err)
384 size_t count = tally_num(tally), total = 0;
385 unsigned int i;
387 for (i = 0; i < tally->buckets; i++) {
388 total += tally->counts[i];
389 if (total * 2 >= count) {
390 break;
393 return bucket_range(tally, i, err);
396 ssize_t tally_approx_mode(const struct tally *tally, size_t *err)
398 unsigned int i, min_best = 0, max_best = 0;
400 for (i = 0; i < tally->buckets; i++) {
401 if (tally->counts[i] > tally->counts[min_best]) {
402 min_best = max_best = i;
403 } else if (tally->counts[i] == tally->counts[min_best]) {
404 max_best = i;
408 /* We can have more than one best, making our error huge. */
409 if (min_best != max_best) {
410 ssize_t min, max;
411 min = bucket_range(tally, min_best, err);
412 max = bucket_range(tally, max_best, err);
413 max += *err;
414 *err += (size_t)(max - min);
415 return min + (max - min) / 2;
418 return bucket_range(tally, min_best, err);
421 static unsigned get_max_bucket(const struct tally *tally)
423 unsigned int i;
425 for (i = tally->buckets; i > 0; i--) {
426 if (tally->counts[i-1]) {
427 break;
430 return i;
/* Render an ASCII-art histogram of the tally: one row per bucket, '*'
 * bars scaled to the fullest bucket, minimum value at the bottom row
 * and maximum at the top (those two rows are labelled with the values).
 * Returns a malloc'd, NUL-terminated string of height rows, each
 * newline-terminated, or NULL on allocation failure.  Caller frees. */
char *tally_histogram(const struct tally *tally,
		      unsigned width, unsigned height)
{
	unsigned int i, count, max_bucket, largest_bucket;
	struct tally *tmp;
	char *graph, *p;

	assert(width >= TALLY_MIN_HISTO_WIDTH);
	assert(height >= TALLY_MIN_HISTO_HEIGHT);

	/* Ignore unused buckets. */
	max_bucket = get_max_bucket(tally);

	/* FIXME: It'd be nice to smooth here... */
	if (height >= max_bucket) {
		height = max_bucket;
		tmp = NULL;
	} else {
		/* We create a temporary then renormalize so < height. */
		/* FIXME: Antialias properly! */
		tmp = tally_new(tally->buckets);
		if (!tmp) {
			return NULL;
		}
		tmp->min = tally->min;
		tmp->max = tally->max;
		tmp->step_bits = tally->step_bits;
		memcpy(tmp->counts, tally->counts,
		       sizeof(tally->counts[0]) * tmp->buckets);
		/* Doubling max each round widens the buckets (via
		 * renormalize's collapse) until few enough are in use. */
		while ((max_bucket = get_max_bucket(tmp)) >= height) {
			renormalize(tmp, tmp->min, tmp->max * 2);
		}
		/* Restore max */
		tmp->max = tally->max;
		tally = tmp;
		height = max_bucket;
	}

	/* Figure out longest line, for scale. */
	largest_bucket = 0;
	for (i = 0; i < tally->buckets; i++) {
		if (tally->counts[i] > largest_bucket) {
			largest_bucket = tally->counts[i];
		}
	}

	/* height rows of (width chars + '\n'), plus trailing NUL. */
	p = graph = (char *)malloc(height * (width + 1) + 1);
	if (!graph) {
		free(tmp);
		return NULL;
	}

	for (i = 0; i < height; i++) {
		unsigned covered = 1, row;

		/* People expect minimum at the bottom. */
		row = height - i - 1;
		/* Bar length, scaled so the fullest bucket spans width-1. */
		count = (double)tally->counts[row] / largest_bucket * (width-1)+1;

		if (row == 0) {
			/* NOTE(review): a negative snprintf return would
			 * wrap covered (unsigned) to a huge value; the
			 * clamp below reduces it to width — confirm that
			 * is the intended fallback. */
			covered = snprintf(p, width, "%zi", tally->min);
		} else if (row == height - 1) {
			covered = snprintf(p, width, "%zi", tally->max);
		} else if (row == bucket_of(tally->min, tally->step_bits, 0)) {
			/* Axis marker on the row containing value 0. */
			*p = '+';
		} else {
			*p = '|';
		}

		/* snprintf reports desired length; clamp to the row. */
		if (covered > width) {
			covered = width;
		}
		p += covered;

		/* The label/axis eats into the bar's length. */
		if (count > covered) {
			count -= covered;
		} else {
			count = 0;
		}

		memset(p, '*', count);
		p += count;
		*p = '\n';
		p++;
	}
	*p = '\0';
	free(tmp);
	return graph;
}