tests/qht-bench.c

/*
 * Copyright (C) 2016, Emilio G. Cota <cota@braap.org>
 *
 * License: GNU GPL, version 2 or later.
 *   See the COPYING file in the top-level directory.
 */
#include "qemu/osdep.h"
#include "qemu/processor.h"
#include "qemu/atomic.h"
#include "qemu/qht.h"
#include "qemu/rcu.h"
#include "qemu/xxhash.h"

struct thread_stats {
    size_t rd;
    size_t not_rd;
    size_t in;
    size_t not_in;
    size_t rm;
    size_t not_rm;
    size_t rz;
    size_t not_rz;
};

struct thread_info {
    void (*func)(struct thread_info *);
    struct thread_stats stats;
    uint64_t r;
    bool write_op; /* writes alternate between insertions and removals */
    bool resize_down;
} QEMU_ALIGNED(64); /* avoid false sharing among threads */

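/* global state: the table under test, worker bookkeeping, and the knobs set by parse_args() */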
static struct qht ht;
static QemuThread *rw_threads;

#define DEFAULT_RANGE (4096)
#define DEFAULT_QHT_N_ELEMS DEFAULT_RANGE

static unsigned int duration = 1;
static unsigned int n_rw_threads = 1;
static unsigned long lookup_range = DEFAULT_RANGE;
static unsigned long update_range = DEFAULT_RANGE;
static size_t init_range = DEFAULT_RANGE;
static size_t init_size = DEFAULT_RANGE;
static size_t n_ready_threads;
static long populate_offset;
static long *keys;

static size_t resize_min;
static size_t resize_max;
static struct thread_info *rz_info;
static unsigned long resize_delay = 1000;
static double resize_rate; /* 0.0 to 1.0 */
static unsigned int n_rz_threads = 1;
static QemuThread *rz_threads;
static bool precompute_hash;

static double update_rate; /* 0.0 to 1.0 */
static uint64_t update_threshold;
static uint64_t resize_threshold;

static size_t qht_n_elems = DEFAULT_QHT_N_ELEMS;
static int qht_mode;

static bool test_start;
static bool test_stop;

static struct thread_info *rw_info;

static const char commands_string[] =
    " -d = duration, in seconds\n"
    " -n = number of threads\n"
    "\n"
    " -o = offset at which keys start\n"
    " -p = precompute hashes\n"
    "\n"
    " -g = set -s,-k,-K,-l,-r to the same value\n"
    " -s = initial size hint\n"
    " -k = initial number of keys\n"
    " -K = initial range of keys (will be rounded up to pow2)\n"
    " -l = lookup range of keys (will be rounded up to pow2)\n"
    " -r = update range of keys (will be rounded up to pow2)\n"
    "\n"
    " -u = update rate (0.0 to 100.0), 50/50 split of insertions/removals\n"
    "\n"
    " -R = enable auto-resize\n"
    " -S = resize rate (0.0 to 100.0)\n"
    " -D = delay (in us) between potential resizes\n"
    " -N = number of resize threads";

static void usage_complete(int argc, char *argv[])
{
    fprintf(stderr, "Usage: %s [options]\n", argv[0]);
    fprintf(stderr, "options:\n%s\n", commands_string);
    exit(-1);
}

static bool is_equal(const void *ap, const void *bp)
{
    const long *a = ap;
    const long *b = bp;

    return *a == *b;
}

static uint32_t h(unsigned long v)
{
    return qemu_xxhash2(v);
}

static uint32_t hval(unsigned long v)
{
    return v;
}

static uint32_t (*hfunc)(unsigned long v) = h;

/*
 * From: https://en.wikipedia.org/wiki/Xorshift
 * This is faster than rand_r(), and gives us a wider range (RAND_MAX is only
 * guaranteed to be >= INT_MAX).
 */
static uint64_t xorshift64star(uint64_t x)
{
    x ^= x >> 12; /* a */
    x ^= x << 25; /* b */
    x ^= x >> 27; /* c */
    return x * UINT64_C(2685821657736338717);
}

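/*
 * Resize worker: with probability ~resize_rate (i.e. when the thread's 64-bit
 * random value falls below resize_threshold), resize the table, alternating
 * between resize_min and resize_max. Sleep resize_delay us between iterations
 * either way.
 */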
static void do_rz(struct thread_info *info)
{
    struct thread_stats *stats = &info->stats;

    if (info->r < resize_threshold) {
        size_t size = info->resize_down ? resize_min : resize_max;
        bool resized;

        resized = qht_resize(&ht, size);
        info->resize_down = !info->resize_down;

        if (resized) {
            stats->rz++;
        } else {
            stats->not_rz++;
        }
    }
    g_usleep(resize_delay);
}

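/*
 * Reader/writer worker: the thread's current random value both picks between
 * a lookup (r >= update_threshold) and an update, and indexes the key to
 * operate on. Updates alternate between insertion and removal so the table's
 * occupancy stays roughly stable.
 */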
static void do_rw(struct thread_info *info)
{
    struct thread_stats *stats = &info->stats;
    uint32_t hash;
    long *p;

    if (info->r >= update_threshold) {
        bool read;

        p = &keys[info->r & (lookup_range - 1)];
        hash = hfunc(*p);
        read = qht_lookup(&ht, p, hash);
        if (read) {
            stats->rd++;
        } else {
            stats->not_rd++;
        }
    } else {
        p = &keys[info->r & (update_range - 1)];
        hash = hfunc(*p);
        if (info->write_op) {
            bool written = false;

            if (qht_lookup(&ht, p, hash) == NULL) {
                written = qht_insert(&ht, p, hash, NULL);
            }
            if (written) {
                stats->in++;
            } else {
                stats->not_in++;
            }
        } else {
            bool removed = false;

            if (qht_lookup(&ht, p, hash)) {
                removed = qht_remove(&ht, p, hash);
            }
            if (removed) {
                stats->rm++;
            } else {
                stats->not_rm++;
            }
        }
        info->write_op = !info->write_op;
    }
}

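/*
 * Common worker loop: register with RCU, announce readiness, spin until the
 * test starts, then advance the per-thread xorshift64* state and invoke the
 * per-thread work function until the test stops.
 */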
static void *thread_func(void *p)
{
    struct thread_info *info = p;

    rcu_register_thread();

    atomic_inc(&n_ready_threads);
    while (!atomic_read(&test_start)) {
        cpu_relax();
    }

    rcu_read_lock();
    while (!atomic_read(&test_stop)) {
        info->r = xorshift64star(info->r);
        info->func(info);
    }
    rcu_read_unlock();

    rcu_unregister_thread();
    return NULL;
}

/* sets everything except info->func */
static void prepare_thread_info(struct thread_info *info, int i)
{
    /* seed for the RNG; each thread should have a different one */
    info->r = (i + 1) ^ time(NULL);
    /* the first update will be a write */
    info->write_op = true;
    /* the first resize will be down */
    info->resize_down = true;

    memset(&info->stats, 0, sizeof(info->stats));
}

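/*
 * Allocate bookkeeping for n threads and start them. The thread_info array is
 * 64-byte aligned to match QEMU_ALIGNED(64) above, keeping per-thread state
 * on separate cache lines.
 */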
static void
th_create_n(QemuThread **threads, struct thread_info **infos, const char *name,
            void (*func)(struct thread_info *), int offset, int n)
{
    struct thread_info *info;
    QemuThread *th;
    int i;

    th = g_malloc(sizeof(*th) * n);
    *threads = th;

    info = qemu_memalign(64, sizeof(*info) * n);
    *infos = info;

    for (i = 0; i < n; i++) {
        prepare_thread_info(&info[i], offset + i);
        info[i].func = func;
        qemu_thread_create(&th[i], name, thread_func, &info[i],
                           QEMU_THREAD_JOINABLE);
    }
}

static void create_threads(void)
{
    th_create_n(&rw_threads, &rw_info, "rw", do_rw, 0, n_rw_threads);
    th_create_n(&rz_threads, &rz_info, "rz", do_rz, n_rw_threads, n_rz_threads);
}

static void pr_params(void)
{
    printf("Parameters:\n");
    printf(" duration:          %d s\n", duration);
    printf(" # of threads:      %u\n", n_rw_threads);
    printf(" initial # of keys: %zu\n", init_size);
    printf(" initial size hint: %zu\n", qht_n_elems);
    printf(" auto-resize:       %s\n",
           qht_mode & QHT_MODE_AUTO_RESIZE ? "on" : "off");
    if (resize_rate) {
        printf(" resize_rate:       %f%%\n", resize_rate * 100.0);
        printf(" resize range:      %zu-%zu\n", resize_min, resize_max);
        printf(" # resize threads   %u\n", n_rz_threads);
    }
    printf(" update rate:       %f%%\n", update_rate * 100.0);
    printf(" offset:            %ld\n", populate_offset);
    printf(" initial key range: %zu\n", init_range);
    printf(" lookup range:      %lu\n", lookup_range);
    printf(" update range:      %lu\n", update_range);
}

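/*
 * Map a rate in [0.0, 1.0] onto the full uint64_t range, so that comparing a
 * uniformly distributed 64-bit random value against the threshold succeeds
 * with probability ~rate.
 */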
static void do_threshold(double rate, uint64_t *threshold)
{
    if (rate == 1.0) {
        *threshold = UINT64_MAX;
    } else {
        *threshold = (rate * 0xffff000000000000ull)
                   + (rate * 0x0000ffffffffffffull);
    }
}

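/*
 * Pre-allocate the key array, derive the update/resize thresholds, create the
 * hash table and populate it with init_size randomly chosen keys, retrying on
 * duplicate insertions.
 */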
static void htable_init(void)
{
    unsigned long n = MAX(init_range, update_range);
    uint64_t r = time(NULL);
    size_t retries = 0;
    size_t i;

    /* avoid allocating memory later by allocating all the keys now */
    keys = g_malloc(sizeof(*keys) * n);
    for (i = 0; i < n; i++) {
        long val = populate_offset + i;

        keys[i] = precompute_hash ? h(val) : hval(val);
    }

    /* some sanity checks */
    g_assert_cmpuint(lookup_range, <=, n);

    /* compute thresholds */
    do_threshold(update_rate, &update_threshold);
    do_threshold(resize_rate, &resize_threshold);

    if (resize_rate) {
        resize_min = n / 2;
        resize_max = n;
        assert(resize_min < resize_max);
    } else {
        n_rz_threads = 0;
    }

    /* initialize the hash table */
    qht_init(&ht, is_equal, qht_n_elems, qht_mode);
    assert(init_size <= init_range);

    pr_params();

    fprintf(stderr, "Initialization: populating %zu items...", init_size);
    for (i = 0; i < init_size; i++) {
        for (;;) {
            uint32_t hash;
            long *p;

            r = xorshift64star(r);
            p = &keys[r & (init_range - 1)];
            hash = hfunc(*p);
            if (qht_insert(&ht, p, hash, NULL)) {
                break;
            }
            retries++;
        }
    }
    fprintf(stderr, " populated after %zu retries\n", retries);
}

static void add_stats(struct thread_stats *s, struct thread_info *info, int n)
{
    int i;

    for (i = 0; i < n; i++) {
        struct thread_stats *stats = &info[i].stats;

        s->rd += stats->rd;
        s->not_rd += stats->not_rd;

        s->in += stats->in;
        s->not_in += stats->not_in;

        s->rm += stats->rm;
        s->not_rm += stats->not_rm;

        s->rz += stats->rz;
        s->not_rz += stats->not_rz;
    }
}

static void pr_stats(void)
{
    struct thread_stats s = {};
    double tx;

    add_stats(&s, rw_info, n_rw_threads);
    add_stats(&s, rz_info, n_rz_threads);

    printf("Results:\n");

    if (resize_rate) {
        printf(" Resizes:           %zu (%.2f%% of %zu)\n",
               s.rz, (double)s.rz / (s.rz + s.not_rz) * 100, s.rz + s.not_rz);
    }

    printf(" Read:              %.2f M (%.2f%% of %.2fM)\n",
           (double)s.rd / 1e6,
           (double)s.rd / (s.rd + s.not_rd) * 100,
           (double)(s.rd + s.not_rd) / 1e6);
    printf(" Inserted:          %.2f M (%.2f%% of %.2fM)\n",
           (double)s.in / 1e6,
           (double)s.in / (s.in + s.not_in) * 100,
           (double)(s.in + s.not_in) / 1e6);
    printf(" Removed:           %.2f M (%.2f%% of %.2fM)\n",
           (double)s.rm / 1e6,
           (double)s.rm / (s.rm + s.not_rm) * 100,
           (double)(s.rm + s.not_rm) / 1e6);

    tx = (s.rd + s.not_rd + s.in + s.not_in + s.rm + s.not_rm) / 1e6 / duration;
    printf(" Throughput:        %.2f MT/s\n", tx);
    printf(" Throughput/thread: %.2f MT/s/thread\n", tx / n_rw_threads);
}

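/*
 * Wait until every worker has checked in, let them run for `duration`
 * seconds, then stop and join them.
 */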
static void run_test(void)
{
    int i;

    while (atomic_read(&n_ready_threads) != n_rw_threads + n_rz_threads) {
        cpu_relax();
    }

    atomic_set(&test_start, true);
    g_usleep(duration * G_USEC_PER_SEC);
    atomic_set(&test_stop, true);

    for (i = 0; i < n_rw_threads; i++) {
        qemu_thread_join(&rw_threads[i]);
    }
    for (i = 0; i < n_rz_threads; i++) {
        qemu_thread_join(&rz_threads[i]);
    }
}

static void parse_args(int argc, char *argv[])
{
    int c;

    for (;;) {
        c = getopt(argc, argv, "d:D:g:k:K:l:hn:N:o:pr:Rs:S:u:");
        if (c < 0) {
            break;
        }
        switch (c) {
        case 'd':
            duration = atoi(optarg);
            break;
        case 'D':
            resize_delay = atol(optarg);
            break;
        case 'g':
            init_range = pow2ceil(atol(optarg));
            lookup_range = pow2ceil(atol(optarg));
            update_range = pow2ceil(atol(optarg));
            qht_n_elems = atol(optarg);
            init_size = atol(optarg);
            break;
        case 'h':
            usage_complete(argc, argv);
            exit(0);
        case 'k':
            init_size = atol(optarg);
            break;
        case 'K':
            init_range = pow2ceil(atol(optarg));
            break;
        case 'l':
            lookup_range = pow2ceil(atol(optarg));
            break;
        case 'n':
            n_rw_threads = atoi(optarg);
            break;
        case 'N':
            n_rz_threads = atoi(optarg);
            break;
        case 'o':
            populate_offset = atol(optarg);
            break;
        case 'p':
            precompute_hash = true;
            hfunc = hval;
            break;
        case 'r':
            update_range = pow2ceil(atol(optarg));
            break;
        case 'R':
            qht_mode |= QHT_MODE_AUTO_RESIZE;
            break;
        case 's':
            qht_n_elems = atol(optarg);
            break;
        case 'S':
            resize_rate = atof(optarg) / 100.0;
            if (resize_rate > 1.0) {
                resize_rate = 1.0;
            }
            break;
        case 'u':
            update_rate = atof(optarg) / 100.0;
            if (update_rate > 1.0) {
                update_rate = 1.0;
            }
            break;
        }
    }
}

int main(int argc, char *argv[])
{
    parse_args(argc, argv);
    htable_init();
    create_threads();
    run_test();
    pr_stats();
    return 0;
}