/*
 * qsp.c - QEMU Synchronization Profiler
 *
 * Copyright (C) 2018, Emilio G. Cota <cota@braap.org>
 *
 * License: GNU GPL, version 2 or later.
 *   See the COPYING file in the top-level directory.
 *
 * QSP profiles the time spent in synchronization primitives, which can
 * help diagnose performance problems, e.g. scalability issues when
 * contention is high.
 *
 * The primitives currently supported are mutexes, recursive mutexes and
 * condition variables. Note that not all related functions are intercepted;
 * instead we profile only those functions that can have a performance impact,
 * either due to blocking (e.g. cond_wait, mutex_lock) or cache line
 * contention (e.g. mutex_lock, mutex_trylock).
 *
 * QSP's design focuses on speed and scalability. This is achieved
 * by having threads do their profiling entirely on thread-local data.
 * The appropriate thread-local data is found via a QHT, i.e. a concurrent hash
 * table. To aggregate data in order to generate a report, we iterate over
 * all entries in the hash table. Depending on the number of threads and
 * synchronization objects this might be expensive, but note that it is
 * very rarely called -- reports are generated only when requested by users.
 *
 * Reports are generated as a table where each row represents a call site. A
 * call site is the triplet formed by the __FILE__ and __LINE__ of the caller
 * as well as the address of the "object" (i.e. mutex, rec. mutex or condvar)
 * being operated on. Optionally, call sites that operate on different objects
 * of the same type can be coalesced, which can be particularly useful when
 * profiling dynamically-allocated objects.
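 *
 * For instance, a report sorted by total wait time contains rows along these
 * lines (illustrative values only):
 *
 *   Type       Object          Call site    Wait Time (s)  Count  Average (us)
 *   --------------------------------------------------------------------------
 *   BQL mutex  0x55a1323431c0  cpus.c:1848        3.12254  57203         54.59
 *
 * where "Object" is the object's address, or a bracketed object count such as
 * [12] when call sites are coalesced.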
 *
 * Alternative designs considered:
 *
 * - Use an off-the-shelf profiler such as mutrace. This is not a viable option
 *   for us because QEMU has __malloc_hook set (by one of the libraries it
 *   uses); leaving this hook unset is required to avoid deadlock in mutrace.
 *
 * - Use a glib HT for each thread, protecting each HT with its own lock.
 *   This isn't simpler than the current design, and is 10% slower in the
 *   atomic_add-bench microbenchmark (-m option).
 *
 * - For reports, just use a binary tree as we aggregate data, instead of having
 *   an intermediate hash table. This would simplify the code only slightly, but
 *   would perform badly if there were many threads and objects to track.
 *
 * - Wrap operations on qsp entries with RCU read-side critical sections, so
 *   that qsp_reset() can delete entries. Unfortunately, the overhead of calling
 *   rcu_read_lock/unlock slows down atomic_add-bench -m by 24%. Having
 *   a snapshot that is updated on qsp_reset() avoids this overhead.
 *
 * Related work:
 * - Lennart Poettering's mutrace: http://0pointer.de/blog/projects/mutrace.html
 * - Lozi, David, Thomas, Lawall and Muller. "Remote Core Locking: Migrating
 *   Critical-Section Execution to Improve the Performance of Multithreaded
 *   Applications", USENIX ATC'12.
 */
#include "qemu/osdep.h"
#include "qemu/qemu-print.h"
#include "qemu/thread.h"
#include "qemu/timer.h"
#include "qemu/qht.h"
#include "qemu/rcu.h"
#include "qemu/xxhash.h"

enum QSPType {
    QSP_MUTEX,
    QSP_BQL_MUTEX,
    QSP_REC_MUTEX,
    QSP_CONDVAR,
};

struct QSPCallSite {
    const void *obj;
    const char *file; /* i.e. __FILE__; shortened later */
    int line;
    enum QSPType type;
};
typedef struct QSPCallSite QSPCallSite;

struct QSPEntry {
    void *thread_ptr;
    const QSPCallSite *callsite;
    uint64_t n_acqs;
    uint64_t ns;
    unsigned int n_objs; /* count of coalesced objs; only used for reporting */
};
typedef struct QSPEntry QSPEntry;

struct QSPSnapshot {
    struct rcu_head rcu;
    struct qht ht;
};
typedef struct QSPSnapshot QSPSnapshot;
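
/*
 * To illustrate the keying scheme (hypothetical file/line): a
 * qemu_mutex_lock() call at foo.c:123 on mutex @m, executed by thread T,
 * is accounted in the QSPEntry whose thread_ptr identifies T and whose
 * callsite is { .obj = m, .file = "foo.c", .line = 123, .type = QSP_MUTEX }.
 */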

/* initial sizing for hash tables */
#define QSP_INITIAL_SIZE 64

/* If this file is moved, QSP_REL_PATH should be updated accordingly */
#define QSP_REL_PATH "util/qsp.c"

/* this file's full path. Used to present all call sites with relative paths */
static size_t qsp_qemu_path_len;

/* the address of qsp_thread gives us a unique 'thread ID' */
static __thread int qsp_thread;

/*
 * Call sites are the same for all threads, so we track them in a separate hash
 * table to save memory.
 */
static struct qht qsp_callsite_ht;

static struct qht qsp_ht;
static QSPSnapshot *qsp_snapshot;
static bool qsp_initialized, qsp_initializing;

static const char * const qsp_typenames[] = {
    [QSP_MUTEX]     = "mutex",
    [QSP_BQL_MUTEX] = "BQL mutex",
    [QSP_REC_MUTEX] = "rec_mutex",
    [QSP_CONDVAR]   = "condvar",
};

QemuMutexLockFunc qemu_bql_mutex_lock_func = qemu_mutex_lock_impl;
QemuMutexLockFunc qemu_mutex_lock_func = qemu_mutex_lock_impl;
QemuMutexTrylockFunc qemu_mutex_trylock_func = qemu_mutex_trylock_impl;
QemuRecMutexLockFunc qemu_rec_mutex_lock_func = qemu_rec_mutex_lock_impl;
QemuRecMutexTrylockFunc qemu_rec_mutex_trylock_func =
    qemu_rec_mutex_trylock_impl;
QemuCondWaitFunc qemu_cond_wait_func = qemu_cond_wait_impl;
QemuCondTimedWaitFunc qemu_cond_timedwait_func = qemu_cond_timedwait_impl;

/*
 * It pays off to _not_ hash callsite->file; hashing a string is slow, and
 * without it we still get a pretty unique hash.
 */
static inline
uint32_t do_qsp_callsite_hash(const QSPCallSite *callsite, uint64_t ab)
{
    uint64_t cd = (uint64_t)(uintptr_t)callsite->obj;
    uint32_t e = callsite->line;
    uint32_t f = callsite->type;

    return qemu_xxhash6(ab, cd, e, f);
}

static inline
uint32_t qsp_callsite_hash(const QSPCallSite *callsite)
{
    return do_qsp_callsite_hash(callsite, 0);
}

static inline uint32_t do_qsp_entry_hash(const QSPEntry *entry, uint64_t a)
{
    return do_qsp_callsite_hash(entry->callsite, a);
}

static uint32_t qsp_entry_hash(const QSPEntry *entry)
{
    return do_qsp_entry_hash(entry, (uint64_t)(uintptr_t)entry->thread_ptr);
}

static uint32_t qsp_entry_no_thread_hash(const QSPEntry *entry)
{
    return do_qsp_entry_hash(entry, 0);
}

/* without the objects we need to hash the file name to get a decent hash */
static uint32_t qsp_entry_no_thread_obj_hash(const QSPEntry *entry)
{
    const QSPCallSite *callsite = entry->callsite;
    uint64_t ab = g_str_hash(callsite->file);
    uint64_t cd = callsite->line;
    uint32_t e = callsite->type;

    return qemu_xxhash5(ab, cd, e);
}
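
/*
 * Summary of the three entry hashes: qsp_entry_hash() keys the global table,
 * whose entries are per-thread; qsp_entry_no_thread_hash() keys the
 * aggregation table used for reports; qsp_entry_no_thread_obj_hash()
 * additionally ignores the object, so that call sites operating on
 * different objects can be coalesced.
 */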

static bool qsp_callsite_cmp(const void *ap, const void *bp)
{
    const QSPCallSite *a = ap;
    const QSPCallSite *b = bp;

    return a == b ||
        (a->obj == b->obj &&
         a->line == b->line &&
         a->type == b->type &&
         (a->file == b->file || !strcmp(a->file, b->file)));
}

static bool qsp_callsite_no_obj_cmp(const void *ap, const void *bp)
{
    const QSPCallSite *a = ap;
    const QSPCallSite *b = bp;

    return a == b ||
        (a->line == b->line &&
         a->type == b->type &&
         (a->file == b->file || !strcmp(a->file, b->file)));
}

static bool qsp_entry_no_thread_cmp(const void *ap, const void *bp)
{
    const QSPEntry *a = ap;
    const QSPEntry *b = bp;

    return qsp_callsite_cmp(a->callsite, b->callsite);
}

static bool qsp_entry_no_thread_obj_cmp(const void *ap, const void *bp)
{
    const QSPEntry *a = ap;
    const QSPEntry *b = bp;

    return qsp_callsite_no_obj_cmp(a->callsite, b->callsite);
}

static bool qsp_entry_cmp(const void *ap, const void *bp)
{
    const QSPEntry *a = ap;
    const QSPEntry *b = bp;

    return a->thread_ptr == b->thread_ptr &&
        qsp_callsite_cmp(a->callsite, b->callsite);
}

/*
 * Normally we'd call this from a constructor function, but we want it to work
 * via libutil as well.
 */
static void qsp_do_init(void)
{
    /* make sure this file's path in the tree is up to date with QSP_REL_PATH */
    g_assert(strstr(__FILE__, QSP_REL_PATH));
    qsp_qemu_path_len = strlen(__FILE__) - strlen(QSP_REL_PATH);

    qht_init(&qsp_ht, qsp_entry_cmp, QSP_INITIAL_SIZE,
             QHT_MODE_AUTO_RESIZE | QHT_MODE_RAW_MUTEXES);
    qht_init(&qsp_callsite_ht, qsp_callsite_cmp, QSP_INITIAL_SIZE,
             QHT_MODE_AUTO_RESIZE | QHT_MODE_RAW_MUTEXES);
}

static __attribute__((noinline)) void qsp_init__slowpath(void)
{
    if (atomic_cmpxchg(&qsp_initializing, false, true) == false) {
        qsp_do_init();
        atomic_set(&qsp_initialized, true);
    } else {
        /* another thread won the race; spin until it finishes init */
        while (!atomic_read(&qsp_initialized)) {
            cpu_relax();
        }
    }
}

/* qsp_init() must be called from _all_ exported functions */
static inline void qsp_init(void)
{
    if (likely(atomic_read(&qsp_initialized))) {
        return;
    }
    qsp_init__slowpath();
}

static QSPCallSite *qsp_callsite_find(const QSPCallSite *orig)
{
    QSPCallSite *callsite;
    uint32_t hash;

    hash = qsp_callsite_hash(orig);
    callsite = qht_lookup(&qsp_callsite_ht, orig, hash);
    if (callsite == NULL) {
        void *existing = NULL;

        callsite = g_new(QSPCallSite, 1);
        memcpy(callsite, orig, sizeof(*callsite));
        qht_insert(&qsp_callsite_ht, callsite, hash, &existing);
        if (unlikely(existing)) {
            /* lost the insertion race; use the winner's callsite */
            g_free(callsite);
            callsite = existing;
        }
    }
    return callsite;
}

static QSPEntry *
qsp_entry_create(struct qht *ht, const QSPEntry *entry, uint32_t hash)
{
    QSPEntry *e;
    void *existing = NULL;

    e = g_new0(QSPEntry, 1);
    e->thread_ptr = entry->thread_ptr;
    e->callsite = qsp_callsite_find(entry->callsite);

    qht_insert(ht, e, hash, &existing);
    if (unlikely(existing)) {
        /* lost the insertion race; use the winner's entry */
        g_free(e);
        e = existing;
    }
    return e;
}

static QSPEntry *
qsp_entry_find(struct qht *ht, const QSPEntry *entry, uint32_t hash)
{
    QSPEntry *e;

    e = qht_lookup(ht, entry, hash);
    if (e == NULL) {
        e = qsp_entry_create(ht, entry, hash);
    }
    return e;
}

/*
 * Note: Entries are never removed, so callers do not have to be in an RCU
 * read-side critical section.
 */
static QSPEntry *qsp_entry_get(const void *obj, const char *file, int line,
                               enum QSPType type)
{
    QSPCallSite callsite = {
        .obj = obj,
        .file = file,
        .line = line,
        .type = type,
    };
    QSPEntry orig;
    uint32_t hash;

    qsp_init();

    orig.thread_ptr = &qsp_thread;
    orig.callsite = &callsite;

    hash = qsp_entry_hash(&orig);
    return qsp_entry_find(&qsp_ht, &orig, hash);
}

/*
 * @e is in the global hash table; it is only written to by the current thread,
 * so we write to it atomically (as in "write once") to prevent torn reads.
 */
static inline void do_qsp_entry_record(QSPEntry *e, int64_t delta, bool acq)
{
    atomic_set_u64(&e->ns, e->ns + delta);
    if (acq) {
        atomic_set_u64(&e->n_acqs, e->n_acqs + 1);
    }
}

static inline void qsp_entry_record(QSPEntry *e, int64_t delta)
{
    do_qsp_entry_record(e, delta, true);
}

#define QSP_GEN_VOID(type_, qsp_t_, func_, impl_)                       \
    static void func_(type_ *obj, const char *file, int line)           \
    {                                                                   \
        QSPEntry *e;                                                    \
        int64_t t0, t1;                                                 \
                                                                        \
        t0 = get_clock();                                               \
        impl_(obj, file, line);                                         \
        t1 = get_clock();                                               \
                                                                        \
        e = qsp_entry_get(obj, file, line, qsp_t_);                     \
        qsp_entry_record(e, t1 - t0);                                   \
    }

#define QSP_GEN_RET1(type_, qsp_t_, func_, impl_)                       \
    static int func_(type_ *obj, const char *file, int line)            \
    {                                                                   \
        QSPEntry *e;                                                    \
        int64_t t0, t1;                                                 \
        int err;                                                        \
                                                                        \
        t0 = get_clock();                                               \
        err = impl_(obj, file, line);                                   \
        t1 = get_clock();                                               \
                                                                        \
        e = qsp_entry_get(obj, file, line, qsp_t_);                     \
        do_qsp_entry_record(e, t1 - t0, !err);                          \
        return err;                                                     \
    }

QSP_GEN_VOID(QemuMutex, QSP_BQL_MUTEX, qsp_bql_mutex_lock, qemu_mutex_lock_impl)
QSP_GEN_VOID(QemuMutex, QSP_MUTEX, qsp_mutex_lock, qemu_mutex_lock_impl)
QSP_GEN_RET1(QemuMutex, QSP_MUTEX, qsp_mutex_trylock, qemu_mutex_trylock_impl)

QSP_GEN_VOID(QemuRecMutex, QSP_REC_MUTEX, qsp_rec_mutex_lock,
             qemu_rec_mutex_lock_impl)
QSP_GEN_RET1(QemuRecMutex, QSP_REC_MUTEX, qsp_rec_mutex_trylock,
             qemu_rec_mutex_trylock_impl)
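
/*
 * For illustration, the QSP_GEN_VOID instantiation for QSP_MUTEX above
 * expands to a timed wrapper of the following shape (a sketch, not the
 * preprocessor's verbatim output):
 *
 *   static void qsp_mutex_lock(QemuMutex *obj, const char *file, int line)
 *   {
 *       QSPEntry *e;
 *       int64_t t0, t1;
 *
 *       t0 = get_clock();
 *       qemu_mutex_lock_impl(obj, file, line);
 *       t1 = get_clock();
 *
 *       e = qsp_entry_get(obj, file, line, QSP_MUTEX);
 *       qsp_entry_record(e, t1 - t0);
 *   }
 *
 * Note that only the impl_ call is timed; the hash table lookup happens
 * after t1 has been read, so it does not pollute the measurement.
 */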

static void
qsp_cond_wait(QemuCond *cond, QemuMutex *mutex, const char *file, int line)
{
    QSPEntry *e;
    int64_t t0, t1;

    t0 = get_clock();
    qemu_cond_wait_impl(cond, mutex, file, line);
    t1 = get_clock();

    e = qsp_entry_get(cond, file, line, QSP_CONDVAR);
    qsp_entry_record(e, t1 - t0);
}

static bool
qsp_cond_timedwait(QemuCond *cond, QemuMutex *mutex, int ms,
                   const char *file, int line)
{
    QSPEntry *e;
    int64_t t0, t1;
    bool ret;

    t0 = get_clock();
    ret = qemu_cond_timedwait_impl(cond, mutex, ms, file, line);
    t1 = get_clock();

    e = qsp_entry_get(cond, file, line, QSP_CONDVAR);
    qsp_entry_record(e, t1 - t0);
    return ret;
}

bool qsp_is_enabled(void)
{
    return atomic_read(&qemu_mutex_lock_func) == qsp_mutex_lock;
}

void qsp_enable(void)
{
    atomic_set(&qemu_mutex_lock_func, qsp_mutex_lock);
    atomic_set(&qemu_mutex_trylock_func, qsp_mutex_trylock);
    atomic_set(&qemu_bql_mutex_lock_func, qsp_bql_mutex_lock);
    atomic_set(&qemu_rec_mutex_lock_func, qsp_rec_mutex_lock);
    atomic_set(&qemu_rec_mutex_trylock_func, qsp_rec_mutex_trylock);
    atomic_set(&qemu_cond_wait_func, qsp_cond_wait);
    atomic_set(&qemu_cond_timedwait_func, qsp_cond_timedwait);
}
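
/*
 * Callers never invoke the qsp_* wrappers directly: in "qemu/thread.h",
 * qemu_mutex_lock() and friends are macros that load the corresponding
 * function pointer with atomic_read() and call it with __FILE__ and
 * __LINE__. A sketch of that dispatch (see thread.h for the real macros):
 *
 *   #define qemu_mutex_lock(m)                                           \
 *       ({                                                               \
 *           QemuMutexLockFunc _f = atomic_read(&qemu_mutex_lock_func);   \
 *           _f(m, __FILE__, __LINE__);                                   \
 *       })
 *
 * Swapping the pointers in qsp_enable()/qsp_disable() therefore redirects
 * every call site at once, with no rebuild required.
 */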

void qsp_disable(void)
{
    atomic_set(&qemu_mutex_lock_func, qemu_mutex_lock_impl);
    atomic_set(&qemu_mutex_trylock_func, qemu_mutex_trylock_impl);
    atomic_set(&qemu_bql_mutex_lock_func, qemu_mutex_lock_impl);
    atomic_set(&qemu_rec_mutex_lock_func, qemu_rec_mutex_lock_impl);
    atomic_set(&qemu_rec_mutex_trylock_func, qemu_rec_mutex_trylock_impl);
    atomic_set(&qemu_cond_wait_func, qemu_cond_wait_impl);
    atomic_set(&qemu_cond_timedwait_func, qemu_cond_timedwait_impl);
}

static gint qsp_tree_cmp(gconstpointer ap, gconstpointer bp, gpointer up)
{
    const QSPEntry *a = ap;
    const QSPEntry *b = bp;
    enum QSPSortBy sort_by = *(enum QSPSortBy *)up;
    const QSPCallSite *ca;
    const QSPCallSite *cb;

    switch (sort_by) {
    case QSP_SORT_BY_TOTAL_WAIT_TIME:
        if (a->ns > b->ns) {
            return -1;
        } else if (a->ns < b->ns) {
            return 1;
        }
        break;
    case QSP_SORT_BY_AVG_WAIT_TIME:
    {
        double avg_a = a->n_acqs ? a->ns / a->n_acqs : 0;
        double avg_b = b->n_acqs ? b->ns / b->n_acqs : 0;

        if (avg_a > avg_b) {
            return -1;
        } else if (avg_a < avg_b) {
            return 1;
        }
        break;
    }
    default:
        g_assert_not_reached();
    }

    ca = a->callsite;
    cb = b->callsite;
    /* Break the tie with the object's address */
    if (ca->obj < cb->obj) {
        return -1;
    } else if (ca->obj > cb->obj) {
        return 1;
    } else {
        int cmp;

        /* same obj. Break the tie with the callsite's file */
        cmp = strcmp(ca->file, cb->file);
        if (cmp) {
            return cmp;
        }
        /* same callsite file. Break the tie with the callsite's line */
        g_assert(ca->line != cb->line);
        if (ca->line < cb->line) {
            return -1;
        } else if (ca->line > cb->line) {
            return 1;
        } else {
            /* break the tie with the callsite's type */
            return cb->type - ca->type;
        }
    }
}

static void qsp_sort(void *p, uint32_t h, void *userp)
{
    QSPEntry *e = p;
    GTree *tree = userp;

    g_tree_insert(tree, e, NULL);
}

static void qsp_aggregate(void *p, uint32_t h, void *up)
{
    struct qht *ht = up;
    const QSPEntry *e = p;
    QSPEntry *agg;
    uint32_t hash;

    hash = qsp_entry_no_thread_hash(e);
    agg = qsp_entry_find(ht, e, hash);
    /*
     * The entry is in the global hash table; read from it atomically (as in
     * "read once").
     */
    agg->ns += atomic_read_u64(&e->ns);
    agg->n_acqs += atomic_read_u64(&e->n_acqs);
}

static void qsp_iter_diff(void *p, uint32_t hash, void *htp)
{
    struct qht *ht = htp;
    QSPEntry *old = p;
    QSPEntry *new;

    new = qht_lookup(ht, old, hash);
    /* entries are never deleted, so we must have this one */
    g_assert(new != NULL);
    /* our reading of the stats happened after the snapshot was taken */
    g_assert(new->n_acqs >= old->n_acqs);
    g_assert(new->ns >= old->ns);

    new->n_acqs -= old->n_acqs;
    new->ns -= old->ns;

    /* No point in reporting an empty entry */
    if (new->n_acqs == 0 && new->ns == 0) {
        bool removed = qht_remove(ht, new, hash);

        g_assert(removed);
        g_free(new);
    }
}

static void qsp_diff(struct qht *orig, struct qht *new)
{
    qht_iter(orig, qsp_iter_diff, new);
}
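
/*
 * Worked example (illustrative numbers): if the snapshot recorded
 * n_acqs == 10 and ns == 500 for a call site, and the current aggregate
 * shows n_acqs == 15 and ns == 800, the diff leaves n_acqs == 5 and
 * ns == 300, i.e. only the activity since the last qsp_reset().
 */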

static void qsp_iter_callsite_coalesce(void *p, uint32_t h, void *htp)
{
    struct qht *ht = htp;
    QSPEntry *old = p;
    QSPEntry *e;
    uint32_t hash;

    hash = qsp_entry_no_thread_obj_hash(old);
    e = qht_lookup(ht, old, hash);
    if (e == NULL) {
        e = qsp_entry_create(ht, old, hash);
        e->n_objs = 1;
    } else if (e->callsite->obj != old->callsite->obj) {
        e->n_objs++;
    }
    e->ns += old->ns;
    e->n_acqs += old->n_acqs;
}

static void qsp_ht_delete(void *p, uint32_t h, void *htp)
{
    g_free(p);
}

static void qsp_mktree(GTree *tree, bool callsite_coalesce)
{
    struct qht ht, coalesce_ht;
    struct qht *htp;

    /*
     * First, see if there's a prior snapshot, so that we read the global hash
     * table _after_ the snapshot has been created, which guarantees that
     * the entries we'll read will be a superset of the snapshot's entries.
     *
     * We must remain in an RCU read-side critical section until we're done
     * with the snapshot.
     */
    WITH_RCU_READ_LOCK_GUARD() {
        QSPSnapshot *snap = atomic_rcu_read(&qsp_snapshot);

        /* Aggregate all results from the global hash table into a local one */
        qht_init(&ht, qsp_entry_no_thread_cmp, QSP_INITIAL_SIZE,
                 QHT_MODE_AUTO_RESIZE | QHT_MODE_RAW_MUTEXES);
        qht_iter(&qsp_ht, qsp_aggregate, &ht);

        /* compute the difference wrt the snapshot, if any */
        if (snap) {
            qsp_diff(&snap->ht, &ht);
        }
    }

    htp = &ht;
    if (callsite_coalesce) {
        qht_init(&coalesce_ht, qsp_entry_no_thread_obj_cmp, QSP_INITIAL_SIZE,
                 QHT_MODE_AUTO_RESIZE | QHT_MODE_RAW_MUTEXES);
        qht_iter(&ht, qsp_iter_callsite_coalesce, &coalesce_ht);

        /* free the previous hash table, and point htp to coalesce_ht */
        qht_iter(&ht, qsp_ht_delete, NULL);
        qht_destroy(&ht);
        htp = &coalesce_ht;
    }

    /* sort the hash table elements by using a tree */
    qht_iter(htp, qsp_sort, tree);

    /* free the hash table, but keep the elements (those are in the tree now) */
    qht_destroy(htp);
}

/* free string with g_free */
static char *qsp_at(const QSPCallSite *callsite)
{
    GString *s = g_string_new(NULL);
    const char *shortened;

    /* remove the absolute path to qemu */
    if (unlikely(strlen(callsite->file) < qsp_qemu_path_len)) {
        shortened = callsite->file;
    } else {
        shortened = callsite->file + qsp_qemu_path_len;
    }
    g_string_append_printf(s, "%s:%u", shortened, callsite->line);
    return g_string_free(s, FALSE);
}

struct QSPReportEntry {
    const void *obj;
    char *callsite_at;
    const char *typename;
    double time_s;
    double ns_avg;
    uint64_t n_acqs;
    unsigned int n_objs;
};
typedef struct QSPReportEntry QSPReportEntry;

struct QSPReport {
    QSPReportEntry *entries;
    size_t n_entries;
    size_t max_n_entries;
};
typedef struct QSPReport QSPReport;

static gboolean qsp_tree_report(gpointer key, gpointer value, gpointer udata)
{
    const QSPEntry *e = key;
    QSPReport *report = udata;
    QSPReportEntry *entry;

    if (report->n_entries == report->max_n_entries) {
        return TRUE;
    }
    entry = &report->entries[report->n_entries];
    report->n_entries++;

    entry->obj = e->callsite->obj;
    entry->n_objs = e->n_objs;
    entry->callsite_at = qsp_at(e->callsite);
    entry->typename = qsp_typenames[e->callsite->type];
    entry->time_s = e->ns * 1e-9;
    entry->n_acqs = e->n_acqs;
    entry->ns_avg = e->n_acqs ? e->ns / e->n_acqs : 0;
    return FALSE;
}

static void pr_report(const QSPReport *rep)
{
    char *dashes;
    size_t max_len = 0;
    int callsite_len = 0;
    int callsite_rspace;
    int n_dashes;
    size_t i;

    /* find out the maximum length of all 'callsite' fields */
    for (i = 0; i < rep->n_entries; i++) {
        const QSPReportEntry *e = &rep->entries[i];
        size_t len = strlen(e->callsite_at);

        if (len > max_len) {
            max_len = len;
        }
    }

    callsite_len = MAX(max_len, strlen("Call site"));
    /* white space to leave to the right of "Call site" */
    callsite_rspace = callsite_len - strlen("Call site");

    qemu_printf("Type               Object  Call site%*s  Wait Time (s) "
                "       Count  Average (us)\n", callsite_rspace, "");

    /* build a horizontal rule with dashes */
    n_dashes = 79 + callsite_rspace;
    dashes = g_malloc(n_dashes + 1);
    memset(dashes, '-', n_dashes);
    dashes[n_dashes] = '\0';
    qemu_printf("%s\n", dashes);

    for (i = 0; i < rep->n_entries; i++) {
        const QSPReportEntry *e = &rep->entries[i];
        GString *s = g_string_new(NULL);

        g_string_append_printf(s, "%-9s  ", e->typename);
        if (e->n_objs > 1) {
            g_string_append_printf(s, "[%12u]", e->n_objs);
        } else {
            g_string_append_printf(s, "%14p", e->obj);
        }
        g_string_append_printf(s, "  %s%*s  %13.5f  %12" PRIu64 "  %12.2f\n",
                               e->callsite_at,
                               callsite_len - (int)strlen(e->callsite_at), "",
                               e->time_s, e->n_acqs, e->ns_avg * 1e-3);
        qemu_printf("%s", s->str);
        g_string_free(s, TRUE);
    }

    qemu_printf("%s\n", dashes);
    g_free(dashes);
}

static void report_destroy(QSPReport *rep)
{
    size_t i;

    for (i = 0; i < rep->n_entries; i++) {
        QSPReportEntry *e = &rep->entries[i];

        g_free(e->callsite_at);
    }
    g_free(rep->entries);
}

void qsp_report(size_t max, enum QSPSortBy sort_by,
                bool callsite_coalesce)
{
    GTree *tree = g_tree_new_full(qsp_tree_cmp, &sort_by, g_free, NULL);
    QSPReport rep;

    qsp_init();

    rep.entries = g_new0(QSPReportEntry, max);
    rep.n_entries = 0;
    rep.max_n_entries = max;

    qsp_mktree(tree, callsite_coalesce);
    g_tree_foreach(tree, qsp_tree_report, &rep);
    g_tree_destroy(tree);

    pr_report(&rep);
    report_destroy(&rep);
}

static void qsp_snapshot_destroy(QSPSnapshot *snap)
{
    qht_iter(&snap->ht, qsp_ht_delete, NULL);
    qht_destroy(&snap->ht);
    g_free(snap);
}

void qsp_reset(void)
{
    QSPSnapshot *new = g_new(QSPSnapshot, 1);
    QSPSnapshot *old;

    qsp_init();

    qht_init(&new->ht, qsp_entry_cmp, QSP_INITIAL_SIZE,
             QHT_MODE_AUTO_RESIZE | QHT_MODE_RAW_MUTEXES);

    /* take a snapshot of the current state */
    qht_iter(&qsp_ht, qsp_aggregate, &new->ht);

    /* replace the previous snapshot, if any */
    old = atomic_xchg(&qsp_snapshot, new);
    if (old) {
        call_rcu(old, qsp_snapshot_destroy, rcu);
    }
}
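
/*
 * Typical usage (a sketch using only the entry points defined above):
 *
 *   qsp_enable();
 *   ... run the workload ...
 *   qsp_report(10, QSP_SORT_BY_TOTAL_WAIT_TIME, false);
 *   qsp_reset();  // exclude the stats gathered so far from future reports
 *
 * In QEMU these entry points are also reachable from the HMP monitor via the
 * "sync-profile" and "info sync-profile" commands.
 */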