 * Copyright (C) 2021, Mahmoud Mandour <ma.mandourr@gmail.com>
 *
 * License: GNU GPL, version 2 or later.
 *   See the COPYING file in the top-level directory.
#include <inttypes.h>
#include <stdio.h>
#include <glib.h>

#include <qemu-plugin.h>
#define STRTOLL(x) g_ascii_strtoll(x, NULL, 10)

QEMU_PLUGIN_EXPORT int qemu_plugin_version = QEMU_PLUGIN_VERSION;

static enum qemu_plugin_mem_rw rw = QEMU_PLUGIN_MEM_RW;

static GHashTable *miss_ht;

static GMutex hashtable_lock;

enum EvictionPolicy policy;
 * A CacheSet is a set of cache blocks. A memory block that maps to a set can
 * be put in any of the blocks inside the set. The number of blocks per set is
 * called the associativity (assoc).
 *
 * Each block contains the stored tag and a valid bit. Since this is not
 * a functional simulator, the data itself is not stored. We only identify
 * whether a block is in the cache or not by searching for its tag.
 *
 * In order to search for memory data in the cache, the set identifier and tag
 * are extracted from the address and the set is probed to see whether a tag
 * match occurs.
 *
 * An address is logically divided into three portions: the block offset,
 * the set number, and the tag.
 *
 * The set number is used to identify the set in which the block may exist.
 * The tag is compared against all the tags of a set to search for a match.
 * If a match is found, then the access is a hit.
 *
 * The CacheSet also contains bookkeeping information about eviction details.
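 *
 * Worked example (illustrative only, assuming 64-byte blocks and 64 sets):
 * the low 6 bits of an address are the block offset, the next 6 bits select
 * the set, and the remaining high bits form the tag. For addr = 0x12345678:
 *   block offset = addr & 0x3f        = 0x38
 *   set number   = (addr >> 6) & 0x3f = 0x19
 *   tag          = addr & ~0xfff      = 0x12345000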
    uint64_t *lru_priorities;
    uint64_t lru_gen_counter;

void (*update_hit)(Cache *cache, int set, int blk);
void (*update_miss)(Cache *cache, int set, int blk);

void (*metadata_init)(Cache *cache);
void (*metadata_destroy)(Cache *cache);
static Cache **l1_dcaches, **l1_icaches;

static Cache **l2_ucaches;

static GMutex *l1_dcache_locks;
static GMutex *l1_icache_locks;
static GMutex *l2_ucache_locks;

static uint64_t l1_dmem_accesses;
static uint64_t l1_imem_accesses;
static uint64_t l1_imisses;
static uint64_t l1_dmisses;

static uint64_t l2_mem_accesses;
static uint64_t l2_misses;

static int pow_of_two(int num)
    g_assert((num & (num - 1)) == 0);
 * LRU eviction policy: For each set, a generation counter is maintained
 * alongside a priority array.
 *
 * On each set access, the generation counter is incremented.
 *
 * On a cache hit: The hit block is assigned the current generation counter,
 * indicating that it is the most recently used block.
 *
 * On a cache miss: The block with the least priority is searched for and
 * replaced with the newly-cached block, whose priority is set to the current
 * generation counter.
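 *
 * Illustrative trace (assuming a 2-way set with priorities {3, 4} and a
 * generation counter of 5): a hit on block 0 stores priority 5 for block 0
 * and bumps the counter to 6; a later miss in the same set then evicts
 * block 1 (lowest priority, 4), and update_miss stores priority 6 there,
 * bumping the counter to 7.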
static void lru_priorities_init(Cache *cache)
    for (i = 0; i < cache->num_sets; i++) {
        cache->sets[i].lru_priorities = g_new0(uint64_t, cache->assoc);
        cache->sets[i].lru_gen_counter = 0;

static void lru_update_blk(Cache *cache, int set_idx, int blk_idx)
    CacheSet *set = &cache->sets[set_idx];
    set->lru_priorities[blk_idx] = cache->sets[set_idx].lru_gen_counter;
    set->lru_gen_counter++;

static int lru_get_lru_block(Cache *cache, int set_idx)
    int i, min_idx, min_priority;

    min_priority = cache->sets[set_idx].lru_priorities[0];

    for (i = 1; i < cache->assoc; i++) {
        if (cache->sets[set_idx].lru_priorities[i] < min_priority) {
            min_priority = cache->sets[set_idx].lru_priorities[i];

static void lru_priorities_destroy(Cache *cache)
    for (i = 0; i < cache->num_sets; i++) {
        g_free(cache->sets[i].lru_priorities);
 * FIFO eviction policy: a FIFO queue is maintained for each CacheSet that
 * stores accesses to the cache.
 *
 * On a compulsory miss: The block index is enqueued to the fifo_queue to
 * indicate that it's the latest cached block.
 *
 * On a conflict miss: The first-in block is removed from the cache and the new
 * block is put in its place and enqueued to the FIFO queue.
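 *
 * Illustrative trace (assuming a 2-way set): two compulsory misses cache
 * blocks 0 and 1 and push each index onto the head of the queue
 * (head -> tail: 1, 0). A later conflict miss pops index 0 from the tail,
 * overwrites block 0 with the new tag, and pushes 0 back onto the head
 * (head -> tail: 0, 1).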
static void fifo_init(Cache *cache)
    for (i = 0; i < cache->num_sets; i++) {
        cache->sets[i].fifo_queue = g_queue_new();

static int fifo_get_first_block(Cache *cache, int set)
    GQueue *q = cache->sets[set].fifo_queue;
    return GPOINTER_TO_INT(g_queue_pop_tail(q));

static void fifo_update_on_miss(Cache *cache, int set, int blk_idx)
    GQueue *q = cache->sets[set].fifo_queue;
    g_queue_push_head(q, GINT_TO_POINTER(blk_idx));

static void fifo_destroy(Cache *cache)
    for (i = 0; i < cache->num_sets; i++) {
        g_queue_free(cache->sets[i].fifo_queue);

static inline uint64_t extract_tag(Cache *cache, uint64_t addr)
    return addr & cache->tag_mask;

static inline uint64_t extract_set(Cache *cache, uint64_t addr)
    return (addr & cache->set_mask) >> cache->blksize_shift;

static const char *cache_config_error(int blksize, int assoc, int cachesize)
    if (cachesize % blksize != 0) {
        return "cache size must be divisible by block size";
    } else if (cachesize % (blksize * assoc) != 0) {
        return "cache size must be divisible by set size (assoc * block size)";

static bool bad_cache_params(int blksize, int assoc, int cachesize)
    return (cachesize % blksize) != 0 || (cachesize % (blksize * assoc) != 0);

static Cache *cache_init(int blksize, int assoc, int cachesize)
     * This function shall not be called directly, and hence expects suitable
     * parameters.
    g_assert(!bad_cache_params(blksize, assoc, cachesize));

    cache = g_new(Cache, 1);
    cache->assoc = assoc;
    cache->cachesize = cachesize;
    cache->num_sets = cachesize / (blksize * assoc);
    cache->sets = g_new(CacheSet, cache->num_sets);
    cache->blksize_shift = pow_of_two(blksize);

    for (i = 0; i < cache->num_sets; i++) {
        cache->sets[i].blocks = g_new0(CacheBlock, assoc);

    blk_mask = blksize - 1;
    cache->set_mask = ((cache->num_sets - 1) << cache->blksize_shift);
    cache->tag_mask = ~(cache->set_mask | blk_mask);
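    /*
     * Worked example (illustrative only, assuming blksize = 64 and
     * num_sets = 64): blk_mask = 0x3f, set_mask = 0xfc0 and
     * tag_mask = ~0xfff, matching the address split described above.
     */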
    metadata_init(cache);

static Cache **caches_init(int blksize, int assoc, int cachesize)
    if (bad_cache_params(blksize, assoc, cachesize)) {

    caches = g_new(Cache *, cores);

    for (i = 0; i < cores; i++) {
        caches[i] = cache_init(blksize, assoc, cachesize);

static int get_invalid_block(Cache *cache, uint64_t set)
    for (i = 0; i < cache->assoc; i++) {
        if (!cache->sets[set].blocks[i].valid) {

static int get_replaced_block(Cache *cache, int set)
        return g_rand_int_range(rng, 0, cache->assoc);
        return lru_get_lru_block(cache, set);
        return fifo_get_first_block(cache, set);
        g_assert_not_reached();

static int in_cache(Cache *cache, uint64_t addr)
    tag = extract_tag(cache, addr);
    set = extract_set(cache, addr);

    for (i = 0; i < cache->assoc; i++) {
        if (cache->sets[set].blocks[i].tag == tag &&
            cache->sets[set].blocks[i].valid) {
 * access_cache(): Simulate a cache access
 * @cache: The cache under simulation
 * @addr: The address of the requested memory location
 *
 * Returns true if the requested data is found in the cache (hit) and false
 * on a miss. The cache is updated on a miss for the next access.
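 *
 * Typical usage (as in vcpu_mem_access() and vcpu_insn_exec() below): take
 * the per-core cache lock, call access_cache(), bump the miss and access
 * counters according to the result, then release the lock.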
static bool access_cache(Cache *cache, uint64_t addr)
    int hit_blk, replaced_blk;

    tag = extract_tag(cache, addr);
    set = extract_set(cache, addr);

    hit_blk = in_cache(cache, addr);
        update_hit(cache, set, hit_blk);

    replaced_blk = get_invalid_block(cache, set);

    if (replaced_blk == -1) {
        replaced_blk = get_replaced_block(cache, set);

        update_miss(cache, set, replaced_blk);

    cache->sets[set].blocks[replaced_blk].tag = tag;
    cache->sets[set].blocks[replaced_blk].valid = true;
static void vcpu_mem_access(unsigned int vcpu_index, qemu_plugin_meminfo_t info,
                            uint64_t vaddr, void *userdata)
    uint64_t effective_addr;
    struct qemu_plugin_hwaddr *hwaddr;

    hwaddr = qemu_plugin_get_hwaddr(info, vaddr);
    if (hwaddr && qemu_plugin_hwaddr_is_io(hwaddr)) {

    effective_addr = hwaddr ? qemu_plugin_hwaddr_phys_addr(hwaddr) : vaddr;
    cache_idx = vcpu_index % cores;

    g_mutex_lock(&l1_dcache_locks[cache_idx]);
    hit_in_l1 = access_cache(l1_dcaches[cache_idx], effective_addr);
        __atomic_fetch_add(&insn->l1_dmisses, 1, __ATOMIC_SEQ_CST);
        l1_dcaches[cache_idx]->misses++;
    l1_dcaches[cache_idx]->accesses++;
    g_mutex_unlock(&l1_dcache_locks[cache_idx]);

    if (hit_in_l1 || !use_l2) {
        /* No need to access L2 */

    g_mutex_lock(&l2_ucache_locks[cache_idx]);
    if (!access_cache(l2_ucaches[cache_idx], effective_addr)) {
        __atomic_fetch_add(&insn->l2_misses, 1, __ATOMIC_SEQ_CST);
        l2_ucaches[cache_idx]->misses++;
    l2_ucaches[cache_idx]->accesses++;
    g_mutex_unlock(&l2_ucache_locks[cache_idx]);
static void vcpu_insn_exec(unsigned int vcpu_index, void *userdata)
    insn_addr = ((InsnData *) userdata)->addr;

    cache_idx = vcpu_index % cores;
    g_mutex_lock(&l1_icache_locks[cache_idx]);
    hit_in_l1 = access_cache(l1_icaches[cache_idx], insn_addr);
        __atomic_fetch_add(&insn->l1_imisses, 1, __ATOMIC_SEQ_CST);
        l1_icaches[cache_idx]->misses++;
    l1_icaches[cache_idx]->accesses++;
    g_mutex_unlock(&l1_icache_locks[cache_idx]);

    if (hit_in_l1 || !use_l2) {
        /* No need to access L2 */

    g_mutex_lock(&l2_ucache_locks[cache_idx]);
    if (!access_cache(l2_ucaches[cache_idx], insn_addr)) {
        __atomic_fetch_add(&insn->l2_misses, 1, __ATOMIC_SEQ_CST);
        l2_ucaches[cache_idx]->misses++;
    l2_ucaches[cache_idx]->accesses++;
    g_mutex_unlock(&l2_ucache_locks[cache_idx]);
static void vcpu_tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb)
    n_insns = qemu_plugin_tb_n_insns(tb);
    for (i = 0; i < n_insns; i++) {
        struct qemu_plugin_insn *insn = qemu_plugin_tb_get_insn(tb, i);
        uint64_t effective_addr;

            effective_addr = (uint64_t) qemu_plugin_insn_haddr(insn);
            effective_addr = (uint64_t) qemu_plugin_insn_vaddr(insn);
         * Instructions might get translated multiple times; we do not create
         * new entries for those instructions. Instead, we fetch the same
         * entry from the hash table and register it for the callback again.
        g_mutex_lock(&hashtable_lock);
        data = g_hash_table_lookup(miss_ht, GUINT_TO_POINTER(effective_addr));
            data = g_new0(InsnData, 1);
            data->disas_str = qemu_plugin_insn_disas(insn);
            data->symbol = qemu_plugin_insn_symbol(insn);
            data->addr = effective_addr;
            g_hash_table_insert(miss_ht, GUINT_TO_POINTER(effective_addr),
        g_mutex_unlock(&hashtable_lock);

        qemu_plugin_register_vcpu_mem_cb(insn, vcpu_mem_access,
                                         QEMU_PLUGIN_CB_NO_REGS,

        qemu_plugin_register_vcpu_insn_exec_cb(insn, vcpu_insn_exec,
                                               QEMU_PLUGIN_CB_NO_REGS, data);

static void insn_free(gpointer data)
    InsnData *insn = (InsnData *) data;
    g_free(insn->disas_str);

static void cache_free(Cache *cache)
    for (int i = 0; i < cache->num_sets; i++) {
        g_free(cache->sets[i].blocks);

    if (metadata_destroy) {
        metadata_destroy(cache);

static void caches_free(Cache **caches)
    for (i = 0; i < cores; i++) {
        cache_free(caches[i]);

static void append_stats_line(GString *line,
                              uint64_t l1_daccess, uint64_t l1_dmisses,
                              uint64_t l1_iaccess, uint64_t l1_imisses,
                              uint64_t l2_access, uint64_t l2_misses)
    double l1_dmiss_rate = ((double) l1_dmisses) / (l1_daccess) * 100.0;
    double l1_imiss_rate = ((double) l1_imisses) / (l1_iaccess) * 100.0;

    g_string_append_printf(line, "%-14" PRIu64 " %-12" PRIu64 " %9.4lf%%"
                           " %-14" PRIu64 " %-12" PRIu64 " %9.4lf%%",
                           l1_daccess ? l1_dmiss_rate : 0.0,
                           l1_iaccess ? l1_imiss_rate : 0.0);

    if (l2_access && l2_misses) {
        double l2_miss_rate = ((double) l2_misses) / (l2_access) * 100.0;
        g_string_append_printf(line,
                               " %-12" PRIu64 " %-11" PRIu64 " %10.4lf%%",
                               l2_access ? l2_miss_rate : 0.0);

    g_string_append(line, "\n");
static void sum_stats(void)
    for (i = 0; i < cores; i++) {
        l1_imisses += l1_icaches[i]->misses;
        l1_dmisses += l1_dcaches[i]->misses;
        l1_imem_accesses += l1_icaches[i]->accesses;
        l1_dmem_accesses += l1_dcaches[i]->accesses;

            l2_misses += l2_ucaches[i]->misses;
            l2_mem_accesses += l2_ucaches[i]->accesses;

static int dcmp(gconstpointer a, gconstpointer b)
    InsnData *insn_a = (InsnData *) a;
    InsnData *insn_b = (InsnData *) b;

    return insn_a->l1_dmisses < insn_b->l1_dmisses ? 1 : -1;

static int icmp(gconstpointer a, gconstpointer b)
    InsnData *insn_a = (InsnData *) a;
    InsnData *insn_b = (InsnData *) b;

    return insn_a->l1_imisses < insn_b->l1_imisses ? 1 : -1;

static int l2_cmp(gconstpointer a, gconstpointer b)
    InsnData *insn_a = (InsnData *) a;
    InsnData *insn_b = (InsnData *) b;

    return insn_a->l2_misses < insn_b->l2_misses ? 1 : -1;

static void log_stats(void)
    Cache *icache, *dcache, *l2_cache;

    g_autoptr(GString) rep = g_string_new("core #, data accesses, data misses,"
                                          " dmiss rate, insn accesses,"
                                          " insn misses, imiss rate");

        g_string_append(rep, ", l2 accesses, l2 misses, l2 miss rate");

    g_string_append(rep, "\n");

    for (i = 0; i < cores; i++) {
        g_string_append_printf(rep, "%-8d", i);
        dcache = l1_dcaches[i];
        icache = l1_icaches[i];
        l2_cache = use_l2 ? l2_ucaches[i] : NULL;
        append_stats_line(rep, dcache->accesses, dcache->misses,
                          icache->accesses, icache->misses,
                          l2_cache ? l2_cache->accesses : 0,
                          l2_cache ? l2_cache->misses : 0);

    g_string_append_printf(rep, "%-8s", "sum");
    append_stats_line(rep, l1_dmem_accesses, l1_dmisses,
                      l1_imem_accesses, l1_imisses,
                      l2_cache ? l2_mem_accesses : 0, l2_cache ? l2_misses : 0);

    g_string_append(rep, "\n");
    qemu_plugin_outs(rep->str);
static void log_top_insns(void)
    GList *curr, *miss_insns;

    miss_insns = g_hash_table_get_values(miss_ht);
    miss_insns = g_list_sort(miss_insns, dcmp);
    g_autoptr(GString) rep = g_string_new("");
    g_string_append_printf(rep, "%s", "address, data misses, instruction\n");

    for (curr = miss_insns, i = 0; curr && i < limit; i++, curr = curr->next) {
        insn = (InsnData *) curr->data;
        g_string_append_printf(rep, "0x%" PRIx64, insn->addr);
            g_string_append_printf(rep, " (%s)", insn->symbol);
        g_string_append_printf(rep, ", %" PRId64 ", %s\n",
                               insn->l1_dmisses, insn->disas_str);

    miss_insns = g_list_sort(miss_insns, icmp);
    g_string_append_printf(rep, "%s", "\naddress, fetch misses, instruction\n");

    for (curr = miss_insns, i = 0; curr && i < limit; i++, curr = curr->next) {
        insn = (InsnData *) curr->data;
        g_string_append_printf(rep, "0x%" PRIx64, insn->addr);
            g_string_append_printf(rep, " (%s)", insn->symbol);
        g_string_append_printf(rep, ", %" PRId64 ", %s\n",
                               insn->l1_imisses, insn->disas_str);

    miss_insns = g_list_sort(miss_insns, l2_cmp);
    g_string_append_printf(rep, "%s", "\naddress, L2 misses, instruction\n");

    for (curr = miss_insns, i = 0; curr && i < limit; i++, curr = curr->next) {
        insn = (InsnData *) curr->data;
        g_string_append_printf(rep, "0x%" PRIx64, insn->addr);
            g_string_append_printf(rep, " (%s)", insn->symbol);
        g_string_append_printf(rep, ", %" PRId64 ", %s\n",
                               insn->l2_misses, insn->disas_str);

    qemu_plugin_outs(rep->str);
    g_list_free(miss_insns);
static void plugin_exit(qemu_plugin_id_t id, void *p)
    caches_free(l1_dcaches);
    caches_free(l1_icaches);

    g_free(l1_dcache_locks);
    g_free(l1_icache_locks);

        caches_free(l2_ucaches);
        g_free(l2_ucache_locks);

    g_hash_table_destroy(miss_ht);

static void policy_init(void)
        update_hit = lru_update_blk;
        update_miss = lru_update_blk;
        metadata_init = lru_priorities_init;
        metadata_destroy = lru_priorities_destroy;
        update_miss = fifo_update_on_miss;
        metadata_init = fifo_init;
        metadata_destroy = fifo_destroy;
        g_assert_not_reached();
int qemu_plugin_install(qemu_plugin_id_t id, const qemu_info_t *info,
                        int argc, char **argv)
    int l1_iassoc, l1_iblksize, l1_icachesize;
    int l1_dassoc, l1_dblksize, l1_dcachesize;
    int l2_assoc, l2_blksize, l2_cachesize;

    sys = info->system_emulation;

    l1_dcachesize = l1_dblksize * l1_dassoc * 32;

    l1_icachesize = l1_iblksize * l1_iassoc * 32;

    l2_cachesize = l2_assoc * l2_blksize * 2048;

    cores = sys ? qemu_plugin_n_vcpus() : 1;

    for (i = 0; i < argc; i++) {
        g_auto(GStrv) tokens = g_strsplit(opt, "=", 2);

        if (g_strcmp0(tokens[0], "iblksize") == 0) {
            l1_iblksize = STRTOLL(tokens[1]);
        } else if (g_strcmp0(tokens[0], "iassoc") == 0) {
            l1_iassoc = STRTOLL(tokens[1]);
        } else if (g_strcmp0(tokens[0], "icachesize") == 0) {
            l1_icachesize = STRTOLL(tokens[1]);
        } else if (g_strcmp0(tokens[0], "dblksize") == 0) {
            l1_dblksize = STRTOLL(tokens[1]);
        } else if (g_strcmp0(tokens[0], "dassoc") == 0) {
            l1_dassoc = STRTOLL(tokens[1]);
        } else if (g_strcmp0(tokens[0], "dcachesize") == 0) {
            l1_dcachesize = STRTOLL(tokens[1]);
        } else if (g_strcmp0(tokens[0], "limit") == 0) {
            limit = STRTOLL(tokens[1]);
        } else if (g_strcmp0(tokens[0], "cores") == 0) {
            cores = STRTOLL(tokens[1]);
        } else if (g_strcmp0(tokens[0], "l2cachesize") == 0) {
            l2_cachesize = STRTOLL(tokens[1]);
        } else if (g_strcmp0(tokens[0], "l2blksize") == 0) {
            l2_blksize = STRTOLL(tokens[1]);
        } else if (g_strcmp0(tokens[0], "l2assoc") == 0) {
            l2_assoc = STRTOLL(tokens[1]);
        } else if (g_strcmp0(tokens[0], "l2") == 0) {
            if (!qemu_plugin_bool_parse(tokens[0], tokens[1], &use_l2)) {
                fprintf(stderr, "boolean argument parsing failed: %s\n", opt);
        } else if (g_strcmp0(tokens[0], "evict") == 0) {
            if (g_strcmp0(tokens[1], "rand") == 0) {
            } else if (g_strcmp0(tokens[1], "lru") == 0) {
            } else if (g_strcmp0(tokens[1], "fifo") == 0) {
                fprintf(stderr, "invalid eviction policy: %s\n", opt);
            fprintf(stderr, "option parsing failed: %s\n", opt);

    l1_dcaches = caches_init(l1_dblksize, l1_dassoc, l1_dcachesize);
        const char *err = cache_config_error(l1_dblksize, l1_dassoc, l1_dcachesize);
        fprintf(stderr, "dcache cannot be constructed from given parameters\n");
        fprintf(stderr, "%s\n", err);

    l1_icaches = caches_init(l1_iblksize, l1_iassoc, l1_icachesize);
        const char *err = cache_config_error(l1_iblksize, l1_iassoc, l1_icachesize);
        fprintf(stderr, "icache cannot be constructed from given parameters\n");
        fprintf(stderr, "%s\n", err);

    l2_ucaches = use_l2 ? caches_init(l2_blksize, l2_assoc, l2_cachesize) : NULL;
    if (!l2_ucaches && use_l2) {
        const char *err = cache_config_error(l2_blksize, l2_assoc, l2_cachesize);
        fprintf(stderr, "L2 cache cannot be constructed from given parameters\n");
        fprintf(stderr, "%s\n", err);

    l1_dcache_locks = g_new0(GMutex, cores);
    l1_icache_locks = g_new0(GMutex, cores);
    l2_ucache_locks = use_l2 ? g_new0(GMutex, cores) : NULL;

    qemu_plugin_register_vcpu_tb_trans_cb(id, vcpu_tb_trans);
    qemu_plugin_register_atexit_cb(id, plugin_exit, NULL);

    miss_ht = g_hash_table_new_full(NULL, g_direct_equal, NULL, insn_free);