/*
 * Copyright (C) 2021, Mahmoud Mandour <ma.mandourr@gmail.com>
 *
 * License: GNU GPL, version 2 or later.
 *   See the COPYING file in the top-level directory.
 */

#include <inttypes.h>
#include <stdio.h>
#include <glib.h>

#include <qemu-plugin.h>

#define STRTOLL(x) g_ascii_strtoll(x, NULL, 10)

QEMU_PLUGIN_EXPORT int qemu_plugin_version = QEMU_PLUGIN_VERSION;

static enum qemu_plugin_mem_rw rw = QEMU_PLUGIN_MEM_RW;

static GHashTable *miss_ht;

static GMutex hashtable_lock;

enum EvictionPolicy policy;

/*
 * A CacheSet is a set of cache blocks. A memory block that maps to a set can be
 * put in any of the blocks inside the set. The number of blocks per set is
 * called the associativity (assoc).
 *
 * Each block contains the stored tag and a valid bit. Since this is not
 * a functional simulator, the data itself is not stored. We only identify
 * whether a block is in the cache or not by searching for its tag.
 *
 * In order to search for memory data in the cache, the set identifier and tag
 * are extracted from the address and the set is probed to see whether a tag
 * match occurs.
 *
 * An address is logically divided into three portions: the block offset,
 * the set number, and the tag.
 *
 * The set number is used to identify the set in which the block may exist.
 * The tag is compared against all the tags of a set to search for a match. If a
 * match is found, then the access is a hit.
 *
 * The CacheSet also contains bookkeeping information about eviction details.
 */

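/*
 * For example (hypothetical numbers, not the plugin defaults): with 64-byte
 * blocks and 64 sets, bits [5:0] of an address form the block offset,
 * bits [11:6] select the set, and the remaining upper bits are the tag.
 */
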
    uint64_t *lru_priorities;
    uint64_t lru_gen_counter;

void (*update_hit)(Cache *cache, int set, int blk);
void (*update_miss)(Cache *cache, int set, int blk);

void (*metadata_init)(Cache *cache);
void (*metadata_destroy)(Cache *cache);

static Cache **l1_dcaches, **l1_icaches;
static Cache **l2_ucaches;

static GMutex *l1_dcache_locks;
static GMutex *l1_icache_locks;
static GMutex *l2_ucache_locks;

static uint64_t l1_dmem_accesses;
static uint64_t l1_imem_accesses;
static uint64_t l1_imisses;
static uint64_t l1_dmisses;

static uint64_t l2_mem_accesses;
static uint64_t l2_misses;

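/*
 * pow_of_two() below returns log2(num); num is asserted to be a power of two.
 */
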
static int pow_of_two(int num)
{
    g_assert((num & (num - 1)) == 0);

    int ret = 0;
    while (num /= 2) {
        ret++;
    }

    return ret;
}

/*
 * LRU eviction policy: For each set, a generation counter is maintained
 * alongside a priority array.
 *
 * On each set access, the generation counter is incremented.
 *
 * On a cache hit: The hit-block is assigned the current generation counter,
 * indicating that it is the most recently used block.
 *
 * On a cache miss: The block with the least priority is searched and replaced
 * with the newly-cached block, whose priority is set to the current
 * generation counter.
 */

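/*
 * Worked example (hypothetical 2-way set, generation counter starting at 0):
 * after accesses A, B, A the priorities are A=2 and B=1 and the counter is 3,
 * so the next miss in this set evicts B, the least recently used block.
 */
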
static void lru_priorities_init(Cache *cache)
{
    int i;

    for (i = 0; i < cache->num_sets; i++) {
        cache->sets[i].lru_priorities = g_new0(uint64_t, cache->assoc);
        cache->sets[i].lru_gen_counter = 0;
    }
}

static void lru_update_blk(Cache *cache, int set_idx, int blk_idx)
{
    CacheSet *set = &cache->sets[set_idx];
    set->lru_priorities[blk_idx] = cache->sets[set_idx].lru_gen_counter;
    set->lru_gen_counter++;
}

static int lru_get_lru_block(Cache *cache, int set_idx)
{
    int i, min_idx, min_priority;

    min_priority = cache->sets[set_idx].lru_priorities[0];
    min_idx = 0;

    for (i = 1; i < cache->assoc; i++) {
        if (cache->sets[set_idx].lru_priorities[i] < min_priority) {
            min_priority = cache->sets[set_idx].lru_priorities[i];
            min_idx = i;
        }
    }

    return min_idx;
}

static void lru_priorities_destroy(Cache *cache)
{
    int i;

    for (i = 0; i < cache->num_sets; i++) {
        g_free(cache->sets[i].lru_priorities);
    }
}

/*
 * FIFO eviction policy: a FIFO queue is maintained for each CacheSet that
 * stores accesses to the cache.
 *
 * On a compulsory miss: The block index is enqueued to the fifo_queue to
 * indicate that it's the latest cached block.
 *
 * On a conflict miss: The first-in block is removed from the cache and the new
 * block is put in its place and enqueued to the FIFO queue.
 */

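/*
 * In the helpers below, newly cached block indices are pushed at the head of
 * the per-set queue and victims are popped from the tail, so the tail always
 * holds the oldest (first-in) block of the set.
 */
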
static void fifo_init(Cache *cache)
{
    int i;

    for (i = 0; i < cache->num_sets; i++) {
        cache->sets[i].fifo_queue = g_queue_new();
    }
}

static int fifo_get_first_block(Cache *cache, int set)
{
    GQueue *q = cache->sets[set].fifo_queue;
    return GPOINTER_TO_INT(g_queue_pop_tail(q));
}

static void fifo_update_on_miss(Cache *cache, int set, int blk_idx)
{
    GQueue *q = cache->sets[set].fifo_queue;
    g_queue_push_head(q, GINT_TO_POINTER(blk_idx));
}

static void fifo_destroy(Cache *cache)
{
    int i;

    for (i = 0; i < cache->num_sets; i++) {
        g_queue_free(cache->sets[i].fifo_queue);
    }
}

static inline uint64_t extract_tag(Cache *cache, uint64_t addr)
{
    return addr & cache->tag_mask;
}

static inline uint64_t extract_set(Cache *cache, uint64_t addr)
{
    return (addr & cache->set_mask) >> cache->blksize_shift;
}

static const char *cache_config_error(int blksize, int assoc, int cachesize)
{
    if (cachesize % blksize != 0) {
        return "cache size must be divisible by block size";
    } else if (cachesize % (blksize * assoc) != 0) {
        return "cache size must be divisible by set size (assoc * block size)";
    } else {
        return NULL;
    }
}

static bool bad_cache_params(int blksize, int assoc, int cachesize)
{
    return (cachesize % blksize) != 0 || (cachesize % (blksize * assoc) != 0);
}

static Cache *cache_init(int blksize, int assoc, int cachesize)
{
    Cache *cache;
    int i;
    uint64_t blk_mask;

    /*
     * This function shall not be called directly, and hence expects suitable
     * parameters.
     */
    g_assert(!bad_cache_params(blksize, assoc, cachesize));

    cache = g_new(Cache, 1);
    cache->assoc = assoc;
    cache->cachesize = cachesize;
    cache->num_sets = cachesize / (blksize * assoc);
    cache->sets = g_new(CacheSet, cache->num_sets);
    cache->blksize_shift = pow_of_two(blksize);

    for (i = 0; i < cache->num_sets; i++) {
        cache->sets[i].blocks = g_new0(CacheBlock, assoc);
    }

    blk_mask = blksize - 1;
    cache->set_mask = ((cache->num_sets - 1) << cache->blksize_shift);
    cache->tag_mask = ~(cache->set_mask | blk_mask);

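    /*
     * Example (hypothetical values, not the defaults): blksize = 64 and
     * num_sets = 64 give blksize_shift = 6, blk_mask = 0x3f, set_mask = 0xfc0
     * and tag_mask = ~0xfff.
     */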
    metadata_init(cache);

    return cache;
}

static Cache **caches_init(int blksize, int assoc, int cachesize)
{
    Cache **caches;
    int i;

    if (bad_cache_params(blksize, assoc, cachesize)) {
        return NULL;
    }

    caches = g_new(Cache *, cores);

    for (i = 0; i < cores; i++) {
        caches[i] = cache_init(blksize, assoc, cachesize);
    }

    return caches;
}

static int get_invalid_block(Cache *cache, uint64_t set)
{
    int i;

    for (i = 0; i < cache->assoc; i++) {
        if (!cache->sets[set].blocks[i].valid) {
            /* found an empty block, no eviction needed */
            return i;
        }
    }

    /* the set is full: the caller must pick a block to evict */
    return -1;
}

static int get_replaced_block(Cache *cache, int set)
{
    switch (policy) {
    case RAND:
        return g_rand_int_range(rng, 0, cache->assoc);
    case LRU:
        return lru_get_lru_block(cache, set);
    case FIFO:
        return fifo_get_first_block(cache, set);
    default:
        g_assert_not_reached();
    }
}

static int in_cache(Cache *cache, uint64_t addr)
{
    int i;
    uint64_t tag, set;

    tag = extract_tag(cache, addr);
    set = extract_set(cache, addr);

    for (i = 0; i < cache->assoc; i++) {
        if (cache->sets[set].blocks[i].tag == tag &&
                cache->sets[set].blocks[i].valid) {
            return i;
        }
    }

    return -1;
}

/*
 * access_cache(): Simulate a cache access
 * @cache: The cache under simulation
 * @addr: The address of the requested memory location
 *
 * Returns true if the requested data is hit in the cache and false when missed.
 * The cache is updated on miss for the next access.
 */
static bool access_cache(Cache *cache, uint64_t addr)
{
    int hit_blk, replaced_blk;
    uint64_t tag, set;

    tag = extract_tag(cache, addr);
    set = extract_set(cache, addr);

    hit_blk = in_cache(cache, addr);
    if (hit_blk != -1) {
        if (update_hit) {
            update_hit(cache, set, hit_blk);
        }
        return true;
    }

    replaced_blk = get_invalid_block(cache, set);

    if (replaced_blk == -1) {
        replaced_blk = get_replaced_block(cache, set);
    }

    if (update_miss) {
        update_miss(cache, set, replaced_blk);
    }

    cache->sets[set].blocks[replaced_blk].tag = tag;
    cache->sets[set].blocks[replaced_blk].valid = true;

    return false;
}

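/*
 * access_cache() itself takes no lock; the callbacks below serialise accesses
 * per modelled core, e.g. (sketch of the pattern used by vcpu_mem_access()):
 *
 *     g_mutex_lock(&l1_dcache_locks[cache_idx]);
 *     hit_in_l1 = access_cache(l1_dcaches[cache_idx], effective_addr);
 *     ...
 *     g_mutex_unlock(&l1_dcache_locks[cache_idx]);
 */
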
static void vcpu_mem_access(unsigned int vcpu_index, qemu_plugin_meminfo_t info,
                            uint64_t vaddr, void *userdata)
{
    uint64_t effective_addr;
    struct qemu_plugin_hwaddr *hwaddr;
    int cache_idx;
    InsnData *insn;
    bool hit_in_l1;

    hwaddr = qemu_plugin_get_hwaddr(info, vaddr);
    if (hwaddr && qemu_plugin_hwaddr_is_io(hwaddr)) {
        return;
    }

    effective_addr = hwaddr ? qemu_plugin_hwaddr_phys_addr(hwaddr) : vaddr;
    cache_idx = vcpu_index % cores;

    g_mutex_lock(&l1_dcache_locks[cache_idx]);
    hit_in_l1 = access_cache(l1_dcaches[cache_idx], effective_addr);
    if (!hit_in_l1) {
        insn = (InsnData *) userdata;
        __atomic_fetch_add(&insn->l1_dmisses, 1, __ATOMIC_SEQ_CST);
        l1_dcaches[cache_idx]->misses++;
    }
    l1_dcaches[cache_idx]->accesses++;
    g_mutex_unlock(&l1_dcache_locks[cache_idx]);

    if (hit_in_l1 || !use_l2) {
        /* No need to access L2 */
        return;
    }

    g_mutex_lock(&l2_ucache_locks[cache_idx]);
    if (!access_cache(l2_ucaches[cache_idx], effective_addr)) {
        insn = (InsnData *) userdata;
        __atomic_fetch_add(&insn->l2_misses, 1, __ATOMIC_SEQ_CST);
        l2_ucaches[cache_idx]->misses++;
    }
    l2_ucaches[cache_idx]->accesses++;
    g_mutex_unlock(&l2_ucache_locks[cache_idx]);
}

static void vcpu_insn_exec(unsigned int vcpu_index, void *userdata)
{
    uint64_t insn_addr;
    int cache_idx;
    InsnData *insn;
    bool hit_in_l1;

    insn_addr = ((InsnData *) userdata)->addr;

    cache_idx = vcpu_index % cores;
    g_mutex_lock(&l1_icache_locks[cache_idx]);
    hit_in_l1 = access_cache(l1_icaches[cache_idx], insn_addr);
    if (!hit_in_l1) {
        insn = (InsnData *) userdata;
        __atomic_fetch_add(&insn->l1_imisses, 1, __ATOMIC_SEQ_CST);
        l1_icaches[cache_idx]->misses++;
    }
    l1_icaches[cache_idx]->accesses++;
    g_mutex_unlock(&l1_icache_locks[cache_idx]);

    if (hit_in_l1 || !use_l2) {
        /* No need to access L2 */
        return;
    }

    g_mutex_lock(&l2_ucache_locks[cache_idx]);
    if (!access_cache(l2_ucaches[cache_idx], insn_addr)) {
        insn = (InsnData *) userdata;
        __atomic_fetch_add(&insn->l2_misses, 1, __ATOMIC_SEQ_CST);
        l2_ucaches[cache_idx]->misses++;
    }
    l2_ucaches[cache_idx]->accesses++;
    g_mutex_unlock(&l2_ucache_locks[cache_idx]);
}

static void vcpu_tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb)
{
    size_t n_insns;
    size_t i;
    InsnData *data;

    n_insns = qemu_plugin_tb_n_insns(tb);
    for (i = 0; i < n_insns; i++) {
        struct qemu_plugin_insn *insn = qemu_plugin_tb_get_insn(tb, i);
        uint64_t effective_addr;

        if (sys) {
            effective_addr = (uint64_t) qemu_plugin_insn_haddr(insn);
        } else {
            effective_addr = (uint64_t) qemu_plugin_insn_vaddr(insn);
        }

        /*
         * Instructions might get translated multiple times; we do not create
         * new entries for those instructions. Instead, we fetch the same
         * entry from the hash table and register it for the callback again.
         */
        g_mutex_lock(&hashtable_lock);
        data = g_hash_table_lookup(miss_ht, GUINT_TO_POINTER(effective_addr));
        if (data == NULL) {
            data = g_new0(InsnData, 1);
            data->disas_str = qemu_plugin_insn_disas(insn);
            data->symbol = qemu_plugin_insn_symbol(insn);
            data->addr = effective_addr;
            g_hash_table_insert(miss_ht, GUINT_TO_POINTER(effective_addr),
                                (gpointer) data);
        }
        g_mutex_unlock(&hashtable_lock);

        qemu_plugin_register_vcpu_mem_cb(insn, vcpu_mem_access,
                                         QEMU_PLUGIN_CB_NO_REGS,
                                         rw, data);

        qemu_plugin_register_vcpu_insn_exec_cb(insn, vcpu_insn_exec,
                                               QEMU_PLUGIN_CB_NO_REGS, data);
    }
}

static void insn_free(gpointer data)
{
    InsnData *insn = (InsnData *) data;
    g_free(insn->disas_str);
    g_free(insn);
}

static void cache_free(Cache *cache)
{
    for (int i = 0; i < cache->num_sets; i++) {
        g_free(cache->sets[i].blocks);
    }

    if (metadata_destroy) {
        metadata_destroy(cache);
    }

    g_free(cache->sets);
    g_free(cache);
}

static void caches_free(Cache **caches)
{
    int i;

    for (i = 0; i < cores; i++) {
        cache_free(caches[i]);
    }
}

static void append_stats_line(GString *line, uint64_t l1_daccess,
                              uint64_t l1_dmisses, uint64_t l1_iaccess,
                              uint64_t l1_imisses, uint64_t l2_access,
                              uint64_t l2_misses)
{
    double l1_dmiss_rate, l1_imiss_rate, l2_miss_rate;

    l1_dmiss_rate = ((double) l1_dmisses) / (l1_daccess) * 100.0;
    l1_imiss_rate = ((double) l1_imisses) / (l1_iaccess) * 100.0;

    g_string_append_printf(line, "%-14lu %-12lu %9.4lf%% %-14lu %-12lu"
                           " %9.4lf%%",
                           l1_daccess, l1_dmisses,
                           l1_daccess ? l1_dmiss_rate : 0.0,
                           l1_iaccess, l1_imisses,
                           l1_iaccess ? l1_imiss_rate : 0.0);

    if (use_l2) {
        l2_miss_rate = ((double) l2_misses) / (l2_access) * 100.0;
        g_string_append_printf(line, " %-12lu %-11lu %10.4lf%%",
                               l2_access, l2_misses,
                               l2_access ? l2_miss_rate : 0.0);
    }

    g_string_append(line, "\n");
}

static void sum_stats(void)
{
    int i;

    for (i = 0; i < cores; i++) {
        l1_imisses += l1_icaches[i]->misses;
        l1_dmisses += l1_dcaches[i]->misses;
        l1_imem_accesses += l1_icaches[i]->accesses;
        l1_dmem_accesses += l1_dcaches[i]->accesses;

        if (use_l2) {
            l2_misses += l2_ucaches[i]->misses;
            l2_mem_accesses += l2_ucaches[i]->accesses;
        }
    }
}

static int dcmp(gconstpointer a, gconstpointer b)
{
    InsnData *insn_a = (InsnData *) a;
    InsnData *insn_b = (InsnData *) b;

    return insn_a->l1_dmisses < insn_b->l1_dmisses ? 1 : -1;
}

static int icmp(gconstpointer a, gconstpointer b)
{
    InsnData *insn_a = (InsnData *) a;
    InsnData *insn_b = (InsnData *) b;

    return insn_a->l1_imisses < insn_b->l1_imisses ? 1 : -1;
}

static int l2_cmp(gconstpointer a, gconstpointer b)
{
    InsnData *insn_a = (InsnData *) a;
    InsnData *insn_b = (InsnData *) b;

    return insn_a->l2_misses < insn_b->l2_misses ? 1 : -1;
}

static void log_stats(void)
{
    int i;
    Cache *icache, *dcache, *l2_cache;

    g_autoptr(GString) rep = g_string_new("core #, data accesses, data misses,"
                                          " dmiss rate, insn accesses,"
                                          " insn misses, imiss rate");

    if (use_l2) {
        g_string_append(rep, ", l2 accesses, l2 misses, l2 miss rate");
    }

    g_string_append(rep, "\n");

    for (i = 0; i < cores; i++) {
        g_string_append_printf(rep, "%-8d", i);
        dcache = l1_dcaches[i];
        icache = l1_icaches[i];
        l2_cache = use_l2 ? l2_ucaches[i] : NULL;
        append_stats_line(rep, dcache->accesses, dcache->misses,
                          icache->accesses, icache->misses,
                          l2_cache ? l2_cache->accesses : 0,
                          l2_cache ? l2_cache->misses : 0);
    }

    if (cores > 1) {
        sum_stats();
        g_string_append_printf(rep, "%-8s", "sum");
        append_stats_line(rep, l1_dmem_accesses, l1_dmisses,
                          l1_imem_accesses, l1_imisses,
                          l2_cache ? l2_mem_accesses : 0,
                          l2_cache ? l2_misses : 0);
    }

    g_string_append(rep, "\n");
    qemu_plugin_outs(rep->str);
}

static void log_top_insns(void)
{
    int i;
    InsnData *insn;
    GList *curr, *miss_insns;

    miss_insns = g_hash_table_get_values(miss_ht);
    miss_insns = g_list_sort(miss_insns, dcmp);
    g_autoptr(GString) rep = g_string_new("");
    g_string_append_printf(rep, "%s", "address, data misses, instruction\n");

    for (curr = miss_insns, i = 0; curr && i < limit; i++, curr = curr->next) {
        insn = (InsnData *) curr->data;
        g_string_append_printf(rep, "0x%" PRIx64, insn->addr);
        if (insn->symbol) {
            g_string_append_printf(rep, " (%s)", insn->symbol);
        }
        g_string_append_printf(rep, ", %ld, %s\n", insn->l1_dmisses,
                               insn->disas_str);
    }

    miss_insns = g_list_sort(miss_insns, icmp);
    g_string_append_printf(rep, "%s", "\naddress, fetch misses, instruction\n");

    for (curr = miss_insns, i = 0; curr && i < limit; i++, curr = curr->next) {
        insn = (InsnData *) curr->data;
        g_string_append_printf(rep, "0x%" PRIx64, insn->addr);
        if (insn->symbol) {
            g_string_append_printf(rep, " (%s)", insn->symbol);
        }
        g_string_append_printf(rep, ", %ld, %s\n", insn->l1_imisses,
                               insn->disas_str);
    }

    miss_insns = g_list_sort(miss_insns, l2_cmp);
    g_string_append_printf(rep, "%s", "\naddress, L2 misses, instruction\n");

    for (curr = miss_insns, i = 0; curr && i < limit; i++, curr = curr->next) {
        insn = (InsnData *) curr->data;
        g_string_append_printf(rep, "0x%" PRIx64, insn->addr);
        if (insn->symbol) {
            g_string_append_printf(rep, " (%s)", insn->symbol);
        }
        g_string_append_printf(rep, ", %ld, %s\n", insn->l2_misses,
                               insn->disas_str);
    }

    qemu_plugin_outs(rep->str);
    g_list_free(miss_insns);
}

static void plugin_exit(qemu_plugin_id_t id, void *p)
{
    log_stats();
    log_top_insns();

    caches_free(l1_dcaches);
    caches_free(l1_icaches);

    g_free(l1_dcache_locks);
    g_free(l1_icache_locks);

    if (use_l2) {
        caches_free(l2_ucaches);
        g_free(l2_ucache_locks);
    }

    g_hash_table_destroy(miss_ht);
}

static void policy_init(void)
{
    switch (policy) {
    case LRU:
        update_hit = lru_update_blk;
        update_miss = lru_update_blk;
        metadata_init = lru_priorities_init;
        metadata_destroy = lru_priorities_destroy;
        break;
    case FIFO:
        update_miss = fifo_update_on_miss;
        metadata_init = fifo_init;
        metadata_destroy = fifo_destroy;
        break;
    case RAND:
        rng = g_rand_new();
        break;
    default:
        g_assert_not_reached();
    }
}

QEMU_PLUGIN_EXPORT
int qemu_plugin_install(qemu_plugin_id_t id, const qemu_info_t *info,
                        int argc, char **argv)
{
    int i;
    int l1_iassoc, l1_iblksize, l1_icachesize;
    int l1_dassoc, l1_dblksize, l1_dcachesize;
    int l2_assoc, l2_blksize, l2_cachesize;

    sys = info->system_emulation;

    l1_dcachesize = l1_dblksize * l1_dassoc * 32;
    l1_icachesize = l1_iblksize * l1_iassoc * 32;
    l2_cachesize = l2_assoc * l2_blksize * 2048;

    cores = sys ? qemu_plugin_n_vcpus() : 1;

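    /*
     * The options below are parsed as "key=value" pairs. An example invocation
     * (a sketch; the binary name, plugin path and the chosen values are
     * hypothetical):
     *
     *   qemu-x86_64 -d plugin \
     *       -plugin ./libcache.so,dcachesize=32768,dassoc=8,evict=lru,l2=on \
     *       ./a.out
     */
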
    for (i = 0; i < argc; i++) {
        char *opt = argv[i];
        g_autofree char **tokens = g_strsplit(opt, "=", 2);

        if (g_strcmp0(tokens[0], "iblksize") == 0) {
            l1_iblksize = STRTOLL(tokens[1]);
        } else if (g_strcmp0(tokens[0], "iassoc") == 0) {
            l1_iassoc = STRTOLL(tokens[1]);
        } else if (g_strcmp0(tokens[0], "icachesize") == 0) {
            l1_icachesize = STRTOLL(tokens[1]);
        } else if (g_strcmp0(tokens[0], "dblksize") == 0) {
            l1_dblksize = STRTOLL(tokens[1]);
        } else if (g_strcmp0(tokens[0], "dassoc") == 0) {
            l1_dassoc = STRTOLL(tokens[1]);
        } else if (g_strcmp0(tokens[0], "dcachesize") == 0) {
            l1_dcachesize = STRTOLL(tokens[1]);
        } else if (g_strcmp0(tokens[0], "limit") == 0) {
            limit = STRTOLL(tokens[1]);
        } else if (g_strcmp0(tokens[0], "cores") == 0) {
            cores = STRTOLL(tokens[1]);
        } else if (g_strcmp0(tokens[0], "l2cachesize") == 0) {
            l2_cachesize = STRTOLL(tokens[1]);
        } else if (g_strcmp0(tokens[0], "l2blksize") == 0) {
            l2_blksize = STRTOLL(tokens[1]);
        } else if (g_strcmp0(tokens[0], "l2assoc") == 0) {
            l2_assoc = STRTOLL(tokens[1]);
        } else if (g_strcmp0(tokens[0], "l2") == 0) {
            if (!qemu_plugin_bool_parse(tokens[0], tokens[1], &use_l2)) {
                fprintf(stderr, "boolean argument parsing failed: %s\n", opt);
                return -1;
            }
        } else if (g_strcmp0(tokens[0], "evict") == 0) {
            if (g_strcmp0(tokens[1], "rand") == 0) {
                policy = RAND;
            } else if (g_strcmp0(tokens[1], "lru") == 0) {
                policy = LRU;
            } else if (g_strcmp0(tokens[1], "fifo") == 0) {
                policy = FIFO;
            } else {
                fprintf(stderr, "invalid eviction policy: %s\n", opt);
                return -1;
            }
        } else {
            fprintf(stderr, "option parsing failed: %s\n", opt);
            return -1;
        }
    }

    policy_init();

    l1_dcaches = caches_init(l1_dblksize, l1_dassoc, l1_dcachesize);
    if (!l1_dcaches) {
        const char *err = cache_config_error(l1_dblksize, l1_dassoc, l1_dcachesize);
        fprintf(stderr, "dcache cannot be constructed from given parameters\n");
        fprintf(stderr, "%s\n", err);
        return -1;
    }

    l1_icaches = caches_init(l1_iblksize, l1_iassoc, l1_icachesize);
    if (!l1_icaches) {
        const char *err = cache_config_error(l1_iblksize, l1_iassoc, l1_icachesize);
        fprintf(stderr, "icache cannot be constructed from given parameters\n");
        fprintf(stderr, "%s\n", err);
        return -1;
    }

    l2_ucaches = use_l2 ? caches_init(l2_blksize, l2_assoc, l2_cachesize) : NULL;
    if (!l2_ucaches && use_l2) {
        const char *err = cache_config_error(l2_blksize, l2_assoc, l2_cachesize);
        fprintf(stderr, "L2 cache cannot be constructed from given parameters\n");
        fprintf(stderr, "%s\n", err);
        return -1;
    }

    l1_dcache_locks = g_new0(GMutex, cores);
    l1_icache_locks = g_new0(GMutex, cores);
    l2_ucache_locks = use_l2 ? g_new0(GMutex, cores) : NULL;

    qemu_plugin_register_vcpu_tb_trans_cb(id, vcpu_tb_trans);
    qemu_plugin_register_atexit_cb(id, plugin_exit, NULL);

    miss_ht = g_hash_table_new_full(NULL, g_direct_equal, NULL, insn_free);

    return 0;
}