/*
 * HAX memory mapping operations
 *
 * Copyright (c) 2015-16 Intel Corporation
 * Copyright 2016 Google, Inc.
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/address-spaces.h"
#include "exec/exec-all.h"
#include "qemu/error-report.h"

#include "target/i386/hax-i386.h"
#include "qemu/queue.h"
#define DEBUG_HAX_MEM 0

#define DPRINTF(fmt, ...) \
    do { \
        if (DEBUG_HAX_MEM) { \
            fprintf(stdout, fmt, ## __VA_ARGS__); \
        } \
    } while (0)
/**
 * HAXMapping: describes a pending guest physical memory mapping
 *
 * @start_pa: a guest physical address marking the start of the region; must be
 *            page-aligned
 * @size: the size of the region in bytes; must be a multiple of the page size
 * @host_va: the host virtual address of the start of the mapping
 * @flags: mapping parameters, e.g. HAX_RAM_INFO_ROM or HAX_RAM_INFO_INVALID
 * @entry: additional fields for linking #HAXMapping instances together
 */
typedef struct HAXMapping {
    uint64_t start_pa;
    uint32_t size;
    uint64_t host_va;
    int flags;
    QTAILQ_ENTRY(HAXMapping) entry;
} HAXMapping;
/*
 * A doubly-linked list (actually a tail queue) of the pending page mappings
 * for the ongoing memory transaction.
 *
 * It is used to optimize the number of page mapping updates done through the
 * kernel module. For example, it is effective when a driver is digging an
 * MMIO hole inside an existing memory mapping: QEMU issues a deletion of the
 * whole region, then the addition of the two remaining RAM areas around the
 * hole, and finally the memory transaction commit. During the commit, after
 * having computed locally the net result of the deletion and additions, only
 * the removal of the pages from the MMIO hole is sent to the kernel.
 */
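/*
 * Concretely (addresses hypothetical): deleting RAM [0x0, 0x10000) and then
 * re-adding [0x0, 0xc000) and [0xe000, 0x10000) in the same transaction
 * leaves a single pending entry for the hole, so the commit issues one
 * unmapping of [0xc000, 0xe000) instead of three full-region updates.
 */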
static QTAILQ_HEAD(HAXMappingListHead, HAXMapping) mappings =
    QTAILQ_HEAD_INITIALIZER(mappings);
/**
 * hax_mapping_dump_list: dumps @mappings to stdout (for debugging)
 */
static void hax_mapping_dump_list(void)
{
    HAXMapping *entry;

    DPRINTF("%s updates:\n", __func__);
    QTAILQ_FOREACH(entry, &mappings, entry) {
        DPRINTF("\t%c 0x%016" PRIx64 "->0x%016" PRIx64 " VA 0x%016" PRIx64
                "%s\n", entry->flags & HAX_RAM_INFO_INVALID ? '-' : '+',
                entry->start_pa, entry->start_pa + entry->size, entry->host_va,
                entry->flags & HAX_RAM_INFO_ROM ? " ROM" : "");
    }
}
static void hax_insert_mapping_before(HAXMapping *next, uint64_t start_pa,
                                      uint32_t size, uint64_t host_va,
                                      uint8_t flags)
{
    HAXMapping *entry;

    entry = g_malloc0(sizeof(*entry));
    entry->start_pa = start_pa;
    entry->size = size;
    entry->host_va = host_va;
    entry->flags = flags;
    if (!next) {
        QTAILQ_INSERT_TAIL(&mappings, entry, entry);
    } else {
        QTAILQ_INSERT_BEFORE(next, entry, entry);
    }
}
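/*
 * hax_mapping_is_opposite: returns true if applying an update with @host_va
 * and @flags to @entry would be a no-op, i.e. both refer to the same host VA
 * and their flags differ only in HAX_RAM_INFO_INVALID (one maps exactly what
 * the other unmaps, with the read-only bit unchanged).
 */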
static bool hax_mapping_is_opposite(HAXMapping *entry, uint64_t host_va,
                                    uint8_t flags)
{
    /* removed then added without change for the read-only flag */
    bool nop_flags = (entry->flags ^ flags) == HAX_RAM_INFO_INVALID;

    return (entry->host_va == host_va) && nop_flags;
}
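/*
 * hax_update_mapping: folds an update for [@start_pa, @start_pa + @size)
 * into the pending @mappings list, which is kept sorted by start_pa. Each
 * iteration first aligns @start_pa with the current entry (inserting any
 * non-overlapping prefix, or splitting the entry), then resolves the
 * overlap: opposite updates cancel each other out, while any other update
 * overwrites the entry's host VA and flags.
 */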
static void hax_update_mapping(uint64_t start_pa, uint32_t size,
                               uint64_t host_va, uint8_t flags)
{
    uint64_t end_pa = start_pa + size;
    HAXMapping *entry, *next;

    QTAILQ_FOREACH_SAFE(entry, &mappings, entry, next) {
        uint32_t chunk_sz;
        if (start_pa >= entry->start_pa + entry->size) {
            continue;
        }
        if (start_pa < entry->start_pa) {
            chunk_sz = end_pa <= entry->start_pa ? size
                       : entry->start_pa - start_pa;
            hax_insert_mapping_before(entry, start_pa, chunk_sz,
                                      host_va, flags);
            start_pa += chunk_sz;
            host_va += chunk_sz;
            size -= chunk_sz;
        } else if (start_pa > entry->start_pa) {
            /* split the existing chunk at start_pa */
            chunk_sz = start_pa - entry->start_pa;
            hax_insert_mapping_before(entry, entry->start_pa, chunk_sz,
                                      entry->host_va, entry->flags);
            entry->start_pa += chunk_sz;
            entry->host_va += chunk_sz;
            entry->size -= chunk_sz;
        }
        /* now start_pa == entry->start_pa */
        chunk_sz = MIN(size, entry->size);
        if (chunk_sz) {
            bool nop = hax_mapping_is_opposite(entry, host_va, flags);
            bool partial = chunk_sz < entry->size;
            if (partial) {
                /* remove the beginning of the existing chunk */
                entry->start_pa += chunk_sz;
                entry->host_va += chunk_sz;
                entry->size -= chunk_sz;
                if (!nop) {
                    hax_insert_mapping_before(entry, start_pa, chunk_sz,
                                              host_va, flags);
                }
            } else { /* affects the full mapping entry */
                if (nop) { /* no change to this mapping, remove it */
                    QTAILQ_REMOVE(&mappings, entry, entry);
                    g_free(entry);
                } else { /* update mapping properties */
                    entry->host_va = host_va;
                    entry->flags = flags;
                }
            }
            start_pa += chunk_sz;
            host_va += chunk_sz;
            size -= chunk_sz;
        }
        if (!size) { /* we are done */
            break;
        }
    }
    if (size) { /* add the leftover */
        hax_insert_mapping_before(NULL, start_pa, size, host_va, flags);
    }
}
static void hax_process_section(MemoryRegionSection *section, uint8_t flags)
{
    MemoryRegion *mr = section->mr;
    hwaddr start_pa = section->offset_within_address_space;
    ram_addr_t size = int128_get64(section->size);
    unsigned int delta;
    uint64_t host_va;

    /* We only care about RAM and ROM regions */
    if (!memory_region_is_ram(mr)) {
        if (memory_region_is_romd(mr)) {
            /* HAXM kernel module does not support ROMD yet */
            warn_report("Ignoring ROMD region 0x%016" PRIx64 "->0x%016" PRIx64,
                        start_pa, start_pa + size);
        }
        return;
    }
    /* Adjust start_pa and size so that they are page-aligned. (Cf
     * kvm_set_phys_mem() in kvm-all.c).
     */
    delta = qemu_real_host_page_size - (start_pa & ~qemu_real_host_page_mask);
    delta &= ~qemu_real_host_page_mask;
    if (delta > size) {
        return;
    }
    start_pa += delta;
    size -= delta;
    size &= qemu_real_host_page_mask;
    if (!size || (start_pa & ~qemu_real_host_page_mask)) {
        return;
    }
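    /*
     * E.g. with hypothetical 4 KiB host pages, a section at start_pa 0x1234
     * of size 0x3000 is trimmed to [0x2000, 0x4000): delta is
     * 0x1000 - 0x234 = 0xdcc, leaving the largest page-aligned range fully
     * contained in the section.
     */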

    host_va = (uintptr_t)memory_region_get_ram_ptr(mr)
            + section->offset_within_region + delta;
    if (memory_region_is_rom(section->mr)) {
        flags |= HAX_RAM_INFO_ROM;
    }

    /* the kernel module interface uses 32-bit sizes (but we could split...) */
    g_assert(size <= UINT32_MAX);

    hax_update_mapping(start_pa, size, host_va, flags);
}
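/*
 * The MemoryListener callbacks below translate section additions and
 * removals into pending mapping updates: additions carry flags 0 (plus
 * HAX_RAM_INFO_ROM where applicable), removals carry HAX_RAM_INFO_INVALID.
 * The memory region reference taken on add is held until the matching del.
 */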
static void hax_region_add(MemoryListener *listener,
                           MemoryRegionSection *section)
{
    memory_region_ref(section->mr);
    hax_process_section(section, 0);
}
static void hax_region_del(MemoryListener *listener,
                           MemoryRegionSection *section)
{
    hax_process_section(section, HAX_RAM_INFO_INVALID);
    memory_region_unref(section->mr);
}
static void hax_transaction_begin(MemoryListener *listener)
{
    g_assert(QTAILQ_EMPTY(&mappings));
}
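/*
 * hax_transaction_commit: flushes the locally folded mapping list to the
 * kernel module, one hax_set_ram() call per remaining entry, and leaves
 * the list empty for the next transaction.
 */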
static void hax_transaction_commit(MemoryListener *listener)
{
    if (!QTAILQ_EMPTY(&mappings)) {
        HAXMapping *entry, *next;

        if (DEBUG_HAX_MEM) {
            hax_mapping_dump_list();
        }
        QTAILQ_FOREACH_SAFE(entry, &mappings, entry, next) {
            if (entry->flags & HAX_RAM_INFO_INVALID) {
                /* for unmapping, put the values expected by the kernel */
                entry->flags = HAX_RAM_INFO_INVALID;
                entry->host_va = 0;
            }
            if (hax_set_ram(entry->start_pa, entry->size,
                            entry->host_va, entry->flags)) {
                fprintf(stderr, "%s: Failed mapping @0x%016" PRIx64 "+0x%"
                        PRIx32 " flags %02x\n", __func__, entry->start_pa,
                        entry->size, entry->flags);
            }
            QTAILQ_REMOVE(&mappings, entry, entry);
            g_free(entry);
        }
    }
}
/* currently we fake the dirty bitmap sync, always dirty */
static void hax_log_sync(MemoryListener *listener,
                         MemoryRegionSection *section)
{
    MemoryRegion *mr = section->mr;

    if (!memory_region_is_ram(mr)) {
        /* Skip MMIO regions */
        return;
    }

    memory_region_set_dirty(mr, 0, int128_get64(section->size));
}
static MemoryListener hax_memory_listener = {
    .begin = hax_transaction_begin,
    .commit = hax_transaction_commit,
    .region_add = hax_region_add,
    .region_del = hax_region_del,
    .log_sync = hax_log_sync,
    .priority = 10,
};
static void hax_ram_block_added(RAMBlockNotifier *n, void *host, size_t size)
{
    /*
     * In HAX, QEMU allocates the virtual address, and the HAX kernel module
     * populates the memory with physical memory. Currently we have no
     * paging, so the user should ensure enough free memory is available in
     * advance.
     */
    if (hax_populate_ram((uint64_t)(uintptr_t)host, size) < 0) {
        fprintf(stderr, "HAX failed to populate RAM\n");
        abort();
    }
}
static struct RAMBlockNotifier hax_ram_notifier = {
    .ram_block_added = hax_ram_block_added,
};
void hax_memory_init(void)
{
    ram_block_notifier_add(&hax_ram_notifier);
    memory_listener_register(&hax_memory_listener, &address_space_memory);
}