/*
 * HAX memory mapping operations
 *
 * Copyright (c) 2015-16 Intel Corporation
 * Copyright 2016 Google, Inc.
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 */
11 #include "qemu/osdep.h"
13 #include "exec/address-spaces.h"
14 #include "exec/exec-all.h"
16 #include "target/i386/hax-i386.h"
17 #include "qemu/queue.h"
#define DEBUG_HAX_MEM 0

/* Debug logging helper: the printf call is compiled out (but its format
 * string and arguments are still type-checked) when DEBUG_HAX_MEM is 0.
 * The do { ... } while (0) wrapper makes the macro expand to exactly one
 * statement, so it is safe in unbraced if/else bodies.
 */
#define DPRINTF(fmt, ...) \
    do { \
        if (DEBUG_HAX_MEM) { \
            fprintf(stdout, fmt, ## __VA_ARGS__); \
        } \
    } while (0)
29 * HAXMapping: describes a pending guest physical memory mapping
31 * @start_pa: a guest physical address marking the start of the region; must be
33 * @size: a guest physical address marking the end of the region; must be
35 * @host_va: the host virtual address of the start of the mapping
36 * @flags: mapping parameters e.g. HAX_RAM_INFO_ROM or HAX_RAM_INFO_INVALID
37 * @entry: additional fields for linking #HAXMapping instances together
39 typedef struct HAXMapping
{
44 QTAILQ_ENTRY(HAXMapping
) entry
;
48 * A doubly-linked list (actually a tail queue) of the pending page mappings
49 * for the ongoing memory transaction.
51 * It is used to optimize the number of page mapping updates done through the
52 * kernel module. For example, it's effective when a driver is digging an MMIO
53 * hole inside an existing memory mapping. It will get a deletion of the whole
54 * region, then the addition of the 2 remaining RAM areas around the hole and
55 * finally the memory transaction commit. During the commit, it will effectively
56 * send to the kernel only the removal of the pages from the MMIO hole after
57 * having computed locally the result of the deletion and additions.
59 static QTAILQ_HEAD(HAXMappingListHead
, HAXMapping
) mappings
=
60 QTAILQ_HEAD_INITIALIZER(mappings
);
63 * hax_mapping_dump_list: dumps @mappings to stdout (for debugging)
65 static void hax_mapping_dump_list(void)
69 DPRINTF("%s updates:\n", __func__
);
70 QTAILQ_FOREACH(entry
, &mappings
, entry
) {
71 DPRINTF("\t%c 0x%016" PRIx64
"->0x%016" PRIx64
" VA 0x%016" PRIx64
72 "%s\n", entry
->flags
& HAX_RAM_INFO_INVALID
? '-' : '+',
73 entry
->start_pa
, entry
->start_pa
+ entry
->size
, entry
->host_va
,
74 entry
->flags
& HAX_RAM_INFO_ROM
? " ROM" : "");
78 static void hax_insert_mapping_before(HAXMapping
*next
, uint64_t start_pa
,
79 uint32_t size
, uint64_t host_va
,
84 entry
= g_malloc0(sizeof(*entry
));
85 entry
->start_pa
= start_pa
;
87 entry
->host_va
= host_va
;
90 QTAILQ_INSERT_TAIL(&mappings
, entry
, entry
);
92 QTAILQ_INSERT_BEFORE(next
, entry
, entry
);
96 static bool hax_mapping_is_opposite(HAXMapping
*entry
, uint64_t host_va
,
99 /* removed then added without change for the read-only flag */
100 bool nop_flags
= (entry
->flags
^ flags
) == HAX_RAM_INFO_INVALID
;
102 return (entry
->host_va
== host_va
) && nop_flags
;
105 static void hax_update_mapping(uint64_t start_pa
, uint32_t size
,
106 uint64_t host_va
, uint8_t flags
)
108 uint64_t end_pa
= start_pa
+ size
;
109 HAXMapping
*entry
, *next
;
111 QTAILQ_FOREACH_SAFE(entry
, &mappings
, entry
, next
) {
113 if (start_pa
>= entry
->start_pa
+ entry
->size
) {
116 if (start_pa
< entry
->start_pa
) {
117 chunk_sz
= end_pa
<= entry
->start_pa
? size
118 : entry
->start_pa
- start_pa
;
119 hax_insert_mapping_before(entry
, start_pa
, chunk_sz
,
121 start_pa
+= chunk_sz
;
124 } else if (start_pa
> entry
->start_pa
) {
125 /* split the existing chunk at start_pa */
126 chunk_sz
= start_pa
- entry
->start_pa
;
127 hax_insert_mapping_before(entry
, entry
->start_pa
, chunk_sz
,
128 entry
->host_va
, entry
->flags
);
129 entry
->start_pa
+= chunk_sz
;
130 entry
->host_va
+= chunk_sz
;
131 entry
->size
-= chunk_sz
;
133 /* now start_pa == entry->start_pa */
134 chunk_sz
= MIN(size
, entry
->size
);
136 bool nop
= hax_mapping_is_opposite(entry
, host_va
, flags
);
137 bool partial
= chunk_sz
< entry
->size
;
139 /* remove the beginning of the existing chunk */
140 entry
->start_pa
+= chunk_sz
;
141 entry
->host_va
+= chunk_sz
;
142 entry
->size
-= chunk_sz
;
144 hax_insert_mapping_before(entry
, start_pa
, chunk_sz
,
147 } else { /* affects the full mapping entry */
148 if (nop
) { /* no change to this mapping, remove it */
149 QTAILQ_REMOVE(&mappings
, entry
, entry
);
151 } else { /* update mapping properties */
152 entry
->host_va
= host_va
;
153 entry
->flags
= flags
;
156 start_pa
+= chunk_sz
;
160 if (!size
) { /* we are done */
164 if (size
) { /* add the leftover */
165 hax_insert_mapping_before(NULL
, start_pa
, size
, host_va
, flags
);
169 static void hax_process_section(MemoryRegionSection
*section
, uint8_t flags
)
171 MemoryRegion
*mr
= section
->mr
;
172 hwaddr start_pa
= section
->offset_within_address_space
;
173 ram_addr_t size
= int128_get64(section
->size
);
177 /* We only care about RAM and ROM regions */
178 if (!memory_region_is_ram(mr
)) {
179 if (memory_region_is_romd(mr
)) {
180 /* HAXM kernel module does not support ROMD yet */
181 fprintf(stderr
, "%s: Warning: Ignoring ROMD region 0x%016" PRIx64
182 "->0x%016" PRIx64
"\n", __func__
, start_pa
,
188 /* Adjust start_pa and size so that they are page-aligned. (Cf
189 * kvm_set_phys_mem() in kvm-all.c).
191 delta
= qemu_real_host_page_size
- (start_pa
& ~qemu_real_host_page_mask
);
192 delta
&= ~qemu_real_host_page_mask
;
198 size
&= qemu_real_host_page_mask
;
199 if (!size
|| (start_pa
& ~qemu_real_host_page_mask
)) {
203 host_va
= (uintptr_t)memory_region_get_ram_ptr(mr
)
204 + section
->offset_within_region
+ delta
;
205 if (memory_region_is_rom(section
->mr
)) {
206 flags
|= HAX_RAM_INFO_ROM
;
209 /* the kernel module interface uses 32-bit sizes (but we could split...) */
210 g_assert(size
<= UINT32_MAX
);
212 hax_update_mapping(start_pa
, size
, host_va
, flags
);
215 static void hax_region_add(MemoryListener
*listener
,
216 MemoryRegionSection
*section
)
218 memory_region_ref(section
->mr
);
219 hax_process_section(section
, 0);
222 static void hax_region_del(MemoryListener
*listener
,
223 MemoryRegionSection
*section
)
225 hax_process_section(section
, HAX_RAM_INFO_INVALID
);
226 memory_region_unref(section
->mr
);
229 static void hax_transaction_begin(MemoryListener
*listener
)
231 g_assert(QTAILQ_EMPTY(&mappings
));
234 static void hax_transaction_commit(MemoryListener
*listener
)
236 if (!QTAILQ_EMPTY(&mappings
)) {
237 HAXMapping
*entry
, *next
;
240 hax_mapping_dump_list();
242 QTAILQ_FOREACH_SAFE(entry
, &mappings
, entry
, next
) {
243 if (entry
->flags
& HAX_RAM_INFO_INVALID
) {
244 /* for unmapping, put the values expected by the kernel */
245 entry
->flags
= HAX_RAM_INFO_INVALID
;
248 if (hax_set_ram(entry
->start_pa
, entry
->size
,
249 entry
->host_va
, entry
->flags
)) {
250 fprintf(stderr
, "%s: Failed mapping @0x%016" PRIx64
"+0x%"
251 PRIx32
" flags %02x\n", __func__
, entry
->start_pa
,
252 entry
->size
, entry
->flags
);
254 QTAILQ_REMOVE(&mappings
, entry
, entry
);
260 /* currently we fake the dirty bitmap sync, always dirty */
261 static void hax_log_sync(MemoryListener
*listener
,
262 MemoryRegionSection
*section
)
264 MemoryRegion
*mr
= section
->mr
;
266 if (!memory_region_is_ram(mr
)) {
267 /* Skip MMIO regions */
271 memory_region_set_dirty(mr
, 0, int128_get64(section
->size
));
274 static MemoryListener hax_memory_listener
= {
275 .begin
= hax_transaction_begin
,
276 .commit
= hax_transaction_commit
,
277 .region_add
= hax_region_add
,
278 .region_del
= hax_region_del
,
279 .log_sync
= hax_log_sync
,
283 static void hax_ram_block_added(RAMBlockNotifier
*n
, void *host
, size_t size
)
286 * In HAX, QEMU allocates the virtual address, and HAX kernel
287 * populates the memory with physical memory. Currently we have no
288 * paging, so user should make sure enough free memory in advance.
290 if (hax_populate_ram((uint64_t)(uintptr_t)host
, size
) < 0) {
291 fprintf(stderr
, "HAX failed to populate RAM");
296 static struct RAMBlockNotifier hax_ram_notifier
= {
297 .ram_block_added
= hax_ram_block_added
,
300 void hax_memory_init(void)
302 ram_block_notifier_add(&hax_ram_notifier
);
303 memory_listener_register(&hax_memory_listener
, &address_space_memory
);