2 * HAX memory mapping operations
4 * Copyright (c) 2015-16 Intel Corporation
5 * Copyright 2016 Google, Inc.
7 * This work is licensed under the terms of the GNU GPL, version 2. See
8 * the COPYING file in the top-level directory.
11 #include "qemu/osdep.h"
13 #include "exec/address-spaces.h"
14 #include "exec/exec-all.h"
16 #include "target/i386/hax-i386.h"
17 #include "qemu/queue.h"
#define DEBUG_HAX_MEM 0

/*
 * Debug trace macro: always compiled (so format strings stay checked),
 * but dead code when DEBUG_HAX_MEM is 0.
 */
#define DPRINTF(fmt, ...) \
    do { \
        if (DEBUG_HAX_MEM) { \
            fprintf(stdout, fmt, ## __VA_ARGS__); \
        } \
    } while (0)
29 * HAXMapping: describes a pending guest physical memory mapping
31 * @start_pa: a guest physical address marking the start of the region; must be
33 * @size: the size of the region in bytes; must be
35 * @host_va: the host virtual address of the start of the mapping
36 * @flags: mapping parameters e.g. HAX_RAM_INFO_ROM or HAX_RAM_INFO_INVALID
37 * @entry: additional fields for linking #HAXMapping instances together
39 typedef struct HAXMapping
{
44 QTAILQ_ENTRY(HAXMapping
) entry
;
48 * A doubly-linked list (actually a tail queue) of the pending page mappings
49 * for the ongoing memory transaction.
51 * It is used to optimize the number of page mapping updates done through the
52 * kernel module. For example, it's effective when a driver is digging an MMIO
53 * hole inside an existing memory mapping. It will get a deletion of the whole
54 * region, then the addition of the 2 remaining RAM areas around the hole and
55 * finally the memory transaction commit. During the commit, it will effectively
56 * send to the kernel only the removal of the pages from the MMIO hole after
57 * having computed locally the result of the deletion and additions.
59 static QTAILQ_HEAD(HAXMappingListHead
, HAXMapping
) mappings
=
60 QTAILQ_HEAD_INITIALIZER(mappings
);
63 * hax_mapping_dump_list: dumps @mappings to stdout (for debugging)
65 static void hax_mapping_dump_list(void)
69 DPRINTF("%s updates:\n", __func__
);
70 QTAILQ_FOREACH(entry
, &mappings
, entry
) {
71 DPRINTF("\t%c 0x%016" PRIx64
"->0x%016" PRIx64
" VA 0x%016" PRIx64
72 "%s\n", entry
->flags
& HAX_RAM_INFO_INVALID
? '-' : '+',
73 entry
->start_pa
, entry
->start_pa
+ entry
->size
, entry
->host_va
,
74 entry
->flags
& HAX_RAM_INFO_ROM
? " ROM" : "");
78 static void hax_insert_mapping_before(HAXMapping
*next
, uint64_t start_pa
,
79 uint32_t size
, uint64_t host_va
,
84 entry
= g_malloc0(sizeof(*entry
));
85 entry
->start_pa
= start_pa
;
87 entry
->host_va
= host_va
;
90 QTAILQ_INSERT_TAIL(&mappings
, entry
, entry
);
92 QTAILQ_INSERT_BEFORE(next
, entry
, entry
);
96 static bool hax_mapping_is_opposite(HAXMapping
*entry
, uint64_t host_va
,
99 /* removed then added without change for the read-only flag */
100 bool nop_flags
= (entry
->flags
^ flags
) == HAX_RAM_INFO_INVALID
;
102 return (entry
->host_va
== host_va
) && nop_flags
;
105 static void hax_update_mapping(uint64_t start_pa
, uint32_t size
,
106 uint64_t host_va
, uint8_t flags
)
108 uint64_t end_pa
= start_pa
+ size
;
110 HAXMapping
*entry
, *next
;
112 QTAILQ_FOREACH_SAFE(entry
, &mappings
, entry
, next
) {
113 if (start_pa
>= entry
->start_pa
+ entry
->size
) {
116 if (start_pa
< entry
->start_pa
) {
117 chunk_sz
= end_pa
<= entry
->start_pa
? size
118 : entry
->start_pa
- start_pa
;
119 hax_insert_mapping_before(entry
, start_pa
, chunk_sz
,
121 start_pa
+= chunk_sz
;
125 chunk_sz
= MIN(size
, entry
->size
);
127 bool nop
= hax_mapping_is_opposite(entry
, host_va
, flags
);
128 bool partial
= chunk_sz
< entry
->size
;
130 /* remove the beginning of the existing chunk */
131 entry
->start_pa
+= chunk_sz
;
132 entry
->host_va
+= chunk_sz
;
133 entry
->size
-= chunk_sz
;
135 hax_insert_mapping_before(entry
, start_pa
, chunk_sz
,
138 } else { /* affects the full mapping entry */
139 if (nop
) { /* no change to this mapping, remove it */
140 QTAILQ_REMOVE(&mappings
, entry
, entry
);
142 } else { /* update mapping properties */
143 entry
->host_va
= host_va
;
144 entry
->flags
= flags
;
147 start_pa
+= chunk_sz
;
151 if (!size
) { /* we are done */
155 if (size
) { /* add the leftover */
156 hax_insert_mapping_before(NULL
, start_pa
, size
, host_va
, flags
);
160 static void hax_process_section(MemoryRegionSection
*section
, uint8_t flags
)
162 MemoryRegion
*mr
= section
->mr
;
163 hwaddr start_pa
= section
->offset_within_address_space
;
164 ram_addr_t size
= int128_get64(section
->size
);
168 /* We only care about RAM pages */
169 if (!memory_region_is_ram(mr
)) {
173 /* Adjust start_pa and size so that they are page-aligned. (Cf
174 * kvm_set_phys_mem() in kvm-all.c).
176 delta
= qemu_real_host_page_size
- (start_pa
& ~qemu_real_host_page_mask
);
177 delta
&= ~qemu_real_host_page_mask
;
183 size
&= qemu_real_host_page_mask
;
184 if (!size
|| (start_pa
& ~qemu_real_host_page_mask
)) {
188 host_va
= (uintptr_t)memory_region_get_ram_ptr(mr
)
189 + section
->offset_within_region
+ delta
;
190 if (memory_region_is_rom(section
->mr
)) {
191 flags
|= HAX_RAM_INFO_ROM
;
194 /* the kernel module interface uses 32-bit sizes (but we could split...) */
195 g_assert(size
<= UINT32_MAX
);
197 hax_update_mapping(start_pa
, size
, host_va
, flags
);
200 static void hax_region_add(MemoryListener
*listener
,
201 MemoryRegionSection
*section
)
203 memory_region_ref(section
->mr
);
204 hax_process_section(section
, 0);
207 static void hax_region_del(MemoryListener
*listener
,
208 MemoryRegionSection
*section
)
210 hax_process_section(section
, HAX_RAM_INFO_INVALID
);
211 memory_region_unref(section
->mr
);
214 static void hax_transaction_begin(MemoryListener
*listener
)
216 g_assert(QTAILQ_EMPTY(&mappings
));
219 static void hax_transaction_commit(MemoryListener
*listener
)
221 if (!QTAILQ_EMPTY(&mappings
)) {
222 HAXMapping
*entry
, *next
;
225 hax_mapping_dump_list();
227 QTAILQ_FOREACH_SAFE(entry
, &mappings
, entry
, next
) {
228 if (entry
->flags
& HAX_RAM_INFO_INVALID
) {
229 /* for unmapping, put the values expected by the kernel */
230 entry
->flags
= HAX_RAM_INFO_INVALID
;
233 if (hax_set_ram(entry
->start_pa
, entry
->size
,
234 entry
->host_va
, entry
->flags
)) {
235 fprintf(stderr
, "%s: Failed mapping @0x%016" PRIx64
"+0x%"
236 PRIx32
" flags %02x\n", __func__
, entry
->start_pa
,
237 entry
->size
, entry
->flags
);
239 QTAILQ_REMOVE(&mappings
, entry
, entry
);
245 /* currently we fake the dirty bitmap sync, always dirty */
246 static void hax_log_sync(MemoryListener
*listener
,
247 MemoryRegionSection
*section
)
249 MemoryRegion
*mr
= section
->mr
;
251 if (!memory_region_is_ram(mr
)) {
252 /* Skip MMIO regions */
256 memory_region_set_dirty(mr
, 0, int128_get64(section
->size
));
259 static MemoryListener hax_memory_listener
= {
260 .begin
= hax_transaction_begin
,
261 .commit
= hax_transaction_commit
,
262 .region_add
= hax_region_add
,
263 .region_del
= hax_region_del
,
264 .log_sync
= hax_log_sync
,
268 static void hax_ram_block_added(RAMBlockNotifier
*n
, void *host
, size_t size
)
271 * In HAX, QEMU allocates the virtual address, and HAX kernel
272 * populates the memory with physical memory. Currently we have no
273 * paging, so user should make sure enough free memory in advance.
275 if (hax_populate_ram((uint64_t)(uintptr_t)host
, size
) < 0) {
276 fprintf(stderr
, "HAX failed to populate RAM");
281 static struct RAMBlockNotifier hax_ram_notifier
= {
282 .ram_block_added
= hax_ram_block_added
,
285 void hax_memory_init(void)
287 ram_block_notifier_add(&hax_ram_notifier
);
288 memory_listener_register(&hax_memory_listener
, &address_space_memory
);