/*
 * QEMU memory mapping
 *
 * Copyright Fujitsu, Corp. 2011, 2012
 *
 * Authors:
 *     Wen Congyang <wency@cn.fujitsu.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"

#include "sysemu/memory_mapping.h"
#include "exec/memory.h"
#include "exec/address-spaces.h"

//#define DEBUG_GUEST_PHYS_REGION_ADD

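/*
 * Insert @mapping keeping the list sorted by ascending phys_addr: stop at
 * the first entry whose phys_addr is not below the new one and insert in
 * front of it, or append at the tail.
 */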
static void memory_mapping_list_add_mapping_sorted(MemoryMappingList *list,
                                                   MemoryMapping *mapping)
{
    MemoryMapping *p;

    QTAILQ_FOREACH(p, &list->head, next) {
        if (p->phys_addr >= mapping->phys_addr) {
            QTAILQ_INSERT_BEFORE(p, mapping, next);
            return;
        }
    }
    QTAILQ_INSERT_TAIL(&list->head, mapping, next);
}

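/*
 * Allocate a new MemoryMapping, cache it as the list's last_mapping (the
 * fast path tried first by the merge logic below), and insert it in
 * sorted order.
 */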
static void create_new_memory_mapping(MemoryMappingList *list,
                                      hwaddr phys_addr,
                                      hwaddr virt_addr,
                                      ram_addr_t length)
{
    MemoryMapping *memory_mapping;

    memory_mapping = g_malloc(sizeof(MemoryMapping));
    memory_mapping->phys_addr = phys_addr;
    memory_mapping->virt_addr = virt_addr;
    memory_mapping->length = length;
    list->last_mapping = memory_mapping;
    list->num++;
    memory_mapping_list_add_mapping_sorted(list, memory_mapping);
}

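/*
 * Does the new range start exactly where @map ends, in both the physical
 * and the virtual address space?
 */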
static inline bool mapping_contiguous(MemoryMapping *map,
                                      hwaddr phys_addr,
                                      hwaddr virt_addr)
{
    return phys_addr == map->phys_addr + map->length &&
           virt_addr == map->virt_addr + map->length;
}

/*
 * Do [map->phys_addr, map->phys_addr + map->length) and
 * [phys_addr, phys_addr + length) intersect?
 */
static inline bool mapping_have_same_region(MemoryMapping *map,
                                            hwaddr phys_addr,
                                            ram_addr_t length)
{
    return !(phys_addr + length < map->phys_addr ||
             phys_addr >= map->phys_addr + map->length);
}

/*
 * [map->phys_addr, map->phys_addr + map->length) and
 * [phys_addr, phys_addr + length) intersect. Do the virtual addresses
 * within the intersection disagree?
 */
static inline bool mapping_conflict(MemoryMapping *map,
                                    hwaddr phys_addr,
                                    hwaddr virt_addr)
{
    return virt_addr - map->virt_addr != phys_addr - map->phys_addr;
}

/*
 * [map->virt_addr, map->virt_addr + map->length) and
 * [virt_addr, virt_addr + length) intersect, and the physical addresses
 * within the intersection are the same: extend @map to cover the union of
 * the two ranges.
 */
static inline void mapping_merge(MemoryMapping *map,
                                 hwaddr virt_addr,
                                 ram_addr_t length)
{
    if (virt_addr < map->virt_addr) {
        map->length += map->virt_addr - virt_addr;
        map->virt_addr = virt_addr;
    }

    if ((virt_addr + length) >
        (map->virt_addr + map->length)) {
        map->length = virt_addr + length - map->virt_addr;
    }
}

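/*
 * Add [phys_addr, phys_addr + length) to @list. The cached last_mapping is
 * tried first; failing that, the sorted list is scanned for an entry the
 * range is contiguous with or can be merged into. Only when no such entry
 * exists is a new mapping created.
 */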
void memory_mapping_list_add_merge_sorted(MemoryMappingList *list,
                                          hwaddr phys_addr,
                                          hwaddr virt_addr,
                                          ram_addr_t length)
{
    MemoryMapping *memory_mapping, *last_mapping;

    if (QTAILQ_EMPTY(&list->head)) {
        create_new_memory_mapping(list, phys_addr, virt_addr, length);
        return;
    }

    last_mapping = list->last_mapping;
    if (mapping_contiguous(last_mapping, phys_addr, virt_addr)) {
        last_mapping->length += length;
        return;
    }

    QTAILQ_FOREACH(memory_mapping, &list->head, next) {
        if (mapping_contiguous(memory_mapping, phys_addr, virt_addr)) {
            memory_mapping->length += length;
            list->last_mapping = memory_mapping;
            return;
        }

        if (phys_addr + length < memory_mapping->phys_addr) {
            /* create a new region before memory_mapping */
            break;
        }

        if (mapping_have_same_region(memory_mapping, phys_addr, length)) {
            if (mapping_conflict(memory_mapping, phys_addr, virt_addr)) {
                break;
            }

            /* merge this region into memory_mapping */
            mapping_merge(memory_mapping, virt_addr, length);
            list->last_mapping = memory_mapping;
            return;
        }
    }

    /* this region cannot be merged into any existing memory mapping */
    create_new_memory_mapping(list, phys_addr, virt_addr, length);
}

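/* Drop every mapping and reset the list to its freshly initialized state. */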
void memory_mapping_list_free(MemoryMappingList *list)
{
    MemoryMapping *p, *q;

    QTAILQ_FOREACH_SAFE(p, &list->head, next, q) {
        QTAILQ_REMOVE(&list->head, p, next);
        g_free(p);
    }

    list->num = 0;
    list->last_mapping = NULL;
}

void memory_mapping_list_init(MemoryMappingList *list)
{
    list->num = 0;
    list->last_mapping = NULL;
    QTAILQ_INIT(&list->head);
}

void guest_phys_blocks_free(GuestPhysBlockList *list)
{
    GuestPhysBlock *p, *q;

    QTAILQ_FOREACH_SAFE(p, &list->head, next, q) {
        QTAILQ_REMOVE(&list->head, p, next);
        memory_region_unref(p->mr);
        g_free(p);
    }
    list->num = 0;
}

void guest_phys_blocks_init(GuestPhysBlockList *list)
{
    list->num = 0;
    QTAILQ_INIT(&list->head);
}

typedef struct GuestPhysListener {
    GuestPhysBlockList *list;
    MemoryListener listener;
} GuestPhysListener;

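/*
 * Append one RAM section to the block list, coalescing it with the last
 * block when the two are contiguous in both guest-physical and host-virtual
 * address space and are backed by the same MemoryRegion.
 */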
static void guest_phys_block_add_section(GuestPhysListener *g,
                                         MemoryRegionSection *section)
{
    const hwaddr target_start = section->offset_within_address_space;
    const hwaddr target_end = target_start + int128_get64(section->size);
    uint8_t *host_addr = memory_region_get_ram_ptr(section->mr) +
                         section->offset_within_region;
    GuestPhysBlock *predecessor = NULL;

    /* find continuity in guest physical address space */
    if (!QTAILQ_EMPTY(&g->list->head)) {
        hwaddr predecessor_size;

        predecessor = QTAILQ_LAST(&g->list->head);
        predecessor_size = predecessor->target_end - predecessor->target_start;

        /* the memory API guarantees monotonically increasing traversal */
        g_assert(predecessor->target_end <= target_start);

        /* we want continuity in both guest-physical and host-virtual memory */
        if (predecessor->target_end < target_start ||
            predecessor->host_addr + predecessor_size != host_addr ||
            predecessor->mr != section->mr) {
            predecessor = NULL;
        }
    }

    if (predecessor == NULL) {
        /* isolated mapping, allocate it and add it to the list */
        GuestPhysBlock *block = g_malloc0(sizeof *block);

        block->target_start = target_start;
        block->target_end   = target_end;
        block->host_addr    = host_addr;
        block->mr           = section->mr;
        memory_region_ref(section->mr);

        QTAILQ_INSERT_TAIL(&g->list->head, block, next);
        ++g->list->num;
    } else {
        /*
         * expand predecessor until @target_end; predecessor's start doesn't
         * change
         */
        predecessor->target_end = target_end;
    }

#ifdef DEBUG_GUEST_PHYS_REGION_ADD
    fprintf(stderr, "%s: target_start=" TARGET_FMT_plx " target_end="
            TARGET_FMT_plx ": %s (count: %u)\n", __func__, target_start,
            target_end, predecessor ? "joined" : "added", g->list->num);
#endif
}

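/*
 * RamDiscardManager replay callback: adds each populated part of a sparse
 * RAM section as if it were a section of its own.
 */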
static int guest_phys_ram_populate_cb(MemoryRegionSection *section,
                                      void *opaque)
{
    GuestPhysListener *g = opaque;

    guest_phys_block_add_section(g, section);
    return 0;
}

static void guest_phys_blocks_region_add(MemoryListener *listener,
                                         MemoryRegionSection *section)
{
    GuestPhysListener *g = container_of(listener, GuestPhysListener, listener);

    /* we only care about RAM */
    if (!memory_region_is_ram(section->mr) ||
        memory_region_is_ram_device(section->mr) ||
        memory_region_is_nonvolatile(section->mr)) {
        return;
    }

    /* for special sparse regions, only add populated parts */
    if (memory_region_has_ram_discard_manager(section->mr)) {
        RamDiscardManager *rdm;

        rdm = memory_region_get_ram_discard_manager(section->mr);
        ram_discard_manager_replay_populated(rdm, section,
                                             guest_phys_ram_populate_cb, g);
        return;
    }

    guest_phys_block_add_section(g, section);
}

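/*
 * Registering the listener makes the memory API replay region_add for every
 * section of the current address_space_memory flatview, which fills @list;
 * the listener is then removed again right away.
 */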
void guest_phys_blocks_append(GuestPhysBlockList *list)
{
    GuestPhysListener g = { 0 };

    g.list = list;
    g.listener.region_add = &guest_phys_blocks_region_add;
    memory_listener_register(&g.listener, &address_space_memory);
    memory_listener_unregister(&g.listener);
}

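/*
 * Return the first CPU at or after @start_cpu that has paging enabled, or
 * NULL if there is none.
 */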
static CPUState *find_paging_enabled_cpu(CPUState *start_cpu)
{
    CPUState *cpu;

    for (cpu = start_cpu; cpu != NULL; cpu = CPU_NEXT(cpu)) {
        if (cpu_paging_enabled(cpu)) {
            return cpu;
        }
    }

    return NULL;
}

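/*
 * Build the guest's memory mapping list. If any CPU has paging enabled, the
 * page tables of every CPU from the first paging-enabled one onward are
 * walked; otherwise each physical block is mapped one-to-one to itself.
 */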
void qemu_get_guest_memory_mapping(MemoryMappingList *list,
                                   const GuestPhysBlockList *guest_phys_blocks,
                                   Error **errp)
{
    CPUState *cpu, *first_paging_enabled_cpu;
    GuestPhysBlock *block;
    ram_addr_t offset, length;

    first_paging_enabled_cpu = find_paging_enabled_cpu(first_cpu);
    if (first_paging_enabled_cpu) {
        for (cpu = first_paging_enabled_cpu; cpu != NULL;
             cpu = CPU_NEXT(cpu)) {
            Error *err = NULL;
            cpu_get_memory_mapping(cpu, list, &err);
            if (err) {
                error_propagate(errp, err);
                return;
            }
        }
        return;
    }

    /*
     * If the guest doesn't use paging, the virtual address is equal to physical
     * address.
     */
    QTAILQ_FOREACH(block, &guest_phys_blocks->head, next) {
        offset = block->target_start;
        length = block->target_end - block->target_start;
        create_new_memory_mapping(list, offset, offset, length);
    }
}

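/*
 * Variant used when guest virtual addresses are of no interest: one mapping
 * per physical block, with virt_addr left at 0.
 */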
void qemu_get_guest_simple_memory_mapping(MemoryMappingList *list,
                                   const GuestPhysBlockList *guest_phys_blocks)
{
    GuestPhysBlock *block;

    QTAILQ_FOREACH(block, &guest_phys_blocks->head, next) {
        create_new_memory_mapping(list, block->target_start, 0,
                                  block->target_end - block->target_start);
    }
}

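/*
 * Restrict @list to the window [begin, begin + length): mappings entirely
 * outside it are dropped, and mappings straddling either edge are clamped.
 */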
void memory_mapping_filter(MemoryMappingList *list, int64_t begin,
                           int64_t length)
{
    MemoryMapping *cur, *next;

    QTAILQ_FOREACH_SAFE(cur, &list->head, next, next) {
        if (cur->phys_addr >= begin + length ||
            cur->phys_addr + cur->length <= begin) {
            QTAILQ_REMOVE(&list->head, cur, next);
            g_free(cur);
            list->num--;
            continue;
        }

        if (cur->phys_addr < begin) {
            cur->length -= begin - cur->phys_addr;
            if (cur->virt_addr) {
                cur->virt_addr += begin - cur->phys_addr;
            }
            cur->phys_addr = begin;
        }

        if (cur->phys_addr + cur->length > begin + length) {
            cur->length -= cur->phys_addr + cur->length - begin - length;
        }
    }
}