/*
 * QEMU memory mapping
 *
 * Copyright Fujitsu, Corp. 2011, 2012
 *
 * Authors:
 *     Wen Congyang <wency@cn.fujitsu.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu-common.h"
#include "sysemu/memory_mapping.h"
#include "exec/memory.h"
#include "exec/address-spaces.h"
//#define DEBUG_GUEST_PHYS_REGION_ADD
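/*
 * Keep the mapping list sorted by guest physical address: insert @mapping
 * just before the first existing entry whose phys_addr is not lower.
 */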
static void memory_mapping_list_add_mapping_sorted(MemoryMappingList *list,
                                                   MemoryMapping *mapping)
{
    MemoryMapping *p;

    QTAILQ_FOREACH(p, &list->head, next) {
        if (p->phys_addr >= mapping->phys_addr) {
            QTAILQ_INSERT_BEFORE(p, mapping, next);
            return;
        }
    }
    QTAILQ_INSERT_TAIL(&list->head, mapping, next);
}
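/*
 * Allocate a fresh MemoryMapping, remember it as the list's last_mapping
 * (so the next insertion can try to extend it cheaply) and add it in
 * sorted order.
 */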
static void create_new_memory_mapping(MemoryMappingList *list,
                                      hwaddr phys_addr,
                                      hwaddr virt_addr,
                                      ram_addr_t length)
{
    MemoryMapping *memory_mapping;

    memory_mapping = g_malloc(sizeof(MemoryMapping));
    memory_mapping->phys_addr = phys_addr;
    memory_mapping->virt_addr = virt_addr;
    memory_mapping->length = length;
    list->last_mapping = memory_mapping;
    list->num++;
    memory_mapping_list_add_mapping_sorted(list, memory_mapping);
}
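/*
 * Does the new range start exactly where @map ends, in both guest-physical
 * and guest-virtual address space?
 */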
static inline bool mapping_contiguous(MemoryMapping *map,
                                      hwaddr phys_addr,
                                      hwaddr virt_addr)
{
    return phys_addr == map->phys_addr + map->length &&
           virt_addr == map->virt_addr + map->length;
}
/*
 * Do [map->phys_addr, map->phys_addr + map->length) and
 * [phys_addr, phys_addr + length) intersect?
 */
static inline bool mapping_have_same_region(MemoryMapping *map,
                                            hwaddr phys_addr,
                                            ram_addr_t length)
{
    return !(phys_addr + length < map->phys_addr ||
             phys_addr >= map->phys_addr + map->length);
}
/*
 * [map->phys_addr, map->phys_addr + map->length) and
 * [phys_addr, phys_addr + length) intersect.  Do the virtual addresses in
 * the intersection differ, i.e. does the new mapping conflict with @map?
 */
static inline bool mapping_conflict(MemoryMapping *map,
                                    hwaddr phys_addr,
                                    hwaddr virt_addr)
{
    return virt_addr - map->virt_addr != phys_addr - map->phys_addr;
}
/*
 * [map->virt_addr, map->virt_addr + map->length) and
 * [virt_addr, virt_addr + length) intersect, and the physical addresses in
 * the intersection are the same: extend @map to cover the union of the two
 * ranges.
 */
static inline void mapping_merge(MemoryMapping *map,
                                 hwaddr virt_addr,
                                 ram_addr_t length)
{
    if (virt_addr < map->virt_addr) {
        map->length += map->virt_addr - virt_addr;
        map->virt_addr = virt_addr;
    }

    if ((virt_addr + length) >
        (map->virt_addr + map->length)) {
        map->length = virt_addr + length - map->virt_addr;
    }
}
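/*
 * Add a mapping, merging it with an existing one whenever possible: first
 * try to extend last_mapping (the common case when mappings arrive in
 * order), then scan the sorted list for a contiguous or overlapping,
 * non-conflicting entry; otherwise create a new mapping.
 */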
void memory_mapping_list_add_merge_sorted(MemoryMappingList *list,
                                          hwaddr phys_addr,
                                          hwaddr virt_addr,
                                          ram_addr_t length)
{
    MemoryMapping *memory_mapping, *last_mapping;

    if (QTAILQ_EMPTY(&list->head)) {
        create_new_memory_mapping(list, phys_addr, virt_addr, length);
        return;
    }

    last_mapping = list->last_mapping;
    if (last_mapping) {
        if (mapping_contiguous(last_mapping, phys_addr, virt_addr)) {
            last_mapping->length += length;
            return;
        }
    }

    QTAILQ_FOREACH(memory_mapping, &list->head, next) {
        if (mapping_contiguous(memory_mapping, phys_addr, virt_addr)) {
            memory_mapping->length += length;
            list->last_mapping = memory_mapping;
            return;
        }

        if (phys_addr + length < memory_mapping->phys_addr) {
            /* create a new region before memory_mapping */
            break;
        }

        if (mapping_have_same_region(memory_mapping, phys_addr, length)) {
            if (mapping_conflict(memory_mapping, phys_addr, virt_addr)) {
                break;
            }

            /* merge this region into memory_mapping */
            mapping_merge(memory_mapping, virt_addr, length);
            list->last_mapping = memory_mapping;
            return;
        }
    }

    /* this region cannot be merged into any existing memory mapping */
    create_new_memory_mapping(list, phys_addr, virt_addr, length);
}
void memory_mapping_list_free(MemoryMappingList *list)
{
    MemoryMapping *p, *q;

    QTAILQ_FOREACH_SAFE(p, &list->head, next, q) {
        QTAILQ_REMOVE(&list->head, p, next);
        g_free(p);
    }

    list->num = 0;
    list->last_mapping = NULL;
}
void memory_mapping_list_init(MemoryMappingList *list)
{
    list->num = 0;
    list->last_mapping = NULL;
    QTAILQ_INIT(&list->head);
}
void guest_phys_blocks_free(GuestPhysBlockList *list)
{
    GuestPhysBlock *p, *q;

    QTAILQ_FOREACH_SAFE(p, &list->head, next, q) {
        QTAILQ_REMOVE(&list->head, p, next);
        memory_region_unref(p->mr);
        g_free(p);
    }
    list->num = 0;
}
void guest_phys_blocks_init(GuestPhysBlockList *list)
{
    list->num = 0;
    QTAILQ_INIT(&list->head);
}
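/* Pairs the MemoryListener callback state with the block list it fills in. */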
typedef struct GuestPhysListener {
    GuestPhysBlockList *list;
    MemoryListener listener;
} GuestPhysListener;
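/*
 * MemoryListener callback: record one RAM section, coalescing it with the
 * previous block when the two are contiguous in both guest-physical and
 * host-virtual address space.
 */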
static void guest_phys_blocks_region_add(MemoryListener *listener,
                                         MemoryRegionSection *section)
{
    GuestPhysListener *g;
    uint64_t section_size;
    hwaddr target_start, target_end;
    uint8_t *host_addr;
    GuestPhysBlock *predecessor;

    /* we only care about RAM */
    if (!memory_region_is_ram(section->mr) ||
        memory_region_is_ram_device(section->mr) ||
        memory_region_is_nonvolatile(section->mr)) {
        return;
    }

    g            = container_of(listener, GuestPhysListener, listener);
    section_size = int128_get64(section->size);
    target_start = section->offset_within_address_space;
    target_end   = target_start + section_size;
    host_addr    = memory_region_get_ram_ptr(section->mr) +
                   section->offset_within_region;
    predecessor  = NULL;

    /* find continuity in guest physical address space */
    if (!QTAILQ_EMPTY(&g->list->head)) {
        hwaddr predecessor_size;

        predecessor = QTAILQ_LAST(&g->list->head);
        predecessor_size = predecessor->target_end - predecessor->target_start;

        /* the memory API guarantees monotonically increasing traversal */
        g_assert(predecessor->target_end <= target_start);

        /* we want continuity in both guest-physical and host-virtual memory */
        if (predecessor->target_end < target_start ||
            predecessor->host_addr + predecessor_size != host_addr) {
            predecessor = NULL;
        }
    }

    if (predecessor == NULL) {
        /* isolated mapping, allocate it and add it to the list */
        GuestPhysBlock *block = g_malloc0(sizeof *block);

        block->target_start = target_start;
        block->target_end   = target_end;
        block->host_addr    = host_addr;
        block->mr           = section->mr;
        memory_region_ref(section->mr);

        QTAILQ_INSERT_TAIL(&g->list->head, block, next);
        ++g->list->num;
    } else {
        /* expand predecessor until @target_end; predecessor's start doesn't
         * change
         */
        predecessor->target_end = target_end;
    }

#ifdef DEBUG_GUEST_PHYS_REGION_ADD
    fprintf(stderr, "%s: target_start=" TARGET_FMT_plx " target_end="
            TARGET_FMT_plx ": %s (count: %u)\n", __func__, target_start,
            target_end, predecessor ? "joined" : "added", g->list->num);
#endif
}
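/*
 * Populate @list with the guest's current RAM layout.  Registering the
 * transient listener replays every existing memory section through
 * guest_phys_blocks_region_add(); the listener is removed again right away.
 *
 * Typical usage (a sketch, as in the dump code):
 *
 *     GuestPhysBlockList blocks;
 *
 *     guest_phys_blocks_init(&blocks);
 *     guest_phys_blocks_append(&blocks);
 *     ... walk blocks.head ...
 *     guest_phys_blocks_free(&blocks);
 */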
void guest_phys_blocks_append(GuestPhysBlockList *list)
{
    GuestPhysListener g = { 0 };

    g.list = list;
    g.listener.region_add = &guest_phys_blocks_region_add;
    memory_listener_register(&g.listener, &address_space_memory);
    memory_listener_unregister(&g.listener);
}
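/* Return the first vCPU from @start_cpu onwards that has paging enabled. */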
static CPUState *find_paging_enabled_cpu(CPUState *start_cpu)
{
    CPUState *cpu;

    for (cpu = start_cpu; cpu != NULL; cpu = CPU_NEXT(cpu)) {
        if (cpu_paging_enabled(cpu)) {
            return cpu;
        }
    }

    return NULL;
}
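/*
 * Build the guest's virtual-to-physical mapping list.  If at least one vCPU
 * has paging enabled, collect the mappings of every such vCPU; otherwise
 * fall back to an identity mapping of each guest physical block.
 */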
void qemu_get_guest_memory_mapping(MemoryMappingList *list,
                                   const GuestPhysBlockList *guest_phys_blocks,
                                   Error **errp)
{
    CPUState *cpu, *first_paging_enabled_cpu;
    GuestPhysBlock *block;
    ram_addr_t offset, length;

    first_paging_enabled_cpu = find_paging_enabled_cpu(first_cpu);
    if (first_paging_enabled_cpu) {
        for (cpu = first_paging_enabled_cpu; cpu != NULL;
             cpu = CPU_NEXT(cpu)) {
            Error *err = NULL;
            cpu_get_memory_mapping(cpu, list, &err);
            if (err) {
                error_propagate(errp, err);
                return;
            }
        }
        return;
    }

    /*
     * If the guest doesn't use paging, the virtual address is equal to physical
     * address.
     */
    QTAILQ_FOREACH(block, &guest_phys_blocks->head, next) {
        offset = block->target_start;
        length = block->target_end - block->target_start;
        create_new_memory_mapping(list, offset, offset, length);
    }
}
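/*
 * Like above, but ignore guest paging entirely: create one mapping per
 * physical block and leave the virtual address as 0.
 */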
void qemu_get_guest_simple_memory_mapping(MemoryMappingList *list,
                                   const GuestPhysBlockList *guest_phys_blocks)
{
    GuestPhysBlock *block;

    QTAILQ_FOREACH(block, &guest_phys_blocks->head, next) {
        create_new_memory_mapping(list, block->target_start, 0,
                                  block->target_end - block->target_start);
    }
}
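/*
 * Restrict the list to the physical range [begin, begin + length): drop
 * mappings that lie entirely outside it and clip mappings that straddle
 * either end.
 */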
void memory_mapping_filter(MemoryMappingList *list, int64_t begin,
                           int64_t length)
{
    MemoryMapping *cur, *next;

    QTAILQ_FOREACH_SAFE(cur, &list->head, next, next) {
        if (cur->phys_addr >= begin + length ||
            cur->phys_addr + cur->length <= begin) {
            QTAILQ_REMOVE(&list->head, cur, next);
            g_free(cur);
            list->num--;
            continue;
        }

        if (cur->phys_addr < begin) {
            cur->length -= begin - cur->phys_addr;
            if (cur->virt_addr) {
                cur->virt_addr += begin - cur->phys_addr;
            }
            cur->phys_addr = begin;
        }

        if (cur->phys_addr + cur->length > begin + length) {
            cur->length -= cur->phys_addr + cur->length - begin - length;
        }
    }
}