memory: implement dirty tracking
[qemu.git] / memory.c
/*
 * Physical memory management
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Avi Kivity <avi@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 */

#include "memory.h"
#include <assert.h>
typedef struct AddrRange AddrRange;

struct AddrRange {
    uint64_t start;
    uint64_t size;
};

static AddrRange addrrange_make(uint64_t start, uint64_t size)
{
    return (AddrRange) { start, size };
}

static bool addrrange_equal(AddrRange r1, AddrRange r2)
{
    return r1.start == r2.start && r1.size == r2.size;
}

static uint64_t addrrange_end(AddrRange r)
{
    return r.start + r.size;
}

static AddrRange addrrange_shift(AddrRange range, int64_t delta)
{
    range.start += delta;
    return range;
}

static bool addrrange_intersects(AddrRange r1, AddrRange r2)
{
    return (r1.start >= r2.start && r1.start < r2.start + r2.size)
        || (r2.start >= r1.start && r2.start < r1.start + r1.size);
}

static AddrRange addrrange_intersection(AddrRange r1, AddrRange r2)
{
    uint64_t start = MAX(r1.start, r2.start);
    /* off-by-one arithmetic to prevent overflow */
    uint64_t end = MIN(addrrange_end(r1) - 1, addrrange_end(r2) - 1);
    return addrrange_make(start, end - start + 1);
}
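/* Worked example of the off-by-one arithmetic above (illustrative values):
 * for a range covering the top page of the address space, start
 * 0xFFFFFFFFFFFFF000 and size 0x1000, addrrange_end() is 2^64 and wraps to 0.
 * Comparing end - 1 (0xFFFFFFFFFFFFFFFF) instead keeps the MIN() free of
 * wrap-around, and the intersection size is recovered as end - start + 1.
 */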
struct CoalescedMemoryRange {
    AddrRange addr;
    QTAILQ_ENTRY(CoalescedMemoryRange) link;
};

typedef struct FlatRange FlatRange;
typedef struct FlatView FlatView;

/* Range of memory in the global map.  Addresses are absolute. */
struct FlatRange {
    MemoryRegion *mr;
    target_phys_addr_t offset_in_region;
    AddrRange addr;
    uint8_t dirty_log_mask;
};

/* Flattened global view of current active memory hierarchy.  Kept in sorted
 * order.
 */
struct FlatView {
    FlatRange *ranges;
    unsigned nr;
    unsigned nr_allocated;
};

#define FOR_EACH_FLAT_RANGE(var, view)          \
    for (var = (view)->ranges; var < (view)->ranges + (view)->nr; ++var)

static FlatView current_memory_map;
static MemoryRegion *root_memory_region;
static bool flatrange_equal(FlatRange *a, FlatRange *b)
{
    return a->mr == b->mr
        && addrrange_equal(a->addr, b->addr)
        && a->offset_in_region == b->offset_in_region;
}

static void flatview_init(FlatView *view)
{
    view->ranges = NULL;
    view->nr = 0;
    view->nr_allocated = 0;
}

/* Insert a range into a given position.  Caller is responsible for maintaining
 * sorting order.
 */
static void flatview_insert(FlatView *view, unsigned pos, FlatRange *range)
{
    if (view->nr == view->nr_allocated) {
        view->nr_allocated = MAX(2 * view->nr, 10);
        view->ranges = qemu_realloc(view->ranges,
                                    view->nr_allocated * sizeof(*view->ranges));
    }
    memmove(view->ranges + pos + 1, view->ranges + pos,
            (view->nr - pos) * sizeof(FlatRange));
    view->ranges[pos] = *range;
    ++view->nr;
}

static void flatview_destroy(FlatView *view)
{
    qemu_free(view->ranges);
}
/* Render a memory region into the global view.  Ranges in @view obscure
 * ranges in @mr.
 */
static void render_memory_region(FlatView *view,
                                 MemoryRegion *mr,
                                 target_phys_addr_t base,
                                 AddrRange clip)
{
    MemoryRegion *subregion;
    unsigned i;
    target_phys_addr_t offset_in_region;
    uint64_t remain;
    uint64_t now;
    FlatRange fr;
    AddrRange tmp;

    base += mr->addr;

    tmp = addrrange_make(base, mr->size);

    if (!addrrange_intersects(tmp, clip)) {
        return;
    }

    clip = addrrange_intersection(tmp, clip);

    if (mr->alias) {
        base -= mr->alias->addr;
        base -= mr->alias_offset;
        render_memory_region(view, mr->alias, base, clip);
        return;
    }

    /* Render subregions in priority order. */
    QTAILQ_FOREACH(subregion, &mr->subregions, subregions_link) {
        render_memory_region(view, subregion, base, clip);
    }

    if (!mr->has_ram_addr) {
        return;
    }

    offset_in_region = clip.start - base;
    base = clip.start;
    remain = clip.size;

    /* Render the region itself into any gaps left by the current view. */
    for (i = 0; i < view->nr && remain; ++i) {
        if (base >= addrrange_end(view->ranges[i].addr)) {
            continue;
        }
        if (base < view->ranges[i].addr.start) {
            now = MIN(remain, view->ranges[i].addr.start - base);
            fr.mr = mr;
            fr.offset_in_region = offset_in_region;
            fr.addr = addrrange_make(base, now);
            fr.dirty_log_mask = mr->dirty_log_mask;
            flatview_insert(view, i, &fr);
            ++i;
            base += now;
            offset_in_region += now;
            remain -= now;
        }
        if (base == view->ranges[i].addr.start) {
            now = MIN(remain, view->ranges[i].addr.size);
            base += now;
            offset_in_region += now;
            remain -= now;
        }
    }
    if (remain) {
        fr.mr = mr;
        fr.offset_in_region = offset_in_region;
        fr.addr = addrrange_make(base, remain);
        fr.dirty_log_mask = mr->dirty_log_mask;
        flatview_insert(view, i, &fr);
    }
}
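/* Illustrative walk-through of the gap-filling loop above (hypothetical
 * layout): if the view already holds a range [0x1000, 0x2000) rendered from a
 * higher-priority sibling, and this region clips to [0x0, 0x3000), the loop
 * first emits [0x0, 0x1000), then skips over the obscured [0x1000, 0x2000),
 * and the trailing "if (remain)" emits the final [0x2000, 0x3000).
 */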
/* Render a memory topology into a list of disjoint absolute ranges. */
static FlatView generate_memory_topology(MemoryRegion *mr)
{
    FlatView view;

    flatview_init(&view);

    render_memory_region(&view, mr, 0, addrrange_make(0, UINT64_MAX));

    return view;
}
static void memory_region_update_topology(void)
{
    FlatView old_view = current_memory_map;
    FlatView new_view = generate_memory_topology(root_memory_region);
    unsigned iold, inew;
    FlatRange *frold, *frnew;
    ram_addr_t phys_offset, region_offset;

    /* Generate a symmetric difference of the old and new memory maps.
     * Kill ranges in the old map, and instantiate ranges in the new map.
     */
    iold = inew = 0;
    while (iold < old_view.nr || inew < new_view.nr) {
        if (iold < old_view.nr) {
            frold = &old_view.ranges[iold];
        } else {
            frold = NULL;
        }
        if (inew < new_view.nr) {
            frnew = &new_view.ranges[inew];
        } else {
            frnew = NULL;
        }

        if (frold
            && (!frnew
                || frold->addr.start < frnew->addr.start
                || (frold->addr.start == frnew->addr.start
                    && !flatrange_equal(frold, frnew)))) {
            /* In old, but (not in new, or in new but attributes changed). */

            cpu_register_physical_memory(frold->addr.start, frold->addr.size,
                                         IO_MEM_UNASSIGNED);
            ++iold;
        } else if (frold && frnew && flatrange_equal(frold, frnew)) {
            /* In both (logging may have changed) */

            if (frold->dirty_log_mask && !frnew->dirty_log_mask) {
                cpu_physical_log_stop(frnew->addr.start, frnew->addr.size);
            } else if (frnew->dirty_log_mask && !frold->dirty_log_mask) {
                cpu_physical_log_start(frnew->addr.start, frnew->addr.size);
            }

            ++iold;
            ++inew;
        } else {
            /* In new */

            phys_offset = frnew->mr->ram_addr;
            region_offset = frnew->offset_in_region;
            /* cpu_register_physical_memory_log() wants region_offset for
             * mmio, but prefers offsetting phys_offset for RAM.  Humour it.
             */
            if ((phys_offset & ~TARGET_PAGE_MASK) <= IO_MEM_ROM) {
                phys_offset += region_offset;
                region_offset = 0;
            }

            cpu_register_physical_memory_log(frnew->addr.start,
                                             frnew->addr.size,
                                             phys_offset,
                                             region_offset,
                                             frnew->dirty_log_mask);
            ++inew;
        }
    }
    current_memory_map = new_view;
    flatview_destroy(&old_view);
}
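/* Example of the symmetric-difference walk above (hypothetical views):
 * old = { A:[0x0, 0x1000) },  new = { A:[0x0, 0x1000) with logging enabled,
 *                                     B:[0x2000, 0x3000) }.
 * Pass 1: frold and frnew both describe A and compare flatrange_equal(), so
 * only cpu_physical_log_start() runs; both indexes advance.
 * Pass 2: frold is NULL, so B is registered as a brand-new range.
 */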
void memory_region_init(MemoryRegion *mr,
                        const char *name,
                        uint64_t size)
{
    mr->ops = NULL;
    mr->parent = NULL;
    mr->size = size;
    mr->addr = 0;
    mr->offset = 0;
    mr->has_ram_addr = false;
    mr->priority = 0;
    mr->may_overlap = false;
    mr->alias = NULL;
    QTAILQ_INIT(&mr->subregions);
    memset(&mr->subregions_link, 0, sizeof mr->subregions_link);
    QTAILQ_INIT(&mr->coalesced);
    mr->name = qemu_strdup(name);
    mr->dirty_log_mask = 0;
}
static bool memory_region_access_valid(MemoryRegion *mr,
                                       target_phys_addr_t addr,
                                       unsigned size)
{
    if (!mr->ops->valid.unaligned && (addr & (size - 1))) {
        return false;
    }

    /* Treat a zero max_access_size as "all sizes valid", for compatibility */
    if (!mr->ops->valid.max_access_size) {
        return true;
    }

    if (size > mr->ops->valid.max_access_size
        || size < mr->ops->valid.min_access_size) {
        return false;
    }
    return true;
}
static uint32_t memory_region_read_thunk_n(void *_mr,
                                           target_phys_addr_t addr,
                                           unsigned size)
{
    MemoryRegion *mr = _mr;
    unsigned access_size, access_size_min, access_size_max;
    uint64_t access_mask;
    uint32_t data = 0, tmp;
    unsigned i;

    if (!memory_region_access_valid(mr, addr, size)) {
        return -1U; /* FIXME: better signalling */
    }

    /* FIXME: support unaligned access */

    access_size_min = mr->ops->impl.min_access_size;
    if (!access_size_min) {
        access_size_min = 1;
    }
    access_size_max = mr->ops->impl.max_access_size;
    if (!access_size_max) {
        access_size_max = 4;
    }
    access_size = MAX(MIN(size, access_size_max), access_size_min);
    access_mask = -1ULL >> (64 - access_size * 8);
    addr += mr->offset;
    for (i = 0; i < size; i += access_size) {
        /* FIXME: big-endian support */
        tmp = mr->ops->read(mr->opaque, addr + i, access_size);
        data |= (tmp & access_mask) << (i * 8);
    }

    return data;
}
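/* Worked example of the access splitting above (illustrative): a 4-byte read
 * from a region whose impl.max_access_size is 1 becomes four 1-byte calls to
 * ops->read() at addr+0 .. addr+3; each result is masked to 8 bits and
 * shifted into place (little-endian assembly only; big-endian is still a
 * FIXME, as noted in the loop).
 */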
static void memory_region_write_thunk_n(void *_mr,
                                        target_phys_addr_t addr,
                                        unsigned size,
                                        uint64_t data)
{
    MemoryRegion *mr = _mr;
    unsigned access_size, access_size_min, access_size_max;
    uint64_t access_mask;
    unsigned i;

    if (!memory_region_access_valid(mr, addr, size)) {
        return; /* FIXME: better signalling */
    }

    /* FIXME: support unaligned access */

    access_size_min = mr->ops->impl.min_access_size;
    if (!access_size_min) {
        access_size_min = 1;
    }
    access_size_max = mr->ops->impl.max_access_size;
    if (!access_size_max) {
        access_size_max = 4;
    }
    access_size = MAX(MIN(size, access_size_max), access_size_min);
    access_mask = -1ULL >> (64 - access_size * 8);
    addr += mr->offset;
    for (i = 0; i < size; i += access_size) {
        /* FIXME: big-endian support */
        mr->ops->write(mr->opaque, addr + i, (data >> (i * 8)) & access_mask,
                       access_size);
    }
}
static uint32_t memory_region_read_thunk_b(void *mr, target_phys_addr_t addr)
{
    return memory_region_read_thunk_n(mr, addr, 1);
}

static uint32_t memory_region_read_thunk_w(void *mr, target_phys_addr_t addr)
{
    return memory_region_read_thunk_n(mr, addr, 2);
}

static uint32_t memory_region_read_thunk_l(void *mr, target_phys_addr_t addr)
{
    return memory_region_read_thunk_n(mr, addr, 4);
}

static void memory_region_write_thunk_b(void *mr, target_phys_addr_t addr,
                                        uint32_t data)
{
    memory_region_write_thunk_n(mr, addr, 1, data);
}

static void memory_region_write_thunk_w(void *mr, target_phys_addr_t addr,
                                        uint32_t data)
{
    memory_region_write_thunk_n(mr, addr, 2, data);
}

static void memory_region_write_thunk_l(void *mr, target_phys_addr_t addr,
                                        uint32_t data)
{
    memory_region_write_thunk_n(mr, addr, 4, data);
}

static CPUReadMemoryFunc * const memory_region_read_thunk[] = {
    memory_region_read_thunk_b,
    memory_region_read_thunk_w,
    memory_region_read_thunk_l,
};

static CPUWriteMemoryFunc * const memory_region_write_thunk[] = {
    memory_region_write_thunk_b,
    memory_region_write_thunk_w,
    memory_region_write_thunk_l,
};
void memory_region_init_io(MemoryRegion *mr,
                           const MemoryRegionOps *ops,
                           void *opaque,
                           const char *name,
                           uint64_t size)
{
    memory_region_init(mr, name, size);
    mr->ops = ops;
    mr->opaque = opaque;
    mr->has_ram_addr = true;
    mr->ram_addr = cpu_register_io_memory(memory_region_read_thunk,
                                          memory_region_write_thunk,
                                          mr,
                                          mr->ops->endianness);
}
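/* Usage sketch (hypothetical device; foo_ops, foo_read, foo_write and the
 * FooState pointer s are illustrative and not defined in this file):
 *
 *     static const MemoryRegionOps foo_ops = {
 *         .read = foo_read,
 *         .write = foo_write,
 *         .endianness = DEVICE_NATIVE_ENDIAN,
 *     };
 *
 *     memory_region_init_io(&s->mmio, &foo_ops, s, "foo-mmio", 0x1000);
 *     memory_region_add_subregion(parent_region, 0x10000000, &s->mmio);
 */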
void memory_region_init_ram(MemoryRegion *mr,
                            DeviceState *dev,
                            const char *name,
                            uint64_t size)
{
    memory_region_init(mr, name, size);
    mr->has_ram_addr = true;
    mr->ram_addr = qemu_ram_alloc(dev, name, size);
}

void memory_region_init_ram_ptr(MemoryRegion *mr,
                                DeviceState *dev,
                                const char *name,
                                uint64_t size,
                                void *ptr)
{
    memory_region_init(mr, name, size);
    mr->has_ram_addr = true;
    mr->ram_addr = qemu_ram_alloc_from_ptr(dev, name, size, ptr);
}

void memory_region_init_alias(MemoryRegion *mr,
                              const char *name,
                              MemoryRegion *orig,
                              target_phys_addr_t offset,
                              uint64_t size)
{
    memory_region_init(mr, name, size);
    mr->alias = orig;
    mr->alias_offset = offset;
}
void memory_region_destroy(MemoryRegion *mr)
{
    assert(QTAILQ_EMPTY(&mr->subregions));
    memory_region_clear_coalescing(mr);
    qemu_free((char *)mr->name);
}

uint64_t memory_region_size(MemoryRegion *mr)
{
    return mr->size;
}

void memory_region_set_offset(MemoryRegion *mr, target_phys_addr_t offset)
{
    mr->offset = offset;
}
void memory_region_set_log(MemoryRegion *mr, bool log, unsigned client)
{
    uint8_t mask = 1 << client;

    mr->dirty_log_mask = (mr->dirty_log_mask & ~mask) | (log * mask);
    memory_region_update_topology();
}

bool memory_region_get_dirty(MemoryRegion *mr, target_phys_addr_t addr,
                             unsigned client)
{
    assert(mr->has_ram_addr);
    return cpu_physical_memory_get_dirty(mr->ram_addr + addr, 1 << client);
}

void memory_region_set_dirty(MemoryRegion *mr, target_phys_addr_t addr)
{
    assert(mr->has_ram_addr);
    cpu_physical_memory_set_dirty(mr->ram_addr + addr);
}

void memory_region_sync_dirty_bitmap(MemoryRegion *mr)
{
    FlatRange *fr;

    FOR_EACH_FLAT_RANGE(fr, &current_memory_map) {
        if (fr->mr == mr) {
            cpu_physical_sync_dirty_bitmap(fr->addr.start,
                                           fr->addr.start + fr->addr.size);
        }
    }
}

void memory_region_set_readonly(MemoryRegion *mr, bool readonly)
{
    /* FIXME */
}

void memory_region_reset_dirty(MemoryRegion *mr, target_phys_addr_t addr,
                               target_phys_addr_t size, unsigned client)
{
    assert(mr->has_ram_addr);
    cpu_physical_memory_reset_dirty(mr->ram_addr + addr,
                                    mr->ram_addr + addr + size,
                                    1 << client);
}
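/* Dirty-tracking usage sketch (hypothetical framebuffer device; the vram
 * region, the client index and redraw() are illustrative, not defined here):
 *
 *     memory_region_set_log(&s->vram, true, client);
 *     ...
 *     memory_region_sync_dirty_bitmap(&s->vram);
 *     if (memory_region_get_dirty(&s->vram, page, client)) {
 *         redraw(s, page);
 *         memory_region_reset_dirty(&s->vram, page, TARGET_PAGE_SIZE, client);
 *     }
 */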
void *memory_region_get_ram_ptr(MemoryRegion *mr)
{
    if (mr->alias) {
        return memory_region_get_ram_ptr(mr->alias) + mr->alias_offset;
    }

    assert(mr->has_ram_addr);

    return qemu_get_ram_ptr(mr->ram_addr);
}
static void memory_region_update_coalesced_range(MemoryRegion *mr)
{
    FlatRange *fr;
    CoalescedMemoryRange *cmr;
    AddrRange tmp;

    FOR_EACH_FLAT_RANGE(fr, &current_memory_map) {
        if (fr->mr == mr) {
            qemu_unregister_coalesced_mmio(fr->addr.start, fr->addr.size);
            QTAILQ_FOREACH(cmr, &mr->coalesced, link) {
                tmp = addrrange_shift(cmr->addr,
                                      fr->addr.start - fr->offset_in_region);
                if (!addrrange_intersects(tmp, fr->addr)) {
                    continue;
                }
                tmp = addrrange_intersection(tmp, fr->addr);
                qemu_register_coalesced_mmio(tmp.start, tmp.size);
            }
        }
    }
}
void memory_region_set_coalescing(MemoryRegion *mr)
{
    memory_region_clear_coalescing(mr);
    memory_region_add_coalescing(mr, 0, mr->size);
}

void memory_region_add_coalescing(MemoryRegion *mr,
                                  target_phys_addr_t offset,
                                  uint64_t size)
{
    CoalescedMemoryRange *cmr = qemu_malloc(sizeof(*cmr));

    cmr->addr = addrrange_make(offset, size);
    QTAILQ_INSERT_TAIL(&mr->coalesced, cmr, link);
    memory_region_update_coalesced_range(mr);
}

void memory_region_clear_coalescing(MemoryRegion *mr)
{
    CoalescedMemoryRange *cmr;

    while (!QTAILQ_EMPTY(&mr->coalesced)) {
        cmr = QTAILQ_FIRST(&mr->coalesced);
        QTAILQ_REMOVE(&mr->coalesced, cmr, link);
        qemu_free(cmr);
    }
    memory_region_update_coalesced_range(mr);
}
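/* Coalescing usage sketch (illustrative region names): a device whose
 * registers tolerate delayed MMIO delivery can coalesce either the whole
 * region or just a sub-range of it:
 *
 *     memory_region_set_coalescing(&s->mmio);
 *     memory_region_add_coalescing(&s->mmio, 0x100, 0x40);
 */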
static void memory_region_add_subregion_common(MemoryRegion *mr,
                                               target_phys_addr_t offset,
                                               MemoryRegion *subregion)
{
    MemoryRegion *other;

    assert(!subregion->parent);
    subregion->parent = mr;
    subregion->addr = offset;
    QTAILQ_FOREACH(other, &mr->subregions, subregions_link) {
        if (subregion->may_overlap || other->may_overlap) {
            continue;
        }
        if (offset >= other->offset + other->size
            || offset + subregion->size <= other->offset) {
            continue;
        }
        printf("warning: subregion collision %llx/%llx vs %llx/%llx\n",
               (unsigned long long)offset,
               (unsigned long long)subregion->size,
               (unsigned long long)other->offset,
               (unsigned long long)other->size);
    }
    QTAILQ_FOREACH(other, &mr->subregions, subregions_link) {
        if (subregion->priority >= other->priority) {
            QTAILQ_INSERT_BEFORE(other, subregion, subregions_link);
            goto done;
        }
    }
    QTAILQ_INSERT_TAIL(&mr->subregions, subregion, subregions_link);
done:
    memory_region_update_topology();
}

void memory_region_add_subregion(MemoryRegion *mr,
                                 target_phys_addr_t offset,
                                 MemoryRegion *subregion)
{
    subregion->may_overlap = false;
    subregion->priority = 0;
    memory_region_add_subregion_common(mr, offset, subregion);
}

void memory_region_add_subregion_overlap(MemoryRegion *mr,
                                         target_phys_addr_t offset,
                                         MemoryRegion *subregion,
                                         unsigned priority)
{
    subregion->may_overlap = true;
    subregion->priority = priority;
    memory_region_add_subregion_common(mr, offset, subregion);
}
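/* Subregion usage sketch (hypothetical regions): children that never overlap
 * use memory_region_add_subregion(); children that may shadow each other must
 * say so and supply a priority, higher values obscuring lower ones when the
 * flat view is rendered:
 *
 *     memory_region_add_subregion(system, 0x08000000, &s->ram);
 *     memory_region_add_subregion_overlap(system, 0x000a0000, &s->vga_window, 1);
 */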
void memory_region_del_subregion(MemoryRegion *mr,
                                 MemoryRegion *subregion)
{
    assert(subregion->parent == mr);
    subregion->parent = NULL;
    QTAILQ_REMOVE(&mr->subregions, subregion, subregions_link);
    memory_region_update_topology();
}