/*
 * Copyright (C) 2011 Citrix Ltd.
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"
#include "qemu/error-report.h"

#include <sys/resource.h>

#include "hw/xen/xen_backend.h"
#include "sysemu/blockdev.h"
#include "qemu/bitmap.h"

#include <xen/hvm/params.h>

#include "sysemu/xen-mapcache.h"
#include "trace.h"

//#define MAPCACHE_DEBUG

#ifdef MAPCACHE_DEBUG
# define DPRINTF(fmt, ...) do { \
    fprintf(stderr, "xen_mapcache: " fmt, ## __VA_ARGS__); \
} while (0)
#else
# define DPRINTF(fmt, ...) do { } while (0)
#endif

#if HOST_LONG_BITS == 32
# define MCACHE_BUCKET_SHIFT 16
# define MCACHE_MAX_SIZE (1UL<<31) /* 2GB Cap */
#else
# define MCACHE_BUCKET_SHIFT 20
# define MCACHE_MAX_SIZE (1UL<<35) /* 32GB Cap */
#endif
#define MCACHE_BUCKET_SIZE (1UL << MCACHE_BUCKET_SHIFT)
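/*
 * With XC_PAGE_SHIFT == 12 (4 KiB pages on x86) this yields 64 KiB
 * buckets of 16 pages on 32-bit hosts and 1 MiB buckets of 256 pages
 * on 64-bit hosts.
 */
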
/* This is the size of the virtual address space reserved for QEMU that will
 * not be used by MapCache.
 * From empirical tests I observed that QEMU uses 75MB more than the
 * max_mcache_size.
 */
#define NON_MCACHE_MEMORY_SIZE (80 * 1024 * 1024)

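/*
 * A MapCacheEntry describes one mapped bucket.  Entries live in a
 * fixed-size array indexed by paddr_index % nr_buckets, with hash
 * collisions chained through 'next'.  'lock' counts the locked
 * mappings handed out for this bucket, and 'valid_mapping' holds one
 * bit per XC_PAGE_SIZE page that was successfully mapped.
 */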
typedef struct MapCacheEntry {
    hwaddr paddr_index;
    uint8_t *vaddr_base;
    unsigned long *valid_mapping;
    uint8_t lock;
#define XEN_MAPCACHE_ENTRY_DUMMY (1 << 0)
    uint8_t flags;
    hwaddr size;
    struct MapCacheEntry *next;
} MapCacheEntry;

typedef struct MapCacheRev {
    uint8_t *vaddr_req;
    hwaddr paddr_index;
    hwaddr size;
    QTAILQ_ENTRY(MapCacheRev) next;
    bool dma;
} MapCacheRev;

typedef struct MapCache {
    MapCacheEntry *entry;
    unsigned long nr_buckets;
    QTAILQ_HEAD(map_cache_head, MapCacheRev) locked_entries;

    /* For most cases (>99.9%), the page address is the same. */
    MapCacheEntry *last_entry;
    unsigned long max_mcache_size;
    unsigned int mcache_bucket_shift;

    phys_offset_to_gaddr_t phys_offset_to_gaddr;
    QemuMutex lock;
    void *opaque;
} MapCache;

static MapCache *mapcache;

static inline void mapcache_lock(void)
{
    qemu_mutex_lock(&mapcache->lock);
}

static inline void mapcache_unlock(void)
{
    qemu_mutex_unlock(&mapcache->lock);
}

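/*
 * Returns 1 iff all 'size' bits starting at bit 'nr' are set in 'addr',
 * i.e. iff find_next_zero_bit() finds no zero bit inside the range.
 */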
static inline int test_bits(int nr, int size, const unsigned long *addr)
{
    unsigned long res = find_next_zero_bit(addr, size + nr, nr);
    if (res >= nr + size)
        return 1;
    else
        return 0;
}

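/*
 * Set up the global mapcache.  The address-space budget is
 * MCACHE_MAX_SIZE when running as root, otherwise whatever fits under
 * RLIMIT_AS minus the NON_MCACHE_MEMORY_SIZE reserve.  As a worked
 * example with the 64-bit defaults above: 32 GiB / 1 MiB per bucket =
 * 32768 slots in the entry array.
 */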
void xen_map_cache_init(phys_offset_to_gaddr_t f, void *opaque)
{
    unsigned long size;
    struct rlimit rlimit_as;

    mapcache = g_malloc0(sizeof (MapCache));

    mapcache->phys_offset_to_gaddr = f;
    mapcache->opaque = opaque;
    qemu_mutex_init(&mapcache->lock);

    QTAILQ_INIT(&mapcache->locked_entries);

    if (geteuid() == 0) {
        rlimit_as.rlim_cur = RLIM_INFINITY;
        rlimit_as.rlim_max = RLIM_INFINITY;
        mapcache->max_mcache_size = MCACHE_MAX_SIZE;
    } else {
        getrlimit(RLIMIT_AS, &rlimit_as);
        rlimit_as.rlim_cur = rlimit_as.rlim_max;

        if (rlimit_as.rlim_max != RLIM_INFINITY) {
            warn_report("QEMU's maximum size of virtual"
                        " memory is not infinity");
        }
        if (rlimit_as.rlim_max < MCACHE_MAX_SIZE + NON_MCACHE_MEMORY_SIZE) {
            mapcache->max_mcache_size = rlimit_as.rlim_max -
                NON_MCACHE_MEMORY_SIZE;
        } else {
            mapcache->max_mcache_size = MCACHE_MAX_SIZE;
        }
    }

    setrlimit(RLIMIT_AS, &rlimit_as);

    mapcache->nr_buckets =
        (((mapcache->max_mcache_size >> XC_PAGE_SHIFT) +
          (1UL << (MCACHE_BUCKET_SHIFT - XC_PAGE_SHIFT)) - 1) >>
         (MCACHE_BUCKET_SHIFT - XC_PAGE_SHIFT));

    size = mapcache->nr_buckets * sizeof (MapCacheEntry);
    size = (size + XC_PAGE_SIZE - 1) & ~(XC_PAGE_SIZE - 1);
    DPRINTF("%s, nr_buckets = %lx size %lu\n", __func__,
            mapcache->nr_buckets, size);
    mapcache->entry = g_malloc0(size);
}

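/*
 * (Re)map one bucket, tearing down any previous mapping of the entry
 * first.  With 'dummy' set, anonymous memory is mapped instead of
 * foreign frames, for cases where a foreign mapping cannot be created
 * yet (see the comment in the else branch below).
 */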
static void xen_remap_bucket(MapCacheEntry *entry,
                             void *vaddr,
                             hwaddr size,
                             hwaddr address_index,
                             bool dummy)
{
    uint8_t *vaddr_base;
    xen_pfn_t *pfns;
    int *err;
    unsigned int i;
    hwaddr nb_pfn = size >> XC_PAGE_SHIFT;

    trace_xen_remap_bucket(address_index);

    pfns = g_malloc0(nb_pfn * sizeof (xen_pfn_t));
    err = g_malloc0(nb_pfn * sizeof (int));

    if (entry->vaddr_base != NULL) {
        if (!(entry->flags & XEN_MAPCACHE_ENTRY_DUMMY)) {
            ram_block_notify_remove(entry->vaddr_base, entry->size);
        }
        if (munmap(entry->vaddr_base, entry->size) != 0) {
            perror("unmap fails");
            exit(-1);
        }
    }
    g_free(entry->valid_mapping);
    entry->valid_mapping = NULL;

    for (i = 0; i < nb_pfn; i++) {
        pfns[i] = (address_index << (MCACHE_BUCKET_SHIFT-XC_PAGE_SHIFT)) + i;
    }

    if (!dummy) {
        vaddr_base = xenforeignmemory_map2(xen_fmem, xen_domid, vaddr,
                                           PROT_READ | PROT_WRITE, 0,
                                           nb_pfn, pfns, err);
        if (vaddr_base == NULL) {
            perror("xenforeignmemory_map2");
            exit(-1);
        }
    } else {
        /*
         * We create dummy mappings where we are unable to create a foreign
         * mapping immediately due to certain circumstances (i.e. on resume now)
         */
        vaddr_base = mmap(vaddr, size, PROT_READ | PROT_WRITE,
                          MAP_ANON | MAP_SHARED, -1, 0);
        if (vaddr_base == MAP_FAILED) {
            perror("mmap");
            exit(-1);
        }
    }

    if (!(entry->flags & XEN_MAPCACHE_ENTRY_DUMMY)) {
        ram_block_notify_add(vaddr_base, size);
    }

    entry->vaddr_base = vaddr_base;
    entry->paddr_index = address_index;
    entry->size = size;
    entry->valid_mapping = (unsigned long *) g_malloc0(sizeof(unsigned long) *
            BITS_TO_LONGS(size >> XC_PAGE_SHIFT));

    if (dummy) {
        entry->flags |= XEN_MAPCACHE_ENTRY_DUMMY;
    } else {
        entry->flags &= ~(XEN_MAPCACHE_ENTRY_DUMMY);
    }

    bitmap_zero(entry->valid_mapping, nb_pfn);
    for (i = 0; i < nb_pfn; i++) {
        if (!err[i]) {
            bitmap_set(entry->valid_mapping, i, 1);
        }
    }

    g_free(pfns);
    g_free(err);
}

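/*
 * Look up, and if necessary create, the mapping for phys_addr.  The
 * bucket chain is searched for a fully valid matching entry; an
 * unlocked entry seen along the way may be recycled, otherwise a new
 * entry is appended to the chain and the bucket is (re)mapped.
 */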
static uint8_t *xen_map_cache_unlocked(hwaddr phys_addr, hwaddr size,
                                       uint8_t lock, bool dma)
{
    MapCacheEntry *entry, *pentry = NULL,
                  *free_entry = NULL, *free_pentry = NULL;
    hwaddr address_index;
    hwaddr address_offset;
    hwaddr cache_size = size;
    hwaddr test_bit_size;
    bool translated G_GNUC_UNUSED = false;
    bool dummy = false;

tryagain:
    address_index = phys_addr >> MCACHE_BUCKET_SHIFT;
    address_offset = phys_addr & (MCACHE_BUCKET_SIZE - 1);

    trace_xen_map_cache(phys_addr);

    /* test_bit_size is always a multiple of XC_PAGE_SIZE */
    if (size) {
        test_bit_size = size + (phys_addr & (XC_PAGE_SIZE - 1));

        if (test_bit_size % XC_PAGE_SIZE) {
            test_bit_size += XC_PAGE_SIZE - (test_bit_size % XC_PAGE_SIZE);
        }
    } else {
        test_bit_size = XC_PAGE_SIZE;
    }

    if (mapcache->last_entry != NULL &&
        mapcache->last_entry->paddr_index == address_index &&
        !lock && !size &&
        test_bits(address_offset >> XC_PAGE_SHIFT,
                  test_bit_size >> XC_PAGE_SHIFT,
                  mapcache->last_entry->valid_mapping)) {
        trace_xen_map_cache_return(mapcache->last_entry->vaddr_base + address_offset);
        return mapcache->last_entry->vaddr_base + address_offset;
    }

    /* size is always a multiple of MCACHE_BUCKET_SIZE */
    if (size) {
        cache_size = size + address_offset;
        if (cache_size % MCACHE_BUCKET_SIZE) {
            cache_size += MCACHE_BUCKET_SIZE - (cache_size % MCACHE_BUCKET_SIZE);
        }
    } else {
        cache_size = MCACHE_BUCKET_SIZE;
    }

    entry = &mapcache->entry[address_index % mapcache->nr_buckets];

    while (entry && (lock || entry->lock) && entry->vaddr_base &&
            (entry->paddr_index != address_index || entry->size != cache_size ||
             !test_bits(address_offset >> XC_PAGE_SHIFT,
                 test_bit_size >> XC_PAGE_SHIFT,
                 entry->valid_mapping))) {
        if (!free_entry && !entry->lock) {
            free_entry = entry;
            free_pentry = pentry;
        }
        pentry = entry;
        entry = entry->next;
    }
    if (!entry && free_entry) {
        entry = free_entry;
        pentry = free_pentry;
    }
    if (!entry) {
        entry = g_malloc0(sizeof (MapCacheEntry));
        pentry->next = entry;
        xen_remap_bucket(entry, NULL, cache_size, address_index, dummy);
    } else if (!entry->lock) {
        if (!entry->vaddr_base || entry->paddr_index != address_index ||
                entry->size != cache_size ||
                !test_bits(address_offset >> XC_PAGE_SHIFT,
                    test_bit_size >> XC_PAGE_SHIFT,
                    entry->valid_mapping)) {
            xen_remap_bucket(entry, NULL, cache_size, address_index, dummy);
        }
    }

    if (!test_bits(address_offset >> XC_PAGE_SHIFT,
                test_bit_size >> XC_PAGE_SHIFT,
                entry->valid_mapping)) {
        mapcache->last_entry = NULL;
#ifdef XEN_COMPAT_PHYSMAP
        if (!translated && mapcache->phys_offset_to_gaddr) {
            phys_addr = mapcache->phys_offset_to_gaddr(phys_addr, size, mapcache->opaque);
            translated = true;
            goto tryagain;
        }
#endif
        if (!dummy && runstate_check(RUN_STATE_INMIGRATE)) {
            dummy = true;
            goto tryagain;
        }
        trace_xen_map_cache_return(NULL);
        return NULL;
    }

    mapcache->last_entry = entry;
    if (lock) {
        MapCacheRev *reventry = g_malloc0(sizeof(MapCacheRev));
        entry->lock++;
        reventry->dma = dma;
        reventry->vaddr_req = mapcache->last_entry->vaddr_base + address_offset;
        reventry->paddr_index = mapcache->last_entry->paddr_index;
        reventry->size = entry->size;
        QTAILQ_INSERT_HEAD(&mapcache->locked_entries, reventry, next);
    }

    trace_xen_map_cache_return(mapcache->last_entry->vaddr_base + address_offset);
    return mapcache->last_entry->vaddr_base + address_offset;
}

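/*
 * Thread-safe wrapper around xen_map_cache_unlocked().  A non-zero
 * 'lock' pins the mapping until a matching
 * xen_invalidate_map_cache_entry() call; 'dma' only tags the reverse
 * map entry so that still-locked DMA mappings can be reported when the
 * cache is invalidated.  A sketch of the expected usage:
 *
 *     uint8_t *p = xen_map_cache(gpa, len, 1, true);
 *     ... access p[0..len-1] ...
 *     xen_invalidate_map_cache_entry(p);
 */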
uint8_t *xen_map_cache(hwaddr phys_addr, hwaddr size,
                       uint8_t lock, bool dma)
{
    uint8_t *p;

    mapcache_lock();
    p = xen_map_cache_unlocked(phys_addr, size, lock, dma);
    mapcache_unlock();
    return p;
}

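/*
 * Translate a pointer obtained from a locked xen_map_cache() call back
 * to its guest physical address via the reverse map of locked entries.
 * Aborts if the pointer is not found there.
 */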
ram_addr_t xen_ram_addr_from_mapcache(void *ptr)
{
    MapCacheEntry *entry = NULL;
    MapCacheRev *reventry;
    hwaddr paddr_index;
    hwaddr size;
    ram_addr_t raddr;
    int found = 0;

    mapcache_lock();
    QTAILQ_FOREACH(reventry, &mapcache->locked_entries, next) {
        if (reventry->vaddr_req == ptr) {
            paddr_index = reventry->paddr_index;
            size = reventry->size;
            found = 1;
            break;
        }
    }
    if (!found) {
        fprintf(stderr, "%s, could not find %p\n", __func__, ptr);
        QTAILQ_FOREACH(reventry, &mapcache->locked_entries, next) {
            DPRINTF("   "TARGET_FMT_plx" -> %p is present\n", reventry->paddr_index,
                    reventry->vaddr_req);
        }
        abort();
        return 0;
    }

    entry = &mapcache->entry[paddr_index % mapcache->nr_buckets];
    while (entry && (entry->paddr_index != paddr_index || entry->size != size)) {
        entry = entry->next;
    }
    if (!entry) {
        DPRINTF("Trying to find address %p that is not in the mapcache!\n", ptr);
        raddr = 0;
    } else {
        raddr = (reventry->paddr_index << MCACHE_BUCKET_SHIFT) +
             ((unsigned long) ptr - (unsigned long) entry->vaddr_base);
    }
    mapcache_unlock();
    return raddr;
}

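/*
 * Drop one lock reference for the mapping behind 'buffer'.  The bucket
 * is only unmapped once its lock count reaches zero, and never when it
 * is the first entry of its chain (pentry == NULL), since that one is
 * owned by the entry array itself.
 */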
static void xen_invalidate_map_cache_entry_unlocked(uint8_t *buffer)
{
    MapCacheEntry *entry = NULL, *pentry = NULL;
    MapCacheRev *reventry;
    hwaddr paddr_index;
    hwaddr size;
    int found = 0;

    QTAILQ_FOREACH(reventry, &mapcache->locked_entries, next) {
        if (reventry->vaddr_req == buffer) {
            paddr_index = reventry->paddr_index;
            size = reventry->size;
            found = 1;
            break;
        }
    }
    if (!found) {
        DPRINTF("%s, could not find %p\n", __func__, buffer);
        QTAILQ_FOREACH(reventry, &mapcache->locked_entries, next) {
            DPRINTF("   "TARGET_FMT_plx" -> %p is present\n", reventry->paddr_index, reventry->vaddr_req);
        }
        return;
    }
    QTAILQ_REMOVE(&mapcache->locked_entries, reventry, next);
    g_free(reventry);

    if (mapcache->last_entry != NULL &&
        mapcache->last_entry->paddr_index == paddr_index) {
        mapcache->last_entry = NULL;
    }

    entry = &mapcache->entry[paddr_index % mapcache->nr_buckets];
    while (entry && (entry->paddr_index != paddr_index || entry->size != size)) {
        pentry = entry;
        entry = entry->next;
    }
    if (!entry) {
        DPRINTF("Trying to unmap address %p that is not in the mapcache!\n", buffer);
        return;
    }
    entry->lock--;
    if (entry->lock > 0 || pentry == NULL) {
        return;
    }

    pentry->next = entry->next;
    ram_block_notify_remove(entry->vaddr_base, entry->size);
    if (munmap(entry->vaddr_base, entry->size) != 0) {
        perror("unmap fails");
        exit(-1);
    }
    g_free(entry->valid_mapping);
    g_free(entry);
}

void xen_invalidate_map_cache_entry(uint8_t *buffer)
{
    mapcache_lock();
    xen_invalidate_map_cache_entry_unlocked(buffer);
    mapcache_unlock();
}

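/*
 * Unmap every bucket that is not currently locked.  Presumably called
 * when the guest's physmap changes, e.g. around migration; locked DMA
 * mappings that would go stale are reported on stderr but left alone.
 */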
void xen_invalidate_map_cache(void)
{
    unsigned long i;
    MapCacheRev *reventry;

    /* Flush pending AIO before destroying the mapcache */
    bdrv_drain_all();

    mapcache_lock();

    QTAILQ_FOREACH(reventry, &mapcache->locked_entries, next) {
        if (!reventry->dma) {
            continue;
        }
        fprintf(stderr, "Locked DMA mapping while invalidating mapcache!"
                " "TARGET_FMT_plx" -> %p is present\n",
                reventry->paddr_index, reventry->vaddr_req);
    }

    for (i = 0; i < mapcache->nr_buckets; i++) {
        MapCacheEntry *entry = &mapcache->entry[i];

        if (entry->vaddr_base == NULL) {
            continue;
        }
        if (entry->lock > 0) {
            continue;
        }

        if (munmap(entry->vaddr_base, entry->size) != 0) {
            perror("unmap fails");
            exit(-1);
        }

        entry->paddr_index = 0;
        entry->vaddr_base = NULL;
        entry->size = 0;
        g_free(entry->valid_mapping);
        entry->valid_mapping = NULL;
    }

    mapcache->last_entry = NULL;

    mapcache_unlock();
}

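/*
 * Remap an existing entry, typically a dummy one created while the
 * real frames were unavailable, so that it maps the frames behind
 * new_phys_addr at the entry's existing virtual address.
 */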
static uint8_t *xen_replace_cache_entry_unlocked(hwaddr old_phys_addr,
                                                 hwaddr new_phys_addr,
                                                 hwaddr size)
{
    MapCacheEntry *entry;
    hwaddr address_index, address_offset;
    hwaddr test_bit_size, cache_size = size;

    address_index = old_phys_addr >> MCACHE_BUCKET_SHIFT;
    address_offset = old_phys_addr & (MCACHE_BUCKET_SIZE - 1);

    assert(size);
    /* test_bit_size is always a multiple of XC_PAGE_SIZE */
    test_bit_size = size + (old_phys_addr & (XC_PAGE_SIZE - 1));
    if (test_bit_size % XC_PAGE_SIZE) {
        test_bit_size += XC_PAGE_SIZE - (test_bit_size % XC_PAGE_SIZE);
    }
    cache_size = size + address_offset;
    if (cache_size % MCACHE_BUCKET_SIZE) {
        cache_size += MCACHE_BUCKET_SIZE - (cache_size % MCACHE_BUCKET_SIZE);
    }

    entry = &mapcache->entry[address_index % mapcache->nr_buckets];
    while (entry && !(entry->paddr_index == address_index &&
                      entry->size == cache_size)) {
        entry = entry->next;
    }
    if (!entry) {
        DPRINTF("Trying to update an entry for "TARGET_FMT_plx
                " that is not in the mapcache!\n", old_phys_addr);
        return NULL;
    }

    address_index = new_phys_addr >> MCACHE_BUCKET_SHIFT;
    address_offset = new_phys_addr & (MCACHE_BUCKET_SIZE - 1);

    fprintf(stderr, "Replacing a dummy mapcache entry for "TARGET_FMT_plx
            " with "TARGET_FMT_plx"\n", old_phys_addr, new_phys_addr);

    xen_remap_bucket(entry, entry->vaddr_base,
                     cache_size, address_index, false);
    if (!test_bits(address_offset >> XC_PAGE_SHIFT,
                test_bit_size >> XC_PAGE_SHIFT,
                entry->valid_mapping)) {
        DPRINTF("Unable to update a mapcache entry for "TARGET_FMT_plx"!\n",
                old_phys_addr);
        return NULL;
    }

    return entry->vaddr_base + address_offset;
}

uint8_t *xen_replace_cache_entry(hwaddr old_phys_addr,
                                 hwaddr new_phys_addr,
                                 hwaddr size)
{
    uint8_t *p;

    mapcache_lock();
    p = xen_replace_cache_entry_unlocked(old_phys_addr, new_phys_addr, size);
    mapcache_unlock();
    return p;
}